blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
---|---|---|---|---|---|---|---|---|---|---|---|
691c82419b8a163915a77906ae4ef46481a1aa35
|
Shell
|
arakaki-tokyo/logoca
|
/container_run.sh
|
UTF-8
| 167 | 2.875 | 3 |
[] |
no_license
|
#!/bin/bash
# Launch an nginx container serving ./src, named after $1 (default: a1).
#
# Usage: container_run.sh [container-name]

# ${1:-a1} falls back to "a1" when no argument is given.  This replaces
# the fragile unquoted `[ ! $1 ]` test, which misbehaves when $1
# contains spaces or looks like a test operator.
NAME=${1:-a1}

# Quote "$NAME" and "$PWD" so names/paths with spaces survive intact.
docker run -dit --name "$NAME" \
--restart=always \
-v "$PWD/src":/usr/share/nginx/html \
nginx:stable-alpine
| true |
22b7bdd6a59d1157c1592691ed506f88357f133a
|
Shell
|
micgro42/dotfiles
|
/zsh/.shell_aliases
|
UTF-8
| 3,322 | 3.203125 | 3 |
[] |
no_license
|
#!/usr/bin/env zsh
# Shared interactive-shell aliases (sourced from the zsh setup).
# Prefer lsd (modern ls replacement) when it is installed.
if command -v lsd &> /dev/null
then
alias ls=lsd
else
alias ls='ls --color=tty'
fi
# Interactive/verbose variants of the destructive file commands.
alias rm='rm -iv'
alias mv='mv -iv'
alias cp='cp -iv'
alias ln='ln -i'
alias ll='ls -lhaF'
alias lt='ls -lhatr'
alias l.='ls -d .*'
alias mkdir='mkdir -pv'
alias wget='wget -Nc'
alias cal="cal -mw"
alias grep='grep --color=auto'
alias xosview='xosview -xrm "xosview*cpuFormat: all" -xrm "xosview*netBandwidth: 786432"'
alias qtiplot='qtiplot -l=en'
alias gnucash="LANG=de_DE.utf8; LANGUAGE=de_DE.utf8; gnucash"
# see: https://www.digitalocean.com/community/tutorials/an-introduction-to-useful-bash-aliases-and-functions
# df - report file system disk space usage
alias df="df -Tha --total" # print total, show fs-type,
# du - estimate file space usage
# alias du="du -ach | sort -h"
alias duh="du -d1 -h | sort -h"
# Display amount of free and used memory in the system
alias free="free -th" #human readable and totals
# ps - report a snapshot of the current processes
# search table for process, psg expects 1 argument
alias psg="ps aux | grep -v grep | grep -i -e VSZ -e"
# some things from http://www.cyberciti.biz/tips/bash-aliases-mac-centos-linux-unix.html and comments
## get top process eating memory
alias psmem='ps auxf | head -n 1 && ps auxf | sort -nr -k 4'
alias psmem10='ps auxf | head -n 1 && ps auxf | sort -nr -k 4 | head -10'
## get top process eating cpu ##
alias pscpu='ps auxf | head -n 1 && ps auxf | sort -nr -k 3'
alias pscpu10='ps auxf | head -n 1 && ps auxf | sort -nr -k 3 | head -10'
## Get server cpu info ##
alias cpuinfo='lscpu'
## older system use /proc/cpuinfo ##
##alias cpuinfo='less /proc/cpuinfo' ##
## get GPU ram on desktop / laptop##
alias gpumeminfo='grep -i --color memory /var/log/Xorg.0.log'
# git via a non-standard SSH port.
alias gitkira='GIT_SSH_COMMAND="ssh -p 36000" git'
# progress bar on file copy; useful even locally.
alias cpProgress="rsync --progress -ravz"
# -a archive: recursive, etc
# -v be more verbose
# -z use compression
# -P progress bar and keeping partial files
# --append-verify do a checksum after finishing copying files
alias dircopy="rsync -avzP --append-verify"
alias watchtail='watch -n .5 tail -n 20'
alias watchdir='watch -n .5 ls -la'
alias watchsize='watch -n .5 du -h --max-depth=1'
#alias which='alias | which --tty-only --read-alias --show-dot --show-tilde'
alias phpcsPSR2='phpcs --standard=PSR2 --ignore="*/node_modules/*,*/vendor*/" --extensions=php --colors -p .'
# The following part is from http://www.splitbrain.org/blog/2008-02/27-keeping_your_home_directory_organized :
# Today's dated scratch directory; used by the td() helper below.
export TD="$HOME/temp/`date +'%Y-%m-%d'`"
# Jump to (and create) a dated scratch directory under ~/temp.
# Usage: td [N]  -- N is an offset in days from today (GNU `date -d`).
# With no argument, uses $TD (today) and refreshes the ~/temp/00-today
# symlink.  From splitbrain.org's "keeping your home directory organized".
td(){
    # `local` replaces the original's global td + trailing `unset td`.
    local dir=$TD
    if [ -n "$1" ]; then
        dir="$HOME/temp/$(date -d "$1 days" +'%Y-%m-%d')"
    else
        # Keep a stable "today" pointer (-T: treat destination as a file).
        ln -s -f -T "$dir" "$HOME/temp/00-today"
    fi
    # Quote the paths: $HOME may contain spaces.
    mkdir -p "$dir" && cd "$dir"
}
# Lock the screen showing the number of failed login attempts (-f)
# without forking (-n), so the next shell prompt indicates how long it
# was running.
# Fix: the alias value must be quoted -- `alias i3lock=i3lock -nf` only
# aliased i3lock to itself and passed "-nf" to the alias builtin.
alias i3lock='i3lock -nf'
# the next chunk of code is from http://notes.splitbrain.org/bashrc
# color in man pages http://icanhaz.com/colors
export LESS_TERMCAP_mb=$'\E[01;31m'
export LESS_TERMCAP_md=$'\E[01;31m'
export LESS_TERMCAP_me=$'\E[0m'
export LESS_TERMCAP_se=$'\E[0m'
export LESS_TERMCAP_so=$'\E[01;44;33m'
export LESS_TERMCAP_ue=$'\E[0m'
export LESS_TERMCAP_us=$'\E[01;32m'
| true |
8f2e645f012cd4656f04164f96813620e16c0c4d
|
Shell
|
zhou199604/nubia
|
/cxll.sh
|
UTF-8
| 569 | 2.546875 | 3 |
[] |
no_license
|
# Install a cron job that shuts down ShadowsocksR once total traffic
# (sum of the MB figures on report lines 2-4) exceeds 1024 GB.
# The heredoc delimiter is quoted ('GOST') so the watcher script is
# written literally, without expansion here.
cat > /root/cxll <<'GOST'
# Dump current flow stats, then pull the MB numbers from lines 2-4.
python /usr/local/SSR-Bash-Python/show_flow.py > /root/cx.txt
a=$(sed -n '2p' /root/cx.txt | grep -Eo '[0-9]+MB' | grep -Eo '[0-9]+')
b=$(sed -n '3p' /root/cx.txt | grep -Eo '[0-9]+MB' | grep -Eo '[0-9]+')
c=$(sed -n '4p' /root/cx.txt | grep -Eo '[0-9]+MB' | grep -Eo '[0-9]+')
# Total MB -> GB.  POSIX $(( )) replaces `let`/`expr`: cron runs this
# file with plain `sh`, where `let` is not available.
b=$(( (a + b + c) / 1024 ))
if [ "$b" -gt "1024" ] ; then
bash /usr/local/shadowsocksr/stop.sh
fi
# Ensure the log directory exists *before* writing (the original wrote
# first and only created the directory after the write had failed).
mkdir -p /accept/f/
echo $b > /accept/f/ll.log
GOST
chmod +x /root/cxll
# Register the watcher to run every 10 minutes.
echo '*/10 * * * * sh /root/cxll' > cx.sh
crontab cx.sh
| true |
46ac7aee3dab998140176065bcff0373ef1b533a
|
Shell
|
Ketrel/scripts-and-scriptlets
|
/dotfiles/.alias
|
UTF-8
| 419 | 2.78125 | 3 |
[] |
no_license
|
#!/bin/sh
# Shared shell aliases: navigation, apt shortcuts, git helpers.

# Aliases for navigation sharing: jump to the directory whose path is
# stored in ~/pwd-working / ~/pwd-share (written by other tooling).
alias cdw='cd $(cat "${HOME}/pwd-working")'
alias cds='cd $(cat "${HOME}/pwd-share")'
# Aliases for apt
alias sau='sudo apt update'
alias saug='sudo apt upgrade'
alias sai='sudo apt install'
alias ali='apt list --installed'
alias alu='apt list --upgradable'
# Alias for options
alias diff='colordiff'
# Git shorthands: push to every configured remote.
# -I{} replaces the deprecated -i{}; GNU xargs also warned that -L and
# -i are mutually exclusive, so the redundant -L1 is dropped (-I already
# runs one invocation per input line).
alias gitpa='git remote | xargs -I{} git push {}'
| true |
2884a0e2c3a8dd673a9ce4dbec4057fc8d83da67
|
Shell
|
statdivlab/rre_sims
|
/sdl-rre/scripts/test_grid.sh
|
UTF-8
| 216 | 2.59375 | 3 |
[] |
no_license
|
#!/bin/sh
# Pull one parameter row from the grid file and show the simulator
# command that would consume it.

# index the lines (originally parameterized by $LINE):
# PARAMS=$(sed -n ${LINE}p nb_biglambda_grid.txt)
PARAMS=$(awk 'NR==5' nb_biglambda_grid.txt)
# run the simulator script
echo "Rscript nb-03-biglambdagrid-input-c-r-nsim-ncores ${PARAMS}"
| true |
cf3e87d6ba2fc19b7579c947062c05162175862f
|
Shell
|
b1glord/ispconfig_setup_extra
|
/centos7/hhvm/askquestions.sh
|
UTF-8
| 297 | 2.953125 | 3 |
[] |
no_license
|
# Ask (once) whether HHVM (Hip Hop Virtual Machine) should be installed
# as the PHP engine.  CFG_HHVM, RE and WT_BACKTITLE are provided by the
# calling installer; the prompt is skipped when CFG_HHVM already
# matches $RE (i.e. was answered earlier or preseeded).
if [[ ! "$CFG_HHVM" =~ $RE ]]; then
# whiptail prints the *selected radiolist tag* on the fd swapped by
# 3>&1 1>&2 2>&3.  The original tested whiptail's exit status, which
# with --nocancel is 0 regardless of the selection, so CFG_HHVM was
# always set to "yes".  Capture and compare the selection instead.
CFG_HHVM=$(whiptail --title "HHVM" --backtitle "$WT_BACKTITLE" --nocancel --radiolist "Do you want to install HHVM (Hip Hop Virtual Machine) as PHP engine?" 10 50 2 "no" "(default)" ON "yes" "" OFF 3>&1 1>&2 2>&3)
if [ "$CFG_HHVM" != "yes" ]; then
CFG_HHVM=no
fi
fi
| true |
e7e68911596fe8940afb103fe2242f9eb7e441a5
|
Shell
|
carlosjulioperez/bash
|
/launch-sqldeveloper.sh
|
UTF-8
| 862 | 3.6875 | 4 |
[] |
no_license
|
#!/bin/bash
# Launch Oracle SQL Developer, then patch its X11 WM_CLASS so the
# window groups with the desktop launcher icon.
#
# Usage: launch-sqldeveloper.sh [sqldeveloper args...]

#JAVA_HOME=/usr/lib/jvm/java-8-oracle
JAVA_HOME=/opt/java-jdk/jdk1.8.0_161
SQLD_HOME=/usr/share/sqldeveloper
# Launch Oracle's startup script asynchronously, forwarding our
# arguments.  "$@" (not $*) preserves arguments containing spaces.
env JAVA_HOME="$JAVA_HOME" "$SQLD_HOME/sqldeveloper.sh" "$@" &
# Poll for up to 20 seconds until the main window appears.
i=0
while [ "$i" -lt 20 ]
do
# Try to get SQL Developer window ID
WIN_ID=$(xwininfo -root -tree \
| grep -i 'oracle sql developer' \
| grep -oP '(0x[a-f0-9]+)')
# If it is non-empty (window already exists)
if [ -n "$WIN_ID" ]
then
echo "WIN_ID=$WIN_ID"
# Set WM_CLASS property of main window to same value
# that is used for the launcher window
xprop -id "$WIN_ID" \
-f WM_CLASS 8s \
-set WM_CLASS "oracle-ide-osgi-boot-OracleIdeLauncher"
# and exit loop
break
else
# Otherwise sleep for one second and increment loop counter
echo "Sleeping: $i"
sleep 1s
# $(( )) replaces the obsolete $[ ] arithmetic syntax.
i=$((i + 1))
fi
done
echo "Done"
| true |
d1fb04d8a702b66a93baa9962c2f62c27851db0e
|
Shell
|
pronvis/stm32_drawing_robot
|
/build_and_load.sh
|
UTF-8
| 960 | 3.65625 | 4 |
[] |
no_license
|
#!/bin/bash
# Build the firmware (optionally a named example) with cargo, convert
# the resulting ELF to a raw binary, and flash it at 0x08000000 via
# st-flash.  Requires $CARGO_TARGET_DIR.
#
# Usage:
#   build_and_load.sh <bin-file>                   build the main binary
#   build_and_load.sh -e|--example <name> <bin>    build a named example

arguments_count=$#
cargo_addition=""
if [ "$1" == '-e' ] || [ "$1" == '--example' ]; then
    if [ "$arguments_count" != 3 ]; then
        echo "should be 3 arguments, but you provide only" "$arguments_count"
        exit 1
    fi
    example_name=$2
    cargo_addition="--example $example_name"
    elf_file="$CARGO_TARGET_DIR/thumbv7m-none-eabi/release/examples/$example_name"
    bin_filepath=$3
else
    if [ "$arguments_count" != 1 ]; then
        echo "should be 1 argument, but you provide only" "$arguments_count"
        exit 1
    fi
    elf_file="$CARGO_TARGET_DIR/thumbv7m-none-eabi/release/bare-metal"
    bin_filepath=$1
fi

echo "==="
echo "building file '$elf_file' to '$bin_filepath' and uploading it to device"
echo "==="

# $cargo_addition is intentionally unquoted so "--example NAME" splits
# into two arguments.
if ! cargo build --release $cargo_addition; then
    echo "building failed"
    # Was a bare `exit`, which propagated the *echo's* status (0) and
    # made failed builds look successful to callers.
    exit 1
fi

if ! arm-none-eabi-objcopy -O binary "$elf_file" "$bin_filepath"; then
    echo "binary file creation failed"
    exit 1
fi

st-flash write "$bin_filepath" 0x08000000
| true |
f1e59d1765e63b2e5ade282588d0c7c7e0718b78
|
Shell
|
johncobb/avr_328p_blinky
|
/setup.sh
|
UTF-8
| 398 | 3.015625 | 3 |
[] |
no_license
|
#!/bin/bash
# Install the AVR toolchain (gcc-avr, avr-libc, avrdude) for the
# detected host OS.  Unrecognized platforms are silently skipped,
# matching the original if/elif chain with no else branch.
case "$OSTYPE" in
    linux-gnu*)
        echo "Linux detected."
        sudo apt install gcc-avr avr-libc avrdude
        ;;
    darwin*)
        echo "MacOS detected."
        echo "Installing osx-cross/avr..."
        brew tap osx-cross/avr
        brew install avr-gcc
        echo "Installing avrdude..."
        #brew install avrdude --with-usb
        brew install avrdude
        echo "Install complete."
        ;;
esac
| true |
b25b9ec3b2fa7581f7fe1b2b1e202c1f95c91ba8
|
Shell
|
openbsd/src
|
/regress/sys/ffs/tests/rmdir/12.t
|
UTF-8
| 384 | 2.828125 | 3 |
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
# $FreeBSD: src/tools/regression/fstest/tests/rmdir/12.t,v 1.1 2007/01/17 01:42:11 pjd Exp $
# fstest regression case: asserts that rmdir(2) fails with EINVAL when
# the last path component is "." or "..".
desc="rmdir returns EINVAL if the last component of the path is '.' or '..'"
# namegen and expect are helpers supplied by the fstest harness that
# sources this file: namegen yields a fresh unused name; expect runs
# the syscall wrapper and checks the result against its first argument.
n0=`namegen`
n1=`namegen`
expect 0 mkdir ${n0} 0755
expect 0 mkdir ${n0}/${n1} 0755
expect EINVAL rmdir ${n0}/${n1}/.
expect EINVAL rmdir ${n0}/${n1}/..
# Clean up: both directories must still be removable afterwards.
expect 0 rmdir ${n0}/${n1}
expect 0 rmdir ${n0}
| true |
0249cc65375f0beaedd55d4dd0431099d6090133
|
Shell
|
sunminghong/autoinstall-mongodb-shard-repsets
|
/start/initmongos.sh
|
UTF-8
| 629 | 2.640625 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Register three shard replica sets with a mongos router by generating
# a JS snippet and feeding it to the mongo shell.
# set.sh is expected to define the host/port variables used below
# (mongos_port, host1..3, shard1..3_port) -- TODO confirm it also
# defines confjs and mongo_cmd, whose local assignments are commented out.
source set.sh
#confjs="${mongosdir}/conf.js"
#mongo_cmd="docker exec -it ${mongosname} mongo "
mongos_host=127.0.0.1
echo "
db = connect('${mongos_host}:${mongos_port}/admin');
db.getSiblingDB('admin');
sh.addShard('shard1/${host1}:${shard1_port},${host2}:${shard1_port},${host3}:${shard1_port}')
sh.addShard('shard2/${host1}:${shard2_port},${host2}:${shard2_port},${host3}:${shard2_port}')
sh.addShard('shard3/${host1}:${shard3_port},${host2}:${shard3_port},${host3}:${shard3_port}')
db.printShardingStatus()
" > $confjs
echo 'To add shards for init mongos'
cat $confjs
# NOTE(review): $con_confjs is never assigned in this file -- possibly
# a typo for $confjs, or defined by set.sh; verify before relying on it.
$mongo_cmd --nodb $con_confjs
rm $confjs
| true |
10b1a6ad5086232e05bccac702906b18aa3e84c6
|
Shell
|
steven-sortable/git-ls
|
/git-ls
|
UTF-8
| 2,208 | 4.21875 | 4 |
[] |
no_license
|
#!/bin/bash
# Print the command-line synopsis.
usage() {
	printf '%s\n' 'Usage: git ls [-h | --help] [-b | --batch | -c | --color] [-a | --all] [[--] directory]'
}
# Option state: show dotfiles? colorize output? pad/format columns?
dotfiles=0
colors=1
formatting=1

# Parse flags until the first non-option or "--".
while :
do
	case "$1" in
	--help|-h)
		usage
		exit 0
		;;
	# Fix: usage advertises --all, but only -all was accepted; keep
	# -all for backward compatibility.
	-a|-A|-all|--all)
		shift
		dotfiles=1
		;;
	-c|-C|--color)
		shift
		colors=0
		;;
	-b|-B|--batch)
		shift
		formatting=0
		colors=0
		;;
	--)
		shift
		break
		;;
	*)
		break
		;;
	esac
done

# Optional positional argument: directory to list.
if [ "$#" -ne 0 ]
then
	if [ ! -d "$1" ]
	then
		usage
		exit 1
	fi
	cd "$1"
fi

# Must run inside a git work tree; re-run the failing command visibly
# so the user sees git's own error message.
if ! git rev-parse --is-inside-work-tree > /dev/null 2>&1
then
	git rev-parse --is-inside-work-tree
	exit 1
fi

if [ "$colors" -eq 1 ]
then
	cStaged=$'\e[0;32m'
	cUntracked=$'\e[0;31m'
	cIgnored=$'\e[0;37m'
	cHash=$'\e[0;33m'
	cReset=$'\e[0m'
fi

# Width of the longest filename, for column alignment.
if [ "$formatting" -eq 1 ]
then
	file_length="$(ls | wc -L)"
	if [ "$dotfiles" -eq 1 ]
	then
		file_length="$(ls -A | wc -L)"
	fi
fi

if [ "$dotfiles" -eq 1 ]
then
	shopt -s dotglob
fi

committed="$(git ls-tree --name-only HEAD)"

for file in *
do
	if [ "$file" = ".git" ]
	then
		continue
	fi
	# First column: the filename.
	if [ "$formatting" -eq 0 ]
	then
		echo -ne "$file\t"
	else
		printf "%-${file_length}s\t" "$file"
	fi
	if echo "$committed" | grep -Fxq "$file"
	then
		# Tracked: relative date, hash and subject of the last commit.
		log="$(git log -1 --pretty="format:%cr"$'\t'"%H"$'\t'"%s" "$file")"
		if [ "$formatting" -eq 0 ]
		then
			echo "$log"
		else
			# Fix: the format string has three conversions, so pass
			# l[1]..l[3].  The original passed a never-set awk variable
			# `file` first, shifting every column by one (empty date,
			# date shown as the hash, hash as the subject).
			awk -v gitlog="$log" 'BEGIN { split(gitlog,l,"\t"); printf "%-8.15s\t'"$cHash"'%.7s'"$cReset"'\t%s\n", l[1], l[2], l[3] }'
		fi
	else
		# Untracked: classify via git status / check-ignore.
		status="$(git status --porcelain "$file")"
		if git check-ignore "$file" > /dev/null
		then
			echo -e "${cIgnored}Gitignored$cReset"
		elif [[ "$status" == "??"* ]]
		then
			echo -e "${cUntracked}Untracked$cReset"
		elif [[ "$status" == "A"* ]]
		then
			echo -e "${cStaged}Staged$cReset"
		elif [ -z "$status" ]
		then
			echo "Empty"
		fi
	fi
done
| true |
3a1ed9dbc4b20cbbf9b9c2af9aad01c7177728de
|
Shell
|
andrekulpin/meta5
|
/mocha.sh
|
UTF-8
| 135 | 2.875 | 3 |
[] |
no_license
|
#!/bin/bash
# Run the mocha suite: all of ./tests by default, or only the path
# given as $1.
export NODE_PATH=$NODE_PATH:.
# Quote "$1": the original unquoted `[ -z $1 ]` breaks (too many
# arguments) when the path contains spaces.
if [ -z "$1" ];
then mocha --recursive --bail tests;
else
mocha --recursive --bail "$1";
fi;
| true |
6755d0f29579311926124000f3d1eb51608118d1
|
Shell
|
andreaswachowski/dotfiles
|
/.config/bash/bashrc.host.macbook2021
|
UTF-8
| 2,415 | 3.03125 | 3 |
[] |
no_license
|
# vi: filetype=sh
# bashrc might be sourced for non-interactive shells, in particular scp,
# see http://lists.gnu.org/archive/html/bug-bash/2012-06/msg00028.html
# scp breaks when it encounters output, so all echos etc. must be avoided,
# and explicitly only happen when in interactive mode
#
# I also put aliases and anything else not needed for non-interactive
# shells here:
# $- contains "i" only in interactive shells; everything below is
# skipped for scp/non-interactive invocations.
case "$-" in *i*)
alias less="less -R"
alias a2ps="a2ps --medium=a4"
alias urxvt="urxvt --perl-lib $HOME/bin -pe tabbed -geometry 120x40+0+0"
alias start_mysql="sudo /Library/StartupItems/MySQLCOM/MySQLCOM start"
alias stop_mysql="sudo /Library/StartupItems/MySQLCOM/MySQLCOM stop"
alias cdkanban="cd ~/Documents/qa-kanban"
alias cdrails="cd ~/Documents/rails-app"
alias blender=/Applications/blender.app/Contents/MacOS/blender
#- Helpify settings (retained for reference, currently disabled) ----------
# alias be="bundle exec"
# #alias bec="bundle exec cucumber --require features"
# alias server="be rails server"
# #alias solr="be rake sunspot:solr:run"
# alias solr="/usr/local/bin/solr /Users/andreas/Documents/Startup/helpify/solr/"
# alias tst="COVERAGE=true bec;COVERAGE=true rspec"
#
# echo mailcatcher running?
#- End Helpify settings ---------------------------------------------------
#- Rails settings (disabled iTerm2 tab-naming hooks) -----------------------
# Nifty tab naming for iTerm 2 and Rails
# See http://superuser.com/questions/175799/does-bash-have-a-hook-that-is-run-before-executing-a-command
# source /usr/local/Cellar/git/2.4.3/etc/bash_completion.d/git-prompt.sh
#Clear out PROMPT_COMMAND to avoid conflicts
#export PROMPT_COMMAND=
#export PS1="\h:\w\$ "
#source ~/bin/preexec.bash
#
#preexec () {
#    local cmd="$*";
#    echo "$cmd"
#    case $cmd in
#        "bundle exec rails server"|server)
#            echo -e "\033];Server\007"
#            ;;
#        "bundle exec rake sunspot:solr:run"|solr|/usr/local/bin/solr)
#            echo -e "\033];Solr\007"
#            ;;
#    esac
#}
#
#precmd () {
#    echo -e "\033];$(__git_ps1 " [%s]")\007"
#}
#
#preexec_install
#- End Rails settings -----------------------------------------------------
# - Enable crontab editing on Mac OS X ------------------------------------
# http://superuser.com/questions/359580/error-adding-cronjobs-in-mac-os-x-lion
alias crontab="VIM_CRONTAB=true crontab"
;;
esac
| true |
537e99f22a0ff3e987994f72e273baf7d6001492
|
Shell
|
liuhb86/leetcode
|
/shell/194 Transpose File.sh
|
UTF-8
| 299 | 2.828125 | 3 |
[] |
no_license
|
# copied
# Read from the file file.txt and print its transposed content to stdout.
# The awk program buffers every field into array[row][col] while
# reading, then in END emits column c of each row as output line c;
# the last row of each output line is printed without a trailing space.
# NOTE(review): array[NR][i] uses true multidimensional arrays, a
# gawk 4+ extension -- confirm gawk is the awk on the target system.
awk '{for(i=1;i<=NF;i++){array[NR][i]=$i;}} END{for (col=1; col <=NF; col++) {for (row=1; row <= NR; row++) {if (row==NR){printf("%s", array[row][col]);}else {printf ("%s ",array[row][col])}}printf "\n";}}' file.txt
| true |
79358b79105386ca0c7cea7ba6030b9c74767776
|
Shell
|
praveen4g0/plumbing-psi
|
/post-install.sh
|
UTF-8
| 2,198 | 3.546875 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Post-install wiring for an OpenShift-on-OpenStack cluster:
#  * allocate a floating IP for the cluster's ingress port,
#  * publish *.apps.<cluster>.<domain> as an A record in Route53,
#  * log in as kubeadmin and configure OAuth.
#
# Usage: post-install.sh <cluster-name>

export AWS_PROFILE=${AWS_PROFILE:-"aws-pipelines"}
export OS_CLOUD=${OS_CLOUD:-"psi-pipelines"}

CLUSTER_NAME=$1
# Fix: validate the argument *before* it is used -- the original ran
# the jq path lookups below with an empty $CLUSTER_NAME first.
if [ -z "$CLUSTER_NAME" ]; then
echo -e "Specify desired cluster name as a parameter of this script \n"
echo "Usage:"
echo " $0 [name]"
exit 1
fi

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd)"
# Base DNS domain and the external OpenStack network of the cluster,
# read from the installer's state files.
DOMAIN=$(jq -r '.base_domain' "cluster/$CLUSTER_NAME/terraform.tfvars.json")
NETWORK=$(jq -r '."*installconfig.InstallConfig".config.platform.openstack.externalNetwork' "cluster/$CLUSTER_NAME/.openshift_install_state.json")

echo "Allocating a floating IP for cluster's ingress"
INGRESS_PORT=$(openstack port list -f value -c Name | grep "$CLUSTER_NAME-" | grep ingress-port)
FIP=$(openstack floating ip create --description "$CLUSTER_NAME-ingress" -f value -c floating_ip_address --port "$INGRESS_PORT" "$NETWORK")
if [ $? != 0 ]; then
echo "Failed to allocate a floating IP for ingress"
exit 10
fi

echo "Getting zone ID in Route53"
ZONES=$(aws route53 list-hosted-zones --output json)
ZONE_ID=$(echo "$ZONES" | jq -r ".HostedZones[] | select(.Name==\"$DOMAIN.\") | .Id")
if [ -z "$ZONE_ID" ]; then
echo "Domain $DOMAIN not found in Route53"
exit 20
fi

echo "Updating DNS records in Route53"
RESPONSE=$(aws route53 change-resource-record-sets --hosted-zone-id "$ZONE_ID" --change-batch '{ "Comment": "Update A record for cluster API", "Changes": [ { "Action": "CREATE", "ResourceRecordSet": { "Name": "*.apps.'$CLUSTER_NAME'.'$DOMAIN'", "Type": "A", "TTL": 60, "ResourceRecords": [ { "Value": "'$FIP'" } ] } } ] }' --output json)
if [ $? != 0 ]; then
echo "Failed to update A record for cluster"
echo "Releasing previously allocated floating IP"
openstack floating ip delete "$FIP"
exit 25
fi

echo "Waiting for DNS change to propagate"
aws route53 wait resource-record-sets-changed --id "$(echo "$RESPONSE" | jq -r '.ChangeInfo.Id')"

echo "Logging in to cluster $CLUSTER_NAME as kubeadmin"
export KUBECONFIG=$DIR/cluster/$CLUSTER_NAME/auth/kubeconfig
oc login -u kubeadmin -p "$(cat "$DIR/cluster/$CLUSTER_NAME/auth/kubeadmin-password")"

echo "Configuring OAuth"
# CI runs get the test OAuth setup; everything else gets prod.
if [ -z "$CI" ]; then
"$DIR/config/auth/01-prod-auth.sh"
else
"$DIR/config/auth/01-test-auth.sh"
fi
| true |
3767cbb1048eee27e57abe413c2a7efe6de5433b
|
Shell
|
gaithoben/android-libtdjson
|
/build_td_abi.sh
|
UTF-8
| 863 | 3.5625 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash -e
# Build TDLib for one Android ABI, linking against the prebuilt OpenSSL
# under ./openssl/<abi>.  Requires $ANDROID_NDK_HOME.
#
# Usage: build_td_abi.sh <abi>   (e.g. armeabi-v7a, arm64-v8a)

__DIR__="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"

if [ -z "$ANDROID_NDK_HOME" ]; then
  >&2 echo "ANDROID_NDK_HOME not set"
  exit 2
fi

if [ -z "$1" ]; then
  # Tell the caller what went wrong (the original exited silently).
  >&2 echo "usage: $0 <android-abi>"
  exit 2
fi
abi=$1

# Quoted paths: the checkout location may contain spaces.
mkdir -p "$__DIR__/build/td/$abi"
cd "$__DIR__/build/td/$abi"

OPENSSL_ROOT_DIR="$__DIR__/openssl"
OPENSSL_CRYPTO_LIBRARY="$OPENSSL_ROOT_DIR/$abi/lib/libcrypto.a"
OPENSSL_SSL_LIBRARY="$OPENSSL_ROOT_DIR/$abi/lib/libssl.a"

cmake "$__DIR__/td" -DCMAKE_TOOLCHAIN_FILE="${ANDROID_NDK_HOME}/build/cmake/android.toolchain.cmake" -DCMAKE_BUILD_TYPE=MinSizeRel -DANDROID_ABI=${abi} \
  -DOPENSSL_FOUND=1 \
  -DOPENSSL_INCLUDE_DIR="$OPENSSL_ROOT_DIR/include" \
  -DOPENSSL_CRYPTO_LIBRARY="$OPENSSL_CRYPTO_LIBRARY" \
  -DOPENSSL_SSL_LIBRARY="$OPENSSL_SSL_LIBRARY" \
  -DOPENSSL_LIBRARIES="$OPENSSL_SSL_LIBRARY;$OPENSSL_CRYPTO_LIBRARY" \
  || exit 1
cmake --build . || exit 1
| true |
1297b7922f7833741900c158b8ff0a3613777f69
|
Shell
|
zapling/dotfiles
|
/setup.sh
|
UTF-8
| 468 | 3.734375 | 4 |
[] |
no_license
|
#!/usr/bin/bash
# Bootstrap dotfiles: ensure git is installed, clone the repo, then
# source every setup step in ~/dotfiles/setup/steps.

# `command -v` is the standard existence check; the original
# `type "git" > /dev/null` still leaked "not found" noise on stderr.
if ! command -v git > /dev/null 2>&1; then
    pacman -Sy --needed git
fi

if [[ ! -d ~/dotfiles ]]; then
    git clone [email protected]:zapling/dotfiles.git ~/dotfiles
fi

echo "You are about to run a bunch of scripts, are you sure?"
read -p "Press any key to continue" -n 1

# Steps are sourced (not executed) so they can modify this shell's
# environment.
for setup_step in ~/dotfiles/setup/steps/*; do
    echo -e "\nExecuting ${setup_step}\n"
    source "$setup_step"
done

echo -e "\nAlmost there, restart your terminal and you should be done!"
| true |
425bb56aebacd9f9b3dbe8e1b56546a244acb3a8
|
Shell
|
xfnw/shoxf
|
/whois
|
UTF-8
| 187 | 2.625 | 3 |
[] |
no_license
|
#!/bin/bash
# WHOIS-scan every host/IP given on the command line, appending the
# interesting fields (abuse contacts, NetName, Org, descr) to
# index/<name>.txt.

# "$@" (quoted) keeps each argument intact; the original unquoted $@
# word-split arguments containing spaces and expanded glob characters.
for fn in "$@"; do
	echo "scanning $fn"
	echo "Starting WHOIS $fn" | tee -a "index/$fn.txt"
	whois "$fn" | grep 'Abuse\|abuse\|NetName\|descr:\|Org' | tee -a "index/$fn.txt"
done
| true |
63c3821ac22552f6cfba58cb9246ff1433fa6c8c
|
Shell
|
vncloudsco/Documentation
|
/scripts/OpenStack-Queens-No-HA/UbuntuScriptsQueens/viewlog.sh
|
UTF-8
| 614 | 2.703125 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# rapid CLI oneliner log inspection inside all log files
# Looking for keywords "fail", "error", "unable", "warning".
# Ref: https://raw.githubusercontent.com/AJNOURI/COA/master/misc/oneliner_log_inspection.sh
#################################################### GUIDE ###################
# Run with the keyword ERROR or FAIL, or substitute any word you want:
# bash viewlog.sh ERROR
# bash viewlog.sh "ERROR|FAIL"
##############################################################################

# Iterate with a glob instead of parsing `ls` output (which breaks on
# unusual filenames), and quote "$1" so patterns like "ERROR|FAIL"
# reach egrep as a single argument.
for i in /var/log/*/*.log; do
	echo "=========="
	echo "$i"
	echo "========="
	tail "$i" | egrep -i "$1"
done
| true |
1451e7a89e05f44b6e63241e60092fcf0f7bab54
|
Shell
|
rayje/Stor
|
/tools/util.sh
|
UTF-8
| 243 | 2.890625 | 3 |
[] |
no_license
|
#!/bin/sh
# Collect the IPs (4th column) of all ami-00000200 instances into
# .tmp/ips and print that path on stdout for the caller to consume.
get_ips() {
	# mkdir -p is a no-op when the directory exists, so the original
	# explicit `[ ! -d .tmp ]` guard was redundant.
	mkdir -p .tmp
	euca-describe-instances | grep ami-00000200 | awk '{print $4}' > .tmp/ips
	# Trailing blank line, preserved from the original output format.
	echo >> .tmp/ips
	echo ".tmp/ips"
}
# List every instance built from the ami-00000200 image (raw
# euca-describe-instances rows, filtered).
list_inst() {
euca-describe-instances | grep ami-00000200
}
| true |
db184278e7df73e1d7b5e8518f5b4284d5f5996c
|
Shell
|
conjurdemos/cdemo
|
/conjurDemo/roles/conjurConfig/files/conjur_cli_image/apiInteraction/utils.sh
|
UTF-8
| 4,643 | 3.75 | 4 |
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Show the given message as a prompt and wait for one line of input
# (typically just ENTER).
pause() {
	read -p "$*"
}
# Percent-encode ' ', '/' and ':' in $1; callers read the result from
# the URLIFIED global.
function urlify(){
	local str=$1
	# Pure parameter expansion: no subshells per character, and unlike
	# the original unquoted `echo $str | sed` pipeline it no longer
	# collapses runs of whitespace before encoding them.
	str=${str// /%20}
	str=${str//\//%2F}
	str=${str//:/%3A}
	URLIFIED=$str
}
# Interactive component picker: `select` shows a numbered menu (prompt
# on stderr, choice read from stdin) and the chosen id -- jenkins,
# webapp, tomcat or ansible -- is printed on stdout so callers can
# capture it with $(menu).  An invalid selection re-prompts.
function menu(){
PS3='Please enter your choice: '
options=("Jenkins" "Webapp" "Tomcat" "Ansible")
select opt in "${options[@]}"
do
case $opt in
"Jenkins")
id=jenkins
break
;;
"Webapp")
id=webapp
break
;;
"Tomcat")
id=tomcat
break
;;
"Ansible")
id=ansible
break
;;
esac
done
echo $id
}
# Interactively mint a new Conjur host identity from a previously saved
# hostfactory token (see hostfactory_interactive) and write the JSON
# response to /identity/<component>_identity.
identity_interactive(){
printf "\nPlease select hostfactory token to use for identity generation:\n"
local hftoken=$(menu)
local conjurCert="/root/conjur-cyberark.pem"
# Random hex suffix keeps host names unique across runs.
local id="$hftoken-$(openssl rand -hex 2)"
# Extract the bare token string from the saved hostfactory JSON.
local token=$(cat /hostfactoryTokens/"$hftoken"_hostfactory | jq '.[0] | {token}' | awk '{print $2}' | tr -d '"\n\r')
# Create the host via the Conjur host-factory API.
local newidentity=$(curl -X POST -s --cacert $conjurCert -H "Authorization: Token token=\"$token\"" --data-urlencode id=$id https://conjur-master/host_factories/hosts)
printf "\nHostfactory token: $token"
printf "\nNew host name in Conjur: $id"
printf "\n"
pause 'Press [ENTER] key to continue...'
printf '\nNew Identity:\n'
echo $newidentity | jq .
printf "\nOutputing file to /identity/"$hftoken"_identity"
echo $newidentity > /identity/"$hftoken"_identity
}
# Non-interactive variant for Jenkins: mint a host identity from the
# saved jenkins hostfactory token and write Conjur CLI credentials
# (certs, .conjurrc, .netrc) into /identity for the container to use.
identity_jenkins(){
local hftoken=jenkins
local id="$hftoken-$(openssl rand -hex 2)"
local conjurCert="/root/conjur-cyberark.pem"
local token=$(cat /hostfactoryTokens/"$hftoken"_hostfactory | jq '.[0] | {token}' | awk '{print $2}' | tr -d '"\n\r')
local newidentity=$(curl -X POST -s --cacert $conjurCert -H "Authorization: Token token=\"$token\"" --data-urlencode id=$id https://conjur-master/host_factories/hosts)
# The returned id is namespaced (account:host:name); keep the last field.
local hostname=$(echo $newidentity | jq -r '.id' | awk -F: '{print $NF}')
local api=$(echo $newidentity | jq -r '.api_key')
cp /root/*.pem /identity/
cp /root/.conjurrc /identity/.conjurrc
# .netrc gives the Conjur CLI its login credentials for authn.
echo "machine https://conjur-master/authn" > /identity/.netrc
echo " login host/$hostname" >> /identity/.netrc
echo " password $api" >> /identity/.netrc
}
# Interactively create a hostfactory token for a chosen component and
# save it to /hostfactoryTokens/<name>_hostfactory.  Requires a prior
# `conjur init` (~/.conjurrc) and `conjur authn login` (~/.netrc).
hostfactory_interactive(){
	if [ ! -f ~/.netrc ];
	then
		# Fix: inside double quotes \' printed a literal backslash
		# ("Can\'t"); the apostrophe needs no escaping there.
		echo "Can't find .netrc file in the home folder of user."
		echo "Please run conjur authn login"
		# Fix: bare `exit` propagated the echo's status (0) on an
		# error path; exit non-zero so callers can detect the failure.
		exit 1
	elif [ ! -f ~/.conjurrc ];
	then
		echo "Can't find .conjurrc file in the home folder of user."
		echo "Please run conjur init"
		exit 1
	else
		# Pull API key, account and login from the CLI config files.
		local api=$(cat ~/.netrc | grep password | awk '{print $2}')
		local account=$(cat ~/.conjurrc | grep account | awk '{print $2}')
		local conjurCert="/root/conjur-cyberark.pem"
		local login=$(cat ~/.netrc | grep login | awk '{print $2}')
		printf "\nSelect hostfactory to create.\n"
		local hf=$(menu)
		printf "Generating hostfactory for $hf.\n"
		printf "Using login = $login\n"
		printf "This is the API key = $api"
		# Authenticate, then base64 the session token for the header.
		local auth=$(curl -s --cacert $conjurCert -H "Content-Type: text/plain" -X POST -d "$api" https://conjur-master/authn/$account/$login/authenticate)
		local auth_token=$(echo -n $auth | base64 | tr -d '\r\n')
		# Request a long-lived (2065) hostfactory token.
		local hostfactory=$(curl --cacert $conjurCert -s -X POST --data-urlencode "host_factory=$account:host_factory:$hf/nodes" --data-urlencode "expiration=2065-08-04T22:27:20+00:00" -H "Authorization: Token token=\"$auth_token\"" https://conjur-master/host_factory_tokens)
		printf "\n"
		pause 'Press [ENTER] key to continue...'
		printf "\nThis is the hostfactory token:\n"
		echo $hostfactory | jq .
		printf "\nSaving HF token for use in file /hostfactoryTokens/"$hf"_hostfactory"
		echo $hostfactory > /hostfactoryTokens/$hf"_hostfactory"
	fi
}
# Batch (non-interactive) counterpart of hostfactory_interactive for
# the jenkins component: authenticate with the CLI credentials in
# ~/.netrc / ~/.conjurrc, mint a hostfactory token, and save it under
# /hostfactoryTokens/jenkins_hostfactory.
hostfactory_jenkins(){
local api=$(cat ~/.netrc | grep password | awk '{print $2}')
local account=$(cat ~/.conjurrc | grep account | awk '{print $2}')
local conjurCert="/root/conjur-cyberark.pem"
local login=$(cat ~/.netrc | grep login | awk '{print $2}')
local hf=jenkins
# Authenticate, then base64 the session token for the auth header.
local auth=$(curl -s --cacert $conjurCert -H "Content-Type: text/plain" -X POST -d "$api" https://conjur-master/authn/$account/$login/authenticate)
local auth_token=$(echo -n $auth | base64 | tr -d '\r\n')
# Token expiry is fixed far in the future (2065-08-04).
local hostfactory=$(curl --cacert $conjurCert -s -X POST --data-urlencode "host_factory=$account:host_factory:$hf/nodes" --data-urlencode "expiration=2065-08-04T22:27:20+00:00" -H "Authorization: Token token=\"$auth_token\"" https://conjur-master/host_factory_tokens)
echo $hostfactory > /hostfactoryTokens/$hf"_hostfactory"
}
| true |
16ebb940228fc75241731d8c4ed8a792bdd9a090
|
Shell
|
fortibase/proxmox-dab-templates
|
/scripts/install-postgresql.sh
|
UTF-8
| 810 | 3.515625 | 4 |
[] |
no_license
|
#!/bin/bash
# Installs requested version of PostgreSQL for requested Ubuntu version
# inside a dab (Debian/Proxmox Appliance Builder) chroot, and allows
# connection to PG from anywhere on the LAN.
#
# Usage: install-postgresql.sh <pg-major-version>

VERSION=$1
BASEDIR=$(dab basedir)
# Codename of the target Ubuntu release (e.g. "focal"), queried from
# inside the chroot.
UBUNTUVERSION=$(dab exec lsb_release -c -s)
echo "$UBUNTUVERSION"

# Register the PGDG apt repository and its signing key.  Paths are
# quoted so a $BASEDIR containing spaces cannot break (or, if empty,
# silently scatter words).
echo "deb http://apt.postgresql.org/pub/repos/apt/ $UBUNTUVERSION-pgdg main" | tee "$BASEDIR/etc/apt/sources.list.d/pgdg.list"
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | dab exec apt-key add -

dab install postgresql-$VERSION postgresql-contrib-$VERSION libpq-dev postgresql-server-dev-$VERSION

# Permissive pg_hba: peer for local postgres, md5 elsewhere including
# the 192.168/16 LAN.  NOTE(review): no trailing newline is written --
# confirm the target PG version tolerates that.
printf "local all postgres peer\nlocal all all md5\nhost all all 192.168.0.0/16 md5" | tee "$BASEDIR/etc/postgresql/$VERSION/main/pg_hba.conf"
# Listen on all interfaces.
sed -i "s/^#listen_addresses = 'localhost'/listen_addresses = '*'\t/" "$BASEDIR/etc/postgresql/$VERSION/main/postgresql.conf"
| true |
e02ec280249d5377c153422ca28b9f778a9b826e
|
Shell
|
akosela/civicrm-drupal
|
/install.sh
|
UTF-8
| 17,963 | 3.109375 | 3 |
[] |
no_license
|
#!/bin/sh
# Copyright (c) 2018 Andy Kosela. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
###############################
# Passwords - CHANGE IT! #
###############################
admin='admin'
admin_pass='pass123'
civicrm='civicrm'
domain='example.com'
drupal='drupal'
drupal_admin='admin'
drupal_admin_pass='pass'
email='[email protected]'
mysql_root='pass'
###############################
# Start of script
# Make sure selinux is disabled
if [ ! `grep SELINUX=disabled /etc/sysconfig/selinux` ]; then
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/sysconfig/selinux
fi
setenforce 0 >/dev/null 2>&1
# Make swap (for AWS microinstance)
if [[ ! `swapon | grep NAME` ]]; then
echo "Make swap (for AWS microinstance)..."
dd if=/dev/zero of=/var/swap.1 bs=1M count=1024
chmod 600 /var/swap.1
mkswap /var/swap.1
swapon /var/swap.1
fi
# Install Nginx and MariDB
echo "Install Nginx and MariaDB..."
yum -q -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
yum -q -y install nginx mariadb-server
systemctl enable nginx
systemctl enable mariadb
systemctl start mariadb
# Mysql_secure_installation + CiviCRM db create
echo "MySQL_secure_installation..."
mysql -u root <<EOF
update mysql.user set password=password('$mysql_root') where user='root';
delete from mysql.user where user='root' and host not in ('localhost', '127.0.0.1', '::1');
delete from mysql.user where user='';
delete from mysql.db where db='test' or db='test_%';
create database civicrm;
grant all privileges on civicrm.* to civicrm@localhost identified by '$civicrm';
flush privileges;
EOF
cat > /root/.my.cnf <<EOF
[client]
user=root
password=$mysql_root
EOF
chmod 600 /root/.my.cnf
# Configure Nginx
echo "Configure Nginx..."
sed -i -e '1,/server_name/{/server_name/d;}' \
-e '1,/root/{/root/d;}' /etc/nginx/nginx.conf
mkdir /usr/share/nginx/html/drupal
mkdir /etc/nginx/default.d
cat > /etc/nginx/default.d/site.conf <<EOF
server_name $domain;
root /usr/share/nginx/html/drupal;
EOF
# Start Nginx
echo "Start Nginx..."
systemctl start nginx
# Install Certbot
echo "Install Certbot..."
yum-config-manager --enable rhui-REGION-rhel-server-extras \
rhui-REGION-rhel-server-optional >/dev/null
yum -q -y install certbot-nginx >/dev/null 2>&1
certbot -n --authenticator webroot --installer nginx -d $domain \
--agree-tos --email $email --webroot-path /usr/share/nginx/html/drupal
certbot renew --dry-run
echo "0 23 * * * root certbot renew" >> /etc/crontab
systemctl reload crond
# Install PHP
echo "Install PHP..."
yum -q -y install http://rpms.remirepo.net/enterprise/remi-release-7.rpm
yum -q -y install php56 php56-php-fpm php56-php-mysql php56-php-gd \
php56-php-mbstring
sed -i 's/memory_limit.*M/memory_limit = 512M/' /etc/opt/remi/php56/php.ini
ln -s /usr/bin/php56 /usr/bin/php
cat > /etc/nginx/default.d/php-fpm.conf <<'EOF'
# pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
#
location ~ \.php$ {
fastcgi_pass 127.0.0.1:9000;
fastcgi_index index.php;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
include fastcgi_params;
}
EOF
# Install Drupal Nginx conf
echo "Install Drupal Nginx conf..."
cat >> /etc/nginx/default.d/site.conf <<'EOF'
location / {
index index.php index.html;
try_files $uri /index.php?$query_string; # For Drupal >= 7
}
location = /favicon.ico {
log_not_found off;
access_log off;
}
location = /robots.txt {
allow all;
log_not_found off;
access_log off;
}
# Very rarely should these ever be accessed outside of your lan
location ~* \.(txt|log)$ {
allow 192.168.0.0/16;
deny all;
}
location ~ \..*/.*\.php$ {
return 403;
}
location ~ ^/sites/.*/private/ {
return 403;
}
# Allow "Well-Known URIs" as per RFC 5785
location ~* ^/.well-known/ {
allow all;
}
# Block access to "hidden" files and directories whose names begin with a
# period. This includes directories used by version control systems such
# as Subversion or Git to store control files.
location ~ (^|/)\. {
return 403;
}
location @rewrite {
rewrite ^/(.*)$ /index.php?q=$1;
}
# Don't allow direct access to PHP files in the vendor directory.
location ~ /vendor/.*\.php$ {
deny all;
return 404;
}
# Fighting with Styles? This little gem is amazing.
location ~ ^/sites/.*/files/styles/ { # For Drupal >= 7
try_files $uri @rewrite;
}
# Handle private files through Drupal. Private file's path can come
# with a language prefix.
location ~ ^(/[a-z\-]+)?/system/files/ { # For Drupal >= 7
try_files $uri /index.php?$query_string;
}
location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg)$ {
try_files $uri @rewrite;
expires max;
log_not_found off;
}
# civiCRM security
location ~* /(sites/default/)?files/civicrm/(ConfigAndLog|custom|upload|templates_c)/ {
deny all;
}
EOF
sed -i '/location \//,+1 d' /etc/nginx/nginx.conf
systemctl start php56-php-fpm
systemctl restart nginx
# Install Drush
echo "Install Drush..."
unset module
cd /root
curl -sS https://getcomposer.org/installer | php
mv composer.phar /usr/local/bin/composer
composer -q --no-plugins --no-scripts global require drush/drush:8.*
ln -s /root/.config/composer/vendor/bin/drush /usr/local/bin/drush
# Install Drupal
echo "Install Drupal..."
cd /usr/share/nginx/html
drush dl drupal-7.56
cp -r drupal-7.56/* drupal
rm -rf drupal-7.56
cd drupal
drush si standard --account-name=$drupal_admin \
--account-pass=$drupal_admin_pass \
--db-url=mysql://drupal:$drupal@localhost/drupal \
--db-su=root --db-su-pw=$mysql_root -y >/dev/null
chmod 755 /usr/share/nginx/html/drupal/sites/default
# Install Civicrm
echo "Install CiviCRM..."
cd /root/.drush
curl -O https://raw.githubusercontent.com/civicrm/civicrm-drupal/7.x-master/drush/civicrm.drush.inc
drush cc -y
cd /usr/share/nginx/html
curl -o civicrm-drupal.tar.gz -L https://sourceforge.net/projects/civicrm/files/civicrm-stable/4.7.29/civicrm-4.7.29-drupal.tar.gz/download
cd drupal
drush cvi --dbuser=civicrm --dbpass=$civicrm --dbhost=localhost \
--dbname=civicrm --tarfile=/usr/share/nginx/html/civicrm-drupal.tar.gz \
--destination=sites/all/modules --site_url=$domain --ssl=on \
--load_generated_data=0 -y >/dev/null
# create admin user
echo "Create $admin user..."
useradd $admin
echo $admin_pass | passwd --stdin $admin
cp -r /root/.drush /home/$admin
chown -R $admin:$admin /home/$admin/.drush
chown -R $admin:$admin /usr/share/nginx/html
su - admin -c "composer -q --no-plugins --no-scripts global require drush/drush:8.*"
# Drupal private file directory
echo "Drupal private file directory..."
mkdir files
chown apache:apache files
drush -y vset file_private_path /usr/share/nginx/html/drupal/files
# Install Backup and Migrate module for Drupal
echo "Install backup_migrate module for Drupal..."
chown -R apache:apache /usr/share/nginx/html/drupal/sites/default/files
drush -y en backup_migrate
# CiviCRM views integration
echo "CiviCRM views integration..."
drush -y en views
mysql -u root -D civicrm <<EOF
grant select on civicrm.* to drupal@localhost identified by '$drupal';
EOF
chown -R $admin:$admin /usr/share/nginx/html/drupal/site/all
rm -f /root/.my.cnf
rm /usr/local/bin/drush
ln -s /home/admin/.config/composer/vendor/bin/drush /usr/local/bin/drush
cat >> /usr/share/nginx/html/drupal/sites/default/settings.php <<'EOF'
# integrate views
$databases['default']['default']['prefix']= array(
'default' => '',
'civicrm_acl' => '`civicrm`.',
'civicrm_acl_cache' => '`civicrm`.',
'civicrm_acl_contact_cache' => '`civicrm`.',
'civicrm_acl_entity_role' => '`civicrm`.',
'civicrm_action_log' => '`civicrm`.',
'civicrm_action_mapping' => '`civicrm`.',
'civicrm_action_schedule' => '`civicrm`.',
'civicrm_activity' => '`civicrm`.',
'civicrm_activity_contact' => '`civicrm`.',
'civicrm_address' => '`civicrm`.',
'civicrm_address_format' => '`civicrm`.',
'civicrm_batch' => '`civicrm`.',
'civicrm_cache' => '`civicrm`.',
'civicrm_campaign' => '`civicrm`.',
'civicrm_campaign_group' => '`civicrm`.',
'civicrm_case' => '`civicrm`.',
'civicrm_case_activity' => '`civicrm`.',
'civicrm_case_contact' => '`civicrm`.',
'civicrm_case_type' => '`civicrm`.',
'civicrm_component' => '`civicrm`.',
'civicrm_contact' => '`civicrm`.',
'civicrm_contact_type' => '`civicrm`.',
'civicrm_contribution' => '`civicrm`.',
'civicrm_contribution_page' => '`civicrm`.',
'civicrm_contribution_product' => '`civicrm`.',
'civicrm_contribution_recur' => '`civicrm`.',
'civicrm_contribution_soft' => '`civicrm`.',
'civicrm_contribution_widget' => '`civicrm`.',
'civicrm_country' => '`civicrm`.',
'civicrm_county' => '`civicrm`.',
'civicrm_currency' => '`civicrm`.',
'civicrm_custom_field' => '`civicrm`.',
'civicrm_custom_group' => '`civicrm`.',
'civicrm_cxn' => '`civicrm`.',
'civicrm_dashboard' => '`civicrm`.',
'civicrm_dashboard_contact' => '`civicrm`.',
'civicrm_dedupe_exception' => '`civicrm`.',
'civicrm_dedupe_rule' => '`civicrm`.',
'civicrm_dedupe_rule_group' => '`civicrm`.',
'civicrm_discount' => '`civicrm`.',
'civicrm_domain' => '`civicrm`.',
'civicrm_email' => '`civicrm`.',
'civicrm_entity_batch' => '`civicrm`.',
'civicrm_entity_file' => '`civicrm`.',
'civicrm_entity_financial_account' => '`civicrm`.',
'civicrm_entity_financial_trxn' => '`civicrm`.',
'civicrm_entity_tag' => '`civicrm`.',
'civicrm_event' => '`civicrm`.',
'civicrm_event_carts' => '`civicrm`.',
'civicrm_events_in_carts' => '`civicrm`.',
'civicrm_extension' => '`civicrm`.',
'civicrm_file' => '`civicrm`.',
'civicrm_financial_account' => '`civicrm`.',
'civicrm_financial_item' => '`civicrm`.',
'civicrm_financial_trxn' => '`civicrm`.',
'civicrm_financial_type' => '`civicrm`.',
'civicrm_grant' => '`civicrm`.',
'civicrm_group' => '`civicrm`.',
'civicrm_group_contact' => '`civicrm`.',
'civicrm_group_contact_cache' => '`civicrm`.',
'civicrm_group_nesting' => '`civicrm`.',
'civicrm_group_organization' => '`civicrm`.',
'civicrm_im' => '`civicrm`.',
'civicrm_job' => '`civicrm`.',
'civicrm_job_log' => '`civicrm`.',
'civicrm_line_item' => '`civicrm`.',
'civicrm_loc_block' => '`civicrm`.',
'civicrm_location_type' => '`civicrm`.',
'civicrm_log' => '`civicrm`.',
'civicrm_mail_settings' => '`civicrm`.',
'civicrm_mailing' => '`civicrm`.',
'civicrm_mailing_abtest' => '`civicrm`.',
'civicrm_mailing_bounce_pattern' => '`civicrm`.',
'civicrm_mailing_bounce_type' => '`civicrm`.',
'civicrm_mailing_component' => '`civicrm`.',
'civicrm_mailing_event_bounce' => '`civicrm`.',
'civicrm_mailing_event_confirm' => '`civicrm`.',
'civicrm_mailing_event_delivered' => '`civicrm`.',
'civicrm_mailing_event_forward' => '`civicrm`.',
'civicrm_mailing_event_opened' => '`civicrm`.',
'civicrm_mailing_event_queue' => '`civicrm`.',
'civicrm_mailing_event_reply' => '`civicrm`.',
'civicrm_mailing_event_subscribe' => '`civicrm`.',
'civicrm_mailing_event_trackable_url_open' => '`civicrm`.',
'civicrm_mailing_event_unsubscribe' => '`civicrm`.',
'civicrm_mailing_group' => '`civicrm`.',
'civicrm_mailing_job' => '`civicrm`.',
'civicrm_mailing_recipients' => '`civicrm`.',
'civicrm_mailing_spool' => '`civicrm`.',
'civicrm_mailing_trackable_url' => '`civicrm`.',
'civicrm_managed' => '`civicrm`.',
'civicrm_mapping' => '`civicrm`.',
'civicrm_mapping_field' => '`civicrm`.',
'civicrm_membership' => '`civicrm`.',
'civicrm_membership_block' => '`civicrm`.',
'civicrm_membership_log' => '`civicrm`.',
'civicrm_membership_payment' => '`civicrm`.',
'civicrm_membership_status' => '`civicrm`.',
'civicrm_membership_type' => '`civicrm`.',
'civicrm_menu' => '`civicrm`.',
'civicrm_navigation' => '`civicrm`.',
'civicrm_note' => '`civicrm`.',
'civicrm_openid' => '`civicrm`.',
'civicrm_option_group' => '`civicrm`.',
'civicrm_option_value' => '`civicrm`.',
'civicrm_participant' => '`civicrm`.',
'civicrm_participant_payment' => '`civicrm`.',
'civicrm_participant_status_type' => '`civicrm`.',
'civicrm_payment_processor' => '`civicrm`.',
'civicrm_payment_processor_type' => '`civicrm`.',
'civicrm_payment_token' => '`civicrm`.',
'civicrm_pcp' => '`civicrm`.',
'civicrm_pcp_block' => '`civicrm`.',
'civicrm_persistent' => '`civicrm`.',
'civicrm_phone' => '`civicrm`.',
'civicrm_pledge' => '`civicrm`.',
'civicrm_pledge_block' => '`civicrm`.',
'civicrm_pledge_payment' => '`civicrm`.',
'civicrm_preferences_date' => '`civicrm`.',
'civicrm_premiums' => '`civicrm`.',
'civicrm_premiums_product' => '`civicrm`.',
'civicrm_prevnext_cache' => '`civicrm`.',
'civicrm_price_field' => '`civicrm`.',
'civicrm_price_field_value' => '`civicrm`.',
'civicrm_price_set' => '`civicrm`.',
'civicrm_price_set_entity' => '`civicrm`.',
'civicrm_print_label' => '`civicrm`.',
'civicrm_product' => '`civicrm`.',
'civicrm_queue_item' => '`civicrm`.',
'civicrm_recurring_entity' => '`civicrm`.',
'civicrm_relationship' => '`civicrm`.',
'civicrm_relationship_type' => '`civicrm`.',
'civicrm_report_instance' => '`civicrm`.',
'civicrm_saved_search' => '`civicrm`.',
'civicrm_setting' => '`civicrm`.',
'civicrm_sms_provider' => '`civicrm`.',
'civicrm_state_province' => '`civicrm`.',
'civicrm_subscription_history' => '`civicrm`.',
'civicrm_survey' => '`civicrm`.',
'civicrm_system_log' => '`civicrm`.',
'civicrm_tag' => '`civicrm`.',
'civicrm_tell_friend' => '`civicrm`.',
'civicrm_timezone' => '`civicrm`.',
'civicrm_uf_field' => '`civicrm`.',
'civicrm_uf_group' => '`civicrm`.',
'civicrm_uf_join' => '`civicrm`.',
'civicrm_uf_match' => '`civicrm`.',
'civicrm_website' => '`civicrm`.',
'civicrm_word_replacement' => '`civicrm`.',
'civicrm_worldregion' => '`civicrm`.',
);
EOF
echo "Script finished."
| true |
e4da8e6aa1191dd270fba9689b67f22de7de8dfa
|
Shell
|
LuCh1Monster/ShellDemos
|
/ch_06/demo06_15.sh
|
UTF-8
| 138 | 3.34375 | 3 |
[] |
no_license
|
#!/bin/bash
# Demo: file-existence tests and subshell exit behavior.
# Usage: demo06_15.sh <path>
if [ ! -e "$1" ]   # quote $1 so the test is valid for empty/空白 args
then
	echo "No such file."
	exit             # kept status-of-last-command (0) to preserve original behavior
fi
[ -f "$1" ] && echo "The file exists."
# The 'exit 1' terminates only the subshell, not this script — that is
# the point of the demo.
(echo; cat "$1"; exit 1)
exit 0
| true |
6ec9761c58b1e611972413ce95f1c6d53d5f8662
|
Shell
|
tomsiwek/wspolsysob
|
/skrypty/s2.sh
|
UTF-8
| 80 | 2.59375 | 3 |
[] |
no_license
|
#!/bin/bash
# Print every entry in the current directory, then the numbers 1..10.
for f in *; do echo "$f"; done        # quoted: survives names with spaces
for i in {1..10}; do echo "$i"; done  # brace expansion avoids spawning seq
| true |
70a0bb64b559859d2c2c7cd8169f11bf616a77ed
|
Shell
|
msrivastav13/Dreamforce2018Demo
|
/scripts/generatePkgXML.sh
|
UTF-8
| 579 | 3.15625 | 3 |
[] |
no_license
|
#!/bin/bash
# Retrieve an unmanaged package's package.xml from an org into ./manifest/.
# Usage: generatePkgXML.sh <orgalias> <packageName>
if [ $# -lt 2 ]   # BUGFIX: both arguments are required below; was -lt 1
then
    echo Usage: generatepkgXML.sh orgalias packageName
    exit          # TODO(review): a non-zero status would be more correct
fi
## Retrieve the PackageXML from Unmanaged Container
sfdx force:mdapi:retrieve -s -r ./mdapipkg -u "$1" -p "$2" # Retrieve Metadata API Source from Package Name
unzip -o -qq ./mdapipkg/unpackaged.zip -d ./mdapipkg # Unzip the file
rm -rf ./manifest/ # If manifest directory exists delete it
mkdir ./manifest/ # Create a New Manifest Directory
cp -a ./mdapipkg/package.xml ./manifest/ # Copy package.XML to manifest directory
rm -rf ./mdapipkg # Delete the mdapipkg source
| true |
d2ea915fadf98554c6122e4420997245be3d510a
|
Shell
|
vtardia/docker-php
|
/prod
|
UTF-8
| 1,031 | 3.75 | 4 |
[] |
no_license
|
#!/bin/bash
# docker-compose wrapper for the production stack.
# Usage: ./prod <start|stop|logs|ps> [service] [extra 'up' flag]

# Resolve the script's real directory, following a symlink if invoked
# through one.
path=$0
if [[ -L $0 ]]; then
  path=$(readlink "$0")
fi
DIR="$(cd "$(dirname "$path")" && pwd)"

if [ $# -eq 0 ]; then
  echo "Usage: ./prod <start|stop|logs>"
  exit 1
fi

if [ "$1" == "start" ]; then
  echo "Starting..."
  # BUGFIX: test the same files that are read — the original tested
  # ./web.version in the *current* directory but read $DIR/build/web.version.
  # $2/$3 are intentionally unquoted: empty values must expand to nothing.
  WEB_VERSION=$(if [ -f "$DIR/build/web.version" ]; then cat "$DIR/build/web.version"; fi) \
  WORKER_VERSION=$(if [ -f "$DIR/build/worker.version" ]; then cat "$DIR/build/worker.version"; fi) \
  docker-compose -f "$DIR/docker-compose.prod.yml" -f "$DIR/docker-compose.override.yml" up $3 -d $2
  exit $?
fi

if [ "$1" == "stop" ]; then
  echo "Stopping..."
  if [ -n "$2" ]; then
    # stop only the named service
    docker-compose -f "$DIR/docker-compose.prod.yml" -f "$DIR/docker-compose.override.yml" stop "$2"
  else
    # no service named: take the whole stack down
    docker-compose -f "$DIR/docker-compose.prod.yml" -f "$DIR/docker-compose.override.yml" down
  fi
  exit $?
fi

if [ "$1" == "logs" ]; then
  echo "Reading logs..."
  docker-compose logs -f $2   # $2 unquoted on purpose: optional service name
fi

if [ "$1" == "ps" ]; then
  docker-compose -f "$DIR/docker-compose.prod.yml" -f "$DIR/docker-compose.override.yml" ps
fi
| true |
186f5f6d22c6827d39f192cfe51a60fb1d2900c4
|
Shell
|
uileyar/scrapy_cl
|
/cltest/start.sh
|
UTF-8
| 449 | 2.953125 | 3 |
[] |
no_license
|
#! /bin/bash
# Control script for the "cl" spider under scrapyd.
# Usage: start.sh <start|retry>

# Launch scrapyd in the background, wait for it to come up, then
# schedule the first crawl.
start_cl() {
	echo "start_cl"
	scrapyd --pidfile /data/scrapy/scrapyd.pid -l /var/log/scrapyd/scrapyd.log &
	sleep 10   # give scrapyd time to start listening on :6800
	curl http://localhost:6800/schedule.json -d project=cltest -d spider=cl
	echo "done."
}

# Re-schedule the crawl against an already-running scrapyd.
retry_cl() {
	echo "retry_cl"
	curl http://localhost:6800/schedule.json -d project=cltest -d spider=cl
	echo "done."
}

# BUGFIX: "$1" is quoted — the unquoted form made [ fail with
# "unary operator expected" when no argument was given.
if [ "$1" == "start" ]
then
	start_cl
elif [ "$1" == "retry" ]
then
	retry_cl
fi
| true |
671199ca32118060a2ec10b75fa54984afbc6121
|
Shell
|
ryanj/vagrant-origin
|
/release/release.sh
|
UTF-8
| 1,785 | 4.09375 | 4 |
[] |
no_license
|
#!/usr/bin/env bash
#
# Maintainer: Jorge Morales <[email protected]>
#
# Package a box for uploading into Atlas as an OpenShift Origin all-in-one release
#
# $1 : Origin version
# $2 : Public host name
__dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Parent of the directory holding this script (the project root).
__previous="$(cd "$__dir/.." && pwd)"
help() {
  echo "This script will create a .box file ready to upload into Hashicorp's Atlas"
  echo ""
  echo "   $0 <origin_version> <options>"
  echo ""
  echo "This will create a box named openshift3-origin-version.box"
  echo ""
  echo "Version format: 1.1.0 1.1.6 1.2.1.1"
}
[ "$#" -lt 1 ] && help && exit 1
ORIGIN_BRANCH="v$1"
# CONFIG defaults to the optional second argument.
: "${CONFIG:=$2}"
if [ -n "$CONFIG" ]; then   # was: [ ! -z $CONFIG ] — unquoted, breaks on spaces
  __config="CONFIG=$CONFIG"
fi
pushd "${__previous}"
# TODO: There's no checks, so it will run through the end even if it fails
# Execute the provisioning script.
# BUGFIX: a word produced by expanding ${__config} is NOT re-parsed as a
# variable assignment by bash, so the old `${__config} vagrant up` tried to
# run a command literally named "CONFIG=...". `env` passes it correctly,
# and expands to nothing extra when __config is empty/unset.
ORIGIN_BRANCH=${ORIGIN_BRANCH} env ${__config:-} vagrant up
# Before packaging we need to make sure that everything has been provisioned correcly and that provisioning has finished
# TODO:
sleep 300 # For now, we'll wait 5 minutes
# Clean the box
vagrant ssh -c 'sudo /utils/pre-package.sh'
# vagrant package will halt the box for you
if [ -f "release/openshift3-origin-${ORIGIN_BRANCH}.box" ]; then
   echo "As there was a box already with that name, we will move it, appending timestamp"
   mv "release/openshift3-origin-${ORIGIN_BRANCH}.box" "release/openshift3-origin-${ORIGIN_BRANCH}.box.$(date "+%Y%m%d%H%M%S")"
fi
vagrant package --base origin --output "release/openshift3-origin-${ORIGIN_BRANCH}.box" --vagrantfile release/Vagrantfile
echo "If you want to try this locally, add it as: "
echo ""
echo "   vagrant box add --name thesteve0/openshift-origin release/openshift3-origin-${ORIGIN_BRANCH}.box"
echo ""
echo "otherwise, upload it to Atlas"
popd
| true |
3e613bc154682f3dadeb38a6558899d7a0dd9fdb
|
Shell
|
Artimirche6/Shell-Programming
|
/Selection-Statement/compareNumMinMax.sh
|
UTF-8
| 797 | 3.546875 | 4 |
[] |
no_license
|
#!/bin/bash -x
# Generate three random 3-digit numbers and report the maximum and minimum.
# BUGFIX: %900 yields 100..999; the original %899 could never produce 999.
digit1=$(((RANDOM%900)+100))
digit2=$(((RANDOM%900)+100))
digit3=$(((RANDOM%900)+100))
# --- Maximum ---
if [ $digit1 -gt $digit2 ] && [ $digit1 -gt $digit3 ]
then
	echo "$digit1 is greater than $digit2 and $digit3"
	Max=$digit1
else
	if [ $digit2 -gt $digit3 ]
	then
		echo "$digit2 is greater than $digit1 and $digit3"
		Max=$digit2
	else
		# BUGFIX: message previously compared digit3 against itself
		echo "$digit3 is greater than $digit1 and $digit2"
		Max=$digit3
	fi
fi
# --- Minimum ---
if [ $digit1 -lt $digit2 ] && [ $digit1 -lt $digit3 ]
then
	echo "$digit1 is Less than $digit2 and $digit3"
	Min=$digit1
else
	if [ $digit2 -lt $digit3 ]
	then
		echo "$digit2 is Less than $digit1 and $digit3"
		Min=$digit2
	else
		# BUGFIX: message previously compared digit3 against itself
		echo "$digit3 is Less than $digit1 and $digit2"
		Min=$digit3
	fi
fi
| true |
984ff93b95805dfeb767db9549be18615eca0375
|
Shell
|
BabylonSix/web
|
/sj/sjb.sh
|
UTF-8
| 2,273 | 3.78125 | 4 |
[] |
no_license
|
# zsh function: branch a "simple javascript" (sj) project by copying the
# project directory to a sibling directory named after the branch.
# Depends on external helpers not defined here: sjError.CantBranch, sjo,
# ga, gc, trash, disableOutput/enableOutput, print, and zsh's zmv —
# NOTE(review): their exact semantics are assumed from usage; confirm.
sjb() {
	# branch simple javascript project
	# Argument dispatch: 0 args or '.' -> error; 1 arg -> branch cwd;
	# 2 args -> branch the named project directory.
	sjLogic() {
		if [[ $# -lt 3 ]]; then # when less than 3 arguments are entered
			case $# in
				'0') # for zero arguments
					sjError.CantBranch
					;;
				'1') # for one arguments
					if [[ $1 = . ]]; then # if a . is entered as an argument
						# throw a branching error
						sjError.CantBranch
					else
						if [[ -a ./.sj ]]; then # if active directory has .sj file
							#set projectName to current directory
							local sj_projectName=$(printf '%s\n' "${PWD##*/}")
							# set branchName to argument
							local sj_branchName=$1
							sjBranch
						else
							sjError.CantBranch
						fi
					fi
					;;
				'2') # for two arguments
					if [[ -a $1/.sj ]]; then # if arg1 is sj project
						#set projectName to arg1
						local sj_projectName=$1
						# set branchName to arg2
						local sj_branchName=$2
						cd $sj_projectName
						# update variable
						local sj_projectName=$(printf '%s\n' "${PWD##*/}")
						sjBranch
					else
						sjError.CantBranch
					fi
					;;
			esac
		else
			sjError.CantBranch
		fi
	} # end sjLogic
	# Copy the project to ../<branch>, strip generated css/html, rewrite
	# the project name inside index.pug, rename files, and commit.
	sjBranch() {
		# start sjBranch
		print "\n${GREEN}Branching Project:${NC}\n\n$sj_projectName \-\> $sj_branchName\n"
		# copy sjProject to branchName
		cp -r . ../$sj_branchName
		# go to the new directory
		cd ../$sj_branchName
		# erase old css and html files
		trash ./**/*.{css,html}*
		# suppress std-out
		disableOutput
		# replace all instances of <sj_projectName> with <sj_branchName> in index.pug file
		# NOTE(review): tee writes the file that cat is still reading —
		# relies on the pipeline buffering the whole (small) file; verify.
		cat ./src/pug/index.pug \
			| sed -E "s/$sj_projectName/$sj_branchName/g" \
			| tee ./src/pug/index.pug
		# re-enable std-out
		enableOutput
		# rename all project files of type <pName.*> to <bName.*>
		sjRenameFileNames $sj_projectName $sj_branchName
		# update git with branchName
		sjGitUpdateProjectName
		# start project
		print "\n"
		sjo .
	} # end sjBranch
	sjGitUpdateProjectName() {
		print "\n\n${GREEN}Source Control:${NC}\n"
		ga .
		gc changed project name from \<$sj_projectName\> to \<$sj_branchName\>
	} # end sjGitUpdateProjectName
	# rename multiple files in same directory
	sjRenameFileNames() {
		# declare local variables
		# NOTE(review): newNamePattern uses $2 twice — presumably intended;
		# confirm against zmv usage.
		local oldNamePattern="(**/)$1(.*)"
		local newNamePattern="\$1$2\$2"
		$(zmv $oldNamePattern $newNamePattern)
	} # end sj_rn
	#run sjLogic
	sjLogic $@
}
| true |
f5892649f028722f0d4e7bc087eeec6416ea7362
|
Shell
|
daleghent/openbmc
|
/meta-facebook/meta-bletchley/recipes-bletchley/board-type-checker/files/board-type-checker-fpb
|
UTF-8
| 2,466 | 3.796875 | 4 |
[
"MIT",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Instantiate a device on an I2C bus by writing "<compatible> <addr>" to
# the bus's sysfs new_device node.
# $1: bus number, $2: device address (e.g. 0x40), $3: device compatible string
add_i2c_device()
{
    local I2C_BUS=$1
    local DEV_ADDR=$2
    local DEVICE_COMP=$3
    local I2C_BUS_DIR="/sys/bus/i2c/devices/i2c-$I2C_BUS"
    local NEW_DEVICE="$I2C_BUS_DIR/new_device"   # fixed DEVIVE -> DEVICE typo
    echo "$DEVICE_COMP" "$DEV_ADDR" > "$NEW_DEVICE"
}
# Remove a device from an I2C bus by writing its address to the bus's
# sysfs delete_device node.
# $1: bus number, $2: device address
delete_i2c_device()
{
    local I2C_BUS=$1
    local DEV_ADDR=$2
    local I2C_BUS_DIR="/sys/bus/i2c/devices/i2c-$I2C_BUS"
    local DEL_DEVICE="$I2C_BUS_DIR/delete_device"   # fixed DEVIVE -> DEVICE typo
    echo "$DEV_ADDR" > "$DEL_DEVICE"
}
# Return 0 iff the chip at ($1=bus, $2=addr) identifies as a TI HDC1080.
# Reads two 16-bit ID registers via i2ctransfer and compares the raw
# "0xHH 0xHH" output strings.
is_valid_hdc1080()
{
    local I2C_BUS=$1
    local DEV_ADDR=$2
    # Register 0xFE: manufacturer ID; TI reads back as bytes 0x54 0x49.
    MFR_ID=$(i2ctransfer -y -f "${I2C_BUS}" w1@"${DEV_ADDR}" 0xfe r2)
    if [ "$MFR_ID" != "0x54 0x49" ]; then
        return 1;
    fi
    # Register 0xFF: device ID; HDC1080 reads back as bytes 0x10 0x50.
    DEV_ID=$(i2ctransfer -y -f "${I2C_BUS}" w1@"${DEV_ADDR}" 0xff r2)
    if [ "$DEV_ID" != "0x10 0x50" ]; then
        return 1;
    fi
    return 0;
}
# Main flow: detect whether the humidity sensor at bus 10 / 0x40 is an
# HDC1080 or an SI7021, (re)bind the matching kernel driver, and point the
# phosphor-virtual-sensor config symlink at the matching JSON file.
I2C_BUS=10
DEV_ADDR=0x40
I2C_BUS_DIR="/sys/bus/i2c/devices/i2c-$I2C_BUS"
DEV_ADDR_HEXSTR="$(printf %04X $DEV_ADDR)"
DEV_DIR="$I2C_BUS_DIR/$I2C_BUS-$DEV_ADDR_HEXSTR"
DEV_NAME_FILE="$DEV_DIR/name"   # holds the bound driver's compatible string

# Check chip type
if is_valid_hdc1080 "$I2C_BUS" "$DEV_ADDR"; then
    CHIP_TYPE="HDC1080"
    DEVICE_COMP="ti,hdc1080"
else
    CHIP_TYPE="SI7021"
    DEVICE_COMP="silabs,si7020"
fi

# Check and probe i2c device
if [ ! -f "$DEV_NAME_FILE" ]; then
    # i2c device not probed yet
    add_i2c_device "$I2C_BUS" "$DEV_ADDR" "$DEVICE_COMP"
else
    DEV_NAME=$(cat "$DEV_NAME_FILE")
    if [ "$DEV_NAME" != "$DEVICE_COMP" ]; then
        # incorrect driver probed, delete and add again
        delete_i2c_device "$I2C_BUS" "$DEV_ADDR"
        add_i2c_device "$I2C_BUS" "$DEV_ADDR" "$DEVICE_COMP"
    fi
fi

VIRT_SNR_CONF="/usr/share/phosphor-virtual-sensor/virtual_sensor_config.json"
HDC1080_VIRT_SNR_CONF="/usr/share/phosphor-virtual-sensor/virtual_sensor_config_hdc1080.json"
SI7021_VIRT_SNR_CONF="/usr/share/phosphor-virtual-sensor/virtual_sensor_config_si7021.json"

# Setup virtual_sensor_config.json for phosphor-virtual-sensor
case "$CHIP_TYPE" in
    "HDC1080")
        REQUIRED_CONF_PATH="$HDC1080_VIRT_SNR_CONF"
        ;;
    "SI7021")
        REQUIRED_CONF_PATH="$SI7021_VIRT_SNR_CONF"
        ;;
    *)
        # unreachable with the detection above; HDC1080 kept as safe default
        REQUIRED_CONF_PATH="$HDC1080_VIRT_SNR_CONF"
        ;;
esac

if [ ! -e "$VIRT_SNR_CONF" ]; then
    ln -s "$REQUIRED_CONF_PATH" "$VIRT_SNR_CONF"
else
    # BUGFIX: quote the realpath argument (was unquoted)
    REAL_CONF_PATH="$(realpath "$VIRT_SNR_CONF")"
    if [ "$REAL_CONF_PATH" != "$REQUIRED_CONF_PATH" ]; then
        # symlink points at the wrong chip's config: replace it
        rm "$VIRT_SNR_CONF"
        ln -s "$REQUIRED_CONF_PATH" "$VIRT_SNR_CONF"
    fi
fi
| true |
2414720567de522fb018a29a99c6e20270679222
|
Shell
|
dashpole/allocatable
|
/scripts/run_binary.sh
|
UTF-8
| 784 | 3.875 | 4 |
[] |
no_license
|
#!/bin/sh
# Download $BINARY from the "allocatable" GCS bucket into /var/lib/kubelet,
# execute it, then remove it. Best-effort: every failure is logged and the
# script still exits 0.
# NOTE(review): expects $BINARY to be set in the environment — confirm.
set +e   # keep going on errors; every step below is checked explicitly

echo "starting shell script"

# Remove the downloaded binary and always exit 0.
cleanup ()
{
	if ! sudo rm "$BINARY"; then
		echo "failed sudo rm $BINARY"
	fi
	exit 0
}

# cd into directory that lets us create and execute binaries
if ! cd /var/lib/kubelet; then
	echo "failed cd /var/lib/kubelet"
	exit 0
fi

# create the file
if ! sudo touch "$BINARY"; then
	echo "failed sudo touch $BINARY"
	cleanup
fi

# Make the file writable/executable by the current (non-root) user.
# BUGFIX: "chmod +777" is a non-standard mode; use the plain octal form.
if ! sudo chmod 777 "$BINARY"; then
	echo "failed sudo chmod 777 $BINARY"
	cleanup
fi

# download binary
# BUGFIX: -f makes curl fail on HTTP errors instead of saving the error
# page as the "binary". The redirect runs as the current user, which
# works because the file is mode 777.
if ! sudo curl -f https://storage.googleapis.com/allocatable/$BINARY > "$BINARY"; then
	echo "failed sudo curl https://storage.googleapis.com/allocatable/$BINARY > $BINARY"
	cleanup
fi

# execute binary
if ! ./"$BINARY"; then
	echo "failed ./$BINARY"
	cleanup
fi
cleanup
| true |
32b57cfdeaf5606f7c3b9eb65efab54fc9e7cb9c
|
Shell
|
apache/zookeeper
|
/zookeeper-contrib/zookeeper-contrib-zkpython/src/test/run_tests.sh
|
UTF-8
| 1,630 | 3.46875 | 3 |
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
#!/bin/sh
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Usage: run_tests.sh testdir [logdir]
# logdir is optional, defaults to cwd

set -e

# get the number of command-line arguments given
ARGC=$#

# check to make sure enough arguments were given or exit
if [ "$ARGC" -lt 2 ]; then
    export ZKPY_LOG_DIR="."
else
    export ZKPY_LOG_DIR="$2"
fi

# Find the build directory containing zookeeper.so
# (backticks replaced by $( ), expansions quoted)
SO_PATH=$(find ./target/ -name 'zookeeper*.so' | head -1)
PYTHONPATH=$(dirname "$SO_PATH")
LIB_PATH=../../zookeeper-client/zookeeper-client-c/target/c/.libs

# Glob directly instead of parsing ls output (handles odd filenames).
for test in "$1"/*_test.py;
do
    echo "Running $test"
    echo "Running LD_LIBRARY_PATH=$LIB_PATH:$LD_LIBRARY_PATH DYLD_LIBRARY_PATH=$LIB_PATH:$DYLD_LIBRARY_PATH PYTHONPATH=$PYTHONPATH python $test"
    LD_LIBRARY_PATH=$LIB_PATH:$LD_LIBRARY_PATH DYLD_LIBRARY_PATH=$LIB_PATH:$DYLD_LIBRARY_PATH PYTHONPATH=$PYTHONPATH python "$test"
done
| true |
f0777428a52bd84902b8d525067917576afeacba
|
Shell
|
it2media/scripts
|
/Bash/docker_cleanup.sh
|
UTF-8
| 6,024 | 3.953125 | 4 |
[] |
no_license
|
#!/usr/bin/env bash
#
# Removes old stopped docker container
#######################################
# Check if this is a running docker container with that entrypoint name
# Arguments:
# $1: The docker containers com.docker.compose.service name
# $2: The docker inspect json
# Returns:
# Exit code 0 on success, 1 on failure
#######################################
docker_is_running() {
  # Succeed iff the inspected container ($2 = docker-inspect JSON) belongs
  # to compose service $1 AND is currently in the "running" state.
  local svc running state
  svc=$(echo "$2" | jq -r '.[0] | .Config.Labels["com.docker.compose.service"]')
  [[ "$svc" == "$1" ]] || return 1   # different service -> false
  running=$(echo "$2" | jq -r '.[0] | .State.Running')
  state=$(echo "$2" | jq -r '.[0] | .State.Status')
  # Both flags must agree before we call the container "running".
  [[ "$running" == true && "$state" == "running" ]]
}
#######################################
# Check if there is a running docker container with that entrypoint name
# Arguments:
# $1: The docker containers com.docker.compose.service name
# Returns:
# Exit code 0 for true, 1 for false
#######################################
docker_exists_running() {
  # Succeed iff at least one container on this host is a running instance
  # of compose service $1.
  local cid inspect_json
  for cid in $(docker container ls -a --no-trunc | awk 'NR > 1 {print $1}'); do
    inspect_json=$(docker inspect "$cid")
    if docker_is_running "$1" "$inspect_json"; then
      return 0   # found a running container for this service
    fi
  done
  return 1   # none found
}
#######################################
# Remove if this is an old intermediate container to launch the real container with docker-compose (compose-launcher)
# Arguments:
# $1: The docker inspect json (string)
# $2: Current unix time in seconds since epoch (int)
# $3: The FinishedAt timespan of the docker inspect json in unix time (seconds since epoch as int)
# $4: If container is definitivly stopped (not running or restarting and status exited) (true or false)
# Returns:
# None
#######################################
# Remove an old compose-launcher intermediate container.
# $1: docker inspect JSON
# $2: current unix time (seconds since epoch)
# $3: container FinishedAt time (seconds since epoch)
# $4: "true" if the container is definitively stopped
docker_remove_intermediate_container() {
  local now_t=$2
  local finished_at_t=$3
  # BUGFIX: read the is_stopped parameter ($4) instead of relying on the
  # caller's local variable being visible through dynamic scoping.
  local is_stopped=$4
  local image=$(echo "$1" | jq -r '.[0] | .Config.Image')
  echo "$image"
  if [[ "$image" == null ]]; then
    return 0 # if there should be no image name set, simply silently return
  else
    local imagename=".it2media.de/compose-launcher:latest"
    # BUGFIX: literal substring match — the old `=~` regex treated the
    # dots in $imagename as wildcards.
    if [[ $image == *"$imagename"* ]]; then
      echo "image: $imagename found in $image"
      echo "now_t: $now_t"
      echo "finished_at_t: $finished_at_t"
      echo "is_stopped: $is_stopped"
      if [[ $is_stopped == true ]]; then
        local m=5 # 5 minutes should be long enough for the intermediate container
        echo "m: $m"
        local difference=$(( 60*m ))
        local calculated_diff=$(( now_t-finished_at_t ))
        echo "calculated_diff: $calculated_diff"
        if [[ $calculated_diff -gt $difference ]]; then
          echo "true: $calculated_diff > $difference"
          local id=$(echo "$1" | jq -r '.[0] | .Id')
          echo "docker rm $id"
          docker rm "$id"
        fi
      fi
    else
      echo "image: $imagename not found in $image"
    fi
  fi
}
#######################################
# Removes old stopped docker container
# Arguments:
# $1: The docker inspect json
# $2: The timespan in h
# Returns:
# None
#######################################
docker_remove_stopped() {
  # $1: `docker inspect` JSON for a single container
  # $2: age threshold in hours
  local now=$(date +%Y-%m-%dT%H:%M:%S)
  local now_t=`date --date="$now" +%s`
  local finished_at=$(echo "$1" | jq -r '.[0] | .State.FinishedAt')
  local finished_at_t=`date --date="$finished_at" +%s`
  local service=$(echo "$1" | jq -r '.[0] | .Config.Labels["com.docker.compose.service"]')
  local restarting=$(echo "$1" | jq -r '.[0] | .State.Restarting')
  local running=$(echo "$1" | jq -r '.[0] | .State.Running')
  local status=$(echo "$1" | jq -r '.[0] | .State.Status')
  # "definitively stopped": not running, not restarting, status exited
  local is_stopped=false; [[ $restarting == false && $running == false && $status == "exited" ]] && is_stopped=true || is_stopped=false
  if [[ "$service" == null ]]; then
    # No compose service label: treat as a compose-launcher intermediate.
    # NOTE: the callee also reads $is_stopped via dynamic scoping.
    echo "null => calling docker_remove_intermediate_container"
    docker_remove_intermediate_container "$1" "$now_t" "$finished_at_t" "$is_stopped"
  else
    echo "$service"
    echo "now: $now"
    echo "now_t: $now_t"
    if [[ -z "$2" ]]; then
      echo "ERROR: You need to provide a second argument with the timespan in h!"
      return 1
    else
      local h="$2"
      echo "h: $h"
      local difference=$(( 3600*h ))   # threshold in seconds
      echo "difference: $difference"
      echo "finished_at: $finished_at"
      echo "finished_at_t: $finished_at_t"
      local calculated_diff=$(( now_t-finished_at_t ))   # container age since exit
      echo "calculated_diff: $calculated_diff"
      if [[ $calculated_diff -gt $difference ]]; then
        echo "true: $calculated_diff > $difference"
        echo "restarting: $restarting"
        echo "running: $running"
        echo "status: $status"
        if [[ $is_stopped == true ]]; then
          # Only remove the stopped container if a *running* container
          # with the same service name exists (i.e. it was replaced).
          docker_exists_running "$service"
          if [[ $? -eq 0 ]]; then
            echo "docker_exists_running: 0 / $is_stopped (There is a running service with the same name.)"
            local id=$(echo "$1" | jq -r '.[0] | .Id')
            echo "docker rm $id"
            docker rm "$id"
          else
            echo "No running service found with service name: $service"
          fi
        fi
      else
        echo "false: $calculated_diff > $difference"
      fi
    fi
  fi
}
#######################################
# Removes old stopped docker containers
# Arguments:
# $1: The timespan in h
# Returns:
# None
#######################################
docker_cleanup() {
  # Walk every container (skipping the `docker container ls` header row)
  # and hand its inspect JSON to docker_remove_stopped with threshold $1.
  local cid json
  for cid in $(docker container ls -a --no-trunc | awk 'NR > 1 {print $1}'); do
    json=$(docker inspect "$cid")
    echo ">>>"
    echo "$cid"
    docker_remove_stopped "$json" "$1"
    echo "<<<"
  done
  return 0
}
| true |
8993af07718c5b96fa1d9ceeca250792298f6486
|
Shell
|
Beramos/Snippets
|
/lifeHackScripts/convertPDF2PNG.sh
|
UTF-8
| 173 | 2.953125 | 3 |
[] |
no_license
|
#!/bin/bash
# Convert every PDF in the current directory to a 1920x1080 PNG using
# ImageMagick's `convert`.
# BUGFIX: the original first line was "# !/bin/bash" — the space made it a
# plain comment, not a shebang.
for f in *.pdf; do
	[ -e "$f" ] || continue   # skip the literal "*.pdf" when no PDFs exist
	convert -density 600x600 -resize 1920x1080 -quality 90 "$f" "${f%.pdf}.png"
done
| true |
926680baddbb0adee15742b8f86cb29677761003
|
Shell
|
lifenoodles/robo-sumo
|
/scripts/build.sh
|
UTF-8
| 1,020 | 3.65625 | 4 |
[] |
no_license
|
#!/usr/bin/env bash
# Build a robot firmware target from anywhere inside the robo-sumo tree.
# Usage: build [-d] <phobos|titan>    (-d = debug build)

if [ "$#" -eq 0 ] || [ "$#" -gt 2 ]; then
    echo "Usage: build [robot-id]"
    exit 0   # kept exit 0 to preserve original behavior
fi

# Walk up the directory tree until we sit in the repository root.
CURRENT=$(pwd)
BASENAME=$(basename "$CURRENT")
while [ "$BASENAME" != "robo-sumo" ]; do
    cd ..
    CURRENT=$(pwd)
    BASENAME=$(basename "$CURRENT")
done
cd src

# Parse arguments: -d toggles debug, anything else is the robot id.
debug="0"
sumo_id=""
while [ "$#" -gt 0 ]; do
    case "$1" in
        "-d")
            debug="1"
            ;;
        *)
            sumo_id=$1
            ;;
    esac
    shift
done

# Map the robot id onto its makefile name and preprocessor define.
case "$sumo_id" in
    "phobos")
        name="phobos"
        id="PHOBOS"
        ;;
    "titan")
        name="titan"
        id="TITAN"
        ;;
    *)
        echo "Unrecognised Sumo ID"
        exit 0
        ;;
esac
path="../build/make-$name"

if [ "$debug" == "1" ]; then
    EXTRA_FLAGS="-DDEBUG -D$id" OBJDIR="../build/bin/$name-debug" make -f "$path"
else
    # BUGFIX: the release build previously also defined -DDEBUG, and the
    # objdir contained a stray space ("../build/bin/ $name").
    EXTRA_FLAGS="-D$id" OBJDIR="../build/bin/$name" make -f "$path"
fi

if [ "$?" = "0" ]; then
    echo "Compiled OK!"
else
    echo "ERROR while compiling!"
fi
| true |
b1526dbf2501f9168d1ed7d9e9839f0c9a691bda
|
Shell
|
liuqi605752176/HuaQing-Note
|
/7.shell以及makefile/1/expr.sh
|
UTF-8
| 90 | 2.90625 | 3 |
[] |
no_license
|
# Read two numbers from stdin and print their product.
read -r var1
read -r var2
# Arithmetic expansion replaces `expr $var1 \* $var2` — no external
# process, and no need to escape * or pad the operator with spaces.
ret=$((var1 * var2))
echo "$ret"
| true |
b5b91950c61a014eccf7a09853a290347c2a56bd
|
Shell
|
dhrishi/distributed-compliance-ledger
|
/genlocalnetconfig.sh
|
UTF-8
| 6,153 | 2.78125 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright 2020 DSR Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Generates a 4-validator local DCL network layout under ./localnet
# (plus an optional observer node when DCL_OBSERVERS is non-empty):
# client keys, per-node configs, genesis accounts, gentxs and a shared
# genesis file with the persistent-peers list wired up.
set -euo pipefail
DCL_OBSERVERS="${DCL_OBSERVERS:-}"
SED_EXT=
if [ "$(uname)" == "Darwin" ]; then
    # Mac OS X sed needs the file extension when -i flag is used. Keeping it empty as we don't need backupfile
    # NOTE(review): SED_EXT holds the literal two characters '' and is
    # expanded unquoted below so it disappears on Linux — confirm BSD sed
    # accepts this as an empty backup suffix.
    SED_EXT="''"
fi
rm -rf ~/.dclcli
rm -rf ~/.dcld
rm -rf localnet
mkdir localnet localnet/client localnet/node0 localnet/node1 localnet/node2 localnet/node3
if [[ -n "$DCL_OBSERVERS" ]]; then
    mkdir localnet/observer0
fi
# client
dclcli config chain-id dclchain
dclcli config output json
dclcli config indent true
dclcli config trust-node false
echo 'test1234' | dclcli keys add jack
echo 'test1234' | dclcli keys add alice
echo 'test1234' | dclcli keys add bob
echo 'test1234' | dclcli keys add anna
cp -r ~/.dclcli/* localnet/client
# node 0
dcld init node0 --chain-id dclchain
jack_address=$(dclcli keys show jack -a)
jack_pubkey=$(dclcli keys show jack -p)
alice_address=$(dclcli keys show alice -a)
alice_pubkey=$(dclcli keys show alice -p)
bob_address=$(dclcli keys show bob -a)
bob_pubkey=$(dclcli keys show bob -p)
anna_address=$(dclcli keys show anna -a)
anna_pubkey=$(dclcli keys show anna -p)
dcld add-genesis-account --address=$jack_address --pubkey=$jack_pubkey --roles="Trustee,NodeAdmin"
dcld add-genesis-account --address=$alice_address --pubkey=$alice_pubkey --roles="Trustee,NodeAdmin"
dcld add-genesis-account --address=$bob_address --pubkey=$bob_pubkey --roles="Trustee,NodeAdmin"
dcld add-genesis-account --address=$anna_address --pubkey=$anna_pubkey --roles="NodeAdmin"
echo 'test1234' | dcld gentx --from jack
mv ~/.dcld/* localnet/node0
# node 1
dcld init node1 --chain-id dclchain
dcld add-genesis-account --address=$jack_address --pubkey=$jack_pubkey --roles="Trustee,NodeAdmin"
dcld add-genesis-account --address=$alice_address --pubkey=$alice_pubkey --roles="Trustee,NodeAdmin"
dcld add-genesis-account --address=$bob_address --pubkey=$bob_pubkey --roles="Trustee,NodeAdmin"
dcld add-genesis-account --address=$anna_address --pubkey=$anna_pubkey --roles="NodeAdmin"
echo 'test1234' | dcld gentx --from alice
mv ~/.dcld/* localnet/node1
# node 2
dcld init node2 --chain-id dclchain
dcld add-genesis-account --address=$jack_address --pubkey=$jack_pubkey --roles="Trustee,NodeAdmin"
dcld add-genesis-account --address=$alice_address --pubkey=$alice_pubkey --roles="Trustee,NodeAdmin"
dcld add-genesis-account --address=$bob_address --pubkey=$bob_pubkey --roles="Trustee,NodeAdmin"
dcld add-genesis-account --address=$anna_address --pubkey=$anna_pubkey --roles="NodeAdmin"
echo 'test1234' | dcld gentx --from bob
mv ~/.dcld/* localnet/node2
# node 3
dcld init node3 --chain-id dclchain
dcld add-genesis-account --address=$jack_address --pubkey=$jack_pubkey --roles="Trustee,NodeAdmin"
dcld add-genesis-account --address=$alice_address --pubkey=$alice_pubkey --roles="Trustee,NodeAdmin"
dcld add-genesis-account --address=$bob_address --pubkey=$bob_pubkey --roles="Trustee,NodeAdmin"
dcld add-genesis-account --address=$anna_address --pubkey=$anna_pubkey --roles="NodeAdmin"
echo 'test1234' | dcld gentx --from anna
# cp (not mv): ~/.dcld must keep node3's config so collect-gentxs /
# validate-genesis below can run against it when no observer is set up.
cp -r ~/.dcld/* localnet/node3
if [[ -d "localnet/observer0" ]]; then
    rm -rf ~/.dcld/*
    # observer0
    dcld init observer0 --chain-id dclchain
    dcld add-genesis-account --address=$jack_address --pubkey=$jack_pubkey --roles="Trustee,NodeAdmin"
    dcld add-genesis-account --address=$alice_address --pubkey=$alice_pubkey --roles="Trustee,NodeAdmin"
    dcld add-genesis-account --address=$bob_address --pubkey=$bob_pubkey --roles="Trustee,NodeAdmin"
    dcld add-genesis-account --address=$anna_address --pubkey=$anna_pubkey --roles="NodeAdmin"
    cp -r ~/.dcld/* localnet/observer0
fi
# Collect all validator creation transactions
mkdir -p ~/.dcld/config/gentx
cp localnet/node0/config/gentx/* ~/.dcld/config/gentx
cp localnet/node1/config/gentx/* ~/.dcld/config/gentx
cp localnet/node2/config/gentx/* ~/.dcld/config/gentx
cp localnet/node3/config/gentx/* ~/.dcld/config/gentx
# Embed them into genesis
dcld collect-gentxs
dcld validate-genesis
# Update genesis for all nodes
cp ~/.dcld/config/genesis.json localnet/node0/config/
cp ~/.dcld/config/genesis.json localnet/node1/config/
cp ~/.dcld/config/genesis.json localnet/node2/config/
cp ~/.dcld/config/genesis.json localnet/node3/config/
if [[ -d "localnet/observer0" ]]; then
    cp ~/.dcld/config/genesis.json localnet/observer0/config/
fi
# Find out node ids
id0=$(ls localnet/node0/config/gentx | sed 's/gentx-\(.*\).json/\1/')
id1=$(ls localnet/node1/config/gentx | sed 's/gentx-\(.*\).json/\1/')
id2=$(ls localnet/node2/config/gentx | sed 's/gentx-\(.*\).json/\1/')
id3=$(ls localnet/node3/config/gentx | sed 's/gentx-\(.*\).json/\1/')
# Persistent peers (node_id@host:port) for all four validators
peers="[email protected]:26656,[email protected]:26656,[email protected]:26656,[email protected]:26656"
# Update address book of the first node
sed -i $SED_EXT "s/persistent_peers = \"\"/persistent_peers = \"$peers\"/g" localnet/node0/config/config.toml
if [[ -d "localnet/observer0" ]]; then
    sed -i $SED_EXT "s/persistent_peers = \"\"/persistent_peers = \"$peers\"/g" localnet/observer0/config/config.toml
fi
# Make RPC endpoint available externally
for node_id in node0 node1 node2 node3 observer0; do
    if [[ -d "localnet/${node_id}" ]]; then
        sed -i $SED_EXT 's/laddr = "tcp:\/\/127.0.0.1:26657"/laddr = "tcp:\/\/0.0.0.0:26657"/g' "localnet/${node_id}/config/config.toml"
        sed -i $SED_EXT 's/prometheus = false/prometheus = true/g' "localnet/${node_id}/config/config.toml"
    fi
done
| true |
ea97bcff9a6c69a99d5031bbcacf9ed6afb17dff
|
Shell
|
jlamalop/kolla
|
/tools/stop-all-pods
|
UTF-8
| 557 | 3.484375 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Delete every pod known to kubectl.
#
# Pods spawned by a replication controller are not cleaned up with their
# definition file; they must first be deleted individually by UUID.
cd "$(git rev-parse --show-toplevel)" || exit 1

# UUID-named pods (they contain a dash) left behind by replication controllers.
uuids=$(kubectl get pods -o json | jq '.[][].id' 2>/dev/null | grep -o -E '"[a-fA-F|0-9|\-]*' | cut -c 2- | grep '\-')
for uuid in $uuids; do
    if [ -n "$uuid" ]; then
        kubectl delete pod "$uuid"
    fi
done

pods=$(kubectl get pods -o json | jq '.[][].id' 2>/dev/null)
# Strip the double quotes jq leaves around each id.
# (The original used ${pods//\"/}, a bashism, under a /bin/sh shebang.)
pods=$(printf '%s\n' "$pods" | tr -d '"')
for pod in $pods; do
    kubectl delete -f "k8s/pod/${pod}-pod.yaml" 2>/dev/null
done
| true |
50265092bb374b7a183f50c6213f377da8ffe650
|
Shell
|
rozzzly/rozfiguration
|
/configs/misc/run-for-x-min-then-kill.sh
|
UTF-8
| 213 | 2.515625 | 3 |
[] |
no_license
|
#!/bin/bash
# Scaffold for running a command for N minutes and then signalling it.
# Only the environment setup is active; the timeout/kill logic below is
# kept as commented-out reference code (see the linked SO question).
source ./setup.sh
#n=5
#./tesh.sh &
#pid=$!
#at now + $n minutes <<<"kill -HUP $pid"
#echo "after derp"
# http://stackoverflow.com/questions/4423934/bash-run-a-command-for-n-minutes-then-sighup-it
| true |
cb373980ad933cd16dfc7438c4cfb45810e71028
|
Shell
|
nikkoenggaliano/AlProg
|
/bash/random.sh
|
UTF-8
| 129 | 2.640625 | 3 |
[] |
no_license
|
#!/bin/bash
# Prompt for an inclusive [min, max] range and print one random integer
# from it using shuf(1).
read -r -p "input minimum value -> " min
read -r -p "input maximum value -> " max
# Quote the range so an empty/whitespace value cannot split into extra args.
echo "Result : ";shuf -i "${min}-${max}" -n 1
| true |
450b82b126464c3139bfff5e98d56b4a15ce0f82
|
Shell
|
bdecoste/proxy-rpm
|
/istio-proxy/recipes/boringssl.sh
|
UTF-8
| 521 | 2.84375 | 3 |
[] |
no_license
|
#!/bin/bash
# Build the vendored BoringSSL sources and install headers plus static
# libraries into $THIRDPARTY_BUILD.
#
# Required environment:
#   RPM_BUILD_DIR    - root of the RPM build tree (source location)
#   THIRDPARTY_BUILD - install prefix for include/ and lib/
#   CFLAGS/CXXFLAGS/CPPFLAGS - compiler flags forwarded to CMake
set -e

#COMMIT=a20bb7ff8bb5057065a2e7941249773f9676cf45 # chromium-64.0.3282.119
#git clone https://boringssl.googlesource.com/boringssl
#cd boringssl
#git reset --hard "$COMMIT"

# Use the pre-fetched sources instead of cloning; quoted so a build root
# containing spaces survives.
cp -rf "${RPM_BUILD_DIR}/istio-proxy/boringssl" .
cd boringssl
cmake -DCMAKE_CXX_FLAGS:STRING="${CXXFLAGS} ${CPPFLAGS}" \
      -DCMAKE_C_FLAGS:STRING="${CFLAGS} ${CPPFLAGS}" \
      -DCMAKE_BUILD_TYPE=RelWithDebInfo .
make VERBOSE=1

cp -r include/* "$THIRDPARTY_BUILD"/include
cp ssl/libssl.a "$THIRDPARTY_BUILD"/lib
cp crypto/libcrypto.a "$THIRDPARTY_BUILD"/lib
| true |
25a1a7d3b32363e541bb77e042e4a91a2d718ecd
|
Shell
|
0dataexpert0/openvpn-docker-alpine
|
/init.sh
|
UTF-8
| 729 | 2.84375 | 3 |
[] |
no_license
|
#!/bin/sh
# Bootstrap an OpenVPN PKI with easy-rsa and copy the generated material
# into volumes/keys for the docker image built here.  Every generation
# step is skipped when its output already exists, so the script is
# idempotent.
set -e

docker build . -t openvpn > /dev/null

if [ ! -d easy-rsa ]; then
    git clone https://github.com/OpenVPN/easy-rsa.git
fi
cd easy-rsa/easyrsa3/
cp vars.example vars

# Make sure the destination for generated keys exists before copying into it.
mkdir -p ../../volumes/keys

if [ ! -d pki ]; then
    ./easyrsa init-pki
fi
# Diffie-Hellman parameters
if [ ! -f ../../volumes/keys/dh.pem ]; then
    ./easyrsa gen-dh
    cp pki/dh.pem ../../volumes/keys
fi
# Certificate authority
if [ ! -f ../../volumes/keys/ca.crt ]; then
    ./easyrsa build-ca nopass
    cp pki/ca.crt ../../volumes/keys
fi
# Generate server keys
if [ ! -f ../../volumes/keys/vpn-server.crt ]; then
    ./easyrsa build-server-full vpn-server nopass
    cp pki/issued/vpn-server.crt ../../volumes/keys
    cp pki/private/vpn-server.key ../../volumes/keys
fi
# Generate client keys
#./easyrsa build-server-full USER nopass
16b5e65e35d50fb8bb82aed6f6f51037e9db3100
|
Shell
|
Benoit-LAGUET/my-kubernetes-advanced-lab-02
|
/Infrastructure/pre/create-aws-bucket.sh
|
UTF-8
| 749 | 3.25 | 3 |
[] |
no_license
|
#!/bin/bash
# Idempotently create the S3 bucket used for Terraform state: versioned,
# with all public access blocked.
# Fix: the original shebang "#/bin/bash" was missing the '!'.
BucketName=lab-terraformstate-bucket-02
Region=eu-central-1
AwsAccountId=$(aws sts get-caller-identity --query 'Account' --output text)

# head-bucket exits non-zero when the bucket does not exist or is not
# owned by this account.
aws s3api head-bucket --bucket "$BucketName" --expected-bucket-owner "$AwsAccountId" >/dev/null
BucketExist=$?

if [ $BucketExist -ne 0 ]
then
    aws s3 mb "s3://$BucketName" \
        --region "$Region"
    aws s3api put-bucket-versioning \
        --bucket "$BucketName" \
        --versioning-configuration Status=Enabled \
        --region "$Region"
    aws s3api put-public-access-block \
        --bucket "$BucketName" \
        --public-access-block-configuration '{"BlockPublicAcls": true, "IgnorePublicAcls": true, "BlockPublicPolicy": true, "RestrictPublicBuckets": true}'
else
    echo "Bucket $BucketName already exist"
fi
| true |
9c2abfc3464500a7ee4a994cd278bf54f3f6b9b0
|
Shell
|
michel-rodrigues/viggio_infrastructure
|
/apply_cluster_manifests.sh
|
UTF-8
| 856 | 2.671875 | 3 |
[] |
no_license
|
#!/bin/sh
# Apply every Kubernetes manifest for the stack (redis, frontend, django,
# celery), install the nginx ingress controller via helm, then apply the
# ingress resource.

# Run relative to the directory this script lives in; abort if that fails
# so manifests are not applied from the wrong working directory.
cd "$(dirname "$0")" || exit 1

kubectl apply -f redis/redis-data-persistentvolumeclaim.yaml
kubectl apply -f redis/redis-deployment.yaml
kubectl apply -f redis/redis-service.yaml
kubectl apply -f frontend/frontend-deployment.yaml
kubectl apply -f frontend/frontend-service.yaml
kubectl apply -f django/django-env-configmap.yaml
kubectl apply -f django/django-deployment.yaml
kubectl apply -f django/django-service.yaml
kubectl apply -f celery/celery-deployment.yaml

helm repo add stable https://kubernetes-charts.storage.googleapis.com/
OUTPUT=$(helm install nginx-ingress nginx/nginx-ingress --set rbac.create=true --set controller.publishService.enabled=true)
# BUGFIX: the previous check `[ -z "${OUTPUT##*DEPLOYED*}" ]` was also true
# for an empty $OUTPUT (e.g. when helm itself failed); this case pattern
# only matches when DEPLOYED is really present.
case "$OUTPUT" in
    *DEPLOYED*)
        echo "helm installed nginx-ingress"
        ;;
    *)
        echo "ERROR: nginx-ingress install status isn't DEPLOYED"
        ;;
esac

kubectl apply -f ingress/ingress-resource.yaml
| true |
16991e5d5a37d31160a5bdb38b8a0068d55cc029
|
Shell
|
1qq1qq1/scripts
|
/use_mem.el7.sh
|
UTF-8
| 217 | 2.921875 | 3 |
[] |
no_license
|
#!/bin/sh
# Print the percentage of physical memory actually in use
# (used minus buffers/cache, relative to total), e.g. "ACTUAL=12.34%".
#
# Fixes vs. the original:
#   * free(1) is invoked once, so total/used/buff-cache come from the same
#     snapshot instead of three slightly different ones;
#   * the bash-only ${ACTUAL:0:5} substring was replaced with cut(1) to
#     match the /bin/sh shebang.
set -- $(free | grep '^Mem' | awk '{print $2, $3, $6}')
TOTAL=$1
USED=$2
BUCA=$3
ACTUAL=$(echo "100*($USED-$BUCA)/$TOTAL" | bc -l)
# Keep the original presentation: first five characters, then a percent sign.
echo "ACTUAL=$(printf '%s' "$ACTUAL" | cut -c1-5)%"
| true |
f497bfc5b105f259d3483b83ba1d9ef91605d75a
|
Shell
|
pahomov-and/WebRTC_Camera
|
/build_webrtc.sh
|
UTF-8
| 3,300 | 3.609375 | 4 |
[] |
no_license
|
# Interactive helper for fetching and building WebRTC with depot_tools.
# Usage: ./build_webrtc.sh [amd64|i386|arm64]   (default: amd64)
# Presents a select(1) menu; each option updates PS3 so the prompt shows
# the last action performed.
ROOT_DIR=$(pwd)
ARCH=amd64
# NOTE(review): $1 is referenced unquoted; with no argument the usage
# branch below is taken and ARCH stays amd64.
case $1 in
    amd64*|i386*|arm64*) echo "OK!"; ARCH=$1 ;;
    *) echo "$0 [default amd64]|[i386]|[arm64]" ;;
esac
export PATH=$PATH:$ROOT_DIR/depot_tools
# export GYP_CROSSCOMPILE=1
# Ninja targets built by the "Build WebRTC" menu entry.
NINJA_TARGET="webrtc rtc_json jsoncpp builtin_video_decoder_factory builtin_video_encoder_factory peerconnection p2p_server_utils task_queue default_task_queue_factory"
echo "root dir: $ROOT_DIR"
options=(
    "Install depot_tools" \
    "Fetch WebRtc" \
    "Install sysroot" \
    "Build WebRTC" \
    "Clean build" \
    "Quit")
PS3="Please enter your choice: "
select opt in "${options[@]}"
do
    case $opt in
        "${options[0]}")
            ################################################ Install depot_tools
            echo "$opt"
            git clone https://chromium.googlesource.com/chromium/tools/depot_tools.git
            PS3="Please enter your choice ($opt): "
            ;;
        "${options[1]}")
            ################################################ Fetch WebRtc
            echo "$opt"
            cd $ROOT_DIR/webrtc
            #fetch --no-history webrtc
            fetch --no-history --nohooks webrtc
            #./build/linux/sysroot_scripts/install-sysroot.py --arch=$ARCH
            #gclient sync
            gclient sync --force
            PS3="Please enter your choice ($opt): "
            ;;
        "${options[2]}")
            ################################################ Install sysroot
            echo "$opt"
            $ROOT_DIR/webrtc/src//build/linux/sysroot_scripts/install-sysroot.py --arch=$ARCH
            PS3="Please enter your choice ($opt): "
            ;;
        "${options[3]}")
            ################################################ Build WebRTC
            pushd $ROOT_DIR/webrtc/src/
            if [[ $ARCH == "amd64" ]]; then
                echo "ARCH: $ARCH"
                #exit 0
                #gn gen out/$ARCH/Release --args='use_rtti=true rtc_desktop_capture_supported=false target_cpu="x64" is_debug=false symbol_level=2 use_custom_libcxx=false treat_warnings_as_errors=false rtc_use_h264=true ffmpeg_branding="Chrome" rtc_include_tests=false use_ozone=true rtc_include_pulse_audio=false '
                gn gen out/$ARCH/Release --args='is_chrome_branded=true use_rtti=true rtc_desktop_capture_supported=false is_debug=false symbol_level=2 use_custom_libcxx=false treat_warnings_as_errors=false rtc_use_h264=true ffmpeg_branding="Chrome" rtc_include_tests=false use_ozone=true rtc_include_pulse_audio=false '
            elif [[ $ARCH == "i386" ]]; then
                echo "ARCH: $ARCH"
                exit 0
            elif [[ $ARCH == "arm64" ]]; then
                echo "ARCH: $ARCH"
                exit 0
                # NOTE(review): the gn gen line below is unreachable — the
                # exit 0 above always fires first; kept as-is.
                gn gen out/$ARCH/Release --args='is_chrome_branded=true use_rtti=true rtc_desktop_capture_supported=false target_cpu="arm64" is_debug=false symbol_level=2 use_custom_libcxx=false treat_warnings_as_errors=false rtc_use_h264=true ffmpeg_branding="Chrome" rtc_include_tests=false use_ozone=true rtc_include_pulse_audio=false '
            fi
            #ninja -C out/$ARCH/Release -t clean
            # ninja -C out/$ARCH/Release webrtc
            ninja -C out/$ARCH/Release $NINJA_TARGET
            popd
            PS3="Please enter your choice ($opt): "
            ;;
        "${options[4]}")
            rm -rf $ROOT_DIR/webrtc/src/out/$ARCH/Release
            ;;
        "${options[5]}")
            exit 0
            ;;
        *) echo "invalid option $REPLY";;
    esac
done
| true |
6d65a30f54b6f761c72fda5fe7f743e829e4b8e7
|
Shell
|
Limoto/arch
|
/qgamex/PKGBUILD
|
UTF-8
| 547 | 2.5625 | 3 |
[] |
no_license
|
# Contributor: Jakub Luzny <[email protected]>
# Arch Linux PKGBUILD for qgamex, a Qt4 helper that launches games on a
# separate X server.
pkgname=qgamex
pkgver=1.1.0
pkgrel=1
pkgdesc='Qt4 application to help you with launching games on separate X server'
arch=(i686 x86_64)
url="http://dolezel.info/projects/qgamex"
license=('GPL')
depends=('qt>=4.3.0')
source=(http://www.dolezel.info/sub/projekty/${pkgname}-${pkgver}.tar.bz2)
md5sums=('06196afc853823b846fd318161fc16eb')
# Build with qmake/make and install the single binary into the package root.
build() {
    cd "$srcdir/${pkgname}-${pkgver}/"
    qmake
    make
    mkdir -p "$pkgdir/usr/bin/"
    install -Dm 755 qgamex "$pkgdir/usr/bin/"
}
#category: games
# vim:set ts=2 sw=2 et:
| true |
7fb015a3acfa0db83face24e4067c9f325e8dadf
|
Shell
|
MadPidgeon/Order-versus-Chaos
|
/table/generate_table.sh
|
UTF-8
| 881 | 3.4375 | 3 |
[] |
no_license
|
#!/bin/bash
# Run MCTS experiments for several board configurations: compile one
# binary per parameter set, fan out 100 runs through job_pool.sh, then
# merge the per-run result files into data/res_<job>.txt.
mkdir -p data
# initialize the job pool
thread_count=4;
if [ $# -eq 1 ]; then
    thread_count=$1
fi
. job_pool.sh
job_pool_init $thread_count 0
# run jobs
p="CHAOS"
cp=1
# Parallel arrays: board width, line length, search depth per experiment.
dims=(8 10 12)
lens=(6 7 8)
deps=(100000 100000 100000)
for a in ${!dims[@]}; do
    w=${dims[$a]}
    m=${lens[$a]}
    d=${deps[$a]}
    jobstr="${cp}_${p}_${w}_${m}_${d}"
    # A non-empty merged result file means this job already completed.
    if [ -s "data/res_${jobstr}.txt" ]
    then
        echo "Skipping job ${jobstr}"
    else
        g++ -std=c++17 mmcts.cc -o "mmcts_${jobstr}" -Dboard_w=${w} -Dboard_m=${m} -Dboard_d=${d} -DCAN_PASS=${cp} -DPASS_PLAYER=${p}
        for j in {1..100}; do
            job_pool_run ./job.sh "${jobstr}" $j
        done
        # sync
        job_pool_wait
        # merge
        cat data/res_${jobstr}_*.txt > data/res_${jobstr}.txt
        rm mmcts_${jobstr}
        rm data/res_${jobstr}_*.txt
        echo "Job ${jobstr} done!"
    fi
done
# don't forget to shut down the job pool
job_pool_shutdown
echo "Finished"
| true |
e8b1f3013241dbfdbf99c772c2d01d586ca96360
|
Shell
|
gmccreight/mkbasebox
|
/create_vagrant_box_from_veewee_template.sh
|
UTF-8
| 2,901 | 4.21875 | 4 |
[] |
no_license
|
#!/bin/bash
# ./create_vagrant_box_from_veewee_template.sh
# Create a vagrant box using a veewee template
#------------------------------------------------------------------------------
# Configuration
#------------------------------------------------------------------------------
ruby_version="1.9.3"
veewee_template_name="ubuntu-12.04-server-amd64"
# Optional local ISO; if present it is symlinked into ./iso below so
# veewee does not re-download it.
locally_saved_iso_file="`pwd`/ubuntu-12.04-server-amd64.iso"
basebox_revision_number="v2"
# | no underscore on purpose... breaks build
name_of_box_to_create="precise64$basebox_revision_number"
#------------------------------------------------------------------------------
# RVM
#------------------------------------------------------------------------------
# Load RVM into a shell session *as a function*
if [[ -s "$HOME/.rvm/scripts/rvm" ]] ; then
    # First try to load from a user install
    source "$HOME/.rvm/scripts/rvm"
elif [[ -s "/usr/local/rvm/scripts/rvm" ]] ; then
    # Then try to load from a root install
    source "/usr/local/rvm/scripts/rvm"
else
    printf "ERROR: An RVM installation was not found.\n"
    # NOTE(review): this cd ../.. right before exit has no visible effect
    # on the caller (cwd changes die with the process) — confirm intent.
    cd ../..
    exit 1
fi
# # Install the various gems in their own gemset
rvm $ruby_version
rvm --force gemset delete mkbasebox
rvm gemset create mkbasebox
rvm $ruby_version@mkbasebox
gem install bundler
bundle
#------------------------------------------------------------------------------
# Possibly cleanup last installation - needs RVM
#------------------------------------------------------------------------------
if [[ -d ./temp ]]; then
    cd temp
    #[tag:question:gem] why do we do this inside the folder?
    bundle exec vagrant destroy 2>/dev/null
    bundle exec vagrant box remove $name_of_box_to_create 2>/dev/null
    bundle exec veewee vbox destroy $name_of_box_to_create 2>/dev/null
    bundle exec veewee vbox undefine $name_of_box_to_create 2>/dev/null
    cd ..
    rm -rf ./temp
fi
# #------------------------------------------------------------------------------
# # Install the vagrant box from the template
# #------------------------------------------------------------------------------
mkdir temp
cd temp
# if the iso is provided, then soft link it into the .iso folder where veewee
# expects it to be
if [[ -f $locally_saved_iso_file ]] ; then
    mkdir ./iso
    cd ./iso
    ln -s "$locally_saved_iso_file" $(basename "$locally_saved_iso_file")
    cd ..
fi
#Create the new basebox, using a local iso file if it specified and exists
bundle exec veewee vbox define $name_of_box_to_create $veewee_template_name
bundle exec veewee vbox build $name_of_box_to_create
bundle exec veewee vbox validate $name_of_box_to_create
bundle exec vagrant package --base $name_of_box_to_create --output $name_of_box_to_create.box
bundle exec vagrant box add $name_of_box_to_create $name_of_box_to_create.box
echo you can now upload the $name_of_box_to_create.box somewhere, like to s3 for example
| true |
4282ba1d426c68b5412f03297f7ba29a5105ca8a
|
Shell
|
spyrosoft/shell-scripts
|
/software-development/upload-on-file-change
|
UTF-8
| 369 | 3.796875 | 4 |
[] |
no_license
|
#!/usr/bin/env bash
# Watch the current directory with inotifywait and scp every written file
# whose name matches the given extension to the remote path.
#
# Usage: $0 user@host:optional/path (no trailing slash) file-extension-expression
if [[ $# -ne 2 ]]; then
    echo "Usage: $0 user@host:optional/path (no trailing slash) file-extension-expression"
else
    upload_path=$1
    file_extension_expression=\.${2}$
    # IFS= and -r keep leading whitespace and backslashes in file names;
    # the scp arguments are quoted so names with spaces stay intact.
    inotifywait --format "%f" -me close_write -q . | while IFS= read -r file
    do
        if [[ ! "$file" =~ $file_extension_expression ]]; then continue; fi
        scp "$file" "$upload_path/$file"
    done
fi
| true |
42f8f31122789a0829e51540fe65c73675aa744b
|
Shell
|
freebsd/freebsd-ports
|
/sysutils/swapmon/files/swapmon.in
|
UTF-8
| 605 | 3.328125 | 3 |
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
#
# swapmon - add / remove swap as needed
#
# PROVIDE: swapmon
# REQUIRE: LOGIN
# KEYWORD: shutdown
#
# Add the following lines to /etc/rc.conf to enable swapmon:
#
#swapmon_enable="YES"
#
# FreeBSD rc.d wrapper: sources an optional %%PREFIX%%/etc/swapmonrc for
# overrides (e.g. PIDFILE) and daemonizes the swapmon binary.
. /etc/rc.subr
name=swapmon
rcvar=swapmon_enable
command="%%PREFIX%%/sbin/${name}"
command_interpreter="/bin/sh"
# Optional user configuration, sourced before defaults are applied.
CONFIG=%%PREFIX%%/etc/${name}rc
if [ -r "${CONFIG}" ]
then . "${CONFIG}"
fi
pidfile=${PIDFILE:-"/var/run/${name}.pid"}
swapmon_enable=${swapmon_enable:-"NO"}
load_rc_config ${name}
# Detach: close stdin, send output to /dev/null, run in the background.
start_precmd='command_args="-F <&- 2>&1 >/dev/null &"'
stop_postcmd='rm $pidfile'
run_rc_command "$1"
| true |
f352e24f95947e500141f05f5de86f71950deb25
|
Shell
|
shakfu/start-vm
|
/setup/ubuntu_17.10_artful-rlang.sh
|
UTF-8
| 21,658 | 3.15625 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Interactive post-install setup (recipe "rlang") for Ubuntu 17.10:
# installs dotfiles, apt/pip packages and a large set of R packages,
# asking for confirmation before each section.

# ANSI colour escapes used by the recipe/section banners.
COLOR_BOLD_YELLOW="\033[1;33m"
COLOR_BOLD_BLUE="\033[1;34m"
COLOR_BOLD_MAGENTA="\033[1;35m"
COLOR_BOLD_CYAN="\033[1;36m"
COLOR_RESET="\033[m"

# Source/destination directories used by the install_* helpers below.
CONFIG=config/artful
DEFAULT=default
CONFIG_DST=$HOME/.config
BIN=$HOME/bin
#######################################
# Print a recipe banner: blank line, bold magenta title, '=' rule.
# Arguments:
#   $1 - recipe title
#######################################
function recipe {
  echo
  # Quoted so titles keep their exact spacing and are never glob-expanded
  # (the original relied on unquoted word splitting).
  echo -e "${COLOR_BOLD_MAGENTA}$1 ${COLOR_RESET}"
  echo "=========================================================="
}
#######################################
# Print a section banner: blank line, bold cyan title, '-' rule.
# Arguments:
#   $1 - section title
#######################################
function section {
  echo
  # Quoted so titles keep their exact spacing and are never glob-expanded.
  echo -e "${COLOR_BOLD_CYAN}$1 ${COLOR_RESET}"
  echo "----------------------------------------------------------"
}
#######################################
# Copy a dotfile or directory from $DEFAULT into $HOME.
# Globals:
#   DEFAULT - source directory
#   HOME    - destination directory
# Arguments:
#   $1 - path relative to $DEFAULT
#######################################
function install_default {
  echo "installing $1"
  # Quoted so names containing spaces survive (was: cp -rf $DEFAULT/$1 $HOME/).
  cp -rf "$DEFAULT/$1" "$HOME/"
}
#######################################
# Copy a configuration folder from $CONFIG into $CONFIG_DST.
# Globals:
#   CONFIG     - source directory
#   CONFIG_DST - destination directory (usually ~/.config)
# Arguments:
#   $1 - path relative to $CONFIG
#######################################
function install_config {
  echo "installing $1"
  # Quoted so names containing spaces survive (was: cp -rf $CONFIG/$1 $CONFIG_DST/).
  cp -rf "$CONFIG/$1" "$CONFIG_DST/"
}
# Banner plus dotfile installation: copy the default dotfiles into $HOME
# and the per-application folders into $CONFIG_DST.
recipe "name: rlang"
echo "platform: ubuntu:17.10"
echo
section ">>> installing default dotfiles"
install_default .fonts
install_default bin
install_default .bashrc
install_default .SciTEUser.properties
install_default .xinitrc
install_default .gtkrc-2.0
install_default .vimrc
install_default .ghci
install_default src
section ">>> installing .config folders"
if [ ! -d "$CONFIG_DST" ]; then
    mkdir -p $CONFIG_DST
fi
install_config conky
install_config gtk-3.0
install_config xfce4
install_config i3status
install_config i3
install_config awesome
###########################################################################
# core: baseline CLI tooling, upgraded system, snapd removed.
section ">>> core"
echo "Install core debian_packages?"
echo "build-essential, cmake, ncdu, htop, vim, exuberant-ctags, tig, ranger, bmon, pv, rpl, unzip, p7zip-full, open-vm-tools"
read -p "Are you sure? " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
    echo "pre-install scripts"
    sudo apt-get update && sudo apt-get dist-upgrade -y
    sudo apt-get update && sudo apt-get install -y \
        build-essential \
        cmake \
        ncdu \
        htop \
        vim \
        exuberant-ctags \
        tig \
        ranger \
        bmon \
        pv \
        rpl \
        unzip \
        p7zip-full \
        open-vm-tools \
    && echo "core debian packages installed"
    sudo apt-get purge -y \
        snapd \
    && echo "core packages purged"
    echo "post-install scripts"
    mkdir -p ~/.host-shared
    # Reset font cache on Linux
    # BUGFIX: was `command -v fc-cache @>/dev/null` — the stray "@" was
    # passed as an extra name to `command -v`, so the lookup always failed
    # and the font cache was never refreshed.
    if command -v fc-cache &>/dev/null ; then
        echo "Resetting font cache"
        fc-cache -f "$HOME/.fonts"
    fi
fi
###########################################################################
# Confirmation-gated install sections: java, python, py_modules (pip),
# database (PostgreSQL 10 + extensions), gui (X + window managers),
# latex (TeX Live + Inconsolata), rlang-debian (R runtime + build deps).
section ">>> java"
echo "Install java debian_packages?"
echo "default-jdk"
read -p "Are you sure? " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
    sudo apt-get update && sudo apt-get install -y \
        default-jdk \
    && echo "java debian packages installed"
fi
###########################################################################
section ">>> python"
echo "Install python debian_packages?"
echo "python3-dev, python3-setuptools, python3-pip"
read -p "Are you sure? " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
    sudo apt-get update && sudo apt-get install -y \
        python3-dev \
        python3-setuptools \
        python3-pip \
    && echo "python debian packages installed"
fi
###########################################################################
section ">>> py_modules"
echo "Install py_modules python_packages?"
echo "wheel, virtualenv, ipython, cython, psycopg2, pgcli, grin, isort, pylint, radon, autopep8, glances, rtichoke"
read -p "Are you sure? " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
    sudo -H pip3 install \
        wheel \
        virtualenv \
        ipython \
        cython \
        psycopg2 \
        pgcli \
        grin \
        isort \
        pylint \
        radon \
        autopep8 \
        glances \
        rtichoke \
    && echo "py_modules python packages installed"
fi
###########################################################################
section ">>> database"
echo "Install database debian_packages?"
echo "libpq-dev, postgresql-client-10, postgresql-10, postgresql-contrib-10, postgresql-plpython3-10, postgresql-10-pllua, luajit, postgresql-10-pgtap, pgtap"
read -p "Are you sure? " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
    sudo apt-get update && sudo apt-get install -y \
        libpq-dev \
        postgresql-client-10 \
        postgresql-10 \
        postgresql-contrib-10 \
        postgresql-plpython3-10 \
        postgresql-10-pllua \
        luajit \
        postgresql-10-pgtap \
        pgtap \
    && echo "database debian packages installed"
    echo "post-install scripts"
    # Give the current user a superuser role and a default database.
    sudo -u postgres createuser -s $USER
    sudo -u postgres createdb $USER
fi
###########################################################################
section ">>> gui"
echo "Install gui debian_packages?"
echo "xorg, xserver-xorg-input-all, open-vm-tools-desktop, fonts-dejavu, gnome-icon-theme, awesome, i3, xfce4-terminal, lxappearance, gtk2-engines, conky, scite, gtkorphan, fslint, bleachbit"
read -p "Are you sure? " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
    sudo apt-get update && sudo apt-get install -y \
        xorg \
        xserver-xorg-input-all \
        open-vm-tools-desktop \
        fonts-dejavu \
        gnome-icon-theme \
        awesome \
        i3 \
        xfce4-terminal \
        lxappearance \
        gtk2-engines \
        conky \
        scite \
        gtkorphan \
        fslint \
        bleachbit \
    && echo "gui debian packages installed"
fi
###########################################################################
section ">>> latex"
echo "Install latex debian_packages?"
echo "pandoc, lmodern, texlive-generic-recommended, texlive-fonts-recommended, texlive-humanities, texlive-latex-extra, texlive-xetex, texinfo"
read -p "Are you sure? " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
    sudo apt-get update && sudo apt-get install -y \
        pandoc \
        lmodern \
        texlive-generic-recommended \
        texlive-fonts-recommended \
        texlive-humanities \
        texlive-latex-extra \
        texlive-xetex \
        texinfo \
    && echo "latex debian packages installed"
    sudo apt-get purge -y \
        texlive-latex-extra-doc \
        texlive-pictures-doc \
        texlive-latex-base-doc \
        texlive-latex-recommended-doc \
        texlive-humanities-doc \
    && echo "latex packages purged"
    echo "post-install scripts"
    # Install the Inconsolata font into the TeX tree and register its map.
    cd /usr/share/texlive/texmf-dist
    sudo wget http://mirrors.ctan.org/install/fonts/inconsolata.tds.zip
    sudo unzip inconsolata.tds.zip
    sudo rm inconsolata.tds.zip
    echo "Map zi4.map" | sudo tee --append /usr/share/texlive/texmf-dist/web2c/updmap.cfg
    sudo mktexlsr
    sudo updmap-sys
fi
###########################################################################
section ">>> rlang-debian"
echo "Install rlang-debian debian_packages?"
echo "fonts-texgyre, libssl-dev, libxml2-dev, libcurl4-openssl-dev, libcairo2-dev, libxt-dev, libssh2-1-dev, r-base, r-base-dev, r-recommended"
read -p "Are you sure? " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
    sudo apt-get update && sudo apt-get install -y \
        fonts-texgyre \
        libssl-dev \
        libxml2-dev \
        libcurl4-openssl-dev \
        libcairo2-dev \
        libxt-dev \
        libssh2-1-dev \
        r-base \
        r-base-dev \
        r-recommended \
    && echo "rlang-debian debian packages installed"
fi
###########################################################################
# R package sections.
#
# Every section below followed an identical pattern (banner, package list,
# y/N prompt, install.packages via Rscript), so the boilerplate is
# factored into rlang_section.  The prompt text and the generated
# install.packages() command are identical to the previous inline code.
#
# Arguments:
#   $1     - section title (e.g. rlang-bayesian)
#   $2...  - R package names
rlang_section() {
    local title=$1
    shift
    section ">>> $title"
    echo "Install $title rlang_packages?"
    # Human-readable, comma-separated package list.
    local csv
    csv=$(printf '%s, ' "$@")
    echo "${csv%, }"
    read -p "Are you sure? " -n 1 -r
    echo
    if [[ $REPLY =~ ^[Yy]$ ]]
    then
        # Same list, quoted for R's c('a', 'b', ...) vector literal.
        local quoted
        quoted=$(printf "'%s', " "$@")
        quoted=${quoted%, }
        sudo Rscript -e "install.packages(c($quoted), repos='http://cran.rstudio.com/')" \
        && echo "rlang packages installed"
    fi
}

rlang_section rlang-bayesian bayesplot bridgesampling brms coda rstan rstanarm rstantools MCMCpack
rlang_section rlang-regression arm car caret e1071 lme4 lmtest visreg Boruta
rlang_section rlang-clustering fpc gclus trimcluster
rlang_section rlang-decision-trees dtree rpart.plot party partykit randomForest ranger tree
rlang_section rlang-graphs DiagrammeR ggiraph igraph influenceR visNetwork networkD3
rlang_section rlang-jupyter IRkernel
rlang_section rlang-forecast forecast prophet tseries xts zoo
rlang_section rlang-microsoft officer openxlsx WordR rvg
rlang_section rlang-text tm qdap
rlang_section rlang-map leaflet geosphere mapproj maps maptools RgoogleMaps ggmap
rlang_section rlang-gantt plan projmanr plotrix timelineS timevis vistime
rlang_section rlang-visual corrplot corrr d3heatmap dygraphs ggalt ggcorrplot ggedit ggExtra ggfittext ggfortify ggplot2 ggrepel ggridges ggthemes ggvis gplots grid gridExtra gtable heatmaply highcharter metricsgraphics plotly qcc qicharts2 rbokeh RColorBrewer scales threejs treemapify vcd venneuler viridis viridisLite waffle wesanderson
rlang_section rlang-tidyverse tidyverse dplyr dtplyr forcats glue purrr readr readxl reshape2 rlang stringr tibble tidyr tidyselect usethis widyr withr
###########################################################################
section ">>> rlang-modelling"
echo "Install rlang-modelling rlang_packages?"
echo "broom, gmodels, modelr, modeltools, recipes"
read -p "Are you sure? " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
sudo Rscript -e "install.packages(c('broom', 'gmodels', 'modelr', 'modeltools', 'recipes'), repos='http://cran.rstudio.com/')" \
&& echo "rlang packages installed"
fi
###########################################################################
section ">>> rlang-reporting"
echo "Install rlang-reporting rlang_packages?"
echo "bookdown, brew, DT, flexdashboard, flextable, formattable, hrbrthemes, htmlTable, htmltools, htmlwidgets, janitor, kableExtra, knitr, labeling, pander, pixiedust, prettydoc, prettyunits, revealjs, rhandsontable, rmarkdown, rmdformats, rmdshower, rpivotTable, tables, tint, tufte, xaringan, xtable, wordcloud"
read -p "Are you sure? " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
sudo Rscript -e "install.packages(c('bookdown', 'brew', 'DT', 'flexdashboard', 'flextable', 'formattable', 'hrbrthemes', 'htmlTable', 'htmltools', 'htmlwidgets', 'janitor', 'kableExtra', 'knitr', 'labeling', 'pander', 'pixiedust', 'prettydoc', 'prettyunits', 'revealjs', 'rhandsontable', 'rmarkdown', 'rmdformats', 'rmdshower', 'rpivotTable', 'tables', 'tint', 'tufte', 'xaringan', 'xtable', 'wordcloud'), repos='http://cran.rstudio.com/')" \
&& echo "rlang packages installed"
fi
###########################################################################
section ">>> rlang-testing"
echo "Install rlang-testing rlang_packages?"
echo "assertr, assertthat, covr, testthat"
read -p "Are you sure? " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
sudo Rscript -e "install.packages(c('assertr', 'assertthat', 'covr', 'testthat'), repos='http://cran.rstudio.com/')" \
&& echo "rlang packages installed"
fi
###########################################################################
section ">>> rlang-pkg"
echo "Install rlang-pkg rlang_packages?"
echo "devtools, formatR, lintr, packrat, roxygen2, sinew, styler"
read -p "Are you sure? " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
sudo Rscript -e "install.packages(c('devtools', 'formatR', 'lintr', 'packrat', 'roxygen2', 'sinew', 'styler'), repos='http://cran.rstudio.com/')" \
&& echo "rlang packages installed"
fi
###########################################################################
section ">>> rlang-shiny"
echo "Install rlang-shiny rlang_packages?"
echo "shiny, shinyBS, shinydashboard, shinyjs, shinythemes"
read -p "Are you sure? " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
sudo Rscript -e "install.packages(c('shiny', 'shinyBS', 'shinydashboard', 'shinyjs', 'shinythemes'), repos='http://cran.rstudio.com/')" \
&& echo "rlang packages installed"
fi
###########################################################################
section ">>> rlang-db"
echo "Install rlang-db rlang_packages?"
echo "DBI, dbplyr, pool, RPostgres"
read -p "Are you sure? " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
sudo Rscript -e "install.packages(c('DBI', 'dbplyr', 'pool', 'RPostgres'), repos='http://cran.rstudio.com/')" \
&& echo "rlang packages installed"
fi
###########################################################################
section ">>> rlang-text"
echo "Install rlang-text rlang_packages?"
echo "snakecase, stringdist, stringi, textclean, tidytext, whisker, crayon"
read -p "Are you sure? " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
sudo Rscript -e "install.packages(c('snakecase', 'stringdist', 'stringi', 'textclean', 'tidytext', 'whisker', 'crayon'), repos='http://cran.rstudio.com/')" \
&& echo "rlang packages installed"
fi
###########################################################################
section ">>> rlang-net"
echo "Install rlang-net rlang_packages?"
echo "httr, RCurl, servr"
read -p "Are you sure? " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
sudo Rscript -e "install.packages(c('httr', 'RCurl', 'servr'), repos='http://cran.rstudio.com/')" \
&& echo "rlang packages installed"
fi
###########################################################################
section ">>> rlang-datastructures"
echo "Install rlang-datastructures rlang_packages?"
echo "data.table, data.tree, jsonlite, reticulate, yaml"
read -p "Are you sure? " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
sudo Rscript -e "install.packages(c('data.table', 'data.tree', 'jsonlite', 'reticulate', 'yaml'), repos='http://cran.rstudio.com/')" \
&& echo "rlang packages installed"
fi
###########################################################################
section ">>> rlang-misctools"
echo "Install rlang-misctools rlang_packages?"
echo "gdata, gtools, Hmisc, psych, arsenal, descriptr"
read -p "Are you sure? " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
sudo Rscript -e "install.packages(c('gdata', 'gtools', 'Hmisc', 'psych', 'arsenal', 'descriptr'), repos='http://cran.rstudio.com/')" \
&& echo "rlang packages installed"
fi
###########################################################################
section ">>> rlang-time"
echo "Install rlang-time rlang_packages?"
echo "hms, lubridate"
read -p "Are you sure? " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
sudo Rscript -e "install.packages(c('hms', 'lubridate'), repos='http://cran.rstudio.com/')" \
&& echo "rlang packages installed"
fi
###########################################################################
section ">>> rlang-core"
echo "Install rlang-core rlang_packages?"
echo "docopt"
read -p "Are you sure? " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
sudo Rscript -e "install.packages(c('docopt'), repos='http://cran.rstudio.com/')" \
&& echo "rlang packages installed"
fi
###########################################################################
# rstudio: install the RStudio IDE .deb plus the runtime libraries it
# needs on a minimal system (confirmed interactively).
section ">>> rstudio"
echo "Install rstudio debian_packages?"
echo "libjpeg62, libgstreamer1.0-0, libgstreamer-plugins-base1.0-0"
read -p "Are you sure? " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
sudo apt-get update && sudo apt-get install -y \
libjpeg62 \
libgstreamer1.0-0 \
libgstreamer-plugins-base1.0-0 \
&& echo "rstudio debian packages installed"
echo "post-install scripts"
# Download the pinned RStudio release, install it, and drop the .deb.
# NOTE(review): version 1.1.442 is hard-coded -- bump URL and filename
# together when updating.
RSTUDIO=rstudio-xenial-1.1.442-amd64.deb
wget https://download1.rstudio.org/$RSTUDIO
sudo dpkg -i $RSTUDIO
rm $RSTUDIO
fi
###########################################################################
# docker: install Docker CE with the official convenience script and let
# the current user run docker without sudo.
section ">>> docker"
echo "Install docker?"
read -p "Are you sure? " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
DOCKER=get-docker.sh
curl -fsSL get.docker.com -o $DOCKER
sudo sh $DOCKER
# Allow the invoking user to talk to the docker daemon
# (group membership takes effect at next login).
sudo usermod -aG docker $USER
rm $DOCKER
echo "docker installed"
fi
###########################################################################
# sublime-text: register the Sublime HQ apt repository and its signing
# key, then install the sublime-text package (confirmed interactively).
section ">>> sublime-text"
echo "Install sublime-text debian_packages?"
echo "sublime-text"
read -p "Are you sure? " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
echo "pre-install scripts"
# BUG FIX: 'apt-key add' needs a file argument; '-' makes it read the key
# from stdin (the wget pipe). Without it the repository key was never
# imported and the apt-get update below failed signature verification.
wget -qO - https://download.sublimetext.com/sublimehq-pub.gpg | sudo apt-key add -
# -y keeps the script non-interactive after the single confirmation above.
sudo apt-get install -y apt-transport-https
echo "deb https://download.sublimetext.com/ apt/stable/" | sudo tee /etc/apt/sources.list.d/sublime-text.list
sudo apt-get update && sudo apt-get install -y \
sublime-text \
&& echo "sublime-text debian packages installed"
fi
###########################################################################
# golang: download the pinned Go toolchain tarball and unpack it under
# /usr/local (producing /usr/local/go).
section ">>> golang"
echo "Install golang?"
read -p "Are you sure? " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
# NOTE(review): version is pinned to 1.9.2 and fetched via the gvt1
# redirector -- update filename and URL together when bumping.
GOLANG=go1.9.2.linux-amd64.tar.gz
wget https://redirector.gvt1.com/edgedl/go/$GOLANG
sudo tar -C /usr/local -xzf $GOLANG
rm -rf $GOLANG
echo "golang installed"
fi
###########################################################################
# rust-lang: install the Rust toolchain via the official rustup bootstrap
# script (the script itself prompts interactively).
section ">>> rust-lang"
echo "Install rust-lang?"
read -p "Are you sure? " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
# NOTE(review): piping a remote script into sh executes unreviewed code;
# this is, however, the upstream-documented rustup install method.
curl https://sh.rustup.rs -sSf | sh
echo "rust-lang installed"
fi
###########################################################################
# powerline-fonts: clone the upstream repository, run its installer, then
# clean up the clone and the apt caches.
section ">>> powerline-fonts"
echo "Install powerline-fonts?"
read -p "Are you sure? " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
git clone https://github.com/powerline/fonts.git --depth=1
# Guard the cd (ShellCheck SC2164): if the clone failed, the old code
# still ran ./install.sh from the current directory.
if cd fonts; then
./install.sh
cd ..
fi
rm -rf fonts
echo "powerline-fonts installed"
echo "post-install scripts"
sudo apt-get autoremove
sudo apt-get autoclean
sudo apt-get clean
fi
| true |
20ff2247ef19e02465900fb75d278d7ed3b9efb3
|
Shell
|
brandonwkerns/lpt-python-public
|
/realtime/download_cmorph_rt.MASTER.sh
|
UTF-8
| 1,794 | 3.921875 | 4 |
[] |
no_license
|
#!/bin/bash

####### Download real time version of CMORPH.
#
# Usage: download_cmorph_rt.sh [YYYYMMDD]
#
# Downloads the hourly CMORPH real-time files for the given day (default:
# today, UTC) and for the preceding day into $workdir/rt/YYYY/MM/YYYYMMDD/.
# Files already on disk are skipped; files not yet published on the FTP
# server are reported and skipped.

ftpsite=ftp://ftp.cpc.ncep.noaa.gov/precip/CMORPH_RT/GLOBE/data

## Working directory is the root directory where data will be downloaded.
workdir=/path/to/keep/your/data

#######################################################################

# Guard the cd: downloading into the wrong directory would scatter files.
cd "$workdir" || exit 1

## Give input as YYYYMMDD, or it will get today's date using the Linux date command.
if [ -z "$1" ]
then
  today=$(/bin/date -u +%Y%m%d)
else
  today=$1
fi

# download_day YYYYMMDD
# Fetch all 24 hourly files for one day, skipping files already on disk.
# (The today/yesterday loops in the original script were exact duplicates
# of this body.)
download_day()
{
  local day=$1
  local yyyy mm hh filewanted
  yyyy=$(/bin/date --date="$day" +%Y)
  mm=$(/bin/date --date="$day" +%m)

  for hh in {00..23}
  do
    filewanted=CMORPH_V0.x_RT_8km-30min_${day}${hh}
    if [ -e "rt/$yyyy/$mm/$day/$filewanted" ]
    then
      echo "I already have ${filewanted}."
    else
      echo "Downloading ${filewanted}."
      /usr/bin/wget -q "$ftpsite/$yyyy/$yyyy$mm/$filewanted.gz"
      if [ -e "$filewanted.gz" ]
      then
        mkdir -p "rt/$yyyy/$mm/$day"
        mv "$filewanted.gz" "rt/$yyyy/$mm/$day"
        /bin/gunzip -f "rt/$yyyy/$mm/$day/$filewanted.gz"
        echo "Success!"
      else
        echo "Failed! File may not be on the server yet."
      fi
    fi
  done
}

# The requested day, then the previous day (presumably to pick up files
# that appeared on the server after the previous run -- TODO confirm).
download_day "$today"
download_day "$(/bin/date --date="${today}-1day" +%Y%m%d)"

echo Done.
exit 0
| true |
e76732dd3ad83aec03c5cc912319351a68044519
|
Shell
|
akbartk/docker-codeigniter-php71
|
/docker-entrypoint
|
UTF-8
| 1,554 | 3.328125 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Entrypoint for the CodeIgniter/PHP Apache image: applies optional
# runtime configuration from environment variables, then execs the
# container command ("$@").
set -e

# set_php_ini KEY VALUE
# Echo the change and rewrite the KEY line in the image's php.ini drop-in
# (sed 'c\' replaces the matching line with KEY=VALUE).
set_php_ini() {
    local key="$1" value="$2"
    echo "php.ini set ${key}=${value}"
    sed -i "/${key}=*/c\\${key}=${value}" /usr/local/etc/php/conf.d/docker-ci-php.ini
}

# Trust X-Forwarded-For when running behind a proxy.
if [[ "$1" == apache2* ]] && [ "${FWD_REMOTE_IP,,}" == "true" ]; then
    a2enmod remoteip -q
    echo 'RemoteIPHeader X-Forwarded-For' > $APACHE_CONFDIR/conf-available/docker-ci-apache.conf
    # BUG FIX: the snippet above is written as docker-ci-apache.conf, but
    # the original enabled "docker-oc-apache", which does not exist, so
    # a2enconf failed. Enable the conf that was actually written.
    a2enconf docker-ci-apache -q
fi

if [ "${ENABLE_CRON,,}" == "true" ]; then
    cron
    echo 'Cron enabled.'
fi

if [ -n "$PHP_DISPLAY_ERRORS" ]; then
    set_php_ini display_errors "$PHP_DISPLAY_ERRORS"
fi
if [ -n "$PHP_POST_MAX_SIZE" ]; then
    set_php_ini post_max_size "$PHP_POST_MAX_SIZE"
fi
if [ -n "$PHP_MEMORY_LIMIT" ]; then
    set_php_ini memory_limit "$PHP_MEMORY_LIMIT"
fi
if [ -n "$PHP_UPLOAD_MAX_FILESIZE" ]; then
    set_php_ini upload_max_filesize "$PHP_UPLOAD_MAX_FILESIZE"
fi
if [ -n "$TZ" ]; then
    set_php_ini date.timezone "$TZ"
fi

# Seed an empty document root with the bundled CodeIgniter user guide.
if [[ "$1" == apache2* ]] && [ -z "$(ls -A /var/www/html)" ]; then
    echo "Document root (/var/www/html) is empty. Copying CodeIgniter User Guide..."
    mv /usr/src/CodeIgniter_1.7.3/user_guide/* /var/www/html/.
fi

exec "$@"
| true |
a8900d5b63f2a773a497a0f260258ff7c7ee98da
|
Shell
|
dywisor/omir
|
/site/src/rootfs/base/FACTORY/site/share/netconfig/ifconfig-info.sh
|
UTF-8
| 1,149 | 3.59375 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/sh
# Filter `ifconfig <iface>` output on stdin: print "<addr> <netmask>" for
# the first IPv4 address, skipping link-local 169.254.0.0/16; prints
# nothing when no qualifying address exists.
ifconfig_io_first_inet() {
# inet <addr> netmask <netmask> ..ignored..
# ignore IPv4 link-local adress 169.254.0.0/16
awk '($1 == "inet" && (!($2 ~ "^169[.]254[.]")) && $3 == "netmask") { print $2, $4; exit; }'
}
# Filter `ifconfig <iface>` output on stdin: print "<addr> <prefixlen>"
# for the first IPv6 address, skipping link-local fe80::/10; prints
# nothing when no qualifying address exists.
ifconfig_io_first_inet6() {
# inet6 <addr> prefixlen <prefixlen> ..ignored..
# ignore IPv6 link-local adress fe80::/10 (fe80 - febf)
awk '($1 == "inet6" && (!($2 ~ "^fe[89ab][a-f0-9]:")) && $3 == "prefixlen") { print $2, $4; exit; }'
}
# Filter `route -n get ...` output on stdin: print the first gateway value.
route_io_first_gw() {
awk '($1 == "gateway:") { print $2; exit; }'
}
# Shared implementation: run `ifconfig <iface>`, pipe it through the given
# filter function, and split the filter's "<addr> <mask>" output into the
# globals v0 (address) and v1 (netmask/prefixlen).
# Arguments: $1 - filter function name, $2 - interface name
# Returns:   0 when an address was found (v0 non-empty), 1 otherwise.
_ifconfig_get_first_inetx() {
v0=""
v1=""
local func
local iface
func="${1:?}"
iface="${2:?}"
# Word-splitting of the unquoted command substitution is intentional: it
# separates the address token from the mask token.
set -- $( ifconfig "${iface}" | "${func}" )
v0="${1-}"
v1="${2-}"
[ -n "${v0}" ]
}
# Set v0/v1 to the first non-link-local IPv4 address/netmask of $1.
ifconfig_get_first_inet() {
_ifconfig_get_first_inetx ifconfig_io_first_inet "${@}"
}
# Set v0/v1 to the first non-link-local IPv6 address/prefixlen of $1.
ifconfig_get_first_inet6() {
_ifconfig_get_first_inetx ifconfig_io_first_inet6 "${@}"
}
# Set v0 to the default IPv4 gateway; returns non-zero if none is found.
route_get_first_gw() {
v0="$( route -n get -inet 0.0.0.0/0 2>/dev/null | route_io_first_gw )" && [ -n "${v0}" ]
}
# Set v0 to the default IPv6 gateway; returns non-zero if none is found.
route_get_first_gw6() {
v0="$( route -n get -inet6 0::0/0 2>/dev/null | route_io_first_gw )" && [ -n "${v0}" ]
}
| true |
b7d160e5c3bbf1233f2f140001613d1479695929
|
Shell
|
TechTinkerer42/Haiti
|
/aimir-web/src/main/webapp/kml/start.sh
|
UTF-8
| 1,619 | 3.015625 | 3 |
[] |
no_license
|
#!/bin/bash
# Build the DCU/DSO location lists used by the KML generators: read the
# JDBC settings from the webapp's jdbc.properties, query the database via
# sqlplus, and hand off to startKMLDSO.sh.

cd `dirname $0`
echo `pwd`

JDBC_FILE=../WEB-INF/classes/jdbc.properties
# Extract host:port/sid from a thin-style JDBC URL: take the text after
# '@', rewrite the second ':' to '/', and trim surrounding whitespace.
JDBC_URL=`cat $JDBC_FILE | awk -F= '{if ($1=="jdbc.url") print $2}' | awk -F@ '{print $2}' | sed -e 's/\:/\//2' | sed 's/^[ \t]*//g' | sed 's/[ \t\r\n]*$//g'`
JDBC_PASS=`cat $JDBC_FILE | awk -F= '{if ($1=="jdbc.password") print $2}'| sed 's/^[ \t]*//g' | sed 's/[ \t\r\n]*$//g'`
JDBC_USER=`cat $JDBC_FILE | awk -F= '{if ($1=="jdbc.username") print $2}'| sed 's/^[ \t]*//g' | sed 's/[ \t\r\n]*$//g'`
# sqlplus "user/password@host:port/sid" connect string.
# NOTE(review): echoing DB_CONN prints the DB password to stdout/logs.
export DB_CONN=$JDBC_USER/$JDBC_PASS@$JDBC_URL
echo $DB_CONN

if [ -e data ] ; then
echo "data directory is already exist"
else
mkdir data
fi

# Record whether the METER_MAP table exists (METER_MAP_EXIST=0 or 1).
sqlplus -S ${DB_CONN} > meter_map.txt << EOF
set pages 0
set feedback off
select 'METER_MAP_EXIST='||count(*) from user_tables where table_name = 'METER_MAP';
EOF
#echo "METER_MAP_EXIST=0" > meter_map.txt

# Only consider units that communicated within the last 7 days.
ago7day=$( date +"%Y%m%d" -d '7 days ago' )
#echo $ago7day

#sqlplus -S ${DB_CONN} > dculist.txt <<EOF
#
#set pages 0
#set feedback off
#set linesize 2000
#
#select sys_id||' '||id from mcu where last_comm_date > '$ago7day' and (mcu_status is null or mcu_status != (select id from code where code='1.1.4.2')) and gpiox is not null and gpioy is not null;
#EOF

# List distinct location names of recently-seen, geolocated, non-failed
# MCUs into dsolist.txt for the DSO KML generator.
sqlplus -S ${DB_CONN} > dsolist.txt <<EOF
set pages 0
set feedback off
set linesize 2000
select loc.name from location loc inner join mcu m on m.location_id = loc.id where m.last_comm_date > '$ago7day' and (m.mcu_status is null or m.mcu_status != (select id from code where code='1.1.4.2'))
and m.gpiox is not null and m.gpioy is not null group by loc.name;
EOF

#./startKML.sh
./startKMLDSO.sh
#./startData.sh
| true |
bbb024b4b629fe9b7d70afa2f9cea1fe50e46429
|
Shell
|
litaxc/v2ray_bootstrap
|
/bootstrap.sh
|
UTF-8
| 802 | 2.859375 | 3 |
[] |
no_license
|
# Interactive v2ray server bootstrap: generate a shared UUID, patch the
# client and server config templates with it and the server IP, install
# the v2ray binaries, and register a systemd unit so the service starts
# on boot.
read -p "Enter your server's public IP: " __ADDRESS__
export __PATH__=$PWD
# Shared secret between the client and server configurations.
export __UUID__=$(uuidgen)
export V2RAY_LOCATION_ASSET=.
export V2RAY_LOCATION_CONFIG=./server
sudo apt-get update
sudo apt-get install -y unzip
# NOTE(review): release pinned to v4.21.3; the zip is not checksum-verified.
curl -o v2ray.zip -L https://github.com/v2ray/v2ray-core/releases/download/v4.21.3/v2ray-linux-64.zip
unzip v2ray.zip
rm config.json
# Substitute the __UUID__/__ADDRESS__ placeholders in both templates.
sed -i 's/__UUID__/'"${__UUID__}"'/g; s/__ADDRESS__/'"${__ADDRESS__}"'/g' ./local/config.json
sed -i 's/__UUID__/'"${__UUID__}"'/g; s/__ADDRESS__/'"${__ADDRESS__}"'/g' ./server/config.json
sudo mkdir -p /etc/v2ray
sudo cp v2ray v2ctl /usr/local/bin
sudo cp ./server/config.json /etc/v2ray/config.json
# use systemd to make sure it start on boot
sudo cp v2ray.service /etc/systemd/system/
sudo systemctl start v2ray
sudo systemctl enable v2ray
| true |
95afb74ead874382825e157af54975a8dffaf558
|
Shell
|
lhfei/cluster-conf
|
/db-scripts/tv_report/get_tv_report_ddl.sh
|
UTF-8
| 537 | 3.796875 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash

schema="tv_report";

# Read the lines of file $1 into the global array 'array'.
# Uses mapfile (bash 4+) instead of the old manual while-read loop; unlike
# that loop, mapfile also keeps a final line that lacks a trailing newline.
getArray() {
    array=()              # Clear array
    mapfile -t array < "$1"
}
# Load the table list, then append each table's CREATE TABLE statement
# (terminated with ';') to ./<schema>_ddl_create.sql.
getArray "./${schema}-tables-list.txt"

for tname in "${array[@]}"
do
echo "===== [${schema}.$tname] start loading ..."
# Strip all spaces from the table name (the list file may be padded);
# the message is echoed again to show the cleaned name.
tname=${tname// /}
echo "===== [${schema}.$tname] start loading ..."
hive -e "SHOW CREATE TABLE ${schema}.${tname}" >> ./${schema}_ddl_create.sql
echo ";" >> ./${schema}_ddl_create.sql
done
| true |
40cfe4ecaa9f5fc3db785f99fe81b71423d8a344
|
Shell
|
drake7707/openvpn-cluster
|
/openvpn-server/service/run_server
|
UTF-8
| 943 | 3.265625 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/sh
# OpenVPN server container entrypoint: build the PKI on first run,
# generate the server config if missing, apply optional firewall rules,
# and start openvpn in the foreground.
set -x

mkdir -p /data && cd /data

# Portability fix: this script is interpreted by /bin/sh, where the
# bash-only '[[ ... ]]' test is not guaranteed to exist (dash, some
# busybox builds); POSIX '[ ... ]' is equivalent for these simple checks.
if [ ! -d "/data/pki" ]; then
    /usr/share/easy-rsa/easyrsa --batch init-pki
    /usr/share/easy-rsa/easyrsa --req-cn=openvpn-server --batch build-ca nopass
    /usr/share/easy-rsa/easyrsa --keysize=${VPN_KEYSIZE:-2048} --batch gen-dh
    openvpn --genkey --secret /data/pki/ta.key
    /usr/share/easy-rsa/easyrsa --batch build-server-full "openvpn-server" nopass
    /usr/share/easy-rsa/easyrsa --batch gen-crl
fi

if [ ! -f "/data/server.conf" ]; then
    /service/build_config server
fi

# if there is a rules.sh script in the data folder then execute it
# this is useful to set up the correct iptables, especially as those are lost once the container restarts
if [ -f "/data/rules.sh" ]; then
    /bin/sh /data/rules.sh
fi

# ensure the ccd directory exists, the client specific rules will also be stored in there
mkdir -p /data/ccd

openvpn --config /data/server.conf --client-config-dir /data/ccd
| true |
fcec2852dec593aaed5ff8c46c640781974ffb42
|
Shell
|
AlexDosch1/Biocomp-Exercise06
|
/hwk6.sh
|
UTF-8
| 890 | 3.125 | 3 |
[] |
no_license
|
#Part 1:writes file with unique gender-years-experience combinations in wages.csv
#Part 2:prints info for highest and lowest earners and number of females in top ten
#Part 3:effect of graduating college on min wage
#Usage: bash hwk6.sh wages.csv

#Part 1
# Drop the header row, keep the first two comma-separated columns, and
# write the sorted unique combinations to step1.txt.
cat wages.csv | grep -v "gender" | cut -d, -f 1,2 | sort -d | uniq | sort -n > step1.txt

#Part 2
#highest earner
# NOTE(review): after 'cut -d, -f 1,2,4' each output line is one
# comma-joined whitespace field, so 'sort -n -k 4' has no 4th field to key
# on and effectively compares whole lines. Sorting numerically by the wage
# column would need 'sort -t, -k3,3n' -- confirm the intended ordering
# before changing.
cat wages.csv | grep -v "gender" | cut -d, -f 1,2,4 | sort -n -k 4 | tail -1
#lowest earner
cat wages.csv | grep -v "gender" | cut -d, -f 1,2,4 | sort -n -k 4 | head -1
#number of ladies in top 10
cat wages.csv | grep -v "gender" | cut -d, -f 1,2,4 | sort -n -k 4 | head -10 | grep "female" | wc -l

#Part 3
# Keep rows whose first kept column is 12 or 16 (years of education), then
# take the first and last wage values after sorting.
# NOTE(review): 'sort -k1' compares the whole comma-joined line lexically
# and mixes 12- and 16-year rows before head/tail -- verify this matches
# the assignment's definition of the min-wage difference.
low=$(cat wages.csv | cut -d, -f 3,4 | grep -w ^1[26] | sort -k1 | cut -d, -f 2 | head -1)
high=$(cat wages.csv | cut -d, -f 3,4 | grep -w ^1[26] | sort -k1 | cut -d, -f 2 | tail -1)
echo "$(($high - $low))"
| true |
4e7740fab15dd9654c0da8db9c2d3e7b5bcf2835
|
Shell
|
SpicyGator/invert_terminator
|
/invert_term.sh
|
UTF-8
| 745 | 3.46875 | 3 |
[] |
no_license
|
#!/bin/bash
# Toggle the terminator color scheme between a dark and a light palette by
# rewriting the foreground/background colors in the terminator config file
# (keeping a .bak copy), then launch a new terminator instance.

CONFIG=~/.config/terminator/config

DARK_FG="\"#ffffff\""
DARK_BG="\"#333333\""
LIGHT_FG="\"#000000\""
LIGHT_BG="\"#ffffff\""

# The current foreground line tells us which palette is active.
CURRENT=$(grep -e "foreground_color\ =\ " $CONFIG)

if [[ $CURRENT == "    foreground_color = $LIGHT_FG" ]]; then
  # Currently light -> switch to dark.
  sed -i.bak "s/foreground_color\ =\ $LIGHT_FG/foreground_color\ =\ $DARK_FG/g" $CONFIG
  sed -i.bak "s/background_color\ =\ $LIGHT_BG/background_color\ =\ $DARK_BG/g" $CONFIG
elif [[ $CURRENT == "    foreground_color = $DARK_FG" ]]; then
  # Currently dark -> switch to light.
  sed -i.bak "s/foreground_color\ =\ $DARK_FG/foreground_color\ =\ $LIGHT_FG/g" $CONFIG
  sed -i.bak "s/background_color\ =\ $DARK_BG/background_color\ =\ $LIGHT_BG/g" $CONFIG
fi

terminator &
exit 0
| true |
0d5f0e8d2c03d4ad2244348143c3eb37166beb85
|
Shell
|
alshamiri5/cos-toolbox
|
/cos-kernel
|
UTF-8
| 17,156 | 4 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# This script fetches $FILES_TO_FETCH (kernel headers, source, toolchain,
# ...) of a specific COS release and installs them for compiling,
# debugging, etc. See usage() for details.
#
# This script is meant to run in COS toolbox or inside a cos-toolbox
# container.
#
set -eu
set -o pipefail
# Program name and version. Bump the version number if you change
# this script.
readonly PROG_NAME="$(basename "${0}")"
readonly PROG_VERSION="1.2"
# ANSI escape sequences for pretty printing.
readonly RED_S="\033[00;31m"
readonly BLUE_S="\033[00;34m"
readonly PURPLE_S="\033[00;35m"
readonly ANSI_E="\033[0m"
# Build ID number is passed as an arg or read from $COS_OS_RELEASE.
BUILD_ID=""
readonly COS_OS_RELEASE="/media/root/etc/os-release"
readonly COS_IMAGE_PROJECT="cos-cloud"
# Kernel header, source, toolchain tarballs and toolchain_env file in COS public GCS bucket.
readonly COS_GCS_BUCKET="gs://cos-tools"
readonly KERNEL_HEADERS="kernel-headers.tgz"
readonly KERNEL_SRC="kernel-src.tar.gz"
readonly TRUSTED_KEY="trusted_key.pem"
readonly TOOLCHAIN="toolchain.tar.xz"
readonly TOOLCHAIN_ENV_FILENAME="toolchain_env"
readonly FILES_TO_FETCH=("${KERNEL_HEADERS}" "${KERNEL_SRC}" "${TRUSTED_KEY}" "${TOOLCHAIN}" "${TOOLCHAIN_ENV_FILENAME}")
# Installation directory names.
readonly FETCHED_FILES_DIRNAME="fetched-files"
readonly KERNEL_HEADERS_DIRNAME="cos-kernel-headers"
readonly KERNEL_SRC_DIRNAME="cos-kernel-src"
readonly TOOLCHAIN_DIRNAME="cos-toolchain"
readonly TOOLCHAIN_ENV_DIRNAME="cos-toolchain-env"
# Installation directory paths will be initialized after $INSTALL_DIR
# and $BUILD_ID are set.
FETCHED_FILES_DIR=""
KERNEL_HEADERS_DIR=""
KERNEL_SRC_DIR=""
TOOLCHAIN_DIR=""
TOOLCHAIN_ENV_DIR=""
# Temporary files created for the list subcommand.
readonly TMP_IMAGE_LIST="/tmp/image_list"
readonly TMP_BUILD_ID_LIST="/tmp/build_id_list"
readonly TMP_BUILD_ID_FILES="/tmp/build_id_files"
# Compilation environment variables.
CC=""
CXX=""
SUBCOMMAND=""
# Set the defaults that can be changed by command line flags.
HELP="" # -h
INSTALL_DIR="${HOME}" # -i
ECHO=":" # -v
GLOBAL_OPTIONS="$(cat <<EOF
-h, --help help message
-i, --instdir install directory (default \$HOME: $HOME)
-v, --verbose verbose mode
EOF
)"
ALL="" # -a
LIST_OPTIONS="$(cat <<EOF
-h, --help help message
-a, --all include deprecated builds
EOF
)"
# shellcheck disable=SC2034
EXTRACT="yes" # -x
FETCH_OPTIONS="$(cat <<EOF
-h, --help help message
-x, --no-xtract do not extract files
EOF
)"
# shellcheck disable=SC2034
KERNEL_CONFIG="" # -c
PRINT_CMD="" # -p
MAKE_VERBOSE="" # -V
BUILD_OPTIONS="$(cat <<EOF
-h, --help help message
-c, --kconf path to kernel configuration file
-p, --print print commands to build the kernel, but do not execute
-V enable make's verbose mode
EOF
)"
# shellcheck disable=SC2034
REMOVE_OPTIONS="$(cat <<EOF
-h, --help help message
-a, --all remove all fetched files
EOF
)"
# Print the full program usage message (subcommands plus every option
# group) to stdout and terminate with the given exit code.
# Arguments: $1 - exit code
usage() {
local -r exit_code="${1}"
cat <<EOF
${PROG_NAME} v${PROG_VERSION}
Usage:
${PROG_NAME} [<global-options>] <subcommand> [<subcommand-options>] [<build-id>]
Subcommmands:
list list available builds
fetch fetch kernel headers, source, and toolchain tarballs
build build kernel (implies fetch)
remove remove fetched and extracted files
help print help message
Global options:
${GLOBAL_OPTIONS}
list options:
${LIST_OPTIONS}
fetch options:
${FETCH_OPTIONS}
build options:
${BUILD_OPTIONS}
remove options:
${REMOVE_OPTIONS}
Environment:
HOME default installation directory
EOF
exit "${exit_code}"
}
# Entry point: parse the command line, handle help requests, set up the
# per-build directories, and dispatch to the chosen subcommand.
main() {
local options
parse_args "${@}"
# Global help message.
if [[ -z "${SUBCOMMAND}" || "${SUBCOMMAND}" == "help" ]]; then
usage 0
fi
# Subcommand-specific help message.
if [[ -n "${HELP}" ]]; then
echo "${SUBCOMMAND}" specific options:
# Indirect expansion: e.g. SUBCOMMAND=list -> contents of $LIST_OPTIONS.
options="${SUBCOMMAND^^}_OPTIONS"
echo "${!options}"
exit 0
fi
# No need to initialize if we're listing available releases.
if [[ "${SUBCOMMAND}" != "list" ]]; then
initialize
fi
case "${SUBCOMMAND}" in
"list") subcmd_list;;
"fetch") subcmd_fetch;;
"build") subcmd_build;;
"remove") subcmd_remove;;
*) fatal internal error processing "${SUBCOMMAND}"
esac
}
# Parse flags, the subcommand, and an optional NNN.NNN.NNN build ID into
# the corresponding globals (HELP, INSTALL_DIR, BUILD_ID, SUBCOMMAND,
# KERNEL_CONFIG, PRINT_CMD, ECHO, MAKE_VERBOSE, EXTRACT, ALL).
# Exits on malformed input.
parse_args() {
local args
# BUG FIX: getopt(1) expects the long option names separated by commas,
# and the long name advertised in the help text and handled below is
# --kconf (the previous list declared a nonexistent --config and used
# spaces as separators).
if ! args=$(getopt \
--options "ac:hi:pvVx" \
--longoptions "all,kconf:,help,instdir:,print,verbose,no-xtract" \
-- "$@"); then
# getopt has printed an appropriate error message.
exit 1
fi
eval set -- "${args}"
while [[ "${#}" -gt 0 ]]; do
case "${1}" in
-a|--all)
ALL="yes";;
-c|--kconf)
shift
KERNEL_CONFIG="${1}";;
-h|--help)
HELP="yes";;
-i|--instdir)
shift
INSTALL_DIR="${1}";;
-p|--print)
PRINT_CMD="echo";;
-v|--verbose)
ECHO="info";;
-V)
MAKE_VERBOSE="V=1";;
-x|--no-xtract)
EXTRACT="no";;
--)
;;
*)
# A bare NNN.NNN.NNN argument is a build ID, not a subcommand.
if [[ ${1} =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
BUILD_ID="${1}"
shift
continue
fi
if [[ -n "${SUBCOMMAND}" ]]; then
fatal specify only one subcommand
fi
case "${1}" in
"list") SUBCOMMAND="${1}";;
"fetch") SUBCOMMAND="${1}";;
"build") SUBCOMMAND="${1}";;
"remove") SUBCOMMAND="${1}";;
"help") SUBCOMMAND="${1}";;
"--") ;;
*) fatal "${1}": invalid build id
esac
esac
shift
done
if [[ -z "${INSTALL_DIR}" ]]; then
fatal install directory not specified
fi
}
# Validate $BUILD_ID (reading it from the host's os-release when it was
# not given on the command line) and derive the per-build install paths.
initialize() {
# 'remove -a' wipes everything and needs no build-specific paths.
if [[ "${SUBCOMMAND}" == "remove" && -n "${ALL}" ]]; then
return
fi
# If build ID is not provided as an argument, we assume we're
# running on COS and the user wants the current build ID.
if [[ -z "${BUILD_ID}" ]]; then
if [[ ! -f "${COS_OS_RELEASE}" ]]; then
fatal "${COS_OS_RELEASE}" does not exist and build ID not specified
fi
# Sourcing os-release sets BUILD_ID (among other variables).
# shellcheck disable=SC1090
source "${COS_OS_RELEASE}"
fi
if [[ ! ${BUILD_ID} =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
error "${BUILD_ID}": invalid build id
return 1
fi
FETCHED_FILES_DIR="${INSTALL_DIR}/${FETCHED_FILES_DIRNAME}/${BUILD_ID}"
KERNEL_HEADERS_DIR="${INSTALL_DIR}/${KERNEL_HEADERS_DIRNAME}/${BUILD_ID}"
KERNEL_SRC_DIR="${INSTALL_DIR}/${KERNEL_SRC_DIRNAME}/${BUILD_ID}"
TOOLCHAIN_DIR="${INSTALL_DIR}/${TOOLCHAIN_DIRNAME}/${BUILD_ID}"
TOOLCHAIN_ENV_DIR="${INSTALL_DIR}/${TOOLCHAIN_ENV_DIRNAME}/${BUILD_ID}"
info INSTALL_DIR="${INSTALL_DIR}"
info BUILD_ID="${BUILD_ID}"
echo
# The directory paths are only shown in verbose mode (ECHO=info).
"${ECHO}" FETCHED_FILES_DIR="${FETCHED_FILES_DIR}"
"${ECHO}" KERNEL_HEADERS_DIR="${KERNEL_HEADERS_DIR}"
"${ECHO}" KERNEL_SRC_DIR="${KERNEL_SRC_DIR}"
"${ECHO}" TOOLCHAIN_DIR="${TOOLCHAIN_DIR}"
"${ECHO}" TOOLCHAIN_ENV_DIR="${TOOLCHAIN_ENV_DIR}"
"${ECHO}"
}
# Print a table of available COS builds: build ID, milestone, family,
# (optionally) deprecation status, and which release files exist in the
# GCS bucket. Results of the gcloud/gsutil listings are cached in /tmp
# for an hour.
subcmd_list() {
local header
local n
local build_id
local all_lines
local line
# If we generated the list of images within the past hour, use it.
if [[ ! -s "${TMP_IMAGE_LIST}" || -z "$(find "${TMP_IMAGE_LIST}" -cmin -60)" ]]; then
info getting the list of images from "${COS_IMAGE_PROJECT}"
list_cos_images > "${TMP_IMAGE_LIST}"
fi
# If we generated the list of build IDs within the past hour, use it.
if [[ ! -s "${TMP_BUILD_ID_FILES}" || -z "$(find "${TMP_BUILD_ID_FILES}" -cmin -60)" ]]; then
info getting the list of builds from "${COS_GCS_BUCKET}"
gsutil ls -r "${COS_GCS_BUCKET}" > "${TMP_BUILD_ID_FILES}"
fi
# Get and sort the list of build IDs in $COS_GCS_BUCKET.
if [[ -n "${BUILD_ID}" ]]; then
echo "${BUILD_ID}" > "${TMP_BUILD_ID_LIST}"
# The $BUILD_ID may be deprecated or obsolete, but
# because it was specified on the command line, we
# still want to print it.
ALL="yes"
else
# Directory markers look like "gs://.../NNN.NNN.NNN/:"; strip the
# bucket prefix and trailing "/:" and version-sort the IDs.
grep '^gs://.*:$' "${TMP_BUILD_ID_FILES}" | \
grep -E '[0-9]+\.[0-9]+\.[0-9]+' | \
sed -e "s;${COS_GCS_BUCKET}/;;" -e "s;/:;;" | \
sort -V > "${TMP_BUILD_ID_LIST}"
fi
# Build and print the header.
header="BUILD_ID MS FAMILY"
if [[ -n "${ALL}" ]]; then
header="${header} STAT"
fi
header="${header} HDR SRC KEY TLC"
echo "${header}"
n=0
while read -r build_id; do
# Although we no longer create releases with the exact
# same build ID in different image families, there are
# still older releases like cos-65-10323-104-0 and
# cos-stable-65-10323-104-0 that do have the same
# build ID. So, grep can return multiple lines.
all_lines=("$(grep "${build_id//./-}" "${TMP_IMAGE_LIST}")")
while read -r line; do
if [[ ("${line}" == *"DEPRECATED"* || "${line}" == *"OBSOLETE"*) && -z "${ALL}" ]]; then
continue
fi
milestone_family=($(get_milestone_family "${line}"))
printf "%-14s %2s %6s" "${build_id}" "${milestone_family[0]}" "${milestone_family[1]}"
if [[ -n "${ALL}" ]]; then
if [[ "${line}" == *"DEPRECATED"* ]]; then
echo -n " dep"
elif [[ "${line}" == *"OBSOLETE"* ]]; then
echo -n " obs"
else
echo -n " "
fi
fi
echo -n " "
# One +++/--- column per release file (headers, source, key, toolchain).
for f in "${FILES_TO_FETCH[@]}"; do
if grep -q "/${build_id}/${f}\$" "${TMP_BUILD_ID_FILES}"; then
echo -n "+++ "
else
echo -n "--- "
fi
done
echo
n=$((n + 1))
# Repeat the header every 25 rows for readability.
if [[ "${n}" -gt 25 ]]; then
echo
echo "${header}"
n=0
fi
done <<< "${all_lines[@]}"
done < "${TMP_BUILD_ID_LIST}"
}
# Fetch the release files and their md5 checksums into $FETCHED_FILES_DIR,
# verify them, and (unless -x was given) extract/install them. A missing
# toolchain, trusted key, or toolchain_env is tolerated (older builds do
# not publish them); a missing headers or source tarball is fatal.
subcmd_fetch() {
local f
local md5 # md5sum checksum file
mkdir -p "${FETCHED_FILES_DIR}"
for f in "${FILES_TO_FETCH[@]}"; do
if [[ -s "${FETCHED_FILES_DIR}/${f}" ]]; then
"${ECHO}" "${FETCHED_FILES_DIR}/${f}" already exists
else
"${ECHO}" fetching "${COS_GCS_BUCKET}/${BUILD_ID}/${f}"
if ! fetch_file "${COS_GCS_BUCKET}/${BUILD_ID}/${f}" "${FETCHED_FILES_DIR}/${f}"; then
# Failing to fetch toolchain, toolchain_env or trusted key is not fatal
# because older builds do not have them.
if [[ "${f}" != "${TOOLCHAIN}" && "${f}" != "${TRUSTED_KEY}" && "${f}" != "${TOOLCHAIN_ENV_FILENAME}" ]]; then
fatal could not fetch "${COS_GCS_BUCKET}/${BUILD_ID}/${f}"
fi
warn could not fetch "${COS_GCS_BUCKET}/${BUILD_ID}/${f}"
# BUG FIX: remove the local partial download; the previous code passed
# the gs:// URL to rm, which can never match a local file.
rm -f "${FETCHED_FILES_DIR}/${f}"
fi
# Any (re)download invalidates the verification/installation stamps.
rm -f "${FETCHED_FILES_DIR}/${f}.verified" "${FETCHED_FILES_DIR}/${f}.installed"
fi
md5="${f}.md5"
if [[ -s "${FETCHED_FILES_DIR}/${md5}" ]]; then
"${ECHO}" "${FETCHED_FILES_DIR}/${md5}" already exists
else
"${ECHO}" fetching "${COS_GCS_BUCKET}/${BUILD_ID}/${md5}"
# The md5 file is missing for old builds, so we tolerate failure.
if ! fetch_file "${COS_GCS_BUCKET}/${BUILD_ID}/${md5}" "${FETCHED_FILES_DIR}/${md5}"; then
# This error is not fatal because older tarballs do not have
# md5sum checksum files.
warn could not fetch "${COS_GCS_BUCKET}/${BUILD_ID}/${md5}"
rm -f "${FETCHED_FILES_DIR}/${md5}"
fi
fi
done
verify_fetched_files
if [[ "${EXTRACT}" == "yes" ]]; then
extract_files
fi
}
# Build the kernel: fetch sources and toolchain, then run make in the
# kernel tree with the COS cross toolchain. Under --print, ${PRINT_CMD}
# is "echo" so the commands are displayed instead of executed.
subcmd_build() {
subcmd_fetch
# set_compilation_env is defined elsewhere in this file; it presumably
# populates $CC and $CXX from the fetched toolchain -- confirm there.
set_compilation_env
${PRINT_CMD} cd "${KERNEL_SRC_DIR}"
${PRINT_CMD} make ${MAKE_VERBOSE} -j $(($(nproc) * 2)) CROSS_COMPILE=x86_64-cros-linux-gnu- CC="${CC}" CXX="${CXX}"
}
# Remove downloaded/extracted artifacts. With -a, wipe every build's
# directories under $INSTALL_DIR; otherwise remove only the current
# $BUILD_ID's directories plus the cached /tmp listings.
subcmd_remove() {
local f
if [[ -n "${ALL}" ]]; then
for f in "${FETCHED_FILES_DIRNAME}" "${KERNEL_HEADERS_DIRNAME}" "${KERNEL_SRC_DIRNAME}" "${TOOLCHAIN_DIRNAME}" "${TOOLCHAIN_ENV_DIRNAME}"; do
info removing "${INSTALL_DIR}/${f}"
# shellcheck disable=SC2115
rm -rf "${INSTALL_DIR}/${f}"
done
return
fi
# Per-build directories (empty path variables are skipped defensively).
for f in "${FETCHED_FILES_DIR}" "${KERNEL_HEADERS_DIR}" "${KERNEL_SRC_DIR}" "${TOOLCHAIN_DIR}" "${TOOLCHAIN_ENV_DIR}"; do
if [[ -n "${f}" ]]; then
info removing "${f}"
rm -rf "${f}"
fi
done
# Cached image/build listings used by the list subcommand.
for f in "${TMP_IMAGE_LIST}" "${TMP_BUILD_ID_LIST}" "${TMP_BUILD_ID_FILES}"; do
if [[ -f "${f}" ]]; then
info removing "${f}"
rm -f "${f}"
fi
done
}
# List all cos-cloud images (including deprecated ones) to stdout.
list_cos_images() {
gcloud compute images list --project "${COS_IMAGE_PROJECT}" --no-standard-images --show-deprecated
}
# Derive "<milestone> <family>" from one line of `gcloud compute images
# list` output and echo it. Two image-name layouts exist:
#   cos-65-10323-104-0     cos-cloud ...  -> "65 lts"  (old LTS style)
#   cos-dev-72-11190-0-0   cos-cloud ...  -> "72 dev"  (family-prefixed)
get_milestone_family() {
local image_line="${1}"
local ms
local fam
if [[ "${image_line}" =~ ^cos-[0-9][0-9]* ]]; then
# Old-style names carry no family component; they are all LTS.
# shellcheck disable=SC2001
ms="$(echo "${image_line}" | sed -e 's/cos-\(.*\)-\(.*\)-\(.*\)-\([0-9][0-9]*\)\( *cos-cloud.*\)/\1/')"
fam="lts"
else
# shellcheck disable=SC2001
ms="$(echo "${image_line}" | sed -e 's/cos-\(.*\)-\(.*\)-\(.*\)-\(.*\)-\([0-9][0-9]*\)\( *cos-cloud.*\)/\2/')"
# shellcheck disable=SC2001
fam="$(echo "${image_line}" | sed -e 's/cos-\(.*\)-\(.*\)-\(.*\)-\(.*\)-\([0-9][0-9]*\)\( *cos-cloud.*\)/\1/')"
fi
echo "${ms}" "${fam}"
}
fetch_file() {
  # Copy one GCS object to a local path.
  # $1 - gs:// source URI; $2 - local destination path.
  # Returns non-zero when the copy fails or yields an empty file.
  local source_uri="${1}"
  local target_path="${2}"
  gsutil cp "${source_uri}" "${target_path}" 2>/dev/null || return 1
  [[ -s "${target_path}" ]] || return 1
}
verify_fetched_files() {
# Verify each fetched artifact against its ".md5" sidecar.
# Success is memoized with an empty "<file>.verified" marker so reruns
# skip the hashing; a checksum mismatch aborts the whole script.
local f
local checksum
for f in "${FILES_TO_FETCH[@]}"; do
f="${FETCHED_FILES_DIR}/${f}"
# Older builds ship no md5 sidecars; warn instead of failing.
if [[ ! -f "${f}.md5" ]]; then
warn no "${f}.md5", skipping verification
continue
fi
if [[ -f "${f}.verified" ]]; then
"${ECHO}" "${f}": already verified
continue
fi
checksum="$(md5sum "${f}" | awk '{ print $1 }')"
if [[ "${checksum}" == "$(cat "${f}.md5")" ]]; then
"${ECHO}" verified "${f}"
touch "${f}.verified"
else
fatal "${f}" md5sum mismatch: expected "$(cat "${f}.md5")", got "${checksum}"
fi
done
}
extract_files() {
# Unpack the fetched tarballs (toolchain, toolchain_env, kernel headers,
# kernel source) into their install directories, set up the kernel
# .config, and copy the module-signing trusted key when needed.
# Each step is memoized with a "<file>.installed" marker.
local f
local installed="no"
local kernel_config="${KERNEL_SRC_DIR}/.config"
# 1) Cross-compiler toolchain.
if [[ -f "${FETCHED_FILES_DIR}/${TOOLCHAIN}.installed" ]]; then
"${ECHO}" toolchain already installed
else
info installing toolchain
mkdir -p "${TOOLCHAIN_DIR}"
tar -C "${TOOLCHAIN_DIR}" -xf "${FETCHED_FILES_DIR}/${TOOLCHAIN}"
touch "${FETCHED_FILES_DIR}/${TOOLCHAIN}.installed"
installed="yes"
fi
# 2) toolchain_env (optional; may be absent for old builds).
if [[ -f "${FETCHED_FILES_DIR}/${TOOLCHAIN_ENV_FILENAME}.installed" ]]; then
"${ECHO}" toolchain_env already installed
elif [[ -f "${FETCHED_FILES_DIR}/${TOOLCHAIN_ENV_FILENAME}" ]]; then
info installing toolchain_env
mkdir -p "${TOOLCHAIN_ENV_DIR}"
cp "${FETCHED_FILES_DIR}/${TOOLCHAIN_ENV_FILENAME}" "${TOOLCHAIN_ENV_DIR}"
touch "${FETCHED_FILES_DIR}/${TOOLCHAIN_ENV_FILENAME}.installed"
installed="yes"
fi
# 3) Kernel headers.
if [[ -f "${FETCHED_FILES_DIR}/${KERNEL_HEADERS}.installed" ]]; then
"${ECHO}" kernel headers already installed
else
info installing kernel headers
mkdir -p "${KERNEL_HEADERS_DIR}"
tar -C "${KERNEL_HEADERS_DIR}" -xf "${FETCHED_FILES_DIR}/${KERNEL_HEADERS}"
touch "${FETCHED_FILES_DIR}/${KERNEL_HEADERS}.installed"
installed="yes"
fi
# 4) Kernel source; also refresh the convenience "kernel" symlink
# next to the versioned source directory.
if [[ -f "${FETCHED_FILES_DIR}/${KERNEL_SRC}.installed" ]]; then
"${ECHO}" kernel source already installed
else
info installing kernel source
mkdir -p "${KERNEL_SRC_DIR}"
tar -C "${KERNEL_SRC_DIR}" -xf "${FETCHED_FILES_DIR}/${KERNEL_SRC}"
(cd "$(dirname "${KERNEL_SRC_DIR}")" && rm -f kernel && ln -s "$(basename "${KERNEL_SRC_DIR}")" kernel)
touch "${FETCHED_FILES_DIR}/${KERNEL_SRC}.installed"
installed="yes"
fi
# 5) Kernel .config: honor an explicit KERNEL_CONFIG (possibly the
# running kernel's /proc/config.gz), otherwise fall back to the
# config shipped inside the kernel headers.
if [[ -n "${KERNEL_CONFIG}" ]]; then
info creating kernel config file from "${KERNEL_CONFIG}"
if [[ "${KERNEL_CONFIG}" == "/proc/config.gz" ]]; then
zcat "${KERNEL_CONFIG}" > "${kernel_config}"
else
cp -a "${KERNEL_CONFIG}" "${KERNEL_SRC_DIR}"
fi
installed="yes"
else
if [[ -s "${kernel_config}" ]]; then
"${ECHO}" "${kernel_config}" already exists
else
info copying kernel config from kernel headers
f="$(eval echo "${KERNEL_HEADERS_DIR}"/usr/src/linux-headers-*/.config)"
if [[ ! -f "${f}" ]]; then
fatal "${f}" does not exist
fi
cp -a "${f}" "${kernel_config}"
installed="yes"
fi
fi
# Check if we fetched a trusted key and need to copy it to kernel source directory.
output="$(grep -w "CONFIG_SYSTEM_TRUSTED_KEYS" "${kernel_config}")" || true
if echo "${output}" | grep -qw "certs/${TRUSTED_KEY}"; then
if [[ -f "${FETCHED_FILES_DIR}/${TRUSTED_KEY}" ]]; then
if [[ ! -f "${KERNEL_SRC_DIR}/certs/${TRUSTED_KEY}" ]]; then
info copying trusted key to "${KERNEL_SRC_DIR}/certs/${TRUSTED_KEY}"
cp -a "${FETCHED_FILES_DIR}/${TRUSTED_KEY}" "${KERNEL_SRC_DIR}/certs/${TRUSTED_KEY}"
installed="yes"
else
"${ECHO}" trusted key "${KERNEL_SRC_DIR}/certs/${TRUSTED_KEY}" already exists
fi
else
# No key available: blank the config option so the build can proceed,
# keeping a .bak of the original config and showing the diff.
warn modifying trusted key kernel config option because we could not fetch the trusted key
sed -i.bak -e 's/CONFIG_SYSTEM_TRUSTED_KEYS=.*/CONFIG_SYSTEM_TRUSTED_KEYS=""/' "${kernel_config}"
diff "${kernel_config}" "${kernel_config}.bak" || true
fi
fi
# Emit a blank separator line only if something was actually installed.
if [[ "${installed}" == "yes" ]]; then
echo
fi
}
set_compilation_env() {
# Put the fetched toolchain first on PATH and select the cross
# compilers.  Sources toolchain_env when present; otherwise falls back
# to the historical hard-coded compiler names.
local path
path="$(realpath "${TOOLCHAIN_DIR}/bin")"
${PRINT_CMD} export PATH="${path}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/google-cloud-sdk/bin"
if [[ -s "${TOOLCHAIN_ENV_DIR}/${TOOLCHAIN_ENV_FILENAME}" ]]; then
# toolchain_env is expected to define CC/CXX (and possibly more).
source "${TOOLCHAIN_ENV_DIR}/${TOOLCHAIN_ENV_FILENAME}"
else
# To support COS build not having toolchain_env file
CC="x86_64-cros-linux-gnu-gcc"
CXX="x86_64-cros-linux-gnu-g++"
fi
}
info() {
  # With arguments: print a blue "INFO:" message to stderr.
  # Without arguments: emit a blank line (to stdout), used as a spacer.
  if [[ -z "${*}" ]]; then
    echo
    return
  fi
  >&2 echo -e "${BLUE_S}INFO: ${*}${ANSI_E}"
}
warn() {
  # Print a purple "WARNING:" message to stderr, unless quiet mode is
  # active (quiet mode sets ECHO to the no-op command ":").
  [[ "${ECHO}" == ":" ]] && return
  >&2 echo -e "${PURPLE_S}WARNING: ${*}${ANSI_E}"
}
error() {
  # Print a red "ERROR:" message to stderr.  Does not exit; see fatal().
  >&2 echo -e "${RED_S}ERROR: ${*}${ANSI_E}"
}
fatal() {
# Report the message via error() and terminate the script with status 1.
error "${@}"
exit 1
}
main "${@}"
| true |
519291006c700ef2297498a023a60083cb99cf5d
|
Shell
|
KainosGurung/shellscripts
|
/misc/drinking_age.sh
|
UTF-8
| 531 | 3.59375 | 4 |
[] |
no_license
|
#! /bin/bash
# Interactive demo: prompt for the user's age and report whether they
# may legally drink (legal age hard-coded to 16 here).
echo "Hello $USER, How old are you?"
# NOTE(review): "[ true ]" just tests a non-empty string, so this loop
# only exits via "break" below; any non-numeric input re-prompts forever.
while [ true ]; do
read input
#if the input is an integer, break
if [[ $input =~ ^[0-9]+$ ]]; then
break
fi
echo "Please input a positive integer"
done
if [[ $input -ge "16" ]]; then
echo "Congratulations! You are of legal age to drink alcohol."
if [[ $input -ge "18" ]]; then
# Tongue-in-cheek estimate: 100 liters per year past age 16.
amount=$((($input-16)*100))
echo "You have drunk $amount liters of alcohol thus far."
fi
else
echo "You must wait $((16-$input)) year(s) before being legally allowed to drink."
fi
| true |
667079613bf7a8d9a6395dd6770f0d227b8644e4
|
Shell
|
kartoza/docker-mapserver
|
/setup.sh
|
UTF-8
| 2,407 | 2.84375 | 3 |
[] |
no_license
|
#!/bin/bash
# Build script for the mapserver image: compiles curl, harfbuzz, geos
# and mapserver from source, then does minimal apache/php setup.
# Expects MAPSERVER_VERSION in the environment (used by git checkout);
# downloads are cached under /tmp/resources so reruns skip the fetch.
#Install curl from source
if [[ ! -f /tmp/resources/curl-7.50.0.tar.gz ]]; then \
wget -c https://curl.haxx.se/download/curl-7.50.0.tar.gz -P /tmp/resources/; \
fi;\
cd /tmp/resources && \
tar -zxvf curl-7.50.0.tar.gz && \
cd curl-7.50.0 && \
./configure --with-ssl=/usr/local/ssl --enable-ares --enable-versioned-symbols && \
make -j 4 install
# Build harfbuzz (text shaping) from source.
#VERSION=harfbuzz-0.9.19.tar.bz2
VERSION=harfbuzz-2.6.4
EXTENSION=.tar.xz
if [ ! -f /tmp/resources/${VERSION}${EXTENSION} ]; then \
wget http://www.freedesktop.org/software/harfbuzz/release/${VERSION}${EXTENSION} -P /tmp/resources/; \
fi; \
cd /tmp/resources &&\
tar xf ${VERSION}${EXTENSION} &&\
cd $VERSION && \
./configure && \
make && \
make -j 4 install && \
ldconfig
# Compile geos
GEOS_VERSION=3.8.1
if [[ ! -f /tmp/resources/geos-${GEOS_VERSION}.tar.bz2 ]]; then \
wget http://download.osgeo.org/geos/geos-${GEOS_VERSION}.tar.bz2 -P /tmp/resources/; \
fi; \
cd /tmp/resources && \
tar xjf geos-${GEOS_VERSION}.tar.bz2 && \
cd geos-${GEOS_VERSION} && \
./configure && \
make -j 4 install
# Clone (if needed), check out MAPSERVER_VERSION, and build mapserver
# with a wide set of optional features enabled.
if [ ! -d /tmp/resources/mapserver ]; then \
git clone https://github.com/mapserver/mapserver /tmp/resources/mapserver; \
fi;\
mkdir -p /tmp/resources/mapserver/build && \
cd /tmp/resources/mapserver/ && \
git checkout ${MAPSERVER_VERSION} && \
cd ./build && \
cmake /tmp/resources/mapserver/ -DWITH_THREAD_SAFETY=1 \
-DWITH_KML=1 \
-DWITH_SOS=1 \
-DWITH_WMS=1 \
-DWITH_FRIBIDI=1 \
-DWITH_HARFBUZZ=1 \
-DWITH_ICONV=1 \
-DWITH_CAIRO=1 \
-DWITH_RSVG=1 \
-DWITH_MYSQL=1 \
-DWITH_GEOS=1 \
-DWITH_POSTGIS=1 \
-DWITH_CURL=1 \
-DWITH_CLIENT_WMS=1 \
-DWITH_CLIENT_WFS=1 \
-DWITH_WFS=1 \
-DWITH_WCS=1 \
-DWITH_LIBXML2=1 \
-DWITH_GIF=1 \
-DWITH_EXEMPI=1 \
-DWITH_XMLMAPFILE=1 \
-DWITH_PYTHON=ON \
-DWITH_PERL=ON \
-DWITH_PIXMAN=1 \
-DWITH_PROTOBUFC=1 \
-DWITH_FCGI=1 \
-DWITH_PHPNG=1 \
-DWITH_PHP=ON && \
make -j 4 install && \
ldconfig
# Apache/php smoke-test setup, then remove the build trees.
echo "ServerName localhost" >> /etc/apache2/apache2.conf
echo '<?php phpinfo();' > /var/www/html/info.php
rm -rf /tmp/resources/mapserver /tmp/resources/geos-${GEOS_VERSION}.tar.bz2 \
/tmp/resources/${VERSION}${EXTENSION}
| true |
9b0192ada50efbdc85c512539096394d84df05ce
|
Shell
|
namsseng/nsstack_allinone_test
|
/nsstack_heat.sh
|
UTF-8
| 2,019 | 3.265625 | 3 |
[] |
no_license
|
#!/bin/bash
# Install and configure OpenStack Heat, sync its database, restart the
# services, then launch a test stack.  Must run as root; reads service
# credentials from nsstack_setuprc.
# Make sure only root can run our script
if [ "$(id -u)" != "0" ]; then
echo "You need to be 'root' dude." 1>&2
exit 1
fi
. ./nsstack_setuprc
password=$OS_PASSWORD
managementip=$OS_SERVICE_IP
hostname=$OS_HOST_NAME
apt-get install -y heat-api heat-api-cfn heat-engine
sleep 5
# In-place edit of /etc/heat/heat.conf: DB connection, logging, rabbit,
# keystone auth and metadata/waitcondition URLs.
sed -e "
/^connection=sq.*$/s/^.*$/connection = mysql:\/\/heat:$password@$hostname\/heat/
/^#verbose=.*$/s/^.*$/verbose=true/
/^#log_dir=.*$/s/^.*$/log_dir=\/var\/log\/heat/
/^#rabbit_host=.*$/s/^.*$/rabbit_host = $hostname/
/^#rabbit_password=.*$/s/^.*$/rabbit_password = $password/
/\[keystone_authtoken\]/a auth_uri = http:\/\/$hostname:5000\nauth_port = 35357\nauth_protocol = http\nauth_uri = http:\/\/$hostname:5000\/v2.0\nadmin_tenant_name = service\nadmin_user = heat\nadmin_password = $password
/\[ec2authtoken\]/a auth_uri = http:\/\/$hostname:5000
/^#heat_metadata_server_url=.*$/s/^.*$/heat_metadata_server_url=http:\/\/$managementip:8000/
/^#heat_waitcondition_server_url=.*$/s/^.*$/heat_waitcondition_server_url=http:\/\/$managementip:8000\/v1\/waitcondition/
" -i /etc/heat/heat.conf
su -s /bin/sh -c "heat-manage db_sync" heat
service heat-api restart
service heat-api-cfn restart
service heat-engine restart
sleep 4
# Switch to demo-tenant credentials and create a one-server test stack.
source demo_openrc.sh
cat > ns-stack.yml <<EOF
heat_template_version: 2013-05-23
description: Test Template
parameters:
ImageID:
type: string
description: Image use to boot a server
NetID:
type: string
description: Network ID for the server
resources:
server1:
type: OS::Nova::Server
properties:
name: "Test server"
image: { get_param: ImageID }
flavor: "m1.tiny"
networks:
- network: { get_param: NetID }
outputs:
server1_private_ip:
description: IP address of the server in the private network
value: { get_attr: [ server1, first_address ] }
EOF
sleep 1
NET_ID=$(nova net-list | awk '/ demo-net / { print $2 }')
heat stack-create -f ns-stack.yml -P "ImageID=Cirros 0.3.0;NetID=$NET_ID" testStack
| true |
5813b1ca4942b4d5e121caa41a434392694527c1
|
Shell
|
luicahleo/scriptsSTA
|
/P05/instalar-java.sh
|
UTF-8
| 286 | 2.71875 | 3 |
[] |
no_license
|
# Install (commented out) and verify the OpenJDK 7 development package.
PRACTICA="P05"
# Resolve the script's own directory into ORIGEN, then return to the
# starting directory.  NOTE(review): ORIGEN/DIRRAIZ are computed but
# never used below -- presumably kept for consistency with sibling scripts.
ACTUAL=`pwd` && cd `dirname $0` && ORIGEN=`pwd` && cd ${ACTUAL}
DIRRAIZ=${ORIGEN}"/"
JDK=java-1.7.0-openjdk-devel
echo "Instalando paquete Java..."
#yum install ${JDK}
echo "Verificando que la instalacion del paquete Java ha sido correcta..."
# Prints the JDK version; fails loudly if java is not installed.
/usr/bin/java -version
| true |
2f6dfab92b6de0199a1f9af04aaddafbf71cf15c
|
Shell
|
gmoben/dotfiles
|
/.local/bin/i3-battery-monitor
|
UTF-8
| 1,895 | 3.578125 | 4 |
[] |
no_license
|
#!/bin/bash
# Modified from https://github.com/arjvik/dots/blob/master/i3/bin/i3-battery-monitor
# Polls /sys battery state once per second and sends desktop
# notifications on power (dis)connect, low/critical charge, and full
# charge; suspends the machine below the SUSPEND threshold.
# Thresholds are positional (all optional):
#   $1 suspend%, $2 critical%, $3 low%, $4 full%.
BATTERY="BAT0"
FULL=${4:-100}
LOW=${3:-15}
CRITICAL=${2:-5}
SUSPEND=${1:-3}
# Kill other instances of i3-battery-monitor
if pids=$(pidof -x $0 -o $$); then
kill $pids 2>/dev/null
fi
# log "Parameters: FULL=$FULL LOW=$LOW CRITICAL=$CRITICAL"
# Edge-trigger flags so each notification fires once per state change.
NOTIFY_ON_FULL=false
NOTIFY_ON_POWER=false
NOTIFY_ON_BATTERY=false
# Loop indefinitely until killed
while true; do
CHARGE=$(cat "/sys/class/power_supply/$BATTERY/capacity")
STATUS=$(cat "/sys/class/power_supply/$BATTERY/status")
if [[ $STATUS != "Full" ]]; then
NOTIFY_ON_FULL=true
fi
if [[ $STATUS =~ (Charging|Unknown|Not charging)$ ]]; then
# On AC power: announce the (re)connect once.
if [[ $NOTIFY_ON_POWER == true ]]; then
notify-send -u normal "Power Connected" "Battery: $CHARGE%"
NOTIFY_ON_POWER=false
fi
NOTIFY_ON_BATTERY=true
elif [[ $STATUS == "Discharging" ]]; then
NOTIFY_ON_POWER=true
if [[ $NOTIFY_ON_BATTERY == true ]]; then
notify-send -u normal "Power Disconnected" "Battery: $CHARGE%"
NOTIFY_ON_BATTERY=false
fi
# Escalating warnings while discharging.
if [[ $CHARGE -le $SUSPEND ]]; then
notify-send -u critical "Battery Ultra-critical ($CHARGE%)!" "Suspending..."
systemctl suspend-then-hibernate
elif [[ $CHARGE -le $CRITICAL ]]; then
notify-send -u critical "Battery Critical ($CHARGE%)!" "Will suspend at $SUSPEND%"
elif [[ $CHARGE -le $LOW ]]; then
notify-send -u normal "Battery Low ($CHARGE%)" "Plug in soon!"
fi
else
# Status "Full": notify once when at/above the FULL threshold.
if [[ $NOTIFY_ON_FULL == true && $CHARGE -ge $FULL ]]; then
notify-send -u low "Battery Full ($CHARGE%)"
NOTIFY_ON_FULL=false
fi
fi
# echo $CHARGE $STATUS $NOTIFY_ON_FULL $NOTIFY_ON_POWER $NOTIFY_ON_BATTERY
sleep 1s
done
| true |
e443cf4ae2c64093b655884eb6275839540bbcef
|
Shell
|
BastianHavers/ananke
|
/reproduce/figure18.sh
|
UTF-8
| 988 | 3.609375 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Reproduce Figure 18: run the "synthetic2" experiment, then plot it.
# Shared paths/helpers (OUTPUT_PATH, PLOT_SCRIPT, COMMIT_HASH,
# countdown) come from shared_vars.sh.
. shared_vars.sh
EXPERIMENT_NAME="synthetic2"
OUTPUT_DATA_FOLDER="${COMMIT_HASH}"_"${EXPERIMENT_NAME}"
cd ..
echo "Reproduce Fig. 18 in two steps:"
echo " 1) run the underlying experiment"
echo " 2) plot the experiment data"
test -d "${OUTPUT_PATH}/${OUTPUT_DATA_FOLDER}" && { echo "--------"; echo WARNING: Output folder "${OUTPUT_PATH}/${OUTPUT_DATA_FOLDER}" exists already. Proceeding anyways.; }
echo "WARNING: This experiment consumes a lot of memory. Please ensure that the Flink taskexecutor is configured to use at least 32GB RAM."
countdown 5 "to begin."
# run experiment
./scripts/run.sh scripts/experiments/synthetic2.sh -r 10 -d 10 -c "${EXPERIMENT_NAME}"
# Abort before plotting if the experiment produced no output folder.
test -d "${OUTPUT_PATH}/${OUTPUT_DATA_FOLDER}" || { echo "--------"; echo CRITICAL: Output folder "${OUTPUT_PATH}/${OUTPUT_DATA_FOLDER}" not found, aborting...; exit; }
# plot
python3 "${PLOT_SCRIPT}" --path "${OUTPUT_PATH}" --experiment "${OUTPUT_DATA_FOLDER}" --name synthetic2 --plot synthetic2
| true |
58fe113f064a6dd371cf93d23df1f80318278a37
|
Shell
|
vestige-java/vestige.unix
|
/standard_edition_linux_desktop_python3_rpm/build
|
UTF-8
| 1,490 | 2.8125 | 3 |
[] |
no_license
|
#!/bin/bash
# Package vestige-desktop-python3 as an RPM: stage sources into target/,
# ship them to the "rpmbuilder" host, build and GPG-sign the RPM there,
# and copy the result back.  Requires GPG_NAME in the environment and
# ssh access to the "rpmbuilder" host.
# Version is taken from the spec file's "Version:" field.
VESTIGE_RPM_VERSION=$(cat src/vestige-desktop-python3.spec | grep "Version:" | sed 's/Version:[ \t]*//g')
rm -r target
mkdir -p target/vestige-desktop-python3-$VESTIGE_RPM_VERSION
cp src/Makefile target/vestige-desktop-python3-$VESTIGE_RPM_VERSION/
cp src/vestige-desktop-python3.spec target/
cp -r ../standard_edition_linux_desktop/src/hicolor target/vestige-desktop-python3-$VESTIGE_RPM_VERSION/
cp ../standard_edition_linux_desktop/src/vestige_python3_launcher.py target/vestige-desktop-python3-$VESTIGE_RPM_VERSION/vestige
chmod +x target/vestige-desktop-python3-$VESTIGE_RPM_VERSION/vestige
cp ../standard_edition_linux_desktop/src/vestige.desktop target/vestige-desktop-python3-$VESTIGE_RPM_VERSION/
# Strip VCS/macOS junk.  NOTE(review): no -prune here, so find will also
# descend into directories it just deleted and print harmless errors.
find target \( -name .svn -o -name .DS_Store \) -exec rm -rf {} \;
pushd target && tar -czf vestige-desktop-python3.tar.gz vestige-desktop-python3-$VESTIGE_RPM_VERSION && popd
scp target/vestige-desktop-python3.tar.gz rpmbuilder:rpmbuild/SOURCES/vestige-desktop-python3.tar.gz
scp target/vestige-desktop-python3.spec rpmbuilder:rpmbuild/SPECS/vestige-desktop-python3.spec
ssh rpmbuilder rpmbuild -bb '~/rpmbuild/SPECS/vestige-desktop-python3.spec'
ssh rpmbuilder 'echo "" | setsid rpm --define "_gpg_name '$GPG_NAME'" --addsign rpmbuild/RPMS/noarch/vestige-desktop-python3-'$VESTIGE_RPM_VERSION'-1.noarch.rpm'
scp rpmbuilder:rpmbuild/RPMS/noarch/vestige-desktop-python3-$VESTIGE_RPM_VERSION-1.noarch.rpm target/vestige-desktop-python3-$VESTIGE_RPM_VERSION-1.noarch.rpm
| true |
4a09eb3919d6d1aedad864ebdb03f820a4a9635c
|
Shell
|
bruceauyeung/quickquest
|
/quickquest.sh
|
UTF-8
| 909 | 3.140625 | 3 |
[] |
no_license
|
#!/bin/sh
# Launcher for QuickQuest: picks 32/64-bit native libraries, builds the
# Java classpath (qtjambi + app jar + everything in lib/), and starts
# the JVM with a local-only JMX endpoint on port 9898.
LONG_BIT=`getconf LONG_BIT`
if [ "$LONG_BIT" == "32" ];then
USR_LIB_PATH="/usr/lib";
else
USR_LIB_PATH="/usr/lib64";
fi
QUICKQUEST_PROG_DIR=$(dirname $0);
# Native libs for jnotify and qtjambi must be on LD_LIBRARY_PATH.
export LD_LIBRARY_PATH=$QUICKQUEST_PROG_DIR/jnotify-linux$LONG_BIT/:$QUICKQUEST_PROG_DIR/qtjambi-linux$LONG_BIT/:$LD_LIBRARY_PATH;
JAVA_CLASSPATH="$QUICKQUEST_PROG_DIR/qtjambi-linux$LONG_BIT/@qtjambi.jar.file.name@:$QUICKQUEST_PROG_DIR/quickquest-linux.jar";
for f in `ls $QUICKQUEST_PROG_DIR/lib/*.jar`; do
JAVA_CLASSPATH=$JAVA_CLASSPATH:$f;
done
echo "QuickQuest Program Path:" $QUICKQUEST_PROG_DIR;
echo "Class Path:" $JAVA_CLASSPATH;
java -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=9898 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Djava.rmi.server.hostname=127.0.0.1 -Dquickquest.prog.dir="$QUICKQUEST_PROG_DIR" -cp $JAVA_CLASSPATH us.suselinks.quickquest.Main
| true |
04933fd086bdb7a48a9db89345fe5768ce246302
|
Shell
|
yyotti/dotfiles
|
/config/zsh/zshrc
|
UTF-8
| 1,805 | 3.015625 | 3 |
[] |
no_license
|
function __zcompile() # {{{
{
# Byte-compile a zsh script to <file>.zwc when the compiled form is
# missing or older than the source; speeds up subsequent sourcing.
local _src=${1}
local _zwc=${_src}.zwc
if [[ ! -f ${_zwc} ]] || [[ ${_src} -nt ${_zwc} ]]; then
zcompile "${_src}"
fi
} # }}}
# Compile-then-source each rc fragment; order matters below.
__zcompile "${ZDOTDIR}/.zshrc"
__zcompile "${ZDOTDIR}/rc.d/_asdf.zsh" && source "${ZDOTDIR}/rc.d/_asdf.zsh"
if [[ ${WSL_DISTRO_NAME} != '' ]] && [[ ${WSL_GUEST_IP} == '' ]]; then
# for WSL2
return
fi
# Auto-attach tmux for interactive shells when not already inside one.
if ${USE_TMUX:-true} && [[ $- =~ i ]] && [[ ${TMUX} == '' ]] && (( ${+commands[tmux]} )); then
__zcompile "${ZDOTDIR}/rc.d/_tmux.zsh" && source "${ZDOTDIR}/rc.d/_tmux.zsh"
fi
# __zcompile "${ZDOTDIR}/rc.d/_zplugin.zsh" && source "${ZDOTDIR}/rc.d/_zplugin.zsh"
__zcompile "${ZDOTDIR}/rc.d/_zinit.zsh" && source "${ZDOTDIR}/rc.d/_zinit.zsh"
__zcompile "${ZDOTDIR}/rc.d/_utilities.zsh" && source "${ZDOTDIR}/rc.d/_utilities.zsh"
__zcompile "${ZDOTDIR}/rc.d/_options.zsh" && source "${ZDOTDIR}/rc.d/_options.zsh"
__zcompile "${ZDOTDIR}/rc.d/_fzf.zsh" && source "${ZDOTDIR}/rc.d/_fzf.zsh"
__zcompile "${ZDOTDIR}/rc.d/_histories.zsh" && source "${ZDOTDIR}/rc.d/_histories.zsh"
__zcompile "${ZDOTDIR}/rc.d/_completions.zsh" && source "${ZDOTDIR}/rc.d/_completions.zsh"
__zcompile "${ZDOTDIR}/rc.d/_prompt.zsh" && source "${ZDOTDIR}/rc.d/_prompt.zsh"
__zcompile "${ZDOTDIR}/rc.d/_keybinds.zsh" && source "${ZDOTDIR}/rc.d/_keybinds.zsh"
__zcompile "${ZDOTDIR}/rc.d/_aliases.zsh" && source "${ZDOTDIR}/rc.d/_aliases.zsh"
# Optional machine-local overrides.
if [[ -f ${ZDOTDIR}/zshrc.local ]]; then
__zcompile "${ZDOTDIR}/zshrc.local" && source "${ZDOTDIR}/zshrc.local"
fi
autoload -Uz compinit
compinit
zinit cdreplay -q
if [[ -f ${XDG_DATA_HOME}/google-cloud-sdk/completion.zsh.inc ]]; then
# Not sure about the right timing for this
source "${XDG_DATA_HOME}/google-cloud-sdk/completion.zsh.inc"
fi
# Show profiling output when zprof was loaded (zmodload zsh/zprof earlier).
if (( ${+commands[zprof]} )); then
zprof | less
fi
| true |
d4348971e140e617698fba152626103170cda3c5
|
Shell
|
usgs-coupled/phreeqci
|
/phreeqc3/test/ex20b
|
UTF-8
| 150 | 2.75 | 3 |
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#! /bin/sh
# Test ex20b
# Run the PHREEQC example "ex20b" against the iso.dat database and
# propagate the program's exit status.  DATADIR and DBDIR come from the
# caller's environment; PHREEQC may override the binary name.
NAME=ex20b
DB=iso.dat
: ${PHREEQC=phreeqc}
${PHREEQC} $DATADIR/$NAME $NAME.out $DBDIR/$DB $NAME.log
result=$?
exit $result
| true |
16b35d6f50f0c6307235559d7ff01e4aabdb95c4
|
Shell
|
cha63506/chromium-capsicum
|
/tools/wine_valgrind/chrome_tests.sh
|
UTF-8
| 22,880 | 3.359375 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# chromium-runtests.sh [testsuite]
# Script to run a respectable subset of Chromium's test suite
# (excepting parts that run the browser itself, and excepting layout tests).
# Run from parent of src directory.
# By default, runs all test suites. If you specify one testsuite
# (e.g. base_unittests), it only runs that one.
#
# Chromium's test suite uses gtest, so each executable obeys the options
# documented in the wiki at http://code.google.com/p/googletest
# In particular, you can run a single test with --gtest_filter=Foo.Bar,
# and get a full list of tests in each exe with --gtest_list_tests.
#
# Before running the tests, regardless of operating system:
# 1) Make sure your system has at least one printer installed,
# or printing_unittests and unit_tests' PrintJobTest.SimplePrint
# will fail. A fake printer is fine, nothing will be printed.
# 2) Install the test cert as described at
# http://bugs.winehq.org/show_bug.cgi?id=20370
# or net_unittests' HTTPSRequestTest.*, SSLClientSocketTest.*
# and others may fail.
#
# Chrome doesn't start without the --no-sandbox
# option in wine, so skip test suites that invoke it directly until I
# figure out how to jam that in there.
# The bot that runs this script seemed to ignore stderr, so redirect stderr to stdout by default
# FIX: a bare "2>&1" on its own line only applies the redirection to an
# empty command and has no lasting effect.  "exec" makes the redirection
# stick for the remainder of the script, as the comment above intends.
exec 2>&1
usage() {
# Print the help text (options and supported suites) and exit 1.
cat <<_EOF_
Usage: sh chromium-runtests.sh [--options] [suite ...]
Runs chromium tests on Windows or Wine.
Options:
--individual - run tests individually
--groups - run tests grouped by their major gtest name
--gtest_filter X - only run the tests matching X
--target X - test with Debug or Release binaries, default to Debug
--just-crashes - run only tests epected to crash
--just-fails - run only tests epected to fail
--just-flaky - run only tests epected to fail sometimes
--just-hangs - run only tests epected to hang
--list-failures - show list of expected failures
--logfiles - log to one file per test, in logs subdir, rather than stdout
--loops N - run tests N times
-n - dry run, only show what will be done
--suppression_dir - directory containing the suppression files
--timeout N - let each executable run for N seconds (default varies)
--used-suppressions - extract histogram of used valgrind suppressions from current contents of logs directory
--valgrind - run the tests under valgrind
--vnc N - run the tests inside a vnc server running on display N
--winedebug chan - e.g. --windebug +relay,+seh
Currently supported suites:
app_unittests base_unittests courgette_unittests googleurl_unittests
ipc_tests media_unittests net_unittests printing_unittests sbox_unittests
sbox_validation_tests setup_unittests tcmalloc_unittests unit_tests
Default is to run all suites. It takes about five minutes to run them all
together, 22 minutes to run them all individually.
_EOF_
exit 1
}
# Tests, grouped by how long they take to run
# Skip ones that require chrome itself for the moment
SUITES_1="googleurl_unittests printing_unittests sbox_validation_tests setup_unittests"
#SUITES_10="app_unittests courgette_unittests ipc_tests reliability_tests sbox_integration_tests sbox_unittests tab_switching_test tcmalloc_unittests url_fetch_test"
SUITES_10="app_unittests courgette_unittests ipc_tests sbox_unittests tcmalloc_unittests"
#SUITES_100="automated_ui_tests installer_util_unittests media_unittests nacl_ui_tests net_perftests net_unittests plugin_tests sync_unit_tests"
SUITES_100="media_unittests net_unittests"
#SUITES_1000="base_unittests interactive_ui_tests memory_test page_cycler_tests perf_tests test_shell_tests unit_tests"
SUITES_1000="base_unittests unit_tests"
#SUITES_10000="ui_tests startup_tests"
# Valgrind command line used when --valgrind is given.
THE_VALGRIND_CMD="/usr/local/valgrind-10880/bin/valgrind \
--gen-suppressions=all \
--leak-check=full \
--num-callers=25 \
--show-possible=no \
--smc-check=all \
--trace-children=yes \
--track-origins=yes \
-v \
--workaround-gcc296-bugs=yes \
"
# Byte-wise, locale-independent text processing for reproducible greps.
LANG=C
# Log lines that indicate a real problem (memory errors, crashes, test failures).
PATTERN="are definitely|uninitialised|Unhandled exception|Invalid read|Invalid write|Invalid free|Source and desti|Mismatched free|unaddressable byte|vex x86|impossible|Assertion |INTERNAL ERROR|Terminated|Test failed|Alarm clock|Command exited with non-zero status"
reduce_verbosity() {
  # Drop valgrind's "--pid--" chatter (from -v), keeping only the
  # used_suppression summary lines, and strip stray carriage returns.
  awk '/^--/ && !/used_suppression:/ { next } { print }' | tr -d '\r'
}
# Filter out known failures
# Avoid tests that hung, failed, or crashed on windows in Dan's reference run,
# or which fail in a way we don't care about on Wine,
# or which hang or crash on wine in a way that keeps other tests from running.
# Also lists url of bug report, if any.
# Format with
# sh chromium-runtests.sh --list-failures | sort | awk '{printf("%-21s %-20s %-52s %s\n", $1, $2, $3, $4);}'
list_known_failures() {
# Table of known-bad tests: "<suite> <failure-kind> <gtest-filter> <notes...>".
# Consumed by get_test_filter(), which awk-splits on whitespace and
# extracts the third column, so column order must not change.
cat <<_EOF_
app_unittests crash-valgrind IconUtilTest.TestCreateSkBitmapFromHICON http://bugs.winehq.org/show_bug.cgi?id=20634, not a bug, need to figure out how to handle DIB faults
base_unittests hang EtwTraceControllerTest.EnableDisable http://bugs.winehq.org/show_bug.cgi?id=20946, advapi32.ControlTrace() not yet implemented
base_unittests crash EtwTraceConsumer*Test.* http://bugs.winehq.org/show_bug.cgi?id=20946, advapi32.OpenTrace() unimplemented
base_unittests crash EtwTraceProvider*Test.* http://bugs.winehq.org/show_bug.cgi?id=20946, advapi32.RegisterTraceGuids() unimplemented
base_unittests dontcare BaseWinUtilTest.FormatMessageW
base_unittests dontcare FileUtilTest.CountFilesCreatedAfter
base_unittests dontcare FileUtilTest.GetFileCreationLocalTime
base_unittests dontcare PEImageTest.EnumeratesPE Alexandre triaged
base_unittests dontcare-winfail TimeTicks.HighResNow fails if run individually on windows
base_unittests dontcare WMIUtilTest.*
base_unittests fail HMACTest.HMACObjectReuse http://bugs.winehq.org/show_bug.cgi?id=20340
base_unittests fail HMACTest.HmacSafeBrowsingResponseTest http://bugs.winehq.org/show_bug.cgi?id=20340
base_unittests fail HMACTest.RFC2202TestCases http://bugs.winehq.org/show_bug.cgi?id=20340
base_unittests fail_wine_vmware RSAPrivateKeyUnitTest.ShortIntegers
base_unittests flaky-dontcare StatsTableTest.MultipleProcesses http://bugs.winehq.org/show_bug.cgi?id=20606
base_unittests hang-dontcare DirectoryWatcherTest.*
base_unittests hang-valgrind JSONReaderTest.Reading # not really a hang, takes 400 seconds
base_unittests hang-valgrind RSAPrivateKeyUnitTest.InitRandomTest # not really a hang, takes 300 seconds
base_unittests flaky-valgrind TimeTicks.Deltas # fails half the time under valgrind, timing issue?
base_unittests hang-valgrind TimerTest.RepeatingTimer*
base_unittests hang-valgrind TimeTicks.WinRollover # not really a hang, takes 1000 seconds
base_unittests fail-valgrind ConditionVariableTest.LargeFastTaskTest # fails under wine + valgrind TODO(thestig): investigate
base_unittests fail-valgrind ProcessUtilTest.CalcFreeMemory # fails under wine + valgrind TODO(thestig): investigate
base_unittests fail-valgrind ProcessUtilTest.KillSlowChild # fails under wine + valgrind TODO(thestig): investigate
base_unittests fail-valgrind ProcessUtilTest.SpawnChild # fails under wine + valgrind TODO(thestig): investigate
base_unittests flaky-valgrind StatsTableTest.StatsCounterTimer # flaky, timing issues? TODO(thestig): investigate
base_unittests fail-valgrind StatsTableTest.StatsRate # fails under wine + valgrind TODO(thestig): investigate
base_unittests fail-valgrind StatsTableTest.StatsScope # fails under wine + valgrind TODO(thestig): investigate
ipc_tests flaky IPCChannelTest.ChannelTest http://bugs.winehq.org/show_bug.cgi?id=20628
ipc_tests flaky IPCChannelTest.SendMessageInChannelConnected http://bugs.winehq.org/show_bug.cgi?id=20628
ipc_tests hang IPCSyncChannelTest.* http://bugs.winehq.org/show_bug.cgi?id=20390
media_unittests crash FFmpegGlueTest.OpenClose
media_unittests crash FFmpegGlueTest.Read
media_unittests crash FFmpegGlueTest.Seek
media_unittests crash FFmpegGlueTest.Write
media_unittests fail_wine_vmware WinAudioTest.PCMWaveStreamTripleBuffer
media_unittests hang-valgrind WinAudioTest.PCMWaveSlowSource
net_unittests fail SSLClientSocketTest.Read_Interrupted http://bugs.winehq.org/show_bug.cgi?id=20748
net_unittests fail HTTPSRequestTest.HTTPSExpiredTest # https/ssl failing on the bot, bad Wine? TODO(thestig): investigate
net_unittests fail HTTPSRequestTest.HTTPSGetTest # https/ssl failing on the bot, bad Wine? TODO(thestig): investigate
net_unittests fail HTTPSRequestTest.HTTPSMismatchedTest # https/ssl failing on the bot, bad Wine? TODO(thestig): investigate
net_unittests fail SSLClientSocketTest.Connect # https/ssl failing on the bot, bad Wine? TODO(thestig): investigate
net_unittests fail SSLClientSocketTest.Read # https/ssl failing on the bot, bad Wine? TODO(thestig): investigate
net_unittests fail SSLClientSocketTest.Read_FullDuplex # https/ssl failing on the bot, bad Wine? TODO(thestig): investigate
net_unittests fail SSLClientSocketTest.Read_SmallChunks # https/ssl failing on the bot, bad Wine? TODO(thestig): investigate
net_unittests fail URLRequestTestHTTP.HTTPSToHTTPRedirectNoRefererTest # https/ssl failing on the bot, bad Wine? TODO(thestig): investigate
sbox_unittests fail JobTest.ProcessInJob
sbox_unittests fail JobTest.TestCreation
sbox_unittests fail JobTest.TestDetach
sbox_unittests fail JobTest.TestExceptions
sbox_unittests fail RestrictedTokenTest.AddAllSidToRestrictingSids
sbox_unittests fail RestrictedTokenTest.AddMultipleRestrictingSids
sbox_unittests fail RestrictedTokenTest.AddRestrictingSid
sbox_unittests fail RestrictedTokenTest.AddRestrictingSidCurrentUser
sbox_unittests fail RestrictedTokenTest.AddRestrictingSidLogonSession
sbox_unittests fail RestrictedTokenTest.DefaultDacl
sbox_unittests fail RestrictedTokenTest.DeleteAllPrivileges
sbox_unittests fail RestrictedTokenTest.DeleteAllPrivilegesException
sbox_unittests fail RestrictedTokenTest.DeletePrivilege
sbox_unittests fail RestrictedTokenTest.DenyOwnerSid
sbox_unittests fail RestrictedTokenTest.DenySid
sbox_unittests fail RestrictedTokenTest.DenySids
sbox_unittests fail RestrictedTokenTest.DenySidsException
sbox_unittests fail RestrictedTokenTest.ResultToken
sbox_unittests fail ServiceResolverTest.PatchesServices
sbox_unittests flaky IPCTest.ClientFastServer
sbox_validation_tests fail ValidationSuite.*
unit_tests crash BlacklistManagerTest.* http://crbug.com/27726
unit_tests crash SafeBrowsingProtocolParsingTest.TestGetHashWithMac http://bugs.winehq.org/show_bug.cgi?id=20340
unit_tests crash-valgrind DnsMasterTest.MassiveConcurrentLookupTest
unit_tests crash-valgrind NullModelTableViewTest.* http://bugs.winehq.org/show_bug.cgi?id=20553
unit_tests crash-valgrind RenderViewTest.OnPrintPageAsBitmap http://bugs.winehq.org/show_bug.cgi?id=20657 (for wine oom)
unit_tests crash-valgrind TableViewTest.* http://bugs.winehq.org/show_bug.cgi?id=20553
unit_tests dontcare-hangwin UtilityProcessHostTest.ExtensionUnpacker
unit_tests dontcare FirefoxImporterTest.Firefox2NSS3Decryptor # FF2 dlls without symbols cause leaks
unit_tests dontcare ImporterTest.Firefox2Importer # FF2 dlls without symbols cause leaks
unit_tests dontcare SpellCheckTest.SpellCheckText
unit_tests fail EncryptorTest.EncryptionDecryption http://bugs.winehq.org/show_bug.cgi?id=20495
unit_tests fail EncryptorTest.String16EncryptionDecryption http://bugs.winehq.org/show_bug.cgi?id=20495
unit_tests hang-valgrind ExtensionAPIClientTest.* Not really a hang, just takes 30 minutes
unit_tests fail ImporterTest.IEImporter http://bugs.winehq.org/show_bug.cgi?id=20625
unit_tests fail RenderViewTest.InsertCharacters http://bugs.winehq.org/show_bug.cgi?id=20624
unit_tests fail SafeBrowsingProtocolParsingTest.TestVerifyChunkMac http://bugs.winehq.org/show_bug.cgi?id=20340
unit_tests fail SafeBrowsingProtocolParsingTest.TestVerifyUpdateMac http://bugs.winehq.org/show_bug.cgi?id=20340
unit_tests fail_wine_vmware RenderProcessTest.TestTransportDIBAllocation
_EOF_
}
# Times are in seconds, and are twice as high as slowest observed runtime so far in valgrind,
# rounded to the nearest power of two multiple of 100 seconds.
# TODO: make the returned value lower if --valgrind is not given
get_expected_runtime() {
  # Timeout budget (seconds) for suite $1.  A manual --timeout override
  # (timeout_manual) takes precedence; unknown suites abort the script.
  case "$timeout_manual" in
  [0-9]*) echo $timeout_manual; return;;
  esac
  case $1 in
  printing_unittests|sbox_unittests|sbox_validation_tests|setup_unittests) echo 100;;
  app_unittests|googleurl_unittests) echo 200;;
  ipc_tests|media_unittests) echo 400;;
  base_unittests|courgette_unittests|tcmalloc_unittests) echo 1000;;
  net_unittests) echo 2000;;
  unit_tests) echo 4000;;
  *) echo "unknown test $1" >&2 ; exec false;;
  esac
}
# Run $2... but kill it if it takes longer than $1 seconds
# (perl's alarm() delivers SIGALRM to the exec'd command; also timed via "time").
alarm() { time perl -e 'alarm shift; exec @ARGV' "$@"; }
init_runtime() {
# Prepare the environment for a test run.  On Wine (no WINDIR set):
# create a fresh wine prefix, sanity-check port 1337 and the test SSL
# cert, optionally start a VNC server, and warm up wine with winemine.
CHROME_ALLOCATOR=winheap
export CHROME_ALLOCATOR
if test "$WINDIR" = ""
then
WINE=${WINE:-/usr/local/wine/bin/wine}
export WINE
WINESERVER=${WINESERVER:-/usr/local/wine/bin/wineserver}
WINEPREFIX=${WINEPREFIX:-$HOME/.wine-chromium-tests}
export WINEPREFIX
WINE_HEAP_REDZONE=16
export WINE_HEAP_REDZONE
# The net tests need port 1337 free.
if netstat -tlnp | grep :1337
then
echo Please kill the server listening on port 1337, or reboot. The net tests need this port.
exit 1
fi
# SSL tests require the test root CA to be trusted system-wide.
if test ! -f /usr/share/ca-certificates/root_ca_cert.crt
then
echo "You need to do"
echo "sudo cp src/net/data/ssl/certificates/root_ca_cert.crt /usr/share/ca-certificates/"
echo "sudo vi /etc/ca-certificates.conf (and add the line root_ca_cert.crt)"
echo "sudo update-ca-certificates"
echo "else ssl tests will fail."
echo "(Alternately, modify this script to run Juan's importer, http://bugs.winehq.org/show_bug.cgi?id=20370#c4 )"
exit 1
fi
if test -n "$VNC"
then
export DISPLAY=":$VNC"
vncserver -kill "$DISPLAY" || true
# VNC servers don't clean these up if they get a SIGKILL, and would then
# refuse to start because these files are there.
rm -f "/tmp/.X${VNC}-lock" "/tmp/.X11-unix/X${VNC}"
vncserver "$DISPLAY" -ac -depth 24 -geometry 1024x768
fi
# Fresh prefix + winetricks setup; keep a wine process alive in the
# background so the wineserver stays warm between tests.
$dry_run rm -rf $WINEPREFIX
$dry_run test -f winetricks || wget http://kegel.com/wine/winetricks
$dry_run sh winetricks nocrashdialog corefonts gecko > /dev/null
$dry_run sleep 1
$dry_run $WINE winemine &
fi
}
# Tear down whatever init_runtime started: kill the wineserver and, when a
# private VNC display was used, kill that too. No-op on native Windows
# ($WINDIR set). Honors $dry_run for the wineserver kill.
shutdown_runtime() {
  if test -z "$WINDIR"; then
    $dry_run $WINESERVER -k
    if test -n "$VNC"; then
      vncserver -kill "$DISPLAY"
    fi
  fi
}
# Looks up tests from our list of known bad tests. If $2 is not '.', picks tests expected to fail in a particular way.
# $1 = suite name (column 1 of list_known_failures), $2 = regex over the
# failure-kind column (e.g. "fail", "crash", or "." for any).
# Output: the matching test names (column 3) joined with ':' — the format
# gtest_filter expects — with the trailing ':' replaced by a newline.
# NOTE(review): the `tee tmp.1/tmp.2/tmp.3` stages look like leftover
# debugging and litter the CWD — confirm before removing.
get_test_filter()
{
mysuite=$1
myfilter=$2
list_known_failures | tee tmp.1 |
awk '$1 == "'$mysuite'" && /'$myfilter'/ {print $3}' |tee tmp.2 |
tr '\012' : |tee tmp.3 |
sed 's/:$/\n/'
}
# Output the logical AND of the two gtest filter specs $1 and $2, i.e.
# "$1:$2". When $1 is empty, just print $2.
# FIXME: handle more complex cases
and_gtest_filters()
{
  if [ -n "$1" ]; then
    printf '%s:' "$1"
  fi
  echo $2
}
# Expands a gtest filter spec to a plain old list of tests separated by whitespace
# Asks the suite binary itself (via --gtest_list_tests, run under $WINE) for
# the tests matching the filter, then reformats gtest's two-level
# "TestCase.\n  test" listing into flat "TestCase.test" lines.
# FLAKY-marked tests are dropped; \r from the Windows binary is stripped.
expand_test_list()
{
mysuite=$1 # e.g. base_unittests
myfilter=$2 # existing gtest_filter specification with wildcard
# List just the tests matching $myfilter, separated by colons
$WINE ./$mysuite.exe --gtest_filter=$myfilter --gtest_list_tests |
tr -d '\015' |
grep -v FLAKY |
perl -e 'while (<STDIN>) { chomp; if (/^[A-Z]/) { $testname=$_; } elsif (/./) { s/\s*//; print "$testname$_\n"} }'
}
# Parse arguments
announce=true
do_individual=no
dry_run=
extra_gtest_filter=
fail_filter="."
loops=1
logfiles=
SUITES=
suppression_dirs=
TARGET=Debug
timeout_manual=
VALGRIND_CMD=
VNC=
want_fails=no
winedebug=
while test "$1" != ""
do
case $1 in
--individual) do_individual=yes;;
--groups) do_individual=groups;;
--gtest_filter) extra_gtest_filter=$2; shift;;
--just-crashes) fail_filter="crash"; want_fails=yes;;
--just-fails) fail_filter="fail"; want_fails=yes;;
--just-flaky) fail_filter="flaky"; want_fails=yes;;
--just-hangs) fail_filter="hang"; want_fails=yes;;
--list-failures) list_known_failures; exit 0;;
--list-failures-html) list_known_failures | sed 's,http://\(.*\),<a href="http://\1">\1</a>,;s/$/<br>/' ; exit 0;;
--loops) loops=$2; shift;;
-n) dry_run=true; announce=echo ;;
--suppression_dir) suppression_dirs="$suppression_dirs $2"; shift;;
--target) TARGET=$2; shift;;
--timeout) timeout_manual=$2; shift;;
--used-suppressions) cd logs; grep used_suppression *.log | sed 's/-1.*--[0-9]*-- used_suppression//'; exit 0;;
--valgrind) VALGRIND_CMD="$THE_VALGRIND_CMD";;
--vnc) VNC=$2; shift;;
--winedebug) winedebug=$2; shift;;
--logfiles) logfiles=yes;;
-*) usage; exit 1;;
*) SUITES="$SUITES $1" ;;
esac
shift
done
if test "$SUITES" = ""
then
SUITES="$SUITES_1 $SUITES_10 $SUITES_100 $SUITES_1000"
fi
if test "$VALGRIND_CMD" != ""
then
if test "$suppression_dirs" = ""
then
# Default value for winezeug.
suppression_dirs="../../../ ../../../../../valgrind"
# Also try the script dir.
suppression_dirs="$suppression_dirs $(dirname $0)"
fi
# Check suppression_dirs for suppression files to create suppression_options
suppression_options=
for dir in $suppression_dirs
do
for f in valgrind-suppressions chromium-valgrind-suppressions
do
if test -f "$dir/$f"
then
dir="`cd $dir; pwd`"
suppression_options="$suppression_options --suppressions=$dir/$f"
fi
done
done
VALGRIND_CMD="$VALGRIND_CMD $suppression_options"
fi
set -e
trap shutdown_runtime 0
init_runtime
export WINEDEBUG=$winedebug
set -x
mkdir -p logs
cd "src/chrome/$TARGET"
i=1
while test $i -le $loops
do
for suite in $SUITES
do
expected_to_fail="`get_test_filter $suite $fail_filter`"
case $want_fails in
no) filterspec=`and_gtest_filters "${extra_gtest_filter}" -${expected_to_fail}` ;;
yes) filterspec=`and_gtest_filters "${extra_gtest_filter}" ${expected_to_fail}` ;;
esac
case $do_individual in
no)
$announce $VALGRIND_CMD $WINE ./$suite.exe --gtest_filter=$filterspec
LOG=../../../logs/$suite-$i.log
$dry_run alarm `get_expected_runtime $suite` \
$VALGRIND_CMD $WINE ./$suite.exe --gtest_filter=$filterspec 2>&1 | eval reduce_verbosity | tee $LOG || errors=yes true
egrep -q "$PATTERN" $LOG && errors=yes
test "$logfiles" = yes || rm $LOG
;;
yes)
for test in `expand_test_list $suite $filterspec`
do
$announce $VALGRIND_CMD $WINE ./$suite.exe --gtest_filter="$test"
LOG=../../../logs/$suite-$test-$i.log
$dry_run alarm `get_expected_runtime $suite` \
$VALGRIND_CMD $WINE ./$suite.exe --gtest_filter="$test" 2>&1 | eval reduce_verbosity | tee $LOG || errors=yes true
egrep -q "$PATTERN" $LOG && errors=yes
test "$logfiles" = yes || rm $LOG
done
;;
groups)
# --groups mode: run each test group (TestCase.*) in its own process.
for test in `expand_test_list $suite $filterspec | sed 's/\..*//' | sort -u`
do
$announce $VALGRIND_CMD $WINE ./$suite.exe --gtest_filter="$test.*-${expected_to_fail}"
LOG=../../../logs/$suite-$test-$i.log
$dry_run alarm `get_expected_runtime $suite` \
$VALGRIND_CMD $WINE ./$suite.exe --gtest_filter="$test.*-${expected_to_fail}" 2>&1 | eval reduce_verbosity | tee $LOG || errors=yes true
# Bug fix: scan the log we just wrote ($LOG), not a stale "tmp.log" —
# this now matches the default and --individual branches above, so
# failures in --groups runs are actually detected.
egrep -q "$PATTERN" $LOG && errors=yes
test "$logfiles" = yes || rm $LOG
done
;;
esac
done
i=`expr $i + 1`
done
case "$errors" in
yes) echo "Errors detected, condition red. Battle stations!" ; exit 1;;
*) echo "No errors detected." ;;
esac
| true |
c21ada1c5fb30d93da943f8be488a6da8de4b733
|
Shell
|
LadyNightmare/SistemasOperativos
|
/Practicas/scripts/done/backup2.sh
|
UTF-8
| 331 | 3.65625 | 4 |
[] |
no_license
|
#!/bin/bash
#Autora: Cristina Díaz García
#Grupo: Informática D
# Make a dated, numbered backup copy of each file given as an argument,
# refusing once a file already has 8 backups.
for i in "$@"
do
if [ ! -f "$i" ]
then
echo "$i" no existe
else
# Count the file plus its existing backups (they share $i as a prefix).
A=$(ls "$i"* | wc -w)
if [ "$A" -ge 9 ]
then
echo “Se ha superado el número máximo de versiones”
else
# Bug fix: Num was computed but never used, and backups were named
# ${Date}_$i — a name that `ls $i*` can never match — so the version
# cap above could never trigger. Name backups <file>_<yymmdd>_<n>
# instead, which keeps the counting glob and the cap working.
Num=$(expr $A + 1)
Date=$(date +%y%m%d)
Version=${i}_${Date}_${Num}
cp "$i" "$Version"
fi
fi
done
| true |
8b0e7caf5896d0a55d00c29ef7958774022dad7f
|
Shell
|
max-linux/max-desktop
|
/max-skel-conf/usr/share/max/25_windows_uefi
|
UTF-8
| 1,510 | 3.4375 | 3 |
[] |
no_license
|
#!/bin/sh
# grub-mkconfig helper: emit a GRUB menu entry chainloading the Windows
# UEFI boot manager when a Microsoft BCD store exists on the EFI system
# partition. Output (the `cat << EOF` blocks) is appended to grub.cfg.
set -e
# wget https://gist.github.com/raw/4330598/adaf598a78d568dbfada596441bdfad3b4dd3f97/25_windows_uefi
# Probe for UEFI entries in EFI system partition
prefix="/usr"
exec_prefix="${prefix}"
datarootdir="${prefix}/share"
export TEXTDOMAIN=grub
export TEXTDOMAINDIR="${datarootdir}/locale"
. "${datarootdir}/grub/grub-mkconfig_lib"
found=
bcd=/boot/efi/EFI/Microsoft/Boot/BCD
if [ -n "$bcd" ]; then
# The BCD store is UTF-16, so each ASCII letter is followed by a NUL
# byte; the dotted patterns ("W.i.n.d.o.w.s...") match that encoding.
# NOTE(review): `-n "$bcd"` is always true since $bcd is a literal —
# a file-existence test (-f) was probably intended; confirm.
if grep -qs "W.i.n.d.o.w.s. .8" "$bcd"; then
long="Windows 8 (loader)"
elif grep -qs "W.i.n.d.o.w.s. .7" "$bcd"; then
long="Windows 7 (loader)"
elif grep -qs "W.i.n.d.o.w.s. .V.i.s.t.a" "$bcd"; then
long="Windows Vista (loader)"
elif grep -qs "W.i.n.d.o.w.s. .S.e.r.v.e.r. .2.0.0.8. .R.2." "$bcd"; then
long="Windows Server 2008 R2 (loader)"
elif grep -qs "W.i.n.d.o.w.s. .S.e.r.v.e.r. .2.0.0.8." "$bcd"; then
long="Windows Server 2008 (loader)"
elif grep -qs "W.i.n.d.o.w.s. .R.e.c.o.v.e.r.y. .E.n.v.i.r.o.n.m.e.n.t" "$bcd"; then
long="Windows Recovery Environment (loader)"
elif grep -qs "W.i.n.d.o.w.s. .S.e.t.u.p" "$bcd"; then
long="Windows Recovery Environment (loader)"
else
long="Windows Vista (loader)"
fi
found=true
gettext_printf "Found %s on %s\n" "${long}" "UEFI ESP" >&2
cat << EOF
menuentry "${long}" {
insmod search_fs_uuid
insmod chain
EOF
# Find the device backing /boot/efi and emit the grub commands that
# make it accessible (search.fs_uuid etc.), indented with a tab.
DEVICE=`cat /etc/mtab | grep /boot/efi | cut -f 1 -d ' '`
prepare_grub_to_access_device ${DEVICE} | sed -e "s/^/\t/"
cat << EOF
chainloader /EFI/Microsoft/Boot/bootmgfw.efi
}
EOF
fi
| true |
bc77fb983b7dcc2228a76119b3c53e794416dfc5
|
Shell
|
sendanor/nor-nopg-cli
|
/examples/event-scripts/update-description.sh
|
UTF-8
| 435 | 3.171875 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash -x
# nopg event hook: rewrite the description of the object identified by
# NOPG_EVENT_ID inside a dedicated transaction.
# Environment consumed:
# NOPG_EVENT_ID
# NOPG_EVENT_NAME
# NOPG_EVENT_TYPE
set -e
test -n "$NOPG_TR"
test -n "$NOPG_EVENT_ID"
# Clear the inherited transaction id so our commands use our own.
export NOPG_TR=''
TR=''
# EXIT trap: roll back the transaction if it is still open, then
# propagate the original exit status.
function finish {
status="$?"
test -n "$TR" && nopg -b -q "$TR" rollback
exit "$status"
}
trap finish EXIT
TR="$(nopg -b -q start)"
test -n "$TR"
nopg -b -q "$TR" update --where-'$id'="$NOPG_EVENT_ID" --set-description="Testing $(date)"
nopg -b -q "$TR" commit
# Bug fix: this was `tr=''` (lowercase), which left $TR set, so the EXIT
# trap tried to roll back the transaction we had just committed.
TR=''
exit 0
| true |
ab7b9d3c3087b0fd7303d8a308083b4da18a084f
|
Shell
|
harshjoeyit/BJP
|
/Bash/filetest.sh
|
UTF-8
| 822 | 3.625 | 4 |
[] |
no_license
|
#! /usr/bin/bash
# Interactive demo of file test operators: prompts for a file name and a
# directory name and reports existence / emptiness.
# -e interprets the escape sequences
# \c keeps the cursor on the same line
echo -e "Enter the name of the file : \c"
read file_name
# using the \" escape sequence
# -e tests whether the file exists
if [ -e "$file_name" ]
then
echo -e "\"$file_name\" found"
else
echo -e "\"$file_name\" not found"
fi
# Bug fix: -s is true when the file exists AND has a size greater than
# zero, so the original branches were inverted (a non-empty file was
# reported as "empty file"). Swap the messages to match -s semantics.
if [ -s "$file_name" ]
then
echo -e "non empty file"
else
echo -e "empty file"
fi
echo -e "Enter the name of the dir : \c"
read dir_name
#-d is the flag for a directory
if [ -d "$dir_name" ]
then
echo -e "\n \"$dir_name\" found \n"
else
echo -e "\n \"$dir_name\" not found \n"
fi
# charchetr special files - text , char , code flag -c
# block special files -audio video flag -b
# for checking the read , write , execute permissions use -r -w -x flags
| true |
0746e1076bc28a95ff0d6011675e5d2510cd8110
|
Shell
|
dai-ndebuhr/cloud-native-workstation
|
/opa/gatekeeper.sh
|
UTF-8
| 1,027 | 3.375 | 3 |
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Install OPA Gatekeeper and the project's constraint templates, then poll
# until the template CRDs (requiredlabels, deploymentselector) are
# registered. Exits 0 on success, 3 if the CRDs never appear.
kubectl apply -f https://raw.githubusercontent.com/open-policy-agent/gatekeeper/release-3.5/deploy/gatekeeper.yaml
kubectl apply -f kubernetes/constraint-templates.yaml
count=0
echo "Constraint Template CRDs: Creating..."
# Wait up to (approximately) three minutes for CRD registration
while [ $count -le 180 ]
do
kubectl get requiredlabels 2> /dev/null
requiredlabels=$?
kubectl get deploymentselector 2> /dev/null
deploymentselector=$?
# Exit code 0 means there were no resources found (desired behavior)
# Before CRD registration, the exit code for the get commands is 1
if [ $requiredlabels -ne 0 ] || [ $deploymentselector -ne 0 ]
then
echo "Constraint Template CRDs: Still creating..."
sleep 5
count=$(( count+5 ))
else
echo "Constraint Template CRDs: Creation complete"
exit 0
fi
done
echo "Constraint Template CRDs: Failed"
# On failure, display stderr
kubectl get requiredlabels
kubectl get deploymentselector
exit 3
| true |
11a3a510bf0b1def65e4b3f3b872e9e4f326bbde
|
Shell
|
OliverJaas/bash_skriptimine
|
/kasutajad_paroolid_eraldatud
|
UTF-8
| 589 | 3.40625 | 3 |
[] |
no_license
|
#!/bin/bash
#
# Creates users from two files: one holding usernames, the other the
# matching passwords (one per line, paired by line number).
# Usage: $0 <usernames-file> <passwords-file>
if [ $# -ne 2 ]; then
echo "Kasutusjuhend: $0 kasutajad paroolid"
else
kasutajad=$1
paroolid=$2
# Both files must exist and be readable.
if [ -f $kasutajad -a -r $kasutajad ] && [ -f $paroolid -a -r $paroolid ]; then
echo "Fail töötab vastavalt skriptile"
# Pair the files line-by-line into "user:password" records.
# NOTE(review): iterating over $(paste ...) word-splits on whitespace,
# so lines containing spaces would break — confirm inputs are simple.
for rida in $(paste -d: $kasutajad $paroolid)
do
kasutajanimi=$(echo "$rida" | cut -f1 -d:)
# Delegates account creation to the companion "lisakasutaja" script.
sh lisakasutaja $kasutajanimi
echo "$rida" | chpasswd
done
else
# NOTE(review): $failinimi is never set anywhere in this script.
echo "Probleem peitub failis $failinimi"
fi
# end of the parameter-count check
fi
| true |
823925a93879a7d28852129b5ce344fee87e627b
|
Shell
|
ziman/baltrad-merge
|
/src/merge.sh
|
UTF-8
| 1,116 | 2.515625 | 3 |
[] |
no_license
|
#!/bin/bash
# Wrapper for the baltrad-merge profile generator: assembles the library,
# binary and Python search paths of the radar toolchain (rave, rsl,
# vol2bird), then execs generate_profiles.py.
# Usage: merge.sh <data_in> <data_out> <data_work> [extra args...]
LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:/opt/radar/lib"
LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:/opt/radar/rave/lib"
LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:/opt/radar/rave/Lib"
LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:/opt/radar/rsl/lib"
LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:/opt/radar/vol2bird/lib"
LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:/usr/lib/x86_64-linux-gnu"
export LD_LIBRARY_PATH
PATH="${PATH}:/opt/radar/vol2bird/bin"
PATH="${PATH}:/opt/radar/baltrad-merge"
export PATH
PYTHONPATH="${PYTHONPATH}:/opt/radar/vol2bird/share/vol2bird"
PYTHONPATH="${PYTHONPATH}:/opt/radar/vol2bird/share/vol2bird/pyvol2bird"
PYTHONPATH="${PYTHONPATH}:/opt/radar/rave/Lib"
export PYTHONPATH
export OPTIONS_CONF="/data/etc/options.conf"
# First three positional args are directories; anything left after the
# shifts is forwarded verbatim to generate_profiles.py.
data_in="$1"
data_out="$2"
data_work="$3"
shift; shift; shift
# 525600 minutes = 1 year
exec /opt/radar/baltrad-merge/generate_profiles.py \
--merge-files /opt/radar/rave/bin/merge_files \
--scans2pvol /opt/radar/baltrad-merge/Scans2Pvol.py \
--vol2bird /opt/radar/vol2bird/bin/vol2bird \
--keep-merged \
-i "$data_in" \
-o "$data_out" \
-w "$data_work" \
"$@"
| true |
0006ad80d18d7b908b4dea748997d1804579a5e7
|
Shell
|
molecul/sslstrip-hsts-openwrt
|
/INSTALL.sh
|
UTF-8
| 1,832 | 3.59375 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# 18.01.2018 - Adaptate for GL-AR300M
# Installation script for SSLstrip2 + DNS2proxy for the WiFi Pineapple NANO + TETRA.
# I consider this a dirty fix to get sslstrip2 running. It's needed because the upstream libraries are not up to date.
#
# Written by: Andreas Nilsen - [email protected] - https://www.github.com/adde88
#
# Variables and colors.
RED='\033[0;31m'
NC='\033[0m'
sslstrip_version="0.9"
SSLSTRIP_IPK="https://github.com/molecul/sslstrip-hsts-openwrt/raw/master/sslstrip-hsts_"$sslstrip_version"_ar71xx.ipk"
# INSTROOT is "" for a normal install, "/sd" on a Pineapple NANO.
INSTROOT=""
#
# Starting Installtion.
echo -e "${RED}Installing: ${NC}SSLstrip2 + DNS2Proxy."
echo -e "Go grab a cup of coffee, this can take a little while...\n"
# Download installation-files to temporary directory, and then update OPKG repositories.
cd /tmp
opkg update
wget "$SSLSTRIP_IPK"
#
# Creating sym-link between python-directories located on the sd-card and internally.
# The main-directory will be located on the sd-card (/sd)
# This will only happen on the Pineapple NANO.
if [ -e /sd ]; then
# sym-link & nano install
INSTROOT="/sd"
#rm -r /usr/lib/python2.7
mv /usr/lib/python2.7 /usr/lib/python2.7-backup
mkdir -p /sd/usr/lib/python2.7
ln -s /sd/usr/lib/python2.7 /usr/lib/python2.7
opkg --dest sd --force-overwrite install sslstrip-hsts_"$sslstrip_version"_ar71xx.ipk
else
# Tetra installation / general install.
opkg --force-overwrite install sslstrip-hsts_"$sslstrip_version"_ar71xx.ipk
fi
# Cleanup
rm sslstrip-hsts_"$sslstrip_version"_ar71xx.*
# Install the bundled sslstrip2 python package from its share directory.
cd ${INSTROOT}/usr/share/sslstrip2/
python ./setup.py install
cd -
chmod +x ${INSTROOT}/usr/share/dns2proxy/dns2proxy.py
ln -s ${INSTROOT}/usr/share/dns2proxy/dns2proxy.py /usr/bin/dns2proxy
echo -e "${RED}Installation completed!"
# Keep the dns2proxy-specific python tree aside and restore the backup.
mv /usr/lib/python2.7 /usr/lib/python2.7_dns2proxy
mv /usr/lib/python2.7-backup /usr/lib/python2.7
exit 0
| true |
3bc9077ed44d0fb6f843170858e4184da4671b37
|
Shell
|
gchen98/mendel-gpu
|
/bin/loop_phasing.sh
|
UTF-8
| 514 | 3.4375 | 3 |
[] |
no_license
|
#!/bin/bash
# Run run_phasing.sh for a chunk of people over every chromosome,
# optionally including X.
# Usage: loop_phasing.sh <dbname> <use X?[1|0]> <chunk_start> <chunk_end>
if [ $# -lt 4 ] ; then
echo "Usage <dbname> <use X?[1|0]> <person_chunk_start> <person_chunk_end>"
exit 1
fi
dbname=$1
useX=$2
chunk_start=$3
chunk_end=$4
if [ $useX -eq 1 ] ; then
chroms='1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 X'
elif [ $useX -eq 0 ] ; then
# Currently limited to chr22 for the no-X case; the full autosome list
# is kept below for easy switching.
chroms='22'
#chroms='1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22'
else
echo use X is 1 or 0
exit 1
fi
# NOTE(review): maxpart is never used below — dead variable, or meant to
# be passed to run_phasing.sh? Confirm.
maxpart=20
for chr in $chroms
do
run_phasing.sh $dbname $chr $chunk_start $chunk_end
done
| true |
52fbefaa0091ec374b7617438e8614217044b261
|
Shell
|
tokageel/dotfiles
|
/.config/git/hooks/prepare-commit-msg
|
UTF-8
| 457 | 3.546875 | 4 |
[] |
no_license
|
#!/bin/sh
# git prepare-commit-msg hook: on branches named feature/#<NUM>...,
# prefix an empty commit message with "refs #<NUM> ".
COMMIT_MSG_FILE=$1
COMMIT_SOURCE=$2
SHA1=$3
# do not process if the message already has content (e.g. amend commit)
if [ -n "$(head -n1 "$COMMIT_MSG_FILE")" ]; then
exit 0
fi
# get issue number from the branch name
BRANCH_NAME=$(git rev-parse --abbrev-ref HEAD)
# Bug fix: plain `sed -e` prints a non-matching branch name unchanged, so
# ISSUE_NUM was never empty and the guard below never fired — every branch
# got a bogus "refs" prefix. `sed -n .../p` outputs only on a match.
ISSUE_NUM=$(echo "$BRANCH_NAME" | sed -n -e 's_feature/#\([0-9][0-9]*\)[^0-9]*_\1_p')
# do not process if issue number is not available
if [ -z "$ISSUE_NUM" ]; then
exit 0
fi
sed -i.bak "1s/^/refs #$ISSUE_NUM /g" "$COMMIT_MSG_FILE"
exit 0
| true |
bd2d6e7ea80bd1ec77176c3c1fef58f6a0d02528
|
Shell
|
bioinf/bi2014-mycoplasma-genitalium
|
/scripts/goblast.sh
|
UTF-8
| 229 | 2.828125 | 3 |
[] |
no_license
|
#!/bin/bash
# For every genome file under ./genomes, build a nucleotide BLAST
# database and run a tblastn search of the sample proteins against it,
# writing each hit table to result_<genome>.
query=sample_proteins.fa
genomes=$(ls ./genomes)
for gen in ${genomes}; do
  db="db_${gen}"
  makeblastdb -in "./genomes/${gen}" -out "${db}" -dbtype nucl
  tblastn -query "${query}" -db "${db}" -outfmt 7 -out "result_${gen}"
done
| true |
c8a7550028c8e898e4f0bdb8176a0c8182379323
|
Shell
|
lpswj/Linux
|
/ch02/example.sh
|
UTF-8
| 313 | 3.359375 | 3 |
[] |
no_license
|
#!/bin/bash
# Toy if/elif/else demo: greet according to a yes/no answer read from
# stdin; exit 1 on unrecognized input, 0 otherwise.
echo "Is it morning?Please answer yes or no"
read timeofday
if [ "$timeofday" = "yes" ];then
echo -n "Good morning"
elif [ "$timeofday" = "no" ];then
echo -n "Good afternoon"
else
echo -n "Sorry,$timeofday not recognized.Enter yes or no"
#printf "sorry ,not recognized";
exit 1
fi
exit 0
| true |
04e6d87cb75d9778b8d329c2b7bdf02301201f16
|
Shell
|
manasmbellani/athena-tools
|
/pentest_lateralmovt/remove_http_server.sh
|
UTF-8
| 395 | 2.796875 | 3 |
[] |
no_license
|
#!/bin/bash
#
# Script kills the running HTTP servers via python launched with http.server &
# SimpleHTTPServer using pkill
#
# Bug fix: the first line was "# !/bin/bash" (note the space), which is a
# plain comment rather than a shebang, so the kernel executed the script
# with the caller's default shell instead of bash.
#
# Examples:
# To launch HTTP service on port 1433 in the folder /tmp:
# ./invoke_http_server.sh 1433 /tmp
echo "[*] Removing all running HTTP/HTTPS Servers running in the background via pkill"
pkill -f ".*(http\.server|SimpleHTTPServer|httpd\.socket).*"
| true |
ad670658607f367ac2b8b70a6dbeedea77d48aa5
|
Shell
|
ay65535/gitlab-installer
|
/install-gitlab.sh
|
UTF-8
| 2,612 | 4.09375 | 4 |
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Copyright (c) 2013-2016 Tuomo Tanskanen <[email protected]>
# Usage: Copy 'gitlab.rb.example' as 'gitlab.rb', then 'vagrant up'.
# -e: abort on error; -x: trace commands for easier vagrant debugging.
set -ex
# these are normally passed via Vagrantfile to environment
# but if you run this on bare metal they need to be reset
GITLAB_HOSTNAME=${GITLAB_HOSTNAME:-127.0.0.1}
GITLAB_PORT=${GITLAB_PORT:-8443}
#
# --------------------------------
# Installation - no need to touch!
# --------------------------------
#
# Keep apt from prompting during unattended provisioning.
export DEBIAN_FRONTEND=noninteractive
# Print an error message to stderr prefixed with "fatal: ".
# Note: does not exit — callers are expected to `exit` themselves.
fatal() {
  printf 'fatal: %s\n' "$*" >&2
}
# Abort with exit status 1 unless the script runs as root (EUID 0).
check_for_root()
{
if [[ $EUID != 0 ]]; then
fatal "need to be root"
exit 1
fi
}
# Abort unless the user supplied a gitlab.rb in the vagrant shared folder;
# it is copied into /etc/gitlab later in the script.
check_for_gitlab_rb()
{
if [[ ! -e /vagrant/gitlab.rb ]]; then
fatal "gitlab.rb not found at /vagrant"
exit 1
fi
}
# Map $GITLAB_EDITION ("community" or "enterprise") to the matching
# omnibus package name in GITLAB_PACKAGE; abort on anything else.
set_gitlab_edition()
{
  case "$GITLAB_EDITION" in
    community)
      GITLAB_PACKAGE=gitlab-ce
      ;;
    enterprise)
      GITLAB_PACKAGE=gitlab-ee
      ;;
    *)
      fatal "\"${GITLAB_EDITION}\" is not a supported GitLab edition"
      exit 1
      ;;
  esac
}
# Reject gitlab.rb files carrying the pre-8.0 ci_external_url setting,
# which would make the Chef reconfigure run fail.
check_for_backwards_compatibility()
{
if egrep -q "^ci_external_url" /vagrant/gitlab.rb; then
fatal "ci_external_url setting detected in 'gitlab.rb'"
fatal "This setting is deprecated in Gitlab 8.0+, and will cause Chef to fail."
fatal "Check the 'gitlab.rb.example' for fresh set of settings."
exit 1
fi
}
# Point external_url in /etc/gitlab/gitlab.rb at $GITLAB_URL.
rewrite_hostname()
{
sed -i -e "s,^external_url.*,external_url '${GITLAB_URL}'," /etc/gitlab/gitlab.rb
}
# All commands expect root access.
check_for_root
# Check that the GitLab edition which is defined is supported and set package name
set_gitlab_edition
# Check for configs that are not compatible anymore
check_for_gitlab_rb
check_for_backwards_compatibility
echo "Installing ${GITLAB_PACKAGE} via apt ..."
apt-get install -y ${GITLAB_PACKAGE}
# Build the external URL: default ports get schema-only URLs, anything
# else embeds the port explicitly (https assumed for non-80 ports).
if [[ ${GITLAB_PORT} == 80 ]]; then
GITLAB_URL="http://${GITLAB_HOSTNAME}/"
elif [[ ${GITLAB_PORT} == 443 ]]; then
GITLAB_URL="https://${GITLAB_HOSTNAME}/"
else
GITLAB_URL="https://${GITLAB_HOSTNAME}:${GITLAB_PORT}/"
fi
# fix the config and reconfigure
cp /vagrant/gitlab.rb /etc/gitlab/gitlab.rb
rewrite_hostname
chown root:root /etc/gitlab/gitlab.rb
chmod 600 /etc/gitlab/gitlab.rb
head -14 /etc/gitlab/gitlab.rb
gitlab-ctl reconfigure
#EXTERNAL_URL=${GITLAB_URL} gitlab-ctl reconfigure
head -14 /etc/gitlab/gitlab.rb
# done
echo "Done!"
echo " Login at ${GITLAB_URL}, username 'root'. Password will be reset on first login."
echo " Config found at /etc/gitlab/gitlab.rb and updated by 'sudo gitlab-ctl reconfigure'"
| true |
834298ce3aaf3ec2571a8d19b1539fd7147e4858
|
Shell
|
mrodden/redstone
|
/tools/test_cli.sh
|
UTF-8
| 702 | 3.140625 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# usage: test_cli.sh
# Set IBMCLOUD_API_KEY and KEY_CRNS to use
# Print a message to stderr (no-op when called with an empty first arg).
error() {
    [ -n "$1" ] && echo "$*" >&2
}
# Print a message to stderr and abort with status 1.
die() {
    error "$*"
    exit 1
}
# Test 1: encrypt from stdin with explicit key CRNs, then decrypt.
python -m redstone.crypto encrypt --key-crns "$KEY_CRNS" - < setup.py \
    | python -m redstone.crypto decrypt - \
    || die "test 1 failed"
# Test 2: key CRNs supplied via the environment instead of the flag.
RSCRYPTO_KEY_CRNS=$KEY_CRNS python -m redstone.crypto encrypt setup.py \
    | python -m redstone.crypto decrypt - \
    || die "test 2 failed"
# Test 3: round-trip through a file on disk.
RSCRYPTO_KEY_CRNS=$KEY_CRNS python -m redstone.crypto encrypt setup.py > encrypted_file \
    || die "test 3 part 1 failed"
python -m redstone.crypto decrypt encrypted_file \
    || die "test 3 part 2 failed"
rm -f encrypted_file
| true |
666e11b32f576722617e914ed5709ff3b737879f
|
Shell
|
ttytyper/dotfiles
|
/.bash_profile
|
UTF-8
| 3,941 | 3.59375 | 4 |
[] |
no_license
|
# bash_profile:
# Decide whether this terminal supports colors by checking the sanitized
# $TERM against the dircolors database (or /etc/DIR_COLORS), falling back
# to a hardcoded allowance for xterm. Result lands in $use_color.
if [ -n "${BASH_VERSION}" ] ; then
use_color=false
safe_term=${TERM//[^[:alnum:]]/.} # sanitize TERM
if [[ -f /etc/DIR_COLORS ]] ; then
grep -q "^TERM ${safe_term}" /etc/DIR_COLORS && use_color=true
elif type -p dircolors >/dev/null ; then
if dircolors --print-database | grep -q "^TERM ${safe_term}" ; then
use_color=true
fi
else
case "$safe_term" in
xterm) use_color=true ;;
esac
fi
fi
# This file is sourced by bash when you log in interactively.
[ -f ~/.bashrc ] && . ~/.bashrc
# A quick function to check if a command exists
cmdexists()
{
type "${@}" 1>/dev/null 2>&1
}
# Terminal title helpers.
# Set the GNU screen window title via its ESC k ... ESC \ sequence.
screentitle() {
  printf '\033k%s\033\\' "$*"
}
# Set the X11 terminal window title via the OSC 0 sequence.
xtitle() {
  printf '\033]0;%s\007' "$*"
}
# Set both titles at once.
title() {
  screentitle "$@"
  xtitle "$@"
}
if [ "$SSH_CONNECTION" ]; then
xtitle "$HOSTNAME"
fi
# TODO: Consider using tput setaf and tput sgr0 instead
# https://github.com/lhunath/scripts/blob/master/bashlib/bashlib#L210
if [ "${use_color}" = "true" ]; then
COL_BLACK="\033[0;30m"
COL_DARKGRAY="\033[1;30m"
COL_RED="\033[0;31m"
COL_LIGHTRED="\033[1;31m"
COL_GREEN="\033[0;32m"
COL_LIGHTGREEN="\033[1;32m"
COL_BROWN="\033[0;33m"
COL_YELLOW="\033[1;33m"
COL_BLUE="\033[0;34m"
COL_LIGHTBLUE="\033[1;34m"
COL_PURPLE="\033[0;35m"
COL_LIGHTPURPLE="\033[1;35m"
COL_CYAN="\033[0;36m"
COL_LIGHTCYAN="\033[1;36m"
COL_LIGHTGRAY="\033[0;37m"
COL_WHITE="\033[1;37m"
COL_RESET="\033[0;0m"
else
COL_BLACK=""
COL_DARKGRAY=""
COL_RED=""
COL_LIGHTRED=""
COL_GREEN=""
COL_LIGHTGREEN=""
COL_BROWN=""
COL_YELLOW=""
COL_BLUE=""
COL_LIGHTBLUE=""
COL_PURPLE=""
COL_LIGHTPURPLE=""
COL_CYAN=""
COL_LIGHTCYAN=""
COL_LIGHTGRAY=""
COL_WHITE=""
COL_RESET=""
fi
# If we are working in a virtual console...
if [ "${TERM}" = "linux" ]; then
# Change the beep frequency and length
setterm -bfreq 400 -blength 100
# Change the font to something more pleasing
#setfont ka8x16thin-1
fi
# gpg-agent, for managing my gpg key
if [ -d "${HOME}/.gnupg" ] && cmdexists gpg-agent; then
# New way to start gpg-agent, implied by calling gpgconf.
# If you get errors such as "inappropriate ioctl for device", try:
# gpg-connect-agent /bye
# Also check https://www.gnupg.org/documentation/manuals/gnupg-devel/Common-Problems.html
export GPG_TTY=$(tty)
echo UPDATESTARTUPTTY | gpg-connect-agent >/dev/null
unset SSH_AGENT_PID
if [ "${gnupg_SSH_AUTH_SOCK_by:-0}" -ne $$ ]; then
export SSH_AUTH_SOCK="$(gpgconf --list-dirs agent-ssh-socket)"
fi
fi
# Use afuse and sshfs for auto mounting remote directories under ~/net
if cmdexists afuse; then
if cmdexists sshfs && [ -d ~/net ] && ! mountpoint -q ~/net; then
afuse -o mount_template='sshfs -oreconnect -oServerAliveInterval=15 -oServerAliveCountMax=3 -oControlMaster=no -oPasswordAuthentication=no -oConnectTimeout=3 -oIdentityFile=~/.ssh/sshfs.key %r: %m' -o unmount_template='fusermount -u -z %m' ~/net
fi
fi
# Bash completion
[ -f /etc/profile.d/bash-completion ] && source /etc/profile.d/bash-completion
# If an ecryptfs dir is present but locked, notify the user who might want a hint to unlock it
if [ -d "$HOME/.Private" ] && [ -e "$HOME/.ecryptfs/Private.mnt" ] && cmdexists ecryptfs-mount-private && ! mountpoint -q "$(cat "$HOME/.ecryptfs/Private.mnt")"; then
echo "Note: ecryptfs is locked. Use ecryptfs-mount-private to unlock"
fi
# A few shorthands for switching between US dvorak and Danish qwerty
if [ "$DISPLAY" ]; then
asdf() { setxkbmap -layout "dvorak" -variant 'us' -option 'ctrl:nocaps'; }
aoei() { setxkbmap -layout "dk" -variant 'nodeadkeys' -option 'ctrl:nocaps'; }
aoeu() { aoei; }
fi
# Auto-logout after 5 mins if I'm root
if [ "$USER" = "root" ] || [ "$UID" = "0" ]; then
export TMOUT="300"
fi
# Local man pages
if test -d "$HOME/.manpath" && cmdexists manpath; then
export MANPATH="$(manpath -q):$HOME/.manpath"
fi
if [ "$XDG_RUNTIME_DIR" ]; then
export MPD_HOST="$XDG_RUNTIME_DIR/mpd/socket"
fi
| true |
73bfdeef07fa67cf7c04780f70845fb7509ccda5
|
Shell
|
LCTT/lctt-scripts
|
/remove_DMCA.sh
|
UTF-8
| 279 | 2.859375 | 3 |
[] |
no_license
|
#!/bin/bash
# Remove every file that mentions a DMCA'd domain ($1): create a branch
# named after the domain, git-rm the matching files, commit and push.
set -e
# Fix: quote the sourced path so a checkout under a directory containing
# spaces still works.
source "$(dirname "${BASH_SOURCE[0]}")/base.sh"
cd "$(get-lctt-path)/sources"
domain="$1"
git checkout -b "$domain"
# Fix: IFS= and -r keep filenames with leading whitespace or backslashes
# intact while reading git grep's output line by line.
git grep -l "$domain" | while IFS= read -r file; do git rm "$file"; done
git commit -a -m "remove $domain"
git push -u origin "$domain"
git checkout master
| true |
1475f539042081cf727a6f1af7a34cf447b1279d
|
Shell
|
cppforlife/cockroachdb-release
|
/jobs/smoke-tests/templates/run
|
UTF-8
| 2,228 | 3.46875 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# BOSH job template (ERB: <%= %> tags are expanded at deploy time) for the
# cockroachdb smoke tests: stage client certs, (re)create the test
# database and user, run the configured test binary, and always clean up
# database and user via the EXIT trap.
set -e
echo "`date`: Configure certs directory"
certs_path=/var/vcap/data/smoke-tests/certs
rm -rf $certs_path
mkdir -p $certs_path
cp /var/vcap/jobs/smoke-tests/config/certs/* $certs_path/
# cockroach expects client certs named client.<user>.{crt,key}.
mv $certs_path/client.crt $certs_path/client.<%= p("user") %>.crt
mv $certs_path/client.key $certs_path/client.<%= p("user") %>.key
chmod 600 $certs_path/*
chown vcap:vcap $certs_path/*
echo "`date`: Drop and create database"
# todo what if first instance is not available?
export COCKROACH_HOST=<%= link("conn").instances.first.address %>
export COCKROACH_PORT=<%= link("conn").p("port") %>
/var/vcap/packages/cockroachdb/cockroach \
sql --user root --certs-dir $certs_path -e "DROP DATABASE IF EXISTS <%= p("database") %>;"
/var/vcap/packages/cockroachdb/cockroach \
sql --user root --certs-dir $certs_path -e "CREATE DATABASE <%= p("database") %>;"
/var/vcap/packages/cockroachdb/cockroach \
user set --user root --certs-dir $certs_path <%= p("user") %>
# Drop the scratch database and test user on any exit path.
function clean_up {
echo "`date`: Clean up"
/var/vcap/packages/cockroachdb/cockroach \
sql --user root --certs-dir $certs_path -e "DROP DATABASE IF EXISTS <%= p("database") %>;"
/var/vcap/packages/cockroachdb/cockroach \
user rm --user root --certs-dir $certs_path <%= p("user") %>
}
trap clean_up EXIT
/var/vcap/packages/cockroachdb/cockroach \
sql --user root --certs-dir $certs_path \
-e "GRANT CREATE,DROP,SELECT,INSERT,DELETE,UPDATE ON DATABASE <%= p("database") %> TO <%= p("user") %>;"
<%
cmd = p("tests.#{p("test_to_run")}.cmd")
opts = p("tests.#{p("test_to_run")}.opts")
%>
echo "`date`: Run smoke tests"
export SMOKE_TESTS_CA_CERT=$certs_path/ca.crt
export SMOKE_TESTS_CERT=$certs_path/client.<%= p("user") %>.crt
export SMOKE_TESTS_KEY=$certs_path/client.<%= p("user") %>.key
export SMOKE_TESTS_CONN="postgresql://<%= p("user") %>@${COCKROACH_HOST}:${COCKROACH_PORT}/<%= p("database") %>?sslmode=verify-ca&sslkey=${SMOKE_TESTS_KEY}&sslcert=${SMOKE_TESTS_CERT}&sslrootcert=${SMOKE_TESTS_CA_CERT}"
export LOG_DIR=/var/vcap/sys/log/smoke-tests
mkdir -p $LOG_DIR
/var/vcap/packages/smoke-tests/bin/<%= cmd %> \
<%= opts.join(" ") %> $SMOKE_TESTS_CONN # >$LOG_DIR/stdout.log 2>$LOG_DIR/stderr.log
echo "`date`: Done"
| true |
1e7798d09028ef1f8a0a937cdcc1caec759ae403
|
Shell
|
guenther-brunthaler/usr-local-bin-xworld-jv3gwuidf2ezyr5vbqavqtxyh
|
/roman-numerals-to-decimal
|
UTF-8
| 1,388 | 3.578125 | 4 |
[] |
no_license
|
#! /bin/sh
# Convert roman numerals (in either letter case) into decimal values. All
# variants mentioned in the Wikipedia article are supported.
#
# Version 2017.346.2
#
# Copyright (c) 2017 Guenther Brunthaler. All rights reserved.
#
# This script is free software.
# Distribution is permitted under the terms of the LGPLv3.

# The awk program is delivered on fd 5 via the quoted here-document below;
# "--" stops awk from treating script arguments as options. Numerals can
# be passed as arguments or, with no arguments, read one per line from
# stdin.
awk -f /dev/fd/5 5<< 'EOF' -- "$@" || printf '%s\n' "$0 failed!" >& 2 && false
# Globals: romdigs, largest_digit, d2v[]
# Build the digit-to-value table d2v[]: the factor alternates 2,5,2,5,...
# so that, starting from the 0.5 seed, value walks through
# 1 (I), 5 (V), 10 (X), 50 (L), 100 (C), 500 (D), 1000 (M) — both cases.
# Afterwards romdigs is turned into a regex matching roman-digit strings.
function init_decoder( value, digit, factor, i, rl, char) {
	romdigs= "IVXLCDM"; rl= length(romdigs); value= 0.5; factor= 5
	for (i= 1; i <= rl; ++i) {
		char= substr(romdigs, i, 1)
		largest_digit= d2v[char]= d2v[tolower(char)] \
		= value*= factor= 7 - factor
	}
	romdigs= "^[" romdigs tolower(romdigs) "]*$"
}
# Decode one numeral: accumulate runs of equal digits in `partial`; when a
# larger digit follows, the run so far is subtractive (partial = digit -
# partial, covering variants like IIX); when a smaller digit follows, the
# finished run is added to `total`.
function decode(roman , n, i, total, partial, last_digit) {
	n= length(roman); total= partial= 0; last_digit= largest_digit
	for (i= 1; i <= n; ++i) {
		if ((digit= d2v[substr(roman, i, 1)]) > last_digit) {
			partial= digit - partial
		} else if (digit < last_digit) {
			total+= partial; partial= digit
		} else {
			partial+= digit
		}
		last_digit= digit
	}
	print total + partial
}
# Decode every command-line argument that looks like a roman numeral.
function process_arguments( i, v) {
	for (i= 1; i < ARGC; ++i) if ((v= ARGV[i]) ~ romdigs) decode(v)
}
BEGIN {
	init_decoder()
	if (ARGC > 1) { process_arguments(); exit }
}
# Fallback: no arguments — decode numerals read from stdin.
$0 ~ romdigs { decode($0) }
EOF
| true |
2afcb9b585d7981c83c409a9bbfb80606af72797
|
Shell
|
envoylabs/cosmos-autostaking
|
/auto_delegate.sh
|
UTF-8
| 729 | 3.65625 | 4 |
[] |
no_license
|
#!/bin/bash
# Endless auto-staking loop: every 30 seconds, invoke the delegate script
# (plain or expect-driven, depending on the keyring backend configured in
# the sourced profile). -p selects an alternative profile file.
usage() { echo "Usage: $0 [-p <string>]" 1>&2; exit 1; }
while getopts ":p:" option; do
case "${option}" in
p)
p=${OPTARG}
;;
*)
usage
;;
esac
done
shift $((OPTIND-1))
# Default profile lives next to the working directory.
if [[ -z "${p}" ]]; then
p="${PWD}/.profile"
fi
source ${p}
echo "Last running: $(date)" > ${PWD}/auto_delegate.log
echo "Log: ${PWD}/auto_delegate.log"
while :
do
# test/memory keyrings need no password; otherwise drive the prompt
# with expect, feeding the password stored in .passwd.
# NOTE(review): the no-password branch logs to output.log while the
# expect branch logs to auto_delegate.log — inconsistent on purpose?
if [[ "$KEYRING_BACKEND" = "test" || "$KEYRING_BACKEND" = "memory" ]]; then
${PWD}/delegate.sh >> ${PWD}/output.log
else
${PWD}/delegate.exp $(cat ${PWD}/.passwd) >> ${PWD}/auto_delegate.log
fi
echo "------ SLEEP 30s ------" >> ${PWD}/auto_delegate.log
sleep 30
done
| true |
377e409253f376ba3bb933c6e71e4f08a8b0e4a2
|
Shell
|
ronzyfonzy/server-provisions
|
/manifests/php.sh
|
UTF-8
| 1,110 | 4.15625 | 4 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Provisioning manifest for PHP5 (fpm + common extensions): parses an
# install/update/uninstall action from the flags and dispatches to the
# matching _<action> function. Relies on installApt/uninstallApt helpers
# from common/base.sh.
CURRENT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source "${CURRENT_DIR}/../common/base.sh"
###
# Variables
###
###
# Input checker
###
# NOTE(review): if no recognized flag is given, PROCESS stays unset and
# the dispatcher below falls through to _help — apparently intentional.
while [[ $# -gt 0 ]]
do
key="$1"
case $key in
-h | --help)
PROCESS="help"
;;
-i | --install)
PROCESS="install"
;;
-d | --dir)
INSTALL_DIR="$2"
shift
;;
-u | --update)
PROCESS="update"
;;
-un | --uninstall)
PROCESS="uninstall"
;;
*)
# unknown option
;;
esac
shift # past argument or value
done
###
# Script
###
# Print usage when no action was selected.
function _help {
printf "No process was initiated.
-i, --install Install
-u, --update Update
--uninstall Uninstall
-h, --help Display this help and exit
"
}
# Install php5-fpm plus common extensions and restart the FPM service.
function _install {
printf "install\n"
installApt php5-fpm php5-cli php5-mysql php5-curl php5-intl php-pear php5-mcrypt php5-memcached
sudo service php5-fpm restart
}
# Placeholder: no update steps defined yet.
function _update {
printf "update\n"
}
# Remove every php5-* package.
function _uninstall {
printf "uninstall\n"
uninstallApt php5-*
}
case ${PROCESS} in
install)
_install
;;
update)
_update
;;
uninstall)
_uninstall
;;
*)
_help
;;
esac
| true |
b227633c8c2a3aa42d944e172ecce9ac5dde8c76
|
Shell
|
avatao-content/challenge-toolbox
|
/entrypoint.sh
|
UTF-8
| 1,312 | 3.859375 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# This script can be used as an entrypoint for build jobs
# or it can be "sourced" without parameters. Supports POSIX shells.
# -a auto-exports every assignment (so sourced .env vars reach children),
# -e/-u fail fast, pipefail propagates mid-pipeline failures.
set -aeuo pipefail

# Load environment overrides next to the script and in the CWD.
# POSIX fix: use `.` instead of the bash-only `source`.
if [ -e "$(dirname "$0")/.env" ]; then
    . "$(dirname "$0")/.env"
fi
if [ -e ".env" ]; then
    . ".env"
fi

# Support Google Cloud Build
if [ -n "${PROJECT_ID-}" ] && [ -n "${BUILD_ID-}" ]; then
    export GOOGLE_PROJECT_ID="$PROJECT_ID"
    export CI=true
fi

# Configure Google Cloud SDK
# POSIX fix: `&>` is a bashism (plain sh parses it as backgrounding the
# command); redirect stdout and stderr explicitly.
if command -v gcloud >/dev/null 2>&1; then
    if [ -n "${GOOGLE_SERVICE_ACCOUNT_KEY-}" ]; then
        export GOOGLE_APPLICATION_CREDENTIALS="/tmp/challenge-toolbox-google-service-account-key.json"
        echo "$GOOGLE_SERVICE_ACCOUNT_KEY" > "$GOOGLE_APPLICATION_CREDENTIALS"
    fi
    if [ -n "${GOOGLE_APPLICATION_CREDENTIALS-}" ]; then
        gcloud auth activate-service-account --key-file="$GOOGLE_APPLICATION_CREDENTIALS"
    fi
    if [ -n "${GOOGLE_PROJECT_ID-}" ]; then
        gcloud config set project "$GOOGLE_PROJECT_ID"
    fi
fi

# Log into docker registries from environment variables:
# DOCKER_LOGIN_[ID]_{SERVER,USERNAME,PASSWORD}
# Bug fix: under `set -e -o pipefail`, a grep with no matches (i.e. no
# DOCKER_LOGIN_* variables configured) aborted the whole script; tolerate
# the no-match case.
env | { grep -Eo "^DOCKER_LOGIN_[A-Z0-9_]+_" || true; } | sort -u | xargs -r -I[] sh -c \
    'echo "$[]PASSWORD" | docker login --username "$[]USERNAME" "$[]SERVER" --password-stdin '
# Execute the parameters...
if [ $# -ne 0 ]; then
    exec "$@"
fi
| true |
66a592b09c64f14612b723c8ba32e84959af480e
|
Shell
|
rfjakob/gocryptfs
|
/tests/dl-linux-tarball.bash
|
UTF-8
| 636 | 4.34375 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# This script checks the size of /tmp/linux-3.0.tar.gz and downloads
# a fresh copy if the size is incorrect or the file is missing.
#
# Shell options are set here rather than on the shebang line so they
# still apply when the script is invoked as 'bash dl-linux-tarball.bash'.
set -eu

URL=https://cdn.kernel.org/pub/linux/kernel/v3.0/linux-3.0.tar.gz
TGZ=/tmp/linux-3.0.tar.gz
SIZE_WANT=96675825

SIZE_ACTUAL=0
if [[ -e $TGZ ]]; then
	if [[ $OSTYPE == linux* ]] ; then
		SIZE_ACTUAL=$(stat -c %s "$TGZ")
	else
		# Mac OS X (BSD stat uses -f with a format string)
		SIZE_ACTUAL=$(stat -f %z "$TGZ")
	fi
fi

if [[ $SIZE_ACTUAL -ne $SIZE_WANT ]]; then
	echo "Downloading linux-3.0.tar.gz"
	if command -v wget > /dev/null ; then
		wget -nv --show-progress -c -O "$TGZ" "$URL"
	else
		# -f: fail with a non-zero exit on HTTP errors instead of saving
		#     the error page as the tarball; -L: follow redirects like wget.
		curl -fL -o "$TGZ" "$URL"
	fi
fi
| true |
4a4b3556a2b6e00e0254c8bd35b228d6a087a1d5
|
Shell
|
maverickcasanova/things
|
/scripts/battery_watchdog
|
UTF-8
| 2,498 | 3.5625 | 4 |
[] |
no_license
|
#!/bin/sh
#
# battery_watchdog v0.2 - Shutdown (graceful or hardcore) system when battery is low.
#
# Copyright (c) 2011, Piotr Karbowski <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
#   list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this
#   list of conditions and the following disclaimer in the documentation and/or other
#   materials provided with the distribution.
# * Neither the name of the Piotr Karbowski nor the names of its contributors may be
#   used to endorse or promote products derived from this software without specific
#   prior written permission.

# Must run as root: it writes magic-SysRq triggers to /proc/sysrq-trigger
# and powers the machine off.

# Remember to check and adjust paths below, they may not be valid on your system!
#battery_dir='/sys/bus/acpi/drivers/battery/PNP0C0A:00/power_supply/BAT0'
battery_dir='/sys/class/power_supply/BAT0'
#ac_dir='/sys/bus/acpi/drivers/ac/ACPI0003:00/power_supply/AC'
ac_dir='/sys/class/power_supply/AC'

# Poll interval in seconds; defaults to 60 and is tightened to 30
# once the charge drops to 15% or below.
interval=''

while :; do
	interval="${interval:-60}"
	if [ "$(cat ${ac_dir}/online)" = '0' ]; then
		# Running on battery.
		# NOTE(review): some batteries expose energy_now/energy_full instead of
		# charge_now/charge_full -- verify these sysfs names on target hardware.
		battery_remaining="$(cat ${battery_dir}/charge_now)"
		battery_total="$(cat ${battery_dir}/charge_full)"
		battery_percent="$((battery_remaining*100/battery_total))"
		if [ "${battery_percent}" -le '5' ]; then
			# 5% or less remaining power in the battery.
			logger -t 'battery watchdog' "Hardcore emergency shutdown, ${battery_percent}% battery left."
			# Sync all filesystems, initial sync.
			echo 's' > '/proc/sysrq-trigger'
			# Wait 5s
			sleep 5
			# Another sync.
			echo 's' > '/proc/sysrq-trigger'
			# Wait 3s
			sleep 3
			# Another sync.
			echo 's' > '/proc/sysrq-trigger'
			# Wait just 1s
			sleep 1
			# Remount all filesystems read-only.
			# BUGFIX: SysRq 'u' is the remount-read-only trigger; the previous
			# 'r' only turned off keyboard raw mode and left filesystems writable.
			echo 'u' > '/proc/sysrq-trigger'
			# Wait 3s
			sleep 3
			# Poweroff machine
			echo 'o' > '/proc/sysrq-trigger'
		elif [ "${battery_percent}" -le '10' ]; then
			# Hibernate? Poweroff?
			logger -t 'battery watchdog' "Emergency shutdown, ${battery_percent}% battery left."
			hibernate || poweroff
		elif [ "${battery_percent}" -le '15' ]; then
			# 15% or less in the battery.
			# Changing interval to 30s
			interval='30'
		else
			# Reset interval to 60s.
			interval='60'
		fi
		sleep "${interval}"
	else
		# Running on AC. Wait minute before next check.
		sleep 60
	fi
done
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.