blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
---|---|---|---|---|---|---|---|---|---|---|---|
c70baf00284e14d466a879d668177595414ca8f1
|
Shell
|
alexmoore/dotfiles
|
/git_setup.sh
|
UTF-8
| 457 | 2.515625 | 3 |
[] |
no_license
|
#!/bin/bash
# Set up a git-aware bash prompt plus git tab completion, using the scripts
# bundled with a Homebrew-installed git (/usr/local/opt/git paths).
source /usr/local/opt/git/etc/bash_completion.d/git-prompt.sh
source /usr/local/opt/git/etc/bash_completion.d/git-completion.bash
## Add git branch
# Blue time:dir prompt with a yellow "(branch)" from __git_ps1 for normal
# users; the continuation line swaps to red/cyan when running as root (EUID 0).
PS1='\[\e[4m\e[34m\]\t:\W\[\e[0;33m\]$(__git_ps1 "(%s)") \[\e[m\]\$ '; [[ $EUID == 0 ]] &&
PS1='\[\e[4m\e[31m\]\t:\W\[\e[0;36m\]$(__git_ps1 "(%s)") \[\e[m\]\$ '
# Show dirty-state markers (* unstaged, + staged) in the __git_ps1 output
export GIT_PS1_SHOWDIRTYSTATE=1
# Branch-name completion for the user's gco/gb git aliases
complete -o default -o nospace -F _git_checkout gco
complete -o default -o nospace -F _git_branch gb
| true |
ec9e1ffce9e10f8df9b1f30ccf358d834f8b4efd
|
Shell
|
alexandruantochi/re_crawler
|
/entrypoint.sh
|
UTF-8
| 173 | 2.78125 | 3 |
[] |
no_license
|
#!/bin/bash
# Run the OLX real-estate spider unless its output file already exists.
# Usage: ./entrypoint.sh OUTPUT_FILENAME   (file is written under ./data/)
FILE="./data/$1"
# Quote the path: the original unquoted test broke on spaces and, with no
# argument, silently tested the bare directory.
if [ -f "$FILE" ]; then
  echo "File already exists. $FILE"
else
  python -m scrapy runspider -o "$FILE" "./re_crawler/spiders/olx_re_ro.py"
fi
| true |
1409ba9ee1b3ed960ccf802ae55fd9bcbb2d49b4
|
Shell
|
aur-archive/conkyclementine-bzr
|
/PKGBUILD
|
UTF-8
| 804 | 2.578125 | 3 |
[] |
no_license
|
# Contributor: kaivalagi <[email protected]>
# Maintainer: blades <[email protected]>
pkgname=conkyclementine-bzr
pkgver=r8
pkgrel=1
pkgdesc="Provides Clementine info, for use in Conky."
arch=('i686' 'x86_64')
url="https://code.launchpad.net/~conky-companions/+junk/conkyclementine"
license=('GPL3')
depends=('python2')
makedepends=('bzr')
install=$pkgname.install
_bzrbranch="~conky-companions/+junk"
_bzrmod="conkyclementine"
source=("conkyclementine::bzr+http://bazaar.launchpad.net/${_bzrbranch}/${_bzrmod}")
md5sums=('SKIP')

# Derive the package version from the bzr revision number.
pkgver() {
  cd "$srcdir/conkyclementine"
  printf "r%s" "$(bzr revno)"
}

build() {
  # Quote $srcdir/$pkgdir throughout: makepkg build paths may contain spaces,
  # and the original unquoted uses would word-split.
  cd "$srcdir/conkyclementine"
  python setup.py build
}

package() {
  cd "$srcdir/conkyclementine"
  python setup.py install --root="$pkgdir"
  install -D -m644 README "$pkgdir/usr/share/conkyclementine/README"
}
| true |
c177548a77d17a58933931915d768bad8c9aa104
|
Shell
|
bysanivs/Shekel-Online-DevOps-assignment
|
/readme.bash
|
UTF-8
| 351 | 2.796875 | 3 |
[] |
no_license
|
#Script to check service status
#Usage (documentation only -- the example lines below are commented out so
#that running or sourcing this readme does not execute them):
#  ./checkService.sh SERVICENAME TASK
#  TASK=start|stop|status|restart
#runservice is a script, we can use to check for a service and start it if it is not running and to make it run always we can add it to crontab as below format
#crontab -e
#  * * * * * /usr/scripts/runservice.sh servicename >> /usr/logs/runservice.log
| true |
ac0438c24a87bb46949d83ecc4d04540926c387f
|
Shell
|
PeterZhouSZ/learning-to-generate-wasserstein-barycenters
|
/experiments/run_interpolate_XP.sh
|
UTF-8
| 5,262 | 2.796875 | 3 |
[] |
no_license
|
#!/bin/bash
# usage: ./run_interpolate_XP.sh [model_id] [results_XPs_path] + comment/uncomment desired lines
# Positional arguments forwarded to interpolate_XP.py:
#   $1 -> -m (model id)        $2 -> -r (results path)
#   $3 -> -c   $4 -> -f   $5 -> -t   -- semantics defined by interpolate_XP.py;
#   presumably config/folder/target options -- confirm against that script.
echo "================================"
echo "== INTERPOLATIONS EXPERIMENTS =="
echo "================================"
triangle_size=2 # min=1
pentagon_size=3 # min=2
echo "Generating barycenters - same classes, triangle ..."
python interpolate_XP.py -m $1 -r $2 -k "triangle" -f $4 -l "cat1,cat2,cat3" -t $5 -i "cats" -n $triangle_size -c $3
echo "Generating barycenters - same classes, pentagon ..."
python interpolate_XP.py -m $1 -r $2 -k "pentagon" -f $4 -l "cat1,cat2,cat3,cat4,cat5" -t $5 -i "cats" -n $pentagon_size -c $3
echo "Generating barycenters - same classes, line ..."
python interpolate_XP.py -m $1 -r $2 -k "line" -f $4 -l "cat1,cat2" -t $5 -i "cats_28x28" -w "0.0,1.0|0.1,0.9|0.2,0.8|0.3,0.7|0.4,0.6|0.5,0.5|0.6,0.4|0.7,0.3|0.8,0.2|0.9,0.1|1.0,0.0" -c $3 -s '28,28'
python interpolate_XP.py -m $1 -r $2 -k "line" -f $4 -l "cat1,cat2,cat3" -t $5 -i "cats" -n 8 -c $3
python interpolate_XP.py -m $1 -r $2 -k "line" -f $4 -l "cat1,cat2,cat3,cat4,cat5" -t $5 -i "cats" -n 8 -c $3
python interpolate_XP.py -m $1 -r $2 -k "line" -f $4 -l "cat1,cat2" -t $5 -i "cats_iso" -w "0.5,0.5|0.5,0.5" -c $3
echo "Generating barycenters - different classes, triangle ..."
python interpolate_XP.py -m $1 -r $2 -k "triangle" -f $4 -l "cat1,car1,cloud1" -t $5 -i "diff" -n $triangle_size -c $3
echo "Generating barycenters - different classes, pentagon ..."
python interpolate_XP.py -m $1 -r $2 -k "pentagon" -f $4 -l "cat1,car1,cloud1,diamond1,owl1" -t $5 -i "diff" -n $pentagon_size -c $3
echo "Generating barycenters - different classes, line ..."
python interpolate_XP.py -m $1 -r $2 -k "line" -f $4 -l "car1,owl1" -t $5 -i "diff" -w "0.0,1.0|0.1,0.9|0.2,0.8|0.3,0.7|0.4,0.6|0.5,0.5|0.6,0.4|0.7,0.3|0.8,0.2|0.9,0.1|1.0,0.0" -c $3
python interpolate_XP.py -m $1 -r $2 -k "line" -f $4 -l "car1,owl1" -t $5 -i "diff_28x28" -w "0.0,1.0|0.1,0.9|0.2,0.8|0.3,0.7|0.4,0.6|0.5,0.5|0.6,0.4|0.7,0.3|0.8,0.2|0.9,0.1|1.0,0.0" -c $3 -s '28,28'
# BUGFIX: the next two calls originally passed '-t "line"', giving a duplicate
# -t flag (the kind flag is -k everywhere else in this script).
python interpolate_XP.py -m $1 -r $2 -k "line" -f $4 -l "cat1,car1,cloud1" -t $5 -i "diff" -n 8 -c $3
python interpolate_XP.py -m $1 -r $2 -k "line" -f $4 -l "cat1,car1,cloud1,diamond1,owl1" -t $5 -i "diff" -n 8 -c $3
echo "Generating interpolation of lines..."
python interpolate_XP.py -m $1 -r $2 -k "pentagon" -f $4 -l "line1,line2,line3,line4,line5" -t $5 -i "fig4" -n $pentagon_size -c $3
python interpolate_XP.py -m $1 -r $2 -k "line" -f $4 -l "line1,line2,line3,line4,line5" -t $5 -i "lines" -n 8 -c $3
python interpolate_XP.py -m $1 -r $2 -k "line" -f $4 -l "line4,line5" -t $5 -i "lines" -w "0.0,1.0|0.1,0.9|0.2,0.8|0.3,0.7|0.4,0.6|0.5,0.5|0.6,0.4|0.7,0.3|0.8,0.2|0.9,0.1|1.0,0.0" -c $3
echo "Generating interpolation of ellipses..."
python interpolate_XP.py -m $1 -r $2 -k "pentagon" -f $4 -l "circle1,circle2,circle3,circle4,circle5" -t $5 -i "fig6" -n $pentagon_size -c $3
python interpolate_XP.py -m $1 -r $2 -k "line" -f $4 -l "circle1,circle2,circle3,circle4,circle5" -t $5 -i "ellipses" -n 8 -c $3
python interpolate_XP.py -m $1 -r $2 -k "line" -f $4 -l "circle3,circle15" -t $5 -i "circles" -w "0.0,1.0|0.1,0.9|0.2,0.8|0.3,0.7|0.4,0.6|0.5,0.5|0.6,0.4|0.7,0.3|0.8,0.2|0.9,0.1|1.0,0.0" -c $3
echo "Generating barycenter of 100 cats..."
# Uniform barycenter of 100 inputs: 100 labels, each with weight 0.01.
python interpolate_XP.py -m $1 -r $2 -k "line" -f $4 -l "1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100" -t $5 -i "catstest" -w "0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01" -c $3
# echo "Generating barycenters - full shapes, triangle ..."
# python interpolate_XP.py -m $1 -r $2 -k "triangle" -f $4 -l "double_disk,heart,cross" -t $5 -i "full" -n $triangle_size -c $3
# echo "Generating barycenters - full shapes, pentagon ..."
# python interpolate_XP.py -m $1 -r $2 -k "pentagon" -f $4 -l "double_disk,heart,cross,duck,tooth" -t $5 -i "full" -n $pentagon_size -c $3
# echo "Generating barycenters - full shapes, line ..."
# python interpolate_XP.py -m $1 -r $2 -k "line" -f $4 -l "double_disk,heart,cross" -t $5 -i "full" -n 6 -c $3
# python interpolate_XP.py -m $1 -r $2 -k "line" -f $4 -l "double_disk,heart,cross,duck,tooth" -t $5 -i "full" -n 6 -c $3
echo "Building interpolation animations..."
python interpolate_XP.py -m $1 -r $2 -k "anim" -f $4 -l "cat1,cat2" -t $5 -c $3
python interpolate_XP.py -m $1 -r $2 -k "anim" -f $4 -l "car1,owl1" -t $5 -c $3
# with full shapes:
# python interpolate_XP.py -m $1 -r $2 -k "anim" -f $4 -l "double_disk,duck" -t $5 -c $3
# python interpolate_XP.py -m $1 -r $2 -k "anim" -f $4 -l "double_disk,heart" -t $5 -c $3
| true |
252138364f9887941f49d907a337135db06edde0
|
Shell
|
cafernandezlo/bash_utils
|
/screencasting
|
UTF-8
| 1,165 | 3.703125 | 4 |
[] |
no_license
|
#! /bin/bash
# This script has as main objective to record the screen and produce a video with audio from microphone
# Options: -p use PulseAudio, -w record webcam only, -o output file,
#          -s stop an in-progress recording, -h show help.
#default/preferred options
screen_size=$(xdpyinfo | grep dimensions | awk '{print $2}')
video_source="-f x11grab -s $screen_size -i :0.0"
video_encoder="-c:v libx264 -r 30"
audio_source="-f alsa -ac 2 -i default"
audio_encoder="-c:a flac"
output_name="output.mkv"

# Print usage. Pass $0 as a printf argument (%s), not inside the format
# string, so a '%' in the script path cannot corrupt the output.
info(){
  printf 'Usage: %s [-p] [-w] [-o FILE] [-s] [-h]\n' "$0"
  printf '\t-p\tUse pulse audio\n'
  printf '\t-w\tRecord webcam only\n'
  printf '\t-o\tName and extension of the output FILE\n'
  printf '\t-s\tStop recording\n'
  printf '\t-h\tDisplay this help\n'
}

while getopts "pwo:sh" o; do
  case "${o}" in
    p)
      audio_source="-f pulse -ac 2 -i default"
      ;;
    w)
      video_source="-i /dev/video0"
      ;;
    o)
      output_name=$OPTARG
      ;;
    s)
      # NOTE(review): this kills *every* ffmpeg process on the machine, not
      # just the one this script started -- consider recording its PID.
      pkill -f "ffmpeg"&&exit
      ;;
    *)
      info && exit
      ;;
  esac
done

# The *_source/*_encoder variables are intentionally unquoted: each holds
# several ffmpeg arguments that must word-split.
ffmpeg -y -nostdin $video_source $audio_source $video_encoder $audio_encoder $output_name >/dev/null 2>&1 &
| true |
4ce436bef1ba0a3d810fb1f6ded447bb0b2f7f71
|
Shell
|
mmirolim/kythe
|
/kythe/cxx/extractor/testdata/test_claim_pragma.sh
|
UTF-8
| 828 | 3.0625 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -e
# This test checks that the extractor handles #pragma kythe_claim.
# It should be run from the Kythe root.
# Paths to the campfire build outputs used by this test.
BASE_DIR="${PWD}/kythe/cxx/extractor/testdata"
OUT_DIR="${PWD}/campfire-out/test/kythe/cxx/extractor/testdata"
EXTRACTOR="${PWD}/campfire-out/bin/kythe/cxx/extractor/cxx_extractor"
KINDEX_TOOL="${PWD}/campfire-out/bin/kythe/cxx/tools/kindex_tool"
# The extractor writes its output kindex under a content-hash filename.
EXPECTED_INDEX="701540b440f4a705d068bf22dca4963251447feb72b2cdecd24c83ac369dedf7.kindex"
INDEX_PATH="${OUT_DIR}"/"${EXPECTED_INDEX}"
# Remove any stale exploded unit from a previous run.
rm -f -- "${INDEX_PATH}_UNIT"
# Extract claim_main.cc; KYTHE_OUTPUT_DIRECTORY tells the extractor where to
# write the kindex file.
KYTHE_OUTPUT_DIRECTORY="${OUT_DIR}" \
"${EXTRACTOR}" --with_executable "/usr/bin/g++" \
-I./kythe/cxx/extractor/testdata \
./kythe/cxx/extractor/testdata/claim_main.cc
# Explode the kindex and diff the compilation unit against the golden file;
# any difference fails the script via the -e shebang flag.
"${KINDEX_TOOL}" -suppress_details -explode "${INDEX_PATH}"
diff "${BASE_DIR}/claim_main.UNIT" "${INDEX_PATH}_UNIT"
| true |
25d1946ac5a17c9ec0776691b97a2397d5c1ef06
|
Shell
|
42daniel/heroku-private-buildpack
|
/bin/detect
|
UTF-8
| 242 | 2.796875 | 3 |
[] |
no_license
|
#!/bin/sh
# Heroku buildpack "detect" hook: $1 is the app's build directory.
# we'll expect a file that lists the private buildpacks to apply
# NOTE(review): the original comment mentioned hello.txt, but the code checks
# for .heroku-private-build.lst -- the list file is what actually matters.
# Quote the path so a build directory containing spaces (or an empty $1)
# cannot break the test.
if [ -f "$1/.heroku-private-build.lst" ]; then
  echo "Private buildpack detected."
  exit 0
else
  exit 1
fi
| true |
514b6e167bc2166b176783eb1d0616aa43d3d157
|
Shell
|
mvvveen/SH-OET-Python
|
/installanaconda.sh
|
UTF-8
| 2,624 | 3.40625 | 3 |
[] |
no_license
|
#!/bin/bash
# This script installs Python 2.7.6 3.3.5 and Anaconda3
# My Anaconda don't want none unless you got buns hun
clear
echo "This script will install and configure anaconda software for your system"
# Check whether you are root
if [ "$(whoami)" == "root" ]; then
  echo "Sorry, you are running with root, please run without sudo rights"
  exit 1
fi
# Let's download pyenv
git clone https://github.com/yyuu/pyenv.git ~/.pyenv
# Let's install some dependencies
sudo apt-get install -y make build-essential libssl-dev zlib1g-dev libbz2-dev libreadline-dev libsqlite3-dev wget curl llvm libpq-dev python-dev pkg-config libpng-dev libfreetype6-dev
# Let's set some of those environment variables and make sure them suckers stick after a reboot (needs some checks)
echo 'export PYENV_ROOT="$HOME/.pyenv"' >> ~/.bashrc
echo 'export PATH="$PYENV_ROOT/bin:$PATH"' >> ~/.bashrc
echo 'eval "$(pyenv init -)"' >> ~/.bashrc
export PYENV_ROOT="$HOME/.pyenv"
export PATH="$PYENV_ROOT/bin:$PATH"
eval "$(pyenv init -)"
# Here we install the actual python versions
pyenv install 3.3.5
pyenv rehash
pyenv install 2.7.6
pyenv rehash
git clone https://github.com/yyuu/pyenv-virtualenv.git ~/.pyenv/plugins/pyenv-virtualenv
# Let's install some demo stuff
pyenv virtualenv 2.7.6 venv_2.7.6_deformdemo
pyenv virtualenv 3.3.5 venv_3.3.5_deformdemo
# Created venv in ~/.pyenv/versions/venv_3.3.5_deformdemo
git clone https://github.com/Pylons/deformdemo.git ~/deformdemo
cd ~/deformdemo
pyenv activate venv_2.7.6_deformdemo
pip install setuptools --upgrade
python setup.py develop
pyenv activate venv_3.3.5_deformdemo
pip install setuptools --upgrade
pip install gunicorn
python setup.py develop
# Here we download that snake if it is not there already
if [ ! -f "./Anaconda3-4.3.0-Linux-x86_64.sh" ]; then
  wget https://repo.continuum.io/archive/Anaconda3-4.3.0-Linux-x86_64.sh
fi
# Let's start the sucker
bash Anaconda3-4.3.0-Linux-x86_64.sh
# More environment variables to set
# BUGFIX: a tilde inside double quotes is NOT expanded, so the original
# PATH="~/anaconda3/bin:$PATH" added a literal '~' entry; use $HOME instead.
export PATH="$HOME/anaconda3/bin:$PATH"
echo 'export PATH="$HOME/anaconda3/bin:$PATH"' >> ~/.bashrc
# Stuff used to make the websites work
conda install pandas -y
conda install jupyter -y
# BUGFIX: 'conda/pip install ...' is not a command (it looked up a path named
# conda/pip and failed); install these via pip like the packages below.
pip install pyjade
pip install numpy
pip install geoalchemy2
pip install sqlalchemy_utils
pip install transaction
pip install pyramid
pip install geojson
pip install gunicorn
# This one is need to connect python to postgresql
sudo apt-get build-dep -y python-psycopg2
# This one is a conveniant way to start the website (look into changing this to systemctl services)
sudo apt-get install -y supervisor
sudo systemctl enable supervisor
| true |
97af65de009fb38722d226fecedfa1f6a7716615
|
Shell
|
ceylon-docker/ceylon
|
/build.sh
|
UTF-8
| 6,352 | 4 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -e
# Define all the versions that should be generated
VERSIONS=(1.0.0 1.1.0 1.2.0 1.2.1 1.2.2 1.3.0 1.3.1 1.3.2 1.3.3)
# Define the "latest" version
LATEST=1.3.3
# Define which JRE versions to generate for
JRES=(7 8)
# Define the default JRE
DEFAULT_JRE=8
# Define default platform
DEFAULT_PLATFORM="debian"
# Docker Hub repository all images are tagged under.
IMAGE=docker.io/ceylon/ceylon
# Action flags toggled by the CLI options below (0 = off, 1 = on).
BUILD=0
PULL=0
PUSH=0
CLEAN=0
VERBOSE=0
# Passed to 'docker build'; cleared by --verbose to show full build output.
QUIET=-q
# Parse command-line flags; any combination of actions may be enabled.
for arg in "$@"; do
case "$arg" in
--help)
echo "Usage: $0 [--help] [--pull] [--build] [--push] [--clean] [--verbose]"
echo ""
echo " --help : shows this help text"
echo " --pull : pulls any previously existing images from Docker Hub"
echo " --build : runs 'docker build' for each image"
echo " --push : pushes each image to Docker Hub"
echo " --clean : removes local images"
echo " --verbose : show more information while running docker commands"
echo ""
exit
;;
--build)
BUILD=1
;;
--pull)
PULL=1
;;
--push)
PUSH=1
;;
--clean)
CLEAN=1
;;
--verbose)
VERBOSE=1
QUIET=
;;
esac
done
# Print an optional message to stdout and abort the script with status 1.
error() {
  local msg="$1"
  if [[ -n "$msg" ]]; then
    echo "$msg"
  fi
  exit 1
}
# Render a Dockerfile template into $VERSION/$NAME, then optionally pull,
# build, tag and clean the resulting image.
# Arguments: $1 version, $2 base image (FROM), $3 image name, $4 template
# Dockerfile under templates/, $5 1 to also copy templates/bootstrap.sh,
# remaining args = extra tags to apply.
# Reads globals: IMAGE, QUIET, PULL, BUILD, CLEAN, VERBOSE.
function build_dir() {
local VERSION=$1
[[ -z $VERSION ]] && error "Missing 'version' parameter for build_dir()"
local FROM=$2
[[ -z $FROM ]] && error "Missing 'from' parameter for build_dir()"
local NAME=$3
[[ -z $NAME ]] && error "Missing 'name' parameter for build_dir()"
local DOCKERFILE=$4
[[ -z $DOCKERFILE ]] && error "Missing 'dockerfile' parameter for build_dir()"
local INCLUDE_BOOTSTRAP=$5
[[ -z $INCLUDE_BOOTSTRAP ]] && error "Missing 'include_bootstrap' parameter for build_dir()"
shift 5
local TAGS=("$@")
echo "Building image $NAME with tags ${TAGS[@]} ..."
# Stage the template in a scratch dir and substitute the placeholders.
# NOTE(review): fixed /tmp path, not mktemp -- fine for a single-user build
# box but racy if two builds run concurrently.
rm -rf /tmp/docker-ceylon-build-templates
mkdir /tmp/docker-ceylon-build-templates
[[ $INCLUDE_BOOTSTRAP -eq 1 ]] && cp templates/bootstrap.sh /tmp/docker-ceylon-build-templates/
cp templates/$DOCKERFILE /tmp/docker-ceylon-build-templates/Dockerfile
# Escape '/' in the FROM image name so it is safe inside the sed replacement.
sed -i "s/@@FROM@@/${FROM//\//\\/}/g" /tmp/docker-ceylon-build-templates/Dockerfile
sed -i "s/@@VERSION@@/$VERSION/g" /tmp/docker-ceylon-build-templates/Dockerfile
mkdir -p "$VERSION/$NAME"
pushd "$VERSION/$NAME" > /dev/null
cp /tmp/docker-ceylon-build-templates/* .
rm -rf /tmp/docker-ceylon-build-templates
if [[ $PULL -eq 1 ]]; then
echo "Pulling existing image from Docker Hub (if any)..."
# '|| true' because a missing remote image is expected under 'set -e'.
if [[ $VERBOSE -eq 1 ]]; then
docker pull "$FROM" || true
docker pull "${IMAGE}:$NAME" || true
else
docker pull "$FROM" > /dev/null || true
docker pull "${IMAGE}:$NAME" > /dev/null || true
fi
fi
if [[ $BUILD -eq 1 ]]; then
echo "Building image..."
docker build -t "${IMAGE}:$NAME" $QUIET .
fi
# Apply the extra tags to the freshly built image.
for t in ${TAGS[@]}; do
[[ $BUILD -eq 1 ]] && docker tag "${IMAGE}:$NAME" "${IMAGE}:$t"
done
if [[ $CLEAN -eq 1 ]]; then
echo "Removing image..."
docker rmi "${IMAGE}:$NAME"
for t in ${TAGS[@]}; do
docker rmi "${IMAGE}:$t"
done
fi
# Drop any dangling images left behind by the build.
echo "Cleaning up..."
local oldimages=$(docker images --filter dangling=true -q 2>/dev/null)
[[ -n "$oldimages" ]] && docker rmi $oldimages
popd > /dev/null
}
# Build the regular image for one version/JRE/platform combination, then the
# matching "-onbuild" variant layered on top of it (each tag gains an
# "-onbuild" suffix for the second image).
# Arguments: $1 version, $2 base image, $3 jre, $4 platform; rest = tags.
function build_normal_onbuild() {
  local VERSION=$1 FROM=$2 JRE=$3 PLATFORM=$4
  [[ -n $VERSION ]] || error "Missing 'version' parameter for build_normal_onbuild()"
  [[ -n $FROM ]] || error "Missing 'from' parameter for build_normal_onbuild()"
  [[ -n $JRE ]] || error "Missing 'jre' parameter for build_normal_onbuild()"
  [[ -n $PLATFORM ]] || error "Missing 'platform' parameter for build_normal_onbuild()"
  shift 4
  local TAGS=("$@")
  echo "Building for JRE $JRE with tags ${TAGS[@]} ..."
  # Derive the tag list for the -onbuild companion image.
  local OBTAGS=() tag
  for tag in "${TAGS[@]}"; do
    OBTAGS+=("$tag-onbuild")
  done
  local NAME="$VERSION-$JRE-$PLATFORM"
  build_dir $VERSION $FROM $NAME "Dockerfile.$JRE.$PLATFORM" 1 "${TAGS[@]}"
  build_dir $VERSION "ceylon/ceylon:$NAME" "$NAME-onbuild" "Dockerfile.onbuild" 0 "${OBTAGS[@]}"
}
# For one version/platform, build images for every JRE in $JRES and compute
# the full tag matrix. '@' in the templates is replaced by the JRE number.
# Arguments: $1 version, $2 FROM-image template, $3 jre-name template,
# $4 platform. Reads globals: JRES, DEFAULT_PLATFORM, DEFAULT_JRE, LATEST.
function build_jres() {
local VERSION=$1
[[ -z $VERSION ]] && error "Missing 'version' parameter for build_jres()"
local FROM_TEMPLATE=$2
[[ -z $FROM_TEMPLATE ]] && error "Missing 'from_template' parameter for build_jres()"
local JRE_TEMPLATE=$3
[[ -z $JRE_TEMPLATE ]] && error "Missing 'jre_template' parameter for build_jres()"
local PLATFORM=$4
[[ -z $PLATFORM ]] && error "Missing 'platform' parameter for build_jres()"
echo "Building for platform $PLATFORM ..."
for t in ${JRES[@]}; do
# Substitute the JRE number into both templates ('@' placeholder).
local FROM=${FROM_TEMPLATE/@/$t}
local JRE=${JRE_TEMPLATE/@/$t}
local TAGS=()
# Tag matrix: platform-less tags only for the default platform; version-less
# and "latest" aliases only for the default JRE / the LATEST version.
if [[ "$PLATFORM" == "$DEFAULT_PLATFORM" ]]; then
TAGS+=("$VERSION-$JRE")
if [[ "$t" == "$DEFAULT_JRE" ]]; then
TAGS+=("$VERSION")
if [[ "$VERSION" == "$LATEST" ]]; then
TAGS+=("latest")
fi
fi
if [[ "$VERSION" == "$LATEST" ]]; then
TAGS+=("latest-$JRE")
fi
fi
if [[ "$t" == "$DEFAULT_JRE" ]]; then
TAGS+=("$VERSION-$PLATFORM")
fi
if [[ "$VERSION" == "$LATEST" ]]; then
TAGS+=("latest-$JRE-$PLATFORM")
if [[ "$t" == "$DEFAULT_JRE" ]]; then
TAGS+=("latest-$PLATFORM")
fi
fi
build_normal_onbuild $VERSION $FROM $JRE $PLATFORM "${TAGS[@]}"
done
}
# Build all JRE/platform image variants for a single Ceylon version.
function build() {
  local version=$1
  [[ -n $version ]] || error "Missing 'version' parameter for build()"
  echo "Building version $version ..."
  local platform
  for platform in redhat debian; do
    build_jres $version "ceylon/ceylon-base:jre@-$platform" "jre@" "$platform"
  done
}
# Build every configured version, then optionally push all tags to the Hub.
for v in ${VERSIONS[@]}; do
build $v
done
[[ $PUSH -eq 1 ]] && echo "Pushing image to Docker Hub..." && docker push "${IMAGE}"
| true |
21dcd089bbf27bee0201a460df1594892c7bab2d
|
Shell
|
govindarajanv/linux
|
/sysadmin/sed/sed-exercises.sh
|
UTF-8
| 4,155 | 3.546875 | 4 |
[] |
no_license
|
# NOTE(review): this file is a collection of sed study notes/exercises, not a
# runnable script -- some lines below are pasted terminal transcripts
# (prompts like "[root@work-station ...]#" plus their output) and would fail
# if executed. Keep it as reference material only.
# print both matching pattern 'p' and stdout, output is duplicated, space is optional
sed ' p ' /etc/passwd
#suppress stdout and print only if pattern matches, use -n switch to suppress stdout
sed -n 'p' /etc/passwd
#print first 5 line
sed -n '1,5 p ' /etc/passwd
#print till the last line from 5th line
sed -n '5,$ p ' /etc/passwd
#print lines beginning with user
sed -n '/^user/ p ' /etc/passwd
#remove commented lines from a file
sed ' /^#/ d' /etc/ntp.conf
#remove both commented and empty lines from a file
sed ' /^#/ d; /^$/ d' /etc/ntp.conf
#Take a backup of ntp.conf as ntp.conf.safe, remove commented line '#' and empty lines
#sed -i.backup '/^#/d;/^$/d' /etc/ntp.conf
#format of sed
#sed ' [range] [/pattern/] s/<string>/<replacement>/ ' /etc/passwd
# '/' follows 's' is a delimiter
# 'g' optionis required for more than one replacement required per line
# --- pasted transcript begins: indent lines 6-9 of parsecsv.sh ---
[root@work-station vagrant]# sed ' 6,9 s/^/ /g' parsecsv.sh
#!/bin/bash
OLDIFS=$IFS
IFS=","
while read product price quantity
do
echo -e "\e[1;33m$product \
========================\e[0m\n\
Price : \t $price \n\
Quantity : \t $quantity \n"
done < $1
IFS=$OLDIFS
[root@work-station vagrant]# sed -n ' 6,9 s/^/ /g' parsecsv.sh
[root@work-station vagrant]# sed -n ' 6,9 s/^/ /p' parsecsv.sh
echo -e "\e[1;33m$product \
========================\e[0m\n\
Price : \t $price \n\
Quantity : \t $quantity \n"
# --- pasted transcript ends ---
#modify the shell from bash to sh
sed -n ' /^vagrant/ s@/bin/bash@/bin/sh@p ' /etc/passwd
#Append, insert and delete
#find server 3 and append server ntp.example.com
sed ' /^server 3/ a server ntp.example.com ' /etc/ntp.conf
#insert
sed ' /^server 0/ i server ntp.example.com ' /etc/ntp.conf
#delete
sed ' /^server\s[0-9]\.ubuntu/ d ' /etc/ntp.conf
#Multiple sed commands
sed ' {
/^server 0/ i server ntp.example.com
/^server \s[0-9]/d
} ' /etc/ntp.conf
#create .sed file
#deletion of empty lines /^$/d
#deletion of commented line /^\s*#/d
sed -f ntp.sed /etc/ntp
sed -i.backup -f ntp.sed /etc/ntp
#remotely edit file using ssh and sed
# -t switch in ssh assigns a tty allowing for sudo password
# .sed file will be on remote server
#ssh -t user@server sudo sed -i.bak -f /tmp/ntp.sed
scp ntp.sed [email protected]:/tmp
# NOTE(review): the 'sed' word is missing below -- presumably should read
# "sudo sed -i.bkp -f ..."; as written sudo gets a bogus -i option. Confirm.
ssh -t [email protected] sudo -i.bkp -f /tmp/ntp.sed /etc/ntp.conf
#substitution groups
# 1 indicates first file in the replacing string, make the last name of the employee all caps
#
sed ' s@\([^,]*\)@\U\1@' employees
# with last name of the employee all caps and first name in all small
# two groupings are separated by comma character no 17
# first command removes comma in the output while second one retains it
#https://regexr.com/ ^ - Negated set, match any character not in the set
sed 's@\([^,]*\),\([^,]*\)@\U\1\L\2@' employees
sed 's@\([^,]*\),\([^,]*\)@\U\1,\L\2@' employees
#cat dictFile
#first:second
#one:two
#swap the contents delimited by :
sed 's/\(.*\):\(.*\)/\2:\1/' dictFile
#change the above employee file such as way that print first name:last name: third column
sed 's@\(.*\),\(.*\),\(.*\)@\2:\1:\3@' employees
#
sed 's/\(^\|[0-9.]\)([0-9]\+\)\([0-9]\{3\}\)/\1\2,\3/g' /proc/loadavg
#To execute a command using sed
# cat file.list gives /etc/hosts and /etc/services
sed ' s/^/ls -l /e ' file.list
# add users in the system by reading a file containing user names
sed ' s/^/sudo useradd /e ' user.list
# creating and deleting tar files
sed ' /^\// s/^/tar -rf catalog.tar /e' cat.list
sed ' /^\// s/^/rm -f /e' cat.list
#Add extra spaces in a given range in vim
#%/stats/stuff/g here % says entire document else you can specify a range
#g represents global meaning all occurrences not the first occurrence.
# 1,30s/server/stuff/g
#if you want to intend the line, first one is pattern matching
# /^restrict/s/^/ /
#to add spaces till the end of the document from 8th line
# 8,$ s/^/
#selectively move lines to a new file
#4,10 w <filename>
#read a file
#:r <filename>
# Remove blank lines from virtualhost.conf; \ represnets a new blank line
sed ' /^\s*$/d ' virtualhost.conf
sed ' /^\s*$/d;/^<\/Virt/a \ ' virtualhost.conf
| true |
ec5fc66ed93fdb3dde8222c262390c61925c1182
|
Shell
|
Adrcpp/projet-java
|
/projet/restart.sh
|
UTF-8
| 757 | 3.265625 | 3 |
[] |
no_license
|
#!/bin/bash
# Rebuild the project's Maven modules and restart the Docker stack.
# Usage: ./restart.sh [--hard]
#   --hard : also rebuild dao-service (tests skipped) before rest-api.
if [ "$1" = "--hard" ] ; then
  echo "------------------------";
  echo 'Hard restart activated';
  cd dao-service || exit 1
  mvn clean install -Dmaven.test.skip=true
  # BUGFIX: the original did 'exit $rc' with $rc never assigned, which
  # expanded to a bare 'exit' and returned 0 (success) on build failure.
  rc=$?
  if [[ "$rc" -ne 0 ]] ; then
    echo "------------------------";
    echo 'Compilation failed on dao-service - restart stopped'; exit "$rc"
  fi
  cd ..
fi
echo "------------------------";
cd rest-api || exit 1
mvn clean install
rc=$?
if [[ "$rc" -ne 0 ]] ; then
  # -e so the \n escapes print as newlines (the original echoed them literally)
  echo -e "\n\n ------------------------";
  echo 'Compilation failed rest-api - restart stopped'; exit "$rc"
fi
echo "------------------------";
echo " Removing old docker image and container";
docker rmi tomcat:latest -f
docker rm projet_tomcat_1 -f
echo "------------------------";
echo "Starting docker container";
docker-compose up
| true |
85cda74fbcf9a510c87e4bfe3673bbb961921b8d
|
Shell
|
mgodlewski/ansible-role-logstash
|
/tests/test_ansible-role-logstash_jessie
|
UTF-8
| 2,697 | 3.484375 | 3 |
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
#!/bin/bash
# bash_unit-style test suite for the ansible-role-logstash role on Debian
# jessie: setup_suite() writes and applies a playbook, then each test_*
# function asserts one property of the converged system.
# Service running? Covers sysvinit (Debian), upstart and init.d (CentOS).
test_logstash_is_running() {
running=1
grep -q Debian /etc/issue && (service logstash status |grep -q 'running') && running=$?
grep -q CentOS /etc/issue && (initctl list |grep -q logstash) && (initctl status logstash |grep -q 'start/running') && running=$?
grep -q CentOS /etc/issue && test -x /etc/init.d/logstash && (/etc/init.d/logstash status |grep -q 'running') && running=$?
assert "test $running -eq 0"
}
# A second run of the playbook must report no changes and no failures.
test_idempotence() {
assert "sudo -u rpaulson ansible-playbook --verbose --diff -i 'localhost,' --connection=local /tmp/ansible/playbook.yml | grep -q 'changed=0.*failed=0'"
}
# Expected version depends on which install layout is present (2.x under
# /opt, 5.x under /usr/share).
test_version_installed() {
version_found=0
expected_version=tbd
[ -x /opt/logstash/bin/logstash ] && expected_version="logstash 2.3.4" && version_found="$(/opt/logstash/bin/logstash --version)"
[ -x /usr/share/logstash/bin/logstash ] && expected_version="logstash 5.6.0" && version_found="$(/usr/share/logstash/bin/logstash --version)"
assert_equals "$expected_version" "$version_found"
}
# Running with --check must succeed after convergence.
test_check_mode_succeed() {
assert "sudo -u rpaulson ansible-playbook --verbose --diff -i 'localhost,' --connection=local /tmp/ansible/playbook.yml --check"
}
# The custom filter template must be deployed verbatim.
test_custom_filters() {
assert "diff templates/etc/logstash/conf.d/20-myApp.conf /etc/logstash/conf.d/20-myApp.conf" "custom filters are missing"
}
run_ansible() {
assert "sudo -u rpaulson ansible-playbook --verbose --diff -i 'localhost,' --connection=local /tmp/ansible/playbook.yml"
}
# One-time setup: stage templates, generate the playbook (jdk8 from
# jessie-backports on Debian) and converge once before the tests run.
setup_suite() {
mkdir /tmp/ansible/group_vars -p
[ ! -d /tmp/ansible/templates ] && cp -r templates/ /tmp/ansible/
cat << EOF > /tmp/ansible/playbook.yml
- hosts: all
pre_tasks:
- name: Add backports
apt_repository:
repo: "deb http://ftp.debian.org/debian jessie-backports main"
when: ansible_os_family == 'Debian'
- name: Update apt cache.
apt: update_cache=yes cache_valid_time=600
when: ansible_os_family == 'Debian'
- name: Install jdk8 on Debian
apt:
name: "{{item}}"
default_release: jessie-backports
with_items:
- ca-certificates-java
- openjdk-8-jre
when: ansible_os_family == 'Debian'
- name: Install jdk8 on RedHat
yum:
name: java-1.8.0-openjdk-devel
when: ansible_os_family == 'RedHat'
roles:
- role: ansible-role-logstash
logstash_configuration_files:
- etc/logstash/conf.d/01-beats-input.conf
- "{{playbook_dir}}/templates/etc/logstash/conf.d/20-myApp.conf"
- etc/logstash/conf.d/30-elasticsearch-output.conf
EOF
# Optional version pin, taken from a local 'logstash_version' file if present.
[ -f logstash_version ] && echo ' logstash_version: "'$(cat logstash_version)'"' >> /tmp/ansible/playbook.yml
run_ansible
}
| true |
7a72de1b2b9d0d13aabe8af7fa9f25eca87194f1
|
Shell
|
RaInta/CoherentDirectedDAG
|
/Scripts/InstallPython.sh.old
|
UTF-8
| 256 | 2.53125 | 3 |
[] |
no_license
|
#!/bin/bash
# Install the 'intervaltree' module into a local pythonModules directory and
# make it importable by extending PYTHONPATH now and in ~/.bash_profile.
PYMOD_DIR=$(pwd)/pythonModules
mkdir -p "${PYMOD_DIR}"
# BUGFIX: the original appended to PYTHONPATH with no ':' separator (fusing
# it onto the last existing entry) and passed the misspelled -- and therefore
# empty -- ${PYMOD_PY} to --install-dir.
export PYTHONPATH=${PYTHONPATH:+${PYTHONPATH}:}${PYMOD_DIR}
easy_install --install-dir "${PYMOD_DIR}" intervaltree
echo "export PYTHONPATH=\${PYTHONPATH}:${PYMOD_DIR}" >> "${HOME}/.bash_profile"
source "${HOME}/.bash_profile"
| true |
af86eac383db092615d735c5a8e029e700c01cb5
|
Shell
|
gdsports/xac_joystick_tinyusb
|
/acli.sh
|
UTF-8
| 2,592 | 3.296875 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Set up an isolated arduino-cli environment, install the cores/libraries
# this project needs, compile every example sketch for every supported
# board, and convert the resulting .bin files to UF2 firmware images.
ARDDIR=${HOME}/Sync/acli_xac_joystick_tinyusb
export ARDUINO_BOARD_MANAGER_ADDITIONAL_URLS="https://adafruit.github.io/arduino-board-index/package_adafruit_index.json https://files.seeedstudio.com/arduino/package_seeeduino_boards_index.json"
export ARDUINO_DIRECTORIES_DATA="${ARDDIR}/data"
export ARDUINO_DIRECTORIES_DOWNLOADS="${ARDDIR}/downloads"
export ARDUINO_DIRECTORIES_USER="${ARDDIR}/user"
export MYPROJECT_EXAMPLES="${PWD}/examples"
export MYPROJECT_TOOLS="${PWD}/tools"
arduino-cli core update-index
mkdir -p ${ARDUINO_DIRECTORIES_DOWNLOADS}/packages
mkdir -p ${ARDUINO_DIRECTORIES_DOWNLOADS}/libraries
# Seed the download cache from a local staging dir, if one exists.
if [ -d ~/Sync/ard_staging ]
then
cp -R ~/Sync/ard_staging/* ${ARDUINO_DIRECTORIES_DOWNLOADS}
fi
arduino-cli core install adafruit:samd
arduino-cli core install Seeeduino:samd
arduino-cli core install adafruit:nrf52
arduino-cli core list
arduino-cli lib update-index
arduino-cli lib install "Bounce2"
arduino-cli lib install "Adafruit DotStar"
arduino-cli lib install "Adafruit TinyUSB Library"
# Expose this repository as an Arduino library inside the sandbox.
ln -s ${PWD} ${ARDUINO_DIRECTORIES_USER}/libraries
# Compile all examples for all boards
BOARDS=('Seeeduino:samd:seeed_XIAO_m0' 'adafruit:samd:adafruit_trinket_m0' 'adafruit:samd:adafruit_qtpy_m0' 'adafruit:samd:adafruit_itsybitsy_m0' 'adafruit:samd:adafruit_itsybitsy_m4' 'adafruit:nrf52:feather52840' 'adafruit:nrf52:itsybitsy52840')
for board in "${BOARDS[@]}" ; do
export ARDUINO_BOARD_FQBN=${board}
# FQBN with ':' replaced by '.' -- used as a filesystem-safe directory name.
ARDUINO_BOARD_FQBN2=${ARDUINO_BOARD_FQBN//:/.}
arduino-cli cache clean
# XAC requires this VID/PID; TinyUSB stack selected via build flags.
find ${MYPROJECT_EXAMPLES} -name '*.ino' -print0 | xargs -0 -n 1 arduino-cli compile --fqbn ${board} --verbose --build-properties build.vid=0x0f0d,build.pid=0x00c1,build.flags.usbstack=-DUSE_TINYUSB
# Convert all HEX to UF2 for drag-and-drop burning on boards with UF2 boot loader
for MYSKETCH in ${MYPROJECT_EXAMPLES}/* ; do
if [ ! -d ${MYSKETCH}/build/${ARDUINO_BOARD_FQBN2} ] ; then
mkdir -p ${MYSKETCH}/build/${ARDUINO_BOARD_FQBN2}
fi
pushd ${MYSKETCH}/build/${ARDUINO_BOARD_FQBN2}
for i in *.bin ; do
if [[ -f $i ]] ; then
# M4 boards use a different flash base address for uf2conv.
if [[ ${board} == *"m4"* ]] ; then
${MYPROJECT_TOOLS}/uf2conv.py -c -b 0x4000 $i -o $i.uf2
else
${MYPROJECT_TOOLS}/uf2conv.py -c $i -o $i.uf2
fi
fi
done
# Move the UF2 into a per-board firmware/ directory for distribution.
FIRMWARE=${MYSKETCH}/firmware/${ARDUINO_BOARD_FQBN2}
if [[ ! -d ${FIRMWARE} ]] ; then
mkdir -p ${FIRMWARE}
fi
mv *.ino.bin.uf2 ${FIRMWARE}
popd
done
done >errors 2>&1
| true |
1ed9454e012e6b883368da3a44034d66a3c1b9ee
|
Shell
|
Rothamsted/bioknet-onto
|
/utils/to_lode/to_comments.sh
|
UTF-8
| 861 | 3.140625 | 3 |
[] |
no_license
|
# Converts some annotation properties in the ontology file $1
# Sends output to $2 (default is lode_bioknet.nt)
# into rdfs:comment annotations. This is necessary to render the file in LODE (http://www.essepuntato.it/lode),
# since it doesn't understand many other properties (https://github.com/essepuntato/LODE/issues/7).
# It expects JENA_HOME to be initialised, see ../*init.sh
#
# Apply defaults for the input/output arguments.
finput="$1"; : ${finput:=../../bioknet.owl}
fout="$2"; : ${fout:=lode_bioknet.nt}
tmpf=$(mktemp /tmp/tocomm_XXX.nt)
# Chain of SPARQL Update transforms; intermediate steps rewrite the temp file
# in place via 'sponge' (from moreutils -- must be installed).
"$JENA_HOME/bin/update" --data="$finput" --update=descr2comments.sparul --dump >$tmpf
"$JENA_HOME/bin/update" --data=$tmpf --update=note2comments.sparul --dump | sponge $tmpf
"$JENA_HOME/bin/update" --data=$tmpf --update=aggregate_comments.sparul --dump | sponge $tmpf
"$JENA_HOME/bin/update" --data=$tmpf --update=delete_comments.sparul --dump >"$fout"
| true |
c35e9a3c623df26361c644d8d280017e88a1dc60
|
Shell
|
intelfx/bin
|
/git/misc/git-subtree-split-file
|
UTF-8
| 1,288 | 3.890625 | 4 |
[] |
no_license
|
#!/bin/bash
# Split the history of one subtree prefix into a separate branch, rewriting
# paths so the prefixed files appear at the repository root.
# Usage: git-subtree-split-file -P <prefix> -b <branch> [rev-list args...]
. lib.sh || exit
ARGS=$(getopt -o '-P:b:' --long 'prefix:,branch:' -n "${0##*/}" -- "$@") || exit
eval set -- "$ARGS"
unset ARGS
ARG_PREFIX=
ARG_BRANCH=
ARGS=()
while (( $# )); do
	case "$1" in
	-P|--prefix)
		ARG_PREFIX="$2"
		shift 2
		;;
	-b|--branch)
		ARG_BRANCH="$2"
		shift 2
		;;
	*)
		ARGS+=( "$1" )
		shift
		;;
	esac
done
set -- "${ARGS[@]}"
[[ "$ARG_BRANCH" ]] || die "missing required option: -b/--branch"
[[ "$ARG_PREFIX" ]] || die "missing required option: -P/--prefix"
# Drop every scratch ref under refs/exported/* on exit.
cleanup() {
	git for-each-ref --format '%(refname)' 'refs/exported/*' \
		| xargs -r -n1 git update-ref -d
}
trap cleanup EXIT
if git for-each-ref --format '%(refname)' 'refs/exported/*' | grep -q .; then
	die "internal error: refs/exported/* namespace is not empty"
fi
#trace git branch -f "$ARG_BRANCH" "$(git rev-list -1 "${ARGS[@]}")"
# Re-import the history with the prefix stripped from modified paths.
git fast-export --refspec 'refs/heads/*:refs/exported/*' "${ARGS[@]}" \
	| sed -r "s|^(M) ([^ ]+) ([^ ]+) ${ARG_PREFIX}/(.+)$|\\1 \\2 \\3 \\4|" \
	| git fast-import
# BUGFIX: the original piped into 'readarray', but the final stage of a
# pipeline runs in a subshell, so EXPORTED_REFS was never set in this shell
# and the count check always saw 0 refs. Use process substitution instead.
readarray -t EXPORTED_REFS < <(git for-each-ref --format '%(refname)' 'refs/exported/*')
if (( ${#EXPORTED_REFS[@]} != 1 )); then
	die "internal error: exported ${#EXPORTED_REFS[@]} != 1 refs: $(join ", " "${EXPORTED_REFS[@]}")"
fi
git branch -f "$ARG_BRANCH" "${EXPORTED_REFS[0]}"
| true |
a1fa3c9b0ae047e5ee2c7436d33ee5a1e761e7bc
|
Shell
|
stephenlauck/chef-repo
|
/bootstrap.sh
|
UTF-8
| 1,676 | 3.4375 | 3 |
[] |
no_license
|
#!/bin/bash
export HAB_LICENSE="accept-no-persist"
if [ ! -e "/bin/hab" ]; then
curl https://raw.githubusercontent.com/habitat-sh/habitat/master/components/hab/install.sh | sudo bash
fi
if grep "^hab:" /etc/passwd > /dev/null; then
echo "Hab user exists"
else
useradd hab && true
fi
if grep "^hab:" /etc/group > /dev/null; then
echo "Hab group exists"
else
groupadd hab && true
fi
# fix for https://github.com/habitat-sh/habitat/issues/6771
hab pkg install core/hab-studio/0.83.0
pkg_origin=lauck
pkg_name=migration
# echo "Starting $pkg_origin/$pkg_name"
# latest_hart_file=$(ls -la /tmp/results/$pkg_origin-$pkg_name* | tail -n 1 | cut -d " " -f 10)
# echo "Latest hart file is $latest_hart_file"
# echo "Installing $latest_hart_file"
# hab pkg install $latest_hart_file
# echo "Determing pkg_prefix for $latest_hart_file"
# pkg_prefix=$(find /hab/pkgs/$pkg_origin/$pkg_name -maxdepth 2 -mindepth 2 | sort | tail -n 1)
# echo "Found $pkg_prefix"
echo "Installing $pkg_origin/$pkg_name"
hab pkg install $pkg_origin/$pkg_name
NODE_NAME=$(hostname)
# Create client.rb
FILE=/etc/chef/client.rb
if [ ! -f "$FILE" ]; then
/bin/echo 'log_location STDOUT' >> /etc/chef/client.rb
/bin/echo -e "chef_server_url \"https://api.chef.io/organizations/migration666\"" >> /etc/chef/client.rb
/bin/echo -e "validation_client_name \"migration666-validator\"" >> /etc/chef/client.rb
/bin/echo -e "validation_key \"/etc/chef/migration666-validator.pem\"" >> /etc/chef/client.rb
/bin/echo -e "node_name \"${NODE_NAME}\"" >> /etc/chef/client.rb
fi
echo "Running chef for $pkg_origin/$pkg_name"
cd $pkg_prefix
hab pkg exec $pkg_origin/$pkg_name chef-client
| true |
d2ad2c1aa59cd19b853e20b9c5adb62cd9ec5e27
|
Shell
|
dp6ai/llos
|
/trunk/public/images/rename.ksh
|
UTF-8
| 73 | 2.9375 | 3 |
[] |
no_license
|
# Lower-case the .JPG extension of every file starting with a lower-case letter.
for file in [a-z]*.JPG
do
	# Guard against the literal, unexpanded pattern when no files match.
	[ -e "$file" ] || continue
	echo "$file"
	mv -- "$file" "${file%%.JPG}.jpg";
done
| true |
507f317db8a0cc2a123976a9e8af19cbcc43051f
|
Shell
|
crazybrainL/zshrc
|
/zshrc
|
UTF-8
| 7,422 | 2.671875 | 3 |
[] |
no_license
|
# Path to your oh-my-zsh configuration.
#ZSH=/Users/whlin/Documents/rc/zshrc/oh-my-zsh
ZSH=$HOME/.oh-my-zsh
# Set name of the theme to load.
# Look in ~/.oh-my-zsh/themes/
# Optionally, if you set this to "random", it'll load a random theme each
# time that oh-my-zsh is loaded.
#ZSH_THEME="yen3"
ZSH_THEME="whlin"
# Example aliases
# alias zshconfig="mate ~/.zshrc"
# alias ohmyzsh="mate ~/.oh-my-zsh"
# Set to this to use case-sensitive completion
# CASE_SENSITIVE="true"
# Comment this out to disable bi-weekly auto-update checks
# DISABLE_AUTO_UPDATE="true"
# Uncomment to change how often before auto-updates occur? (in days)
# export UPDATE_ZSH_DAYS=13
# Uncomment following line if you want to disable colors in ls
# DISABLE_LS_COLORS="true"
# Uncomment following line if you want to disable autosetting terminal title.
# DISABLE_AUTO_TITLE="true"
# Uncomment following line if you want to disable command autocorrection
# DISABLE_CORRECTION="true"
# Uncomment following line if you want red dots to be displayed while waiting for completion
# COMPLETION_WAITING_DOTS="true"
# Uncomment following line if you want to disable marking untracked files under
# VCS as dirty. This makes repository status check for large repositories much,
# much faster.
# DISABLE_UNTRACKED_FILES_DIRTY="true"
# Which plugins would you like to load? (plugins can be found in ~/.oh-my-zsh/plugins/*)
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
plugins=(git osx)
source $ZSH/oh-my-zsh.sh
#source ~/.nvm/nvm.sh
# Customize to your needs...
UNAME=`uname`
if [[ $UNAME == "Darwin" ]]
then
#source $HOME/Documents/rc/zshrc/zshrc_darwin
source $HOME/.zshsetting/zshrc_darwin
elif [[ $UNAME == "Linux" ]]
then
#source $HOME/Documents/rc/zshrc/zshrc_linux
source $HOME/.zshsetting/zshrc_linux
fi
#source $HOME/Documents/rc/zshrc/func/incr*.zsh
#source $HOME/.zsh/func/incr*.zsh
export EDITOR=vim
# Alias Setting
alias grep='grep --color=auto'
alias ack='ack --color-match=green'
alias ll='ls -al'
alias tmux='tmux -2'
# Commit all tracked changes verbosely, joining every argument into the message.
function gg(){
    git commit --all --verbose --message "$*"
}
alias '..'='cd ..'
alias -g ...='../..'
alias -g ....='../../..'
alias -g .....='../../../..'
hash -d code=~/Documents/code
hash -d src=~/usr/src
hash -d course=~/Documents/course
hash -d rc=~/Documents/rc/
hash -d zshrc=~/Documents/zshrc
hash -d vimfilers=~/Documents/vimfilers
hash -d tmp=~/usr/tmp
# path alias, e.g. cd ~XXX
#hash -d WWW="/home/lighttpd/html"
# HISTORY
# number of lines kept in history
export HISTSIZE=3000
# # number of lines saved in the history after logout
export SAVEHIST=3000
# # location of history
export HISTFILE=~/.zhistory
# # append command to history file once executed
setopt INC_APPEND_HISTORY
# Disable core dumps
limit coredumpsize 0
# vi key binding
bindkey -v
bindkey '^R' history-incremental-search-backward
# mapping del
bindkey "\e[3~" delete-char
setopt AUTO_PUSHD
WORDCHARS='*?_-[]~=&;!#$%^(){}<>'
# auto-completion
setopt COMPLETE_ALIASES
setopt AUTO_LIST
setopt AUTO_MENU
#setopt MENU_COMPLETE
setopt MULTIBYTE
autoload -U compinit
compinit
# Completion caching
zstyle ':completion::complete:*' use-cache on
zstyle ':completion::complete:*' cache-path .zcache
#zstyle ':completion:*:cd:*' ignore-parents parent pwd
#Completion Options
zstyle ':completion:*:match:*' original only
zstyle ':completion::prefix-1:*' completer _complete
zstyle ':completion:predict:*' completer _complete
zstyle ':completion:incremental:*' completer _complete _correct
zstyle ':completion:*' completer _complete _prefix _correct _prefix _match _approximate
# Path Expansion
zstyle ':completion:*' expand 'yes'
zstyle ':completion:*' squeeze-shlashes 'yes'
zstyle ':completion::complete:*' '\\'
#zstyle ':completion:*:*:*:default' menu yes select #interactive
zstyle ':completion:*:*:default' force-list always
# require /etc/DIR_COLORS to display colors in the completion list
[ -f /etc/DIR_COLORS ] && eval $(dircolors -b /etc/DIR_COLORS)
export ZLSCOLORS="${LS_COLORS}"
zmodload zsh/complist
zstyle ':completion:*' list-colors ${(s.:.)LS_COLORS}
zstyle ':completion:*:*:kill:*:processes' list-colors '=(#b) #([0-9]#)*=0=01;31'
zstyle ':completion:*' completer _complete _match _approximate
zstyle ':completion:*:match:*' original only
zstyle ':completion:*:approximate:*' max-errors 1 numeric
bindkey -M menuselect '^M' .accept-line
compdef pkill=kill
compdef pkill=killall
zstyle ':completion:*:*:kill:*' menu yes select interactive
zstyle ':completion:*:kill:*' force-list always
zstyle ':completion:*:processes' command 'ps -au$USER'
# Group matches and Describe
zstyle ':completion:*:matches' group 'yes'
zstyle ':completion:*:options' description 'yes'
zstyle ':completion:*:options' auto-description '%d'
zstyle ':completion:*:descriptions' format $'\e[01;33m -- %d --\e[0m'
zstyle ':completion:*:messages' format $'\e[01;35m -- %d --\e[0m'
zstyle ':completion:*:warnings' format $'\e[01;31m -- No Matches Found --\e[0m'
function precmd {
local TERMWIDTH
(( TERMWIDTH = ${COLUMNS} - 1 ))
###
# Truncate the path if it's too long.
PR_FILLBAR=""
PR_PWDLEN=""
local promptsize=${#${(%):---(%n@%m)----}}
local pwdsize=${#${(%):-%~}}
if [[ "$promptsize + $pwdsize" -gt $TERMWIDTH ]]; then
((PR_PWDLEN=$TERMWIDTH - $promptsize))
else
PR_FILLBAR="\${(l.(($TERMWIDTH - ($promptsize + $pwdsize)))..${PR_HBAR}.)}"
fi
###
# Get APM info.
#if which ibam > /dev/null; then
#PR_APM_RESULT=`ibam --percentbattery`
#elif which apm > /dev/null; then
#PR_APM_RESULT=`apm`
#fi
}
setopt extended_glob
# Terminal Title Setting
case $TERM in
xterm*)
PR_TITLEBAR=$'%{\e]0;%(!.-=*[ROOT]*=- | .)%n@%m:%~ | ${COLUMNS}x${LINES} | %y\a%}'
;;
screen)
PR_TITLEBAR=$'%{\e_screen \005 (\005t) | %(!.-=[ROOT]=- | .)%n@%m:%~ | ${COLUMNS}x${LINES} | %y\e\\%}'
;;
*)
PR_TITLEBAR=''
;;
esac
###
# Decide whether to set a screen title
if [[ "$TERM" == "screen" ]]; then
PR_STITLE=$'%{\ekzsh\e\\%}'
else
PR_STITLE=''
fi
precmd () {print -Pn "$PR_SET_CHARSET$PR_STITLE${(e)PR_TITLEBAR}"}
## Prompt Setting
#setopt prompt_subst
#source $HOME/Documents/rc/zshrc/func/zgitinit
#source $HOME/Documents/rc/zshrc/func/prompt_wunjo_setup
#source $HOME/.zsh/func/zgitinit
#source $HOME/.zsh/func/prompt_wunjo_setup
# Key Binding
bindkey '^[[H' beginning-of-line
bindkey '^[[F' end-of-line
case $TERM in (xterm*)
bindkey '\eOH' beginning-of-line
bindkey '\eOF' end-of-line
esac
#And DEL too, as well as PGDN and insert:
bindkey '^[[3~' delete-char
bindkey '^[[6~' end-of-history
#bindkey '\e[2~' redisplay
#Now bind pgup to paste the last word of the last command,
bindkey '\e[5~' insert-last-word
set -o vi
# multi-download command: fetch a URL with lftp's segmented "pget"
# using 5 parallel connections, then quit lftp.
# $1 - URL to download
function pget()
{
    local url="$1"
    # Quote the URL so spaces/globs in it don't word-split (original
    # used an unquoted `echo $1`).
    printf '%s\n' "$url"
    lftp -e "pget -n 5 $url; exit"
}
# go-lang environment setting
if [[ -d "/usr/local/go" ]]; then
#statements
export PATH="/usr/local/go/bin:$PATH"
export GOPATH="/usr/local/go"
fi
# pyenv environment setting
if [[ -d $HOME/.pyenv ]]; then
#statements
export PATH="$HOME/.pyenv/bin:$PATH"
# it will add $HOME/.pyenv/shims to $PATH
eval "$(pyenv init -)"
# it will add $HOME/.pyenv/plugins/pyenv-virtualenv/shims to $PATH
eval "$(pyenv virtualenv-init -)"
export PYTHON_CONFIGURE_OPTS="--enable-framework"
fi
if [[ -d $HOME/.fzf ]]; then
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
fi
| true |
e40127cf7ef286f3190c0e67649071ad5d608ff1
|
Shell
|
simshanith/dotfiles
|
/bash/functions.bash
|
UTF-8
| 3,810 | 3.4375 | 3 |
[] |
no_license
|
# http://kmkeen.com/awk-music/
eightbit() {
awk 'function wl() {
rate=64000;
return (rate/160)*(0.87055^(int(rand()*10)))};
BEGIN {
srand();
wla=wl();
while(1) {
wlb=wla;
wla=wl();
if (wla==wlb)
{wla*=2;};
d=(rand()*10+5)*rate/4;
a=b=0; c=128;
ca=40/wla; cb=20/wlb;
de=rate/10; di=0;
for (i=0;i<d;i++) {
a++; b++; di++; c+=ca+cb;
if (a>wla)
{a=0; ca*=-1};
if (b>wlb)
{b=0; cb*=-1};
if (di>de)
{di=0; ca*=0.9; cb*=0.9};
printf("%c",c)};
c=int(c);
while(c!=128) {
c<128?c++:c--;
printf("%c",c)};};}' |
sox -t raw -r 64k -c 1 -e unsigned -b 8 - -d
}
# pipe markdown to screen as pretty-printed, syntax highlighted HTML
markdown() {
pd mode:"beautify" readmethod:"screen" source:"`md $1`" |
pygmentize -l html
}
# html css js csv beautification via prettydiff
beautify() {
local SOURCE=`realpath "$1"`
[ -r "$SOURCE" ] && pd mode:'beautify' readmethod:'filescreen' source:"$SOURCE"
}
# Change directory to the current Finder directory
# http://apple.stackexchange.com/a/96810/52388
cdf() {
target=`osascript -e 'tell application "Finder" to if (count of Finder windows) > 0 then get POSIX path of (target of front Finder window as text)'`
if [ "$target" != "" ]; then
cd "$target"; pwd
else
echo 'No Finder window found' >&2
fi
}
# Create a new directory (and any missing parents) and enter it.
# Fix: the original `cd "$@"` errors out in bash when more than one
# directory is given; create them all, then cd into the last one.
function mkd() {
	mkdir -p "$@" && cd "${@: -1}"
}
### iTerm2 utils
if [[ "$TERM_PROGRAM" == 'iTerm.app' ]]
then
nametab () {
if [ -z "$1" ]; then
echo "Usage:"
echo "\`nametab workspace\`"
echo "Sets the tab's namespace in iTerm using escape sequence."
else
echo -e $'\033];'${*}'\007' ; return ;
fi
}
# send growl messages
# http://aming-blog.blogspot.com/2011/01/growl-notification-from-iterm-2.html
# requires growl http://growl.info/
growl() { echo -e $'\e]9;'${*}'\007' ; return ; }
fi
# http://xkcd.com/530/
hello () {
osascript -e 'say "Hello '$1'"';
}
# git log with per-commit cmd-clickable GitHub URLs (iTerm)
function gf() {
local remote="$(git remote -v | awk '/^origin.*\(push\)$/ {print $2}')"
[[ "$remote" ]] || return
local user_repo="$(echo "$remote" | perl -pe 's/.*://;s/\.git$//')"
git log $* --name-status --color | awk "$(cat <<AWK
/^.*commit [0-9a-f]{40}/ {sha=substr(\$2,1,7)}
/^[MA]\t/ {printf "%s\thttps://github.com/$user_repo/blob/%s/%s\n", \$1, sha, \$2; next}
/.*/ {print \$0}
AWK
)" | less -F
}
# Start an HTTP server from a directory, optionally specifying the port
function server() {
local port="${1:-8000}"
open "http://localhost:${port}/"
# Set the default Content-Type to `text/plain` instead of `application/octet-stream`
# And serve everything as UTF-8 (although not technically correct, this doesn’t break anything for binary files)
python -c $'import SimpleHTTPServer;\nmap = SimpleHTTPServer.SimpleHTTPRequestHandler.extensions_map;\nmap[""] = "text/plain";\nfor key, value in map.items():\n\tmap[key] = value + ";charset=UTF-8";\nSimpleHTTPServer.test();' "$port"
}
# open man pages in Preview.app
if [ -d "/Applications/Preview.app" ]
then
pman () {
man -t "$@" |
( which ps2pdf &> /dev/null && ps2pdf - - || cat) |
open -f -a /Applications/Preview.app
}
fi
λ () {
fortune | cowsay -f tux -W 50 | lolcat -p 2
}
[[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm" # Load RVM into a shell session *as a function*
| true |
950705f10f44c6f63edda3187f7003e48cb623f0
|
Shell
|
uyennguyen87/begining_linux_programming
|
/chapter_2_shell_programming/for_loop_file_name.sh
|
UTF-8
| 99 | 2.890625 | 3 |
[] |
no_license
|
#!/bin/sh
# Print the name of every *.sh file in the current directory in upper case.
# Fix: iterate the glob directly instead of parsing `ls` output (which
# breaks on whitespace), and quote the filename.
for filename in *.sh
do
	# When no *.sh files exist the pattern stays literal; skip it.
	[ -e "$filename" ] || continue
	printf '%s\n' "$filename" | awk '{print toupper($0)}'
done
exit 0
| true |
18e34fb688ae9d222e325a62c2540c3538e1b8d3
|
Shell
|
kdave/xfstests
|
/tests/xfs/449
|
UTF-8
| 1,677 | 2.9375 | 3 |
[] |
no_license
|
#! /bin/bash
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2018 Oracle, Inc.
#
# FS QA Test No. 449
#
# Make sure pretty printed XFS geometry is the same across all programs.
#
. ./common/preamble
_begin_fstest auto quick
# Import common functions.
. ./common/filter
# real QA test starts here
_supported_fs xfs
_require_scratch_nocheck
# Geometry printing wasn't unified until xfs_spaceman grew an 'info'
# command, so skip this test if there is no spaceman or it doesn't
# know what 'info' is.
_require_xfs_spaceman_command "info"
_require_command "$XFS_GROWFS_PROG" xfs_growfs
_scratch_mkfs | sed -e '/Discarding/d' -e '/deprecated/d' > $tmp.mkfs
echo MKFS >> $seqres.full
cat $tmp.mkfs >> $seqres.full
_scratch_xfs_db -c "info" > $tmp.dbinfo
echo DB >> $seqres.full
cat $tmp.dbinfo >> $seqres.full
# xfs_db doesn't take a rtdev argument, so it reports "realtime=external".
# mkfs does, so make a quick substitution
diff -u <(cat $tmp.mkfs | sed -e 's/realtime =\/.*extsz=/realtime =external extsz=/g') $tmp.dbinfo
_scratch_mount
$XFS_SPACEMAN_PROG -c "info" $SCRATCH_MNT > $tmp.spaceman
echo SPACEMAN >> $seqres.full
cat $tmp.spaceman >> $seqres.full
diff -u $tmp.mkfs $tmp.spaceman
$XFS_GROWFS_PROG -n $SCRATCH_MNT > $tmp.growfs
echo GROWFS >> $seqres.full
cat $tmp.growfs >> $seqres.full
diff -u $tmp.mkfs $tmp.growfs
$XFS_INFO_PROG $SCRATCH_MNT > $tmp.info.mnt
echo INFO_MNT >> $seqres.full
cat $tmp.info.mnt >> $seqres.full
diff -u $tmp.mkfs $tmp.info.mnt
$XFS_INFO_PROG $SCRATCH_DEV > $tmp.info.dev
echo INFO_DEV >> $seqres.full
cat $tmp.info.dev >> $seqres.full
diff -u $tmp.mkfs $tmp.info.dev
echo "Silence is golden."
status=0
exit 0
| true |
de0e5cb00533a98b00bae3db220d37781a526e8c
|
Shell
|
poon1412/GotoolsGenCRUD
|
/build.sh
|
UTF-8
| 187 | 2.625 | 3 |
[] |
no_license
|
#!/bin/sh
# Prepare a project-local GOPATH and fetch Go dependencies with glide.
# $1 - package path relative to $GOPATH/src (required)
export GOPATH=$PWD
export GOBIN=$PWD/bin
# Fail early instead of cd-ing into "$GOPATH/src/" when no argument is given.
[ -n "$1" ] || { echo "usage: $0 <package-path>" >&2; exit 1; }
# Quote the path and abort if it does not exist (previously glide would
# run in the wrong directory after a failed cd).
cd "$GOPATH/src/$1" || { echo "no such package: $1" >&2; exit 1; }
if [ ! -f "$GOPATH/src/$1/glide.yaml" ]; then
	# First run: create glide.yaml, then install pinned dependencies.
	glide create --non-interactive && glide i -v;
else
	glide up -v;
fi
| true |
543ad945e03633eb0726660e2ee44f27221a48fa
|
Shell
|
lidongshengluck/dotfiles
|
/install.sh
|
UTF-8
| 3,852 | 3.6875 | 4 |
[] |
no_license
|
#!/usr/bin/env bash
curr_path=`pwd`
dotfiles=$HOME/.dotfiles
# vim 包管理插件
[ -z "$VUNDLE_URI" ] && VUNDLE_URI="https://github.com/VundleVim/Vundle.vim.git"
# oh_my_zsh zsh的增强配置
[ -z "$OH_MY_ZSH" ] && OH_MY_ZSH='https://github.com/robbyrussell/oh-my-zsh.git'
# zsh 的自动跳转插件
[ -z "$AUTO_JUMP" ] && AUTO_JUMP='https://github.com/joelthelion/autojump.git'
# zsh 自动补全
[ -z "$ZSH_AUTO" ] && ZSH_AUTO='https://github.com/zsh-users/zsh-autosuggestions.git'
# Print a message (interpreting backslash escapes) to stderr.
msg() {
    printf '%b\n' "$1" 1>&2
}
# 成功消息提醒
success() {
msg "\33[32m[✔]\33[0m ${1}${2}"
}
# 检查运行环境
source "${curr_path}/envron_check.sh"
# 初始化目录
initialization() {
rm -rf $HOME/.vim
rm -rf $HOME/.vimrc
rm -rf $HOME/.oh-my-zsh
rm -rf $HOME/.zsh
rm -rf $HOME/.zshrc
rm -rf $HOME/.zsh_defined.sh
rm -rf $HOME/.tmux.conf
rm -rf $dotfiles
#success "创建目录"
mkdir -p "$dotfiles/vim_conf"
mkdir -p "$dotfiles/tmux_conf"
mkdir -p "$dotfiles/zsh_conf"
return 1
}
# 复制文件
copy_files() {
cp $curr_path/vim $dotfiles/vim_conf/vim -rf
cp $curr_path/vimrc $dotfiles/vim_conf/vimrc -rf
cp $curr_path/tmux.conf $dotfiles/tmux_conf/tmux.conf -rf
cp $curr_path/tmuxcolors-dark.conf $dotfiles/tmux_conf/tmuxcolors-dark.conf -rf
cp $curr_path/zshrc $dotfiles/zsh_conf/zshrc -rf
cp $curr_path/start.sh $dotfiles/zsh_conf/start.sh -rf
cp $curr_path/zsh_defined.sh $dotfiles/zsh_conf/zsh_defined.sh -rf
cp $curr_path/zsh_alias.sh $dotfiles/zsh_conf/zsh_alias.sh -rf
}
# 安装 zsh 配置
install_zsh() {
# oh_my_zsh
git clone $OH_MY_ZSH "${dotfiles}/zsh_conf/oh-my-zsh"
# autojump.zsh
git clone $AUTO_JUMP "${dotfiles}/zsh_conf/autojump"
# zsh-autosuggestions zsh的shell自动补全插件安装
git clone $ZSH_AUTO "${dotfiles}/zsh_conf/zsh/zsh-autosuggestions"
}
# 创建链接文件
create_symlinks() {
local source_path="$1"
local target_path="$2"
ln -s "$source_path/vim_conf/vim" "$target_path/.vim"
ln -s "$source_path/vim_conf/vimrc" "$target_path/.vimrc"
ln -s "$source_path/zsh_conf/zsh" "$target_path/.zsh"
ln -s "$source_path/zsh_conf/oh-my-zsh" "$target_path/.oh-my-zsh"
ln -s "$source_path/zsh_conf/zshrc" "$target_path/.zshrc"
ln -s "$source_path/zsh_conf/start.sh" "$target_path/start.sh"
ln -s "$source_path/tmux_conf/tmux.conf" "$target_path/.tmux.conf"
ln -s "$source_path/tmux_conf/tmuxcolors-dark.conf" "$target_path/.tmuxcolors-dark.conf"
success "创建链接成功"
}
# 安装vundle包管理软件
install_vundle() {
local vundle_uri="$1"
local vundle_path=$HOME/.vim/bundle/Vundle.vim
if [ ! -e "$vundle_path" ]; then
msg "安装vundle插件"
git clone $vundle_uri $vundle_path
success "vundle 插件克隆完成."
fi
}
# 安装各种插件
install_vundle_plugins() {
local system_shell="$SHELL"
export SHELL='/bin/sh'
vim \
"+set nomore" \
"+BundleInstall!" \
"+BundleClean" \
"+qall"
export SHELL="$system_shell"
success "安装插件"
}
# 创建链接文件
initialization
res=$?
if [ $res -eq 1 ]; then
success '初始化成功';
else
msg '初始化失败';
exit
fi
# 复制常用配置文件
copy_files
# 安装 zsh 插件
install_zsh
# 安装 autojump 插件
./install_autojump.sh
# 创建链接
create_symlinks "$dotfiles" \
"$HOME"
# 安装vim插件管理包
install_vundle "$VUNDLE_URI" \
"$HOME"
# 安装vim插件
install_vundle_plugins
# 修改默认环境为 zsh, 编译 vimproc
cd ~/.vim/bundle/vimproc
make
chsh -s /bin/zsh
| true |
313586323f6b99d72a7dbce91e27ade74f4ed895
|
Shell
|
atweiden/voidpkgs
|
/srcpkgs/libnfc/template
|
UTF-8
| 1,055 | 2.625 | 3 |
[
"Unlicense",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] |
permissive
|
maintainer="nox"
pkgname="libnfc"
version=1.8.0
revision=1
short_desc="Low level NFC SDK and Programmers API"
makedepends+=" libusb-devel"
makedepends+=" libusb-compat-devel"
hostmakedepends="pkg-config"
license="GPL-3"
homepage="http://nfc-tools.org/"
distfiles="https://github.com/nfc-tools/libnfc/archive/$pkgname-$version.tar.gz"
checksum="0ab7d9b41442e7edc2af7c54630396edc73ce51128aa28a5c6e4135dc5595495"
build_style="cmake"
CFLAGS="-D_GNU_SOURCE"
pre_configure() {
sed \
-i \
-e "s|MODE=\"0664\", GROUP=\"plugdev\"|TAG+=\"uaccess\"|g" \
contrib/udev/93-pn53x.rules
}
post_install() {
vinstall contrib/udev/93-pn53x.rules 644 usr/lib/udev/rules.d/
vinstall contrib/linux/blacklist-libnfc.conf 644 usr/lib/modprobe.d/
}
libnfc-devel_package() {
unset depends
depends+=" $makedepends"
depends+=" libnfc>=${version}_$revision"
short_desc+=" - development files"
pkg_install() {
vmove usr/include
vmove usr/lib/pkgconfig
vmove "usr/lib/*.so"
}
}
# vim: set filetype=sh foldmethod=marker foldlevel=0 nowrap:
| true |
94423a4a262e0256d802ab46a6d5843d53805ce2
|
Shell
|
iorch/marcogeo-docker
|
/initdb-postgis.sh
|
UTF-8
| 2,032 | 3.15625 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/sh
set -e
# Perform all actions as user 'postgres'
export PGUSER=postgres
export PGDATA=$PGDATA
# Create the 'mg' template db
psql <<EOSQL
CREATE DATABASE mg;
UPDATE pg_database SET datistemplate = TRUE WHERE datname = 'mg';
EOSQL
# Populate 'template_postgis'
cd /usr/share/postgresql/$PG_MAJOR/contrib/postgis-$POSTGIS_MAJOR
psql --dbname mg < postgis.sql
psql --dbname mg < topology.sql
psql --dbname mg < spatial_ref_sys.sql
psql --dbname mg -c "CREATE EXTENSION hstore;"
psql -d mg -c "INSERT into spatial_ref_sys
(srid, auth_name, auth_srid, proj4text, srtext)
values
( 96700,
'sr-org',
6700,
'+proj=lcc +lat_1=17.5 +lat_2=29.5 +lat_0=12 +lon_0=-102 +x_0=2500000 +y_0=0 +a=6378137 +b=6378136.027241431 +units=m +no_defs ',
'PROJCS[\"unnamed\",
GEOGCS[\"WGS 84\",
DATUM[\"unknown\",
SPHEROID[\"WGS84\",6378137,6556752.3141]],
PRIMEM[\"Greenwich\",0],
UNIT[\"degree\",0.0174532925199433]],
PROJECTION[\"Lambert_Conformal_Conic_2SP\"],
PARAMETER[\"standard_parallel_1\",17.5],
PARAMETER[\"standard_parallel_2\",29.5],
PARAMETER[\"latitude_of_origin\",12],
PARAMETER[\"central_meridian\",-102],
PARAMETER[\"false_easting\",2500000],
PARAMETER[\"false_northing\",0]
]'
);"
cd /
mkdir scripts
for file in `ls -1 /data/*.shp`; do
table_name=''
file=`echo ${file}| sed 's/\/data\///'`
echo $file
case ${file:0:4} in
mge2) table_name='entidades'
;;
mglr) table_name='loc_rurales'
;;
mglu) table_name='loc_urbanas'
;;
mgm2) table_name='municipios'
;;
esac
script=`echo ${file}|sed 's/data//'`
shp2pgsql -I -WLATIN1 -s 96700 -g geom -p /data/${file} ${table_name} \
> scripts/${script}'_c.sql'
shp2pgsql -I -WLATIN1 -s 96700 -g geom -a /data/${file} ${table_name} \
> scripts/${script}'.sql'
psql -d mg -f scripts/${script}'_c.sql'
psql -d mg -f scripts/${script}'.sql'
done
| true |
1dccc801ba942d5b8fe3252aa5a7a6f850c0f8ae
|
Shell
|
open-rmf/rmf_fullstack_installer
|
/setup/scripts/host/tls_setup.bash
|
UTF-8
| 945 | 3.53125 | 4 |
[] |
no_license
|
#!/bin/bash
# Provision TLS for nginx on this host with certbot (snap-packaged).
# Requires root, internet access, and a DNS name resolving to this machine.
set -e

[ $EUID -eq 0 ] || { echo "Please run script as root" && exit 1; }

apt update
dpkg-query -l nginx &> /dev/null || (echo "Installing nginx" && apt install nginx -y)
command -v snap &> /dev/null || { echo "You will need to install snap." ; exit 1; }

snap install core; snap refresh core
# The apt-packaged certbot conflicts with the snap one; remove it if present.
dpkg-query -l certbot &> /dev/null && apt remove certbot
snap install --classic certbot
ln -s /snap/bin/certbot /usr/bin/certbot || true

help_textbox=$(mktemp)
cat << END > "$help_textbox"
Your machine needs to be connected to the internet and have a registered domain name.
Check that you can resolve the ip address of your domain name with ping.
You will require the following information:
Domain Name
Email for Association to TLS certificate
END

whiptail --textbox "$help_textbox" --title "TLS Setup" $LINES $COLUMNS
certbot --nginx
# mkdir -p so a re-run does not abort under `set -e` when the dir exists.
mkdir -p /etc/nginx/deploy
# Insert the include directive only once so re-runs stay idempotent.
# NOTE(review): anchoring on line 123 of the default site config is fragile
# across nginx versions — TODO: anchor on a config pattern instead.
grep -qF 'include /etc/nginx/deploy/*;' /etc/nginx/sites-available/default \
  || sed -i "123 a include /etc/nginx/deploy/*;" /etc/nginx/sites-available/default
| true |
ae5f941932c6573e89393e37a8226a737d506105
|
Shell
|
satriawandicky/pihole
|
/undoAll.sh
|
UTF-8
| 2,380 | 2.921875 | 3 |
[] |
no_license
|
TICK="[\e[32m ✔ \e[0m]"
PIHOLE_LOCATION="/etc/pihole"
GRAVITY_UNDO_BLACKLIST="pihole -b -d"
GRAVITY_UNDO_REGEX="pihole --regex -d"
GRAVITY_UNDO_WILD="pihole --wild -d"
GRAVITY_UNDO_WHITELIST="pihole -w -d"
GRAVITY_UNDO_WHITE_REGEX="pihole --white-regex -d"
GRAVITY_UNDO_WHITE_WILD="pihole --white-wild -d"
#script wildcard -- untuk mencegah false positif
echo " \e[1m Script ini akan menghapus semua domain dari repository setiap \e[0m"
echo "\n"
echo " \e[1m ..... \e[0m"
sleep 1
echo "\n"
if [ "$(id -u)" != "0" ] ; then
echo "script ini membutuhkan root access... akses root diperlukan telelbih dahulu!"
exit 2
fi
echo " ${TICK} \e[32m Undo wildcard, whitelist, blacklist dari local list... \e[0m"
sleep 1
#undo blacklist
echo " [...] \e[32m Pi-hole gravity memperbarui list....harap tunggu \e[0m"
${GRAVITY_UNDO_BLACKLIST} $(cat /etc/pihole/blacklist.txt | xargs) > /dev/null
echo " ${TICK} \e[32m Selesai undo blacklist...... \e[0m"
sleep 1
#undo regex blacklist
echo " [...] \e[32m Pi-hole gravity memperbarui txt....harap tunggu \e[0m"
${GRAVITY_UNDO_REGEX} $(cat /etc/pihole/regex.txt | xargs) > /dev/null
echo " ${TICK} \e[32m Selesai undo regex blacklist... \e[0m"
sleep 1
#undo wildcard blacklist
echo " [...] \e[32m Pi-hole gravity memperbarui list....harap tunggu \e[0m"
${GRAVITY_UNDO_WILD} $(cat /etc/pihole/wildcard.txt | xargs) > /dev/null
echo " ${TICK} \e[32m Selesai undo wildcard blacklist... \e[0m"
sleep 1
# undo whitelist
echo " [...] \e[32m Pi-hole gravity memperbarui list....harap tunggu \e[0m"
${GRAVITY_UNDO_WHITELIST} $(cat /etc/pihole/whitelist.txt | xargs) > /dev/null
echo " ${TICK} \e[32m Selesai undo whitelist...... \e[0m"
sleep 1
# undo regex whitelist
echo " [...] \e[32m Pi-hole gravity memperbarui list....harap tunggu \e[0m"
${GRAVITY_UNDO_WHITE_REGEX} $(cat /etc/pihole/white-regex.txt | xargs) > /dev/null
echo " ${TICK} \e[32m Selesai undo regex whitelist...... \e[0m"
sleep 1
# undo wildcard whitelist
echo " [...] \e[32m Pi-hole gravity memperbarui list....harap tunggu \e[0m"
${GRAVITY_UNDO_WHITE_WILD} $(cat /etc/pihole/white-wild.txt | xargs) > /dev/null
echo " ${TICK} \e[32m Selesai undo wildcard whitelist...... \e[0m"
sudo pihole -g
sudo pihole restartdns
echo " ${TICK} \e[32m Pi-hole's gravity berhasil di update \e[0m"
echo " \e[1m salam @satriawandicky \e[0m"
echo " \e[1m Happy AdBlocking :)\e[0m"
echo "\n\n"
| true |
3eedf5c92fd56b2d84d4799c5f35f14bb3e1282a
|
Shell
|
manwar/p5-aws-lambda
|
/author/list-perl-runtime.sh
|
UTF-8
| 1,280 | 3.265625 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Emit POD listing the per-region layer ARNs for every packaged Perl
# runtime layer and Paws layer, read from the CloudFormation stacks.
ROOT=$(cd "$(dirname "$0")/../" && pwd)
"$ROOT/author/validate-account.sh" || exit 2
set -ue

# list_layers ZIP_SUFFIX STACK_SUFFIX
# For each dist zip matching perl-*-<ZIP_SUFFIX>.zip (newest Perl first),
# print a POD "=item" per region with the ARN reported by stack
# "lambda-<perl-version>-<STACK_SUFFIX>".
# (The runtime and paws loops were verbatim duplicates; deduplicated here.)
list_layers() {
    local zip_suffix=$1 stack_suffix=$2
    local ZIP NAME PERLVERSION STACK REGION ARN
    for ZIP in $(find "$ROOT"/.perl-layer/dist -name "perl-*-${zip_suffix}.zip" | sort -r); do
        NAME=$(basename "$ZIP" .zip)
        # e.g. perl-5-32-runtime -> "5.32"
        PERLVERSION=$(echo "$NAME" | cut -d- -f2,3 | sed -e 's/-/./')
        STACK="${PERLVERSION//./-}"
        echo "=item Perl $PERLVERSION"
        echo
        echo "=over"
        echo
        while read -r REGION; do
            ARN=$(aws --region "$REGION" cloudformation describe-stacks \
                --stack-name "lambda-$STACK-$stack_suffix" | jq -r .Stacks[0].Outputs[0].OutputValue)
            echo "=item C<$ARN>"
            echo
        done < "$ROOT/author/regions.txt"
        echo "=back"
        echo
    done
}

list_layers runtime runtime
list_layers paws paws
| true |
68d44fce406b679f2410aedaafcde7022d72115e
|
Shell
|
puremourning/VMs
|
/common/nvm_manual
|
UTF-8
| 239 | 2.6875 | 3 |
[] |
no_license
|
# vim: ft=sh
# Manually install nvm v0.33.11 and make Node.js 11 the default version.
# NOTE(review): pipes a remote installer straight into sh — verify/pin the
# source before running on a new machine.
curl https://raw.githubusercontent.com/creationix/nvm/v0.33.11/install.sh | sh
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && . "$NVM_DIR/nvm.sh" # This loads nvm
# Install Node 11, mark it as the default for new shells, and activate it now.
nvm install v11
nvm alias default v11
nvm use v11
| true |
8aa44cdfa3c92fb0d7e6733a56284fe331fbb662
|
Shell
|
petronny/aur3-mirror
|
/cheflex/PKGBUILD
|
UTF-8
| 665 | 2.84375 | 3 |
[] |
no_license
|
# Maintainer: Ali H. Caliskan <ahc (at) selflex.org>
pkgname=cheflex
pkgver=1.0.4
pkgrel=1
pkgdesc="A simple yet flexible package manager."
arch=('i686' 'x86_64')
url='http://www.selflex.org/cheflex'
license=('GPL-3')
depends=('aria2' 'bash' 'fakeroot')
install=cheflex.install
source=("https://github.com/selflex/${pkgname}/archive/${pkgname}-${pkgver}.tar.gz")
md5sums=('1cc8878ec4b0c3a19de0d1ec7c1c0163')
# Install the cheflex script, its config file, and its runtime state
# directories into the package root ($pkgdir). Quotes all paths so the
# build does not break when srcdir/pkgdir contain spaces.
package() {
  cd "${srcdir}/${pkgname}-${pkgname}-${pkgver}"
  install -D -m755 cheflex.sh "${pkgdir}/usr/bin/cheflex"
  install -D -m644 cheflex.rc "${pkgdir}/etc/cheflex"
  mkdir -p "${pkgdir}"/var/lib/cheflex/{grp,pkg,lst}
  chmod 775 "${pkgdir}"/var/lib/cheflex/{grp,pkg}
}
| true |
1bb1e0e6a1b1c6d0b8bd840f29b4e1772c2cc4ed
|
Shell
|
openshift/release
|
/ci-operator/step-registry/openshift-qe/move-pods-infra/openshift-qe-move-pods-infra-commands.sh
|
UTF-8
| 24,561 | 3.875 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -o nounset
set -o errexit
set -o pipefail
# Verify that $OPENSHIFT_PROMETHEUS_STORAGE_CLASS names an existing
# StorageClass; if it does not, fall back to the cluster's default
# StorageClass for both Prometheus and Alertmanager.
# Globals: reads/exports OPENSHIFT_PROMETHEUS_STORAGE_CLASS and
#          OPENSHIFT_ALERTMANAGER_STORAGE_CLASS.
function set_storage_class() {
  local s_class s_class_annotations default_status
  storage_class_found=false
  default_storage_class=""
  # Walk every StorageClass: remember the default one, and note whether
  # the requested class exists at all.
  for s_class in $(oc get storageclass -A --no-headers | awk '{print $1}'); do
    if [[ "$s_class" != "${OPENSHIFT_PROMETHEUS_STORAGE_CLASS}" ]]; then
      s_class_annotations=$(oc get storageclass "$s_class" -o jsonpath='{.metadata.annotations}')
      default_status=$(echo "$s_class_annotations" | jq '."storageclass.kubernetes.io/is-default-class"')
      # jq prints the annotation value with quotes, hence '"true"'.
      if [[ "$default_status" == '"true"' ]]; then
        default_storage_class=$s_class
      fi
    else
      storage_class_found=true
    fi
  done
  if [[ $storage_class_found == false ]]; then
    echo "setting new storage classes to $default_storage_class"
    export OPENSHIFT_PROMETHEUS_STORAGE_CLASS=$default_storage_class
    export OPENSHIFT_ALERTMANAGER_STORAGE_CLASS=$default_storage_class
  fi
}
}
# Block until the in-cluster Prometheus answers queries successfully.
# Polls the `up` query through the prometheus-k8s route every 5s and
# gives up (exit 1) after 10 retries.
function wait_for_prometheus_status() {
  local token URL max_retries retry_times prom_status
  token=$(oc create token -n openshift-monitoring prometheus-k8s --duration=6h)
  URL=https://$(oc get route -n openshift-monitoring prometheus-k8s -o jsonpath="{.spec.host}")
  sleep 30
  max_retries=10
  retry_times=1
  prom_status="not_started"
  echo prom_status is $prom_status
  while [[ "$prom_status" != "success" ]]; do
    prom_status=$(curl -s -g -k -X GET -H "Authorization: Bearer $token" -H 'Accept: application/json' -H 'Content-Type: application/json' "$URL/api/v1/query?query=up" | jq -r '.status')
    echo -e "Prometheus status not ready yet, retrying $retry_times in 5s..."
    sleep 5
    if [[ $retry_times -gt $max_retries ]];then
      # BUG FIX: this message was a bare string (executed as a command)
      # instead of being echoed.
      echo "Out of max retry times, the prometheus still not ready, please check "
      exit 1
    fi
    retry_times=$(( $retry_times + 1 ))
  done
  if [[ "$prom_status" == "success" ]];then
    echo "######################################################################################"
    echo "#                        The prometheus is ready now!                                #"
    echo "######################################################################################"
  fi
}
# Wait until every statefulset in openshift-monitoring has all wanted
# replicas available AND running on infra-labelled nodes; polls every 30s,
# up to $attempts times per statefulset, exiting 1 with diagnostics on timeout.
function check_monitoring_statefulset_status()
{
  attempts=20
  # Build a grep alternation pattern ("nodeA|nodeB|...") of infra node names.
  infra_nodes=$(oc get nodes -l 'node-role.kubernetes.io/infra=' --no-headers | awk '{print $1}' | tr '\n' '|')
  infra_nodes=${infra_nodes:0:-1}  # drop the trailing '|'
  echo -e "\nQuery infra_nodes in check_monitoring_statefulset_status:\n[ $infra_nodes ]"
  ## need to get number of runnig pods in statefulsets
  statefulset_list=$(oc get statefulsets --no-headers -n openshift-monitoring | awk '{print $1}');
  for statefulset in $statefulset_list; do
    echo "statefulset in openshift-monitoring is $statefulset"
    retries=0
    # Wanted vs. currently-available replicas, plus how many Running pods
    # of this statefulset are scheduled on infra nodes.
    ready_replicas=$(oc get statefulsets $statefulset -n openshift-monitoring -ojsonpath='{.status.availableReplicas}')
    wanted_replicas=$(oc get statefulsets $statefulset -n openshift-monitoring -ojsonpath='{.spec.replicas}')
    infra_pods=$(oc get pods -n openshift-monitoring --no-headers -o wide | grep -E "$infra_nodes" | grep Running | grep "$statefulset" | wc -l | xargs)
    echo
    echo "-------------------------------------------------------------------------------------------"
    echo "current replicas in $statefulset: wanted--$wanted_replicas, current ready--$ready_replicas!"
    echo "current replicas in $statefulset: wanted--$wanted_replicas, current infra running--$infra_pods!"
    # Poll until both conditions hold or we exceed $attempts.
    while [[ $ready_replicas != "$wanted_replicas" || $infra_pods != "$wanted_replicas" ]]; do
      sleep 30
      ((retries += 1))
      ready_replicas=$(oc get statefulsets $statefulset -n openshift-monitoring -o jsonpath='{.status.availableReplicas}')
      echo "retries printing: $retries"
      infra_pods=$(oc get pods -n openshift-monitoring --no-headers -o wide | grep -E "$infra_nodes" | grep Running| grep "$statefulset" | wc -l |xargs )
      echo
      echo "-------------------------------------------------------------------------------------------"
      echo "current replicas in $statefulset: wanted--$wanted_replicas, current ready--$ready_replicas!"
      echo "current replicas in $statefulset: wanted--$wanted_replicas, current infra running--$infra_pods!"
      if [[ ${retries} -gt ${attempts} ]]; then
        # Timed out: dump the statefulset and every non-Running pod, then fail.
        echo "-------------------------------------------------------------------------------------------"
        oc describe statefulsets $statefulset -n openshift-monitoring
        for pod in $(oc get pods -n openshift-monitoring --no-headers | grep -v Running | awk '{print $1}'); do
          oc describe pod $pod -n openshift-monitoring
        done
        echo "error: monitoring statefulsets/pods didn't become Running in time, failing"
        exit 1
      fi
    done
    echo
  done
  # NOTE(review): $retries here holds the value left over from the *last*
  # statefulset's loop; if statefulset_list were empty it would be unset —
  # confirm whether that case can occur.
  if [[ ${retries} -lt ${attempts} ]]; then
    echo "All statefulset is running in openshift-monitoring as expected"
    echo "-------------------------------------------------------------------------------------------"
    monitoring_pods=$(oc get pods -n openshift-monitoring --no-headers -o wide | grep -E "$infra_nodes"| grep -E "`echo $statefulset_list|tr ' ' '|'`")
    echo -e "$monitoring_pods\n"
  fi
}
# Pin the default IngressController to infra nodes and wait (up to ~10 min,
# 60 attempts x 10s) until every router pod is scheduled on one of them.
# Side effect: populates the global INFRA_NODE_NAMES array, which
# move_registry() below reuses without re-querying.
function move_routers_ingress(){
    echo "===Moving routers ingress pods to infra nodes==="
    oc patch -n openshift-ingress-operator ingresscontrollers.operator.openshift.io default -p '{"spec": {"nodePlacement": {"nodeSelector": {"matchLabels": {"node-role.kubernetes.io/infra": ""}}}}}' --type merge
    oc rollout status deployment router-default -n openshift-ingress
    # Collect infra node names
    mapfile -t INFRA_NODE_NAMES < <(echo "$(oc get nodes -l node-role.kubernetes.io/infra -o name)" | sed 's;node\/;;g')
    INGRESS_PODS_MOVED="false"
    for i in $(seq 0 60); do
        echo "Checking ingress pods, attempt ${i}"
        mapfile -t INGRESS_NODES < <(oc get pods -n openshift-ingress -o jsonpath='{.items[*].spec.nodeName}')
        # Symmetric difference between the pods' nodes and the infra nodes:
        # `uniq -u` keeps only names that appear once, so an empty result (or
        # infra-only leftovers, checked below) means all pods are on infra.
        TOTAL_NODEPOOL=$(echo "`echo "${INGRESS_NODES[@]}"| tr ' ' '\n' |sort|uniq`" "${INFRA_NODE_NAMES[@]}" | tr ' ' '\n' | sort |uniq -u)
        echo
        echo "Move the pod that running out of infra node into infra node"
        echo "---------------------------------------------------------------------------------"
        echo -e "Move:\n POD IP:[" "${INGRESS_NODES[@]}" "]"
        echo -e "To: \nInfra Node IP[" "${INFRA_NODE_NAMES[@]}" "]"
        echo "---------------------------------------------------------------------------------"
        echo
        echo
        echo "---------------------------------------------------------------------------------"
        echo -e "Total Worker/Infra in Nodepool: [ $TOTAL_NODEPOOL ]"
        echo "---------------------------------------------------------------------------------"
        echo
        # Success when the difference is empty, or is small (<3 names) and
        # contains no worker node — i.e. only spare infra nodes are left over.
        if [[ -z ${TOTAL_NODEPOOL} || ( $(echo $TOTAL_NODEPOOL |tr ' ' '\n'|wc -l) -lt 3 && $TOTAL_NODEPOOL != *worker* ) ]]; then
            INGRESS_PODS_MOVED="true"
            echo "Ingress pods moved to infra nodes"
            echo "---------------------------------------------------------------------------------"
            oc get po -o wide -n openshift-ingress |grep router-default
            echo "---------------------------------------------------------------------------------"
            break
        else
            sleep 10
        fi
    done
    if [[ "${INGRESS_PODS_MOVED}" == "false" ]]; then
        echo "Ingress pods didn't move to infra nodes"
        echo "---------------------------------------------------------------------------------"
        oc get pods -n openshift-ingress -owide
        exit 1
    fi
    echo
}
# Pin the internal image registry to infra nodes (node selector + tolerations
# for the reserved infra taint) and wait until its pods are rescheduled.
# NOTE(review): relies on the global INFRA_NODE_NAMES populated by
# move_routers_ingress(); if registry moving is enabled while ingress moving
# is disabled, the array is empty — confirm that combination is intended.
function move_registry(){
    echo "====Moving registry pods to infra nodes===="
    oc apply -f - <<EOF
apiVersion: imageregistry.operator.openshift.io/v1
kind: Config
metadata:
  name: cluster
spec:
  affinity:
    podAntiAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
      - podAffinityTerm:
          namespaces:
          - openshift-image-registry
          topologyKey: kubernetes.io/hostname
        weight: 100
  nodeSelector:
    node-role.kubernetes.io/infra: ""
  tolerations:
  - effect: NoSchedule
    key: node-role.kubernetes.io/infra
    value: reserved
  - effect: NoExecute
    key: node-role.kubernetes.io/infra
    value: reserved
EOF
    oc rollout status deployment image-registry -n openshift-image-registry
    REGISTRY_PODS_MOVED="false"
    for i in $(seq 0 60); do
        echo "Checking registry pods, attempt ${i}"
        mapfile -t REGISTRY_NODES < <(oc get pods -n openshift-image-registry -l docker-registry=default -o jsonpath='{.items[*].spec.nodeName}')
        # Same symmetric-difference check as move_routers_ingress: empty (or
        # infra-only, non-worker leftovers) means every pod is on infra.
        TOTAL_NODEPOOL=$(echo "`echo "${REGISTRY_NODES[@]}"| tr ' ' '\n' |sort|uniq`" "${INFRA_NODE_NAMES[@]}" | tr ' ' '\n' | sort |uniq -u)
        echo
        echo "Move the pod that running out of infra node into infra node"
        echo "---------------------------------------------------------------------------------"
        echo -e "Move:\nPOD IP: [" "${REGISTRY_NODES[@]}" " ]"
        echo -e "To: \nInfra Node IP[" "${INFRA_NODE_NAMES[@]}" "]"
        echo "---------------------------------------------------------------------------------"
        echo
        echo
        echo "---------------------------------------------------------------------------------"
        echo -e "Total Worker/Infra in Nodepool: [ $TOTAL_NODEPOOL ]"
        echo "---------------------------------------------------------------------------------"
        echo
        if [[ -z ${TOTAL_NODEPOOL} || ( $(echo $TOTAL_NODEPOOL |tr ' ' '\n'|wc -l) -lt 3 && $TOTAL_NODEPOOL != *worker* ) ]]; then
            REGISTRY_PODS_MOVED="true"
            echo "Registry pods moved to infra nodes"
            echo "---------------------------------------------------------------------------------"
            oc get po -o wide -n openshift-image-registry | egrep ^image-registry
            echo "---------------------------------------------------------------------------------"
            break
        else
            sleep 10
        fi
    done
    if [[ "${REGISTRY_PODS_MOVED}" == "false" ]]; then
        echo "Image registry pods didn't move to infra nodes"
        echo "---------------------------------------------------------------------------------"
        oc get pods -n openshift-image-registry -owide
        exit 1
    fi
    echo
}
# Apply the cluster-monitoring-config ConfigMap that pins every monitoring
# component to infra nodes, WITHOUT persistent storage (used when the cluster
# has no default StorageClass). The YAML below is the payload sent to the
# cluster and must not be edited casually — indentation is significant.
function apply_monitoring_configmap()
{
    oc apply -f - <<EOF
apiVersion: v1
kind: ConfigMap
metadata:
  name: cluster-monitoring-config
  namespace: openshift-monitoring
data:
  config.yaml: |+
    alertmanagerMain:
      baseImage: openshift/prometheus-alertmanager
      nodeSelector:
        node-role.kubernetes.io/infra: ""
      tolerations:
      - key: node-role.kubernetes.io/infra
        value: reserved
        effect: NoSchedule
      - key: node-role.kubernetes.io/infra
        value: reserved
        effect: NoExecute
    prometheusK8s:
      baseImage: openshift/prometheus
      nodeSelector:
        node-role.kubernetes.io/infra: ""
      tolerations:
      - key: node-role.kubernetes.io/infra
        value: reserved
        effect: NoSchedule
      - key: node-role.kubernetes.io/infra
        value: reserved
        effect: NoExecute
    prometheusOperator:
      baseImage: quay.io/coreos/prometheus-operator
      prometheusConfigReloaderBaseImage: quay.io/coreos/prometheus-config-reloader
      configReloaderBaseImage: quay.io/coreos/configmap-reload
      nodeSelector:
        node-role.kubernetes.io/infra: ""
      tolerations:
      - key: node-role.kubernetes.io/infra
        value: reserved
        effect: NoSchedule
      - key: node-role.kubernetes.io/infra
        value: reserved
        effect: NoExecute
    nodeExporter:
      baseImage: openshift/prometheus-node-exporter
    kubeRbacProxy:
      baseImage: quay.io/coreos/kube-rbac-proxy
    grafana:
      baseImage: grafana/grafana
    auth:
      baseImage: openshift/oauth-proxy
    k8sPrometheusAdapter:
      nodeSelector:
        node-role.kubernetes.io/infra: ""
      tolerations:
      - key: node-role.kubernetes.io/infra
        value: reserved
        effect: NoSchedule
      - key: node-role.kubernetes.io/infra
        value: reserved
        effect: NoExecute
    kubeStateMetrics:
      baseImage: quay.io/coreos/kube-state-metrics
      nodeSelector:
        node-role.kubernetes.io/infra: ""
      tolerations:
      - key: node-role.kubernetes.io/infra
        value: reserved
        effect: NoSchedule
      - key: node-role.kubernetes.io/infra
        value: reserved
        effect: NoExecute
    telemeterClient:
      nodeSelector:
        node-role.kubernetes.io/infra: ""
      tolerations:
      - key: node-role.kubernetes.io/infra
        value: reserved
        effect: NoSchedule
      - key: node-role.kubernetes.io/infra
        value: reserved
        effect: NoExecute
    openshiftStateMetrics:
      nodeSelector:
        node-role.kubernetes.io/infra: ""
      tolerations:
      - key: node-role.kubernetes.io/infra
        value: reserved
        effect: NoSchedule
      - key: node-role.kubernetes.io/infra
        value: reserved
        effect: NoExecute
    thanosQuerier:
      nodeSelector:
        node-role.kubernetes.io/infra: ""
      tolerations:
      - key: node-role.kubernetes.io/infra
        value: reserved
        effect: NoSchedule
      - key: node-role.kubernetes.io/infra
        value: reserved
        effect: NoExecute
EOF
}
# Same as apply_monitoring_configmap, but backs Alertmanager and Prometheus
# with PVCs. Consumes the OPENSHIFT_{ALERTMANAGER,PROMETHEUS}_STORAGE_CLASS /
# _STORAGE_SIZE and OPENSHIFT_PROMETHEUS_RETENTION_PERIOD environment
# variables, which are interpolated into the heredoc below (unquoted EOF).
function apply_monitoring_configmap_withpvc(){
    oc apply -f- <<EOF
apiVersion: v1
kind: ConfigMap
metadata:
  name: cluster-monitoring-config
  namespace: openshift-monitoring
data:
  config.yaml: |+
    alertmanagerMain:
      baseImage: openshift/prometheus-alertmanager
      nodeSelector:
        node-role.kubernetes.io/infra: ""
      volumeClaimTemplate:
        spec:
          storageClassName: ${OPENSHIFT_ALERTMANAGER_STORAGE_CLASS}
          resources:
            requests:
              storage: ${OPENSHIFT_ALERTMANAGER_STORAGE_SIZE}
      tolerations:
      - key: node-role.kubernetes.io/infra
        value: reserved
        effect: NoSchedule
      - key: node-role.kubernetes.io/infra
        value: reserved
        effect: NoExecute
    prometheusK8s:
      retention: ${OPENSHIFT_PROMETHEUS_RETENTION_PERIOD}
      baseImage: openshift/prometheus
      nodeSelector:
        node-role.kubernetes.io/infra: ""
      volumeClaimTemplate:
        spec:
          storageClassName: ${OPENSHIFT_PROMETHEUS_STORAGE_CLASS}
          resources:
            requests:
              storage: ${OPENSHIFT_PROMETHEUS_STORAGE_SIZE}
      tolerations:
      - key: node-role.kubernetes.io/infra
        value: reserved
        effect: NoSchedule
      - key: node-role.kubernetes.io/infra
        value: reserved
        effect: NoExecute
    prometheusOperator:
      baseImage: quay.io/coreos/prometheus-operator
      prometheusConfigReloaderBaseImage: quay.io/coreos/prometheus-config-reloader
      configReloaderBaseImage: quay.io/coreos/configmap-reload
      nodeSelector:
        node-role.kubernetes.io/infra: ""
      tolerations:
      - key: node-role.kubernetes.io/infra
        value: reserved
        effect: NoSchedule
      - key: node-role.kubernetes.io/infra
        value: reserved
        effect: NoExecute
    nodeExporter:
      baseImage: openshift/prometheus-node-exporter
    kubeRbacProxy:
      baseImage: quay.io/coreos/kube-rbac-proxy
    grafana:
      baseImage: grafana/grafana
    auth:
      baseImage: openshift/oauth-proxy
    k8sPrometheusAdapter:
      nodeSelector:
        node-role.kubernetes.io/infra: ""
      tolerations:
      - key: node-role.kubernetes.io/infra
        value: reserved
        effect: NoSchedule
      - key: node-role.kubernetes.io/infra
        value: reserved
        effect: NoExecute
    kubeStateMetrics:
      baseImage: quay.io/coreos/kube-state-metrics
      nodeSelector:
        node-role.kubernetes.io/infra: ""
      tolerations:
      - key: node-role.kubernetes.io/infra
        value: reserved
        effect: NoSchedule
      - key: node-role.kubernetes.io/infra
        value: reserved
        effect: NoExecute
    telemeterClient:
      nodeSelector:
        node-role.kubernetes.io/infra: ""
      tolerations:
      - key: node-role.kubernetes.io/infra
        value: reserved
        effect: NoSchedule
      - key: node-role.kubernetes.io/infra
        value: reserved
        effect: NoExecute
    openshiftStateMetrics:
      nodeSelector:
        node-role.kubernetes.io/infra: ""
      tolerations:
      - key: node-role.kubernetes.io/infra
        value: reserved
        effect: NoSchedule
      - key: node-role.kubernetes.io/infra
        value: reserved
        effect: NoExecute
    thanosQuerier:
      nodeSelector:
        node-role.kubernetes.io/infra: ""
      tolerations:
      - key: node-role.kubernetes.io/infra
        value: reserved
        effect: NoSchedule
      - key: node-role.kubernetes.io/infra
        value: reserved
        effect: NoExecute
EOF
}
# Steer every cluster-monitoring workload onto infra nodes (PVC-backed when
# the cluster has a default StorageClass) and block until the pods are
# rescheduled, the monitoring statefulsets are settled and Prometheus is up.
# Calls set_storage_class, apply_monitoring_configmap(_withpvc),
# check_monitoring_statefulset_status and wait_for_prometheus_status, all
# defined elsewhere in this script. Exits 1 on timeout.
function move_monitoring(){
    echo "===Moving monitoring pods to infra nodes==="
    platform_type=$(oc get infrastructure cluster -o=jsonpath='{.status.platformStatus.type}')
    platform_type=$(echo $platform_type | tr -s 'A-Z' 'a-z')
    # Use the persistent-volume flavour of the config only when the cluster
    # advertises a default StorageClass.
    default_sc=$(oc get sc -o jsonpath='{.items[?(@.metadata.annotations.storageclass\.kubernetes\.io/is-default-class=="true")].metadata.name}')
    if [[ -n $default_sc ]]; then
        set_storage_class
        apply_monitoring_configmap_withpvc
    else
        apply_monitoring_configmap
    fi
    echo "Check if all pods with infra IP in openshift-monitoring"
    sleep 15
    mapfile -t INFRA_NODE_NAMES < <(echo "$(oc get nodes -l node-role.kubernetes.io/infra -o name)" | sed 's;node\/;;g')
    MONITORING_PODS_MOVED="false"
    for i in $(seq 0 60); do
        echo "Checking monitoring pods, attempt ${i}"
        # Gather the node of every relocatable monitoring component (daemonset
        # components like node-exporter are intentionally excluded).
        MONITORING_NODES=("`oc get pods -n openshift-monitoring -o jsonpath='{.items[*].spec.nodeName}' -l app.kubernetes.io/component=alert-router`")
        MONITORING_NODES+=("`oc get pods -n openshift-monitoring -o jsonpath='{.items[*].spec.nodeName}' -l app.kubernetes.io/name=kube-state-metrics`")
        MONITORING_NODES+=("`oc get pods -n openshift-monitoring -o jsonpath='{.items[*].spec.nodeName}' -l app.kubernetes.io/name=prometheus-adapter`")
        MONITORING_NODES+=("`oc get pods -n openshift-monitoring -o jsonpath='{.items[*].spec.nodeName}' -l app.kubernetes.io/name=prometheus`")
        MONITORING_NODES+=("`oc get pods -n openshift-monitoring -o jsonpath='{.items[*].spec.nodeName}' -l app.kubernetes.io/name=prometheus-operator`")
        MONITORING_NODES+=("`oc get pods -n openshift-monitoring -o jsonpath='{.items[*].spec.nodeName}' -l app.kubernetes.io/name=prometheus-operator-admission-webhook`")
        MONITORING_NODES+=("`oc get pods -n openshift-monitoring -o jsonpath='{.items[*].spec.nodeName}' -l app.kubernetes.io/name=telemeter-client`")
        MONITORING_NODES+=("`oc get pods -n openshift-monitoring -o jsonpath='{.items[*].spec.nodeName}' -l app.kubernetes.io/name=thanos-query`")
        # Symmetric difference between pod nodes and infra nodes; empty (or a
        # small, worker-free remainder) means everything landed on infra.
        TOTAL_NODEPOOL=$(echo "`echo "${MONITORING_NODES[@]}"| tr ' ' '\n' |sort|uniq`" "${INFRA_NODE_NAMES[@]}" | tr ' ' '\n' | sort |uniq -u)
        echo
        echo "Move the pod that running out of infra node into infra node"
        echo "---------------------------------------------------------------------------------"
        echo -e "Move:\nPOD IP: [" "${MONITORING_NODES[@]}" "]"
        echo -e "To: \nInfra Node IP[" "${INFRA_NODE_NAMES[@]}" " ]"
        echo "---------------------------------------------------------------------------------"
        echo
        echo
        echo "---------------------------------------------------------------------------------"
        echo -e "Total Worker/Infra in Nodepool: [ $TOTAL_NODEPOOL ]"
        echo "---------------------------------------------------------------------------------"
        echo
        if [[ -z ${TOTAL_NODEPOOL} || ( $(echo $TOTAL_NODEPOOL |tr ' ' '\n'|wc -l) -lt 3 && $TOTAL_NODEPOOL != *worker* ) ]]; then
            MONITORING_PODS_MOVED="true"
            break
        else
            sleep 10
        fi
    done
    if [[ "${MONITORING_PODS_MOVED}" == "false" ]]; then
        echo "Monitoring pods didn't move to infra nodes"
        echo "---------------------------------------------------------------------------------"
        oc get pods -n openshift-monitoring -owide
        exit 1
    fi
    sleep 30
    echo "Check statefulset moving status in openshift-monitoring"
    check_monitoring_statefulset_status
    echo "Final check - Check if all pods to be settle"
    sleep 5
    max_retries=30
    retry_times=1
    while [[ $(oc get pods --no-headers -n openshift-monitoring | grep -Pv "(Completed|Running)" | wc -l) != "0" ]];
    do
        echo -n "." && sleep 5;
        # BUGFIX: the original tested `-le` here, which aborted on the very
        # first iteration whenever any pod was not yet Running; only give up
        # once the retry budget is actually exhausted.
        if [[ $retry_times -gt $max_retries ]];then
            echo "Some pods fail to startup in limit times, please check ..."
            exit 1
        fi
        retry_times=$(( $retry_times + 1 ))
    done
    if [[ $retry_times -lt $max_retries ]];then
        echo "######################################################################################"
        echo "#              All PODs of prometheus is Completed or Running!                       #"
        echo "######################################################################################"
    fi
    echo
    wait_for_prometheus_status
}
##############################################################################################
#                                                                                            #
#                         Move Pods to Infra Nodes Entrypoint                                #
#                                                                                            #
##############################################################################################
# Without a kubeconfig there is nothing to act on; exit 0 so callers treat
# this as a graceful skip rather than a hard failure.
if test ! -f "${KUBECONFIG}"
then
    echo "No kubeconfig, can not continue."
    exit 0
fi
# Download jq into a private bin dir if it is not already staged.
# NOTE(review): the stedolan URL still redirects today, but jq now lives
# under github.com/jqlang/jq — consider updating the download URL.
if [ ! -d /tmp/bin ];then
    mkdir /tmp/bin
    export PATH=$PATH:/tmp/bin
    curl -sL https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 > /tmp/bin/jq
    chmod ug+x /tmp/bin/jq
fi
#Get Basic Infrastructue Architecture Info
node_arch=$(oc get nodes -ojsonpath='{.items[*].status.nodeInfo.architecture}')
platform_type=$(oc get infrastructure cluster -ojsonpath='{.status.platformStatus.type}')
platform_type=$(echo $platform_type | tr -s 'A-Z' 'a-z')
node_arch=$(echo $node_arch | tr -s " " "\n"| sort|uniq -u)
######################################################################################
#          CHANGE BELOW VARIABLE IF YOU WANT TO SET DIFFERENT VALUE                  #
######################################################################################
#IF_MOVE_INGRESS IF_MOVE_REGISTRY IF_MOVE_MONITORING can be override if you want to disable moving
#ingress/registry/monitoring
export OPENSHIFT_PROMETHEUS_RETENTION_PERIOD=15d
export OPENSHIFT_PROMETHEUS_STORAGE_SIZE=100Gi
export OPENSHIFT_ALERTMANAGER_STORAGE_SIZE=2Gi
# Pick the platform-appropriate StorageClass for Prometheus/Alertmanager PVCs.
case ${platform_type} in
aws)
    #Both Architectures also need:
    OPENSHIFT_PROMETHEUS_STORAGE_CLASS=gp3-csi
    OPENSHIFT_ALERTMANAGER_STORAGE_CLASS=gp3-csi
    ;;
gcp)
    OPENSHIFT_PROMETHEUS_STORAGE_CLASS=ssd-csi
    OPENSHIFT_ALERTMANAGER_STORAGE_CLASS=ssd-csi
    ;;
ibmcloud)
    # (duplicate assignments that repeated these two lines were removed)
    OPENSHIFT_PROMETHEUS_STORAGE_CLASS=ibmc-vpc-block-5iops-tier
    OPENSHIFT_ALERTMANAGER_STORAGE_CLASS=ibmc-vpc-block-5iops-tier
    ;;
openstack)
    OPENSHIFT_PROMETHEUS_STORAGE_CLASS=standard-csi
    OPENSHIFT_ALERTMANAGER_STORAGE_CLASS=standard-csi
    ;;
alibabacloud)
    # (duplicate assignments that repeated these two lines were removed)
    OPENSHIFT_PROMETHEUS_STORAGE_CLASS=alicloud-disk
    OPENSHIFT_ALERTMANAGER_STORAGE_CLASS=alicloud-disk
    ;;
azure)
    #Azure use VM_SIZE as instance type, to unify variable, define all to INSTANCE_TYPE
    OPENSHIFT_PROMETHEUS_STORAGE_CLASS=managed-csi
    OPENSHIFT_ALERTMANAGER_STORAGE_CLASS=managed-csi
    ;;
nutanix)
    #nutanix use VM_SIZE as instance type, to uniform variable, define all to INSTANCE_TYPE
    OPENSHIFT_PROMETHEUS_STORAGE_CLASS=nutanix-volume
    OPENSHIFT_ALERTMANAGER_STORAGE_CLASS=nutanix-volume
    ;;
vsphere|default)
    # No storage class override; the default StorageClass (if any) is used.
    ;;
*)
    echo "Un-supported infrastructure cluster detected."
    exit 1
esac
# Each mover can be disabled individually by exporting IF_MOVE_*=false.
IF_MOVE_INGRESS=${IF_MOVE_INGRESS:=true}
if [[ ${IF_MOVE_INGRESS} == "true" ]];then
    move_routers_ingress
fi
IF_MOVE_REGISTRY=${IF_MOVE_REGISTRY:=true}
if [[ ${IF_MOVE_REGISTRY} == "true" ]];then
    move_registry
fi
IF_MOVE_MONITORING=${IF_MOVE_MONITORING:=true}
if [[ ${IF_MOVE_MONITORING} == "true" ]];then
    move_monitoring
fi
| true |
9805f2d0055e96f41486581e7d88b6089895aa5e
|
Shell
|
GopuTS/workspace-helper
|
/gitflow-helper/gitify.sh
|
UTF-8
| 5,568 | 3.71875 | 4 |
[] |
no_license
|
#!/bin/bash
# Workspace-wide git/git-flow helper configuration.
featureModulesFile=~/easy/FeatureModules.txt
workSpace=~/workspace-sts/universe
# NOTE(review): unquoted and unchecked — if the workspace is missing, every
# later command runs from the caller's cwd instead. Consider `cd ... || exit`.
cd $workSpace
# Globs (intentionally unquoted where expanded) that select the module dirs.
moduleRegx="*-model* *client* *-service* platform-*"
# Suffix appended to stash messages so this script can find its own stashes.
stashSuffix="easyWSSwitch"
modules=()
# Run `git gc` inside every module directory matched by $moduleRegx.
# Entries that cannot be entered (e.g. a glob matching a plain file) are
# skipped instead of silently running gc in the wrong directory.
function garbageCollect {
  local m
  for m in $moduleRegx; do
    cd "$m" || continue
    printf "In %s \n" "$m"
    git gc
    cd ..
  done
}
# Append every module listed in $featureModulesFile to the global `modules`
# array. Lines containing '#' are treated as comments; blank lines are
# skipped (the original appended empty strings as modules). Prints a hint
# when the file yielded no modules.
function setFeatureModules {
  local moduleName
  while read -r moduleName ; do
    if [[ -n "$moduleName" && "$moduleName" != *"#"* ]]; then
      modules+=("$moduleName")
    fi
  done < "$featureModulesFile"
  # Numeric comparison; the original used string `<` inside [[ ]].
  if [[ ${#modules[@]} -lt 1 ]]; then
    printf "please set the modules in %s \n" "$featureModulesFile"
  fi
}
# Append every path matching the project-wide module globs to the global
# `modules` array. $moduleRegx is expanded unquoted on purpose so the shell
# performs word-splitting and globbing on each pattern.
function setAllModules {
  modules+=( $moduleRegx )
}
# Succeed (status 0) when the current working tree has uncommitted or
# untracked changes — i.e. `git status --porcelain` prints anything — and
# fail (status 1) when it is clean or not a repository at all.
function isDirty {
  [[ -n "$(git status --porcelain)" ]]
}
# isDirty
# if [[ $? == 0 ]]; then
# echo "WorkingCopy is dirty"
# fi
# Check whether a branch exists on this repository's origin remote.
# Arguments: $1 - branch name to look up
# Returns:   0 when `git ls-remote` reports the ref, 1 otherwise
# Note: performs a network round-trip; run it from inside the module repo.
# Variables are now `local` — the original leaked branch/repository/
# returnValue into the caller's scope.
function isBranchExists {
  local branch=$1
  local repository returnValue
  repository=$(git config --get remote.origin.url)
  returnValue=$(git ls-remote "$repository" "$branch")
  [[ -n $returnValue ]]
}
# isBranchExists "gis-service" "feature/PROF-530-ba_flow_update_for_inter_realm"
# echo "$?"
dirtyModules=()
# Scan every module directory matched by $moduleRegx and record the names of
# those with uncommitted changes in the global `dirtyModules` array.
# Uses isDirty (defined above); entries that cannot be entered are skipped
# (the original kept going and inspected the wrong directory).
function getDirtyModules {
  local f
  for f in $moduleRegx; do
    cd "$f" || continue
    if isDirty; then
      dirtyModules+=("$f")
    fi
    cd ..
  done
}
# Switch every module matched by $moduleRegx to branch $1, falling back to
# "develop" when $1 does not exist on the module's remote. Local changes are
# stashed under "<current-branch>-$stashSuffix" before the checkout, and a
# previously saved stash for the target branch is popped afterwards.
# Arguments: $1 - destination branch name
# Uses isBranchExists and isDirty (defined above).
function switchBranch {
  local destinationBranch=$1
  local f switchToBranch currentBranchName stashMessage stashToPop
  for f in $moduleRegx; do
    echo "-------------"
    printf '%s:\n' "$f"
    cd "$f" || continue
    switchToBranch="develop"
    if isBranchExists "$destinationBranch"; then
      switchToBranch=$destinationBranch
    fi
    currentBranchName=$(git symbolic-ref --short HEAD)
    stashMessage="$currentBranchName-$stashSuffix"
    # BUGFIX: the original wrote `[[ isDirty ]]`, which tests the literal
    # string "isDirty" (always true) and therefore stashed unconditionally.
    if isDirty; then
      git stash save --include-untracked "$stashMessage"
    fi
    git checkout "$switchToBranch"
    stashMessage="$switchToBranch-$stashSuffix"
    # Match multi-digit stash indexes too (the original regex `stash@\{.\}`
    # only matched a single character); pop at most one stash.
    stashToPop=$(git stash list | grep -F "$stashMessage" | grep -o 'stash@{[0-9]*}' | head -n 1)
    if [[ -n $stashToPop ]]; then
      git stash pop "$stashToPop"
    fi
    echo "-------------"
    cd ..
  done
}
# `git pull` every module in the global `modules` array, stashing dirty
# working copies first and restoring the stash after the pull.
# Uses isDirty (defined above).
function pull {
  local f currentBranchName stashMessage stashToPop
  for f in "${modules[@]}"; do
    cd "$f" || continue
    echo "-------------"
    printf '%s:\n' "$f"
    currentBranchName=$(git symbolic-ref --short HEAD)
    stashMessage="$currentBranchName-$stashSuffix"
    # BUGFIX: `[[ isDirty ]]` in the original tested a literal string and
    # was always true; call the helper so we only stash real changes.
    if isDirty; then
      git stash save --include-untracked "$stashMessage"
    fi
    git pull
    # Match multi-digit stash indexes too; pop at most one stash.
    stashToPop=$(git stash list | grep -F "$stashMessage" | grep -o 'stash@{[0-9]*}' | head -n 1)
    if [[ -n $stashToPop ]]; then
      git stash pop "$stashToPop"
    fi
    echo "-------------"
    cd ..
  done
}
# `git push` every module in the global `modules` array. Each push runs in a
# subshell so a failed `cd` can never leave us pushing from the wrong place.
function push {
  local f
  for f in "${modules[@]}"; do
    ( cd "$f" && git push )
  done
}
# In strict mode ($1 == "true"), refuse to continue when any module has
# uncommitted changes: print the offenders and exit 1. No-op otherwise.
# Uses getDirtyModules / the global dirtyModules array.
function validateForDirtyModules {
  local isStrictMode=$1
  if [[ $isStrictMode == true ]]; then
    getDirtyModules
    # Numeric comparison; the original used string `>` inside [[ ]].
    if [[ ${#dirtyModules[@]} -gt 0 ]]; then
      printf "Running in strict mode. Dirty modules are not allowed:\n"
      echo "${dirtyModules[@]}"
      printf "\n"
      exit 1
    fi
    printf "All modules are clean!\n"
  fi
}
# Start a git-flow feature branch in every module in the `modules` array.
# Arguments: $1 - feature name, $2 - base branch to branch from.
# Each module is handled in a subshell so a failed `cd` is isolated.
function startFeature {
  local featureName=$1
  local baseBranchName=$2
  local m
  for m in "${modules[@]}"; do
    ( cd "$m" && git flow feature start "$featureName" "$baseBranchName" )
  done
}
# Finish a git-flow feature branch in every module in the `modules` array.
# Arguments: $1 - feature name.
function finishFeature {
  local featureName=$1
  local m
  for m in "${modules[@]}"; do
    ( cd "$m" && git flow feature finish "$featureName" )
  done
}
# Command dispatch: the first argument selects the sub-command, remaining
# arguments are sub-command specific. Falls through to a usage banner.
if [[ "$1" == "ic" || "$1" == "isClean" ]]; then
  # ic/isClean: report whether any module has uncommitted changes.
  getDirtyModules
  if [[ ${#dirtyModules[@]} > 0 ]]; then
    echo "Oops! There are dirty modules: ${dirtyModules[@]}"
  else
    echo "Hurray! All modules are clean! No uncommited changes."
  fi
elif [[ "$1" == "s" || "$1" == "switch" ]]; then
  # s/switch <branch> [strict]: switch all modules to <branch>.
  echo "s / switch"
  branch=$2
  if [[ -z $branch ]]; then
    echo "One more argument needed: <destination branch name>"
    exit 1
  fi
  # Strict mode defaults to true and refuses to run with dirty modules.
  isStrictMode=${3-true}
  validateForDirtyModules $isStrictMode
  switchBranch $branch
elif [[ "$1" == "pl" || "$1" == "pull" ]]; then
  # pl/pull [f|all] [strict]: pull feature modules (default) or all modules.
  whichModules=${2-f}
  isStrictMode=${3-true}
  validateForDirtyModules $isStrictMode
  if [[ $whichModules == "all" ]]; then
    setAllModules
  else
    setFeatureModules
  fi
  pull
elif [[ "$1" == "ps" || "$1" == "push" ]]; then
  #statements
  # ps/push [f|all] [strict]: push feature modules (default) or all modules.
  whichModules=${2-f}
  isStrictMode=${3-true}
  validateForDirtyModules $isStrictMode
  if [[ $whichModules == "all" ]]; then
    setAllModules
  else
    setFeatureModules
  fi
  push
elif [[ "$1" == "sf" || "$1" == "startFeature" ]]; then
  # sf/startFeature <name> [base]: start a git-flow feature (base: develop).
  featureName=$2
  if [[ -z $featureName ]]; then
    printf "featureName is mandatory to start a feature \n"
    exit 1
  fi
  baseBranchName=${3-develop}
  printf "base branch: $baseBranchName \n"
  setFeatureModules
  startFeature $featureName $baseBranchName
elif [[ "$1" == "ff" || "$1" == "finishFeature" ]]; then
  # ff/finishFeature <name>: finish a git-flow feature in feature modules.
  featureName=$2
  if [[ -z $featureName ]]; then
    printf "featureName is mandatory to finish a feature \n"
    exit 1
  fi
  setFeatureModules
  finishFeature $featureName
  #statements
  #statements
# elif [[ $1 == 1 ]]; then
#   setFeatureModules
#   echo "${modules[@]}"
#   printf "\n"
elif [[ "$1" == "gc" || "$1" == "garbageCollect" ]]; then
  # gc/garbageCollect: run `git gc` in every module.
  garbageCollect
else
  # Usage banner for unknown/missing sub-commands.
  printf "*********************************************\n"
  printf "Please provide one of the below arguments:\n"
  printf "ic/isClean\n"
  printf "s/switch\n"
  printf "s <branch name> <isStrictMode>[*true|false]"
  printf "pl/pull\n"
  printf "pl/pull <feature or all>[*f|all] <isStrictMode>[*true|false]\n"
  printf "ps/push\n"
  printf "ps/push <feature or all>[*f|all] <isStrictMode>[*true|false]\n"
  printf "sf/StartFeature\n"
  printf "sf/StartFeature <feature branch name> <base branch>\n"
  printf "ff/finishFeature\n"
  printf "gc/garbageCollect\n"
  printf "gc"
  printf "*********************************************\n"
fi
| true |
b2e2243517442108ecdc530103bf79c99a01a80e
|
Shell
|
abveritas/bundles
|
/swt/PKGBUILD
|
UTF-8
| 2,855 | 3.03125 | 3 |
[] |
no_license
|
# Arch Linux PKGBUILD metadata for the SWT widget toolkit.
pkgname=swt
pkgver=3.7
pkgrel=1
pkgdesc="An open source widget toolkit for Java"
# Eclipse drop timestamp embedded in the download URL below.
_date=201106131736
arch=(i686 x86_64)
url="http://www.eclipse.org/swt/"
license=('EPL')
depends=('java-runtime>=6' 'gtk2>=2.20.1' 'libxtst')
#optdepends=('libgnomeui' 'xulrunner' 'mesa')
#makedepends=('openjdk6' 'libxtst' 'mesa' 'libgnomeui' 'xulrunner>=1.9.1' 'unzip' 'pkgconfig' 'apache-ant')
optdepends=('libgnomeui' 'mesa')
makedepends=('openjdk6' 'libxtst' 'mesa' 'libgnomeui' 'xulrunner' 'unzip' 'pkgconfig' 'apache-ant')
# Default source list (both architectures); overridden per-arch below so each
# build only downloads its own zip.
source=(http://download.eclipse.org/eclipse/downloads/drops/R-${pkgver}-${_date}/swt-${pkgver}-gtk-linux-x86.zip
http://download.eclipse.org/eclipse/downloads/drops/R-${pkgver}-${_date}/swt-${pkgver}-gtk-linux-x86_64.zip
build-swt.xml)
md5sums=('21d084526f5df4e02900b2ce4ae8b5d1'
'60f11408bc8db299c5406a904a665a5e'
'f5e548bc26a0f1f3c18131be76face40')
if [ "${CARCH}" = "i686" ]; then
source=(http://download.eclipse.org/eclipse/downloads/drops/R-${pkgver}-${_date}/swt-${pkgver}-gtk-linux-x86.zip
build-swt.xml)
noextract=(swt-${pkgver}-gtk-linux-x86.zip)
md5sums=('21d084526f5df4e02900b2ce4ae8b5d1'
'f5e548bc26a0f1f3c18131be76face40')
fi
if [ "${CARCH}" = "x86_64" ]; then
source=(http://download.eclipse.org/eclipse/downloads/drops/R-${pkgver}-${_date}/swt-${pkgver}-gtk-linux-x86_64.zip
build-swt.xml)
md5sums=('60f11408bc8db299c5406a904a665a5e'
'f5e548bc26a0f1f3c18131be76face40')
noextract=(swt-${pkgver}-gtk-linux-x86_64.zip)
fi
# makepkg build step: extract the bundled src.zip, compile the JNI native
# libraries with the upstream makefile, then compile the Java sources.
build() {
  cd "${srcdir}"
  # Bring the JDK and ant onto PATH for this shell.
  . /etc/profile.d/openjdk6.sh
  . /etc/profile.d/apache-ant.sh
  # Pull only the embedded source archive out of the binary drop.
  unzip -jqo ${srcdir}/swt-${pkgver}-gtk-linux-*.zip "*src.zip"
  unzip -qo src.zip
  rm -rf about_files
  mkdir src
  mv org src/
  cp build-swt.xml build.xml
  # 64-bit JNI pointers need the JNI64 define.
  [ "${CARCH}" = "x86_64" ] && export SWT_PTR_CFLAGS=-DJNI64
  make -f make_linux.mak make_awt
  make -f make_linux.mak make_swt
  make -f make_linux.mak make_atk
  make -f make_linux.mak make_gnome
# "XULRunner >= 2.0 is not currently supported"
# https://bugs.eclipse.org/bugs/show_bug.cgi?id=327696
#
#  export MOZILLA_INCLUDES="`pkg-config --cflags libxul`"
#  export MOZILLA_LIBS="-Wl,-R`pkg-config libxul --variable=libdir` `pkg-config libxul --libs`"
#  export XULRUNNER_INCLUDES="${MOZILLA_INCLUDES}"
#  export XULRUNNER_LIBS="${MOZILLA_LIBS}"
#  make -f make_linux.mak make_mozilla
#  make -f make_linux.mak make_xulrunner
  make -f make_linux.mak make_cairo
  make -f make_linux.mak make_glx
  ant compile
}
# makepkg package step: stage the compiled swt.jar and the JNI shared
# libraries into the package root.
package() {
  cd "${srcdir}"
  # The jar task expects these resources alongside the compiled classes.
  cp version.txt build/version.txt
  cp src/org/eclipse/swt/internal/SWTMessages.properties build/org/eclipse/swt/internal/
  ant jar
  install -m755 -d "${pkgdir}/usr/share/java"
  install -m755 -d "${pkgdir}/usr/lib"
  install -m644 swt.jar "${pkgdir}/usr/share/java/"
  install -m755 *.so "${pkgdir}/usr/lib/"
}
| true |
2b7f1b1db3751318383787b2876189a7a8574a1c
|
Shell
|
dfresh613/SecurityWatchServer
|
/SecurityWatchServer/scripts/moveRecordings.sh
|
UTF-8
| 547 | 3.28125 | 3 |
[] |
no_license
|
#!/bin/bash
# Archive the current recordings: move everything captured under /tmp/record
# into a date-stamped subdirectory and snapshot the motion log alongside it.
# Side effect: sets the global $dir consumed by copy() and clean() below.
function move () {
	timestamp=$(date +"%m-%d-%y")
	dir=/tmp/record/$timestamp/
	# -p so a second run on the same day reuses the directory instead of
	# failing (the original bare `mkdir` errored on reruns).
	mkdir -p "$dir"
	mv /tmp/record/*.jpg "$dir"
	mv /tmp/record/*.avi "$dir"
	cp /tmp/motion.log "$dir"
}
# Mirror the freshly archived recording directory (global $dir, set by
# move()) onto the network share.
function copy (){
	cp -r "$dir" /vmstore/userhome/securityCam/
}
# Force-detach (lazy, -lf) and remount /mocaps; used below to recover when
# the copy to the network share fails.
# NOTE(review): mounts /mocaps but copy() targets /vmstore — confirm the
# share actually backs both paths.
function remount () {
	umount -lf /mocaps
	mount /mocaps
}
# Remove the local archive directory and truncate the motion log.
# ${dir:?} aborts instead of running `rm -rf` against an empty path if
# move() has not set $dir.
function clean () {
	rm -rf -- "${dir:?dir is not set}"
	: > /tmp/motion.log
}
# Main flow: archive, try to copy to the share; on failure keep remounting
# and retrying until the copy succeeds, then remove the local archive.
move
copy
# $? here is copy's exit status (move's status is discarded).
if [ $? -eq 0 ];then
	clean
else
	# The while condition re-tests $? of the last command in the loop body
	# (the `copy` call), so the loop exits once a copy succeeds.
	while [ ! $? -eq 0 ];do
		echo "Attempting to remount"
		remount
		sleep 10
		echo "waiting..."
		copy
	done
	clean
fi
| true |
c6ef3809973d56b1e6f4de1e5f85e506fdbdf706
|
Shell
|
Hackmodford/factorio-init
|
/config.example
|
UTF-8
| 2,462 | 3.46875 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Enable debugging, useful when you want to figure out why
# this script is not behaving the way you expect it to do
DEBUG=0
# What do you want to call this service?
SERVICE_NAME="Factorio"
# Which user/group is running factorio?
# Running any public service as "root" is generally not recommended
USERNAME=factorio
USERGROUP=factorio
# The absolute path to the factorio directory
FACTORIO_PATH=/path/to/factorio
# The absolute path to the factorio binary
BINARY=${FACTORIO_PATH}/bin/x64/factorio
#Where to store the pidfile
PIDFILE=/path/to/pid
# The latency expected between peers (milliseconds)
# Try to set this to the highest latency + 30
# If Peer A and B have a latency of 100 between each other
# set this to 130
LATENCY=250
# The number of minutes between each autosave
AUTOSAVE_INTERVAL=10
# The number of autosaves to use for rotation
AUTOSAVE_SLOTS=3
# Factorio comes packaged in a tarball containing the directory named "factorio"
# when using this scripts update/install command we expect to see this very
# directory. If you want to supply your own update/install tar other than what you can download
# from factorio.com/downloads you can use this option to tell the script this is ok.
#
# If you place your factorio install within a directory named "factorio" and you update/install
# with the official tarball you can safely ignore this option.
PACKAGE_DIR_NAME=factorio
#
# narcotiq update options https://github.com/narc0tiq/factorio-updater
# Either pull in the submodule or clone narc0tics repo:
#
# git clone https://github.com/narc0tiq/factorio-updater
# OR
# git submodule init
# git submodule update
#
# absolute path to the factorio-updater script
UPDATE_SCRIPT=/path/to/update-factorio.py
# See https://github.com/narc0tiq/factorio-updater for details regarding the options
UPDATE_EXPERIMENTAL=0
UPDATE_TMPDIR=/tmp
# Set to 1 to enable download of the headless updates
HEADLESS=0
# Extras
# Additional binary arguments, these will be sent to the binary when issuing the "start" command
EXTRA_BINARGS="--disallow-commands --peer-to-peer"
#
# Refrain from changing the variables below, they are used internally by the script and should
# not be altered unless you know what you are doing
#
SCREEN_NAME=factorio-screen
SAVE_NAME=factorio-init-save
# NOTE(review): EXTRA_BINARGS is expanded into this single string, so
# arguments containing spaces cannot be passed safely — keep flags simple.
INVOCATION="${BINARY} --start-server ${SAVE_NAME} --autosave-interval ${AUTOSAVE_INTERVAL} --autosave-slots ${AUTOSAVE_SLOTS} --latency-ms ${LATENCY} ${EXTRA_BINARGS}"
| true |
d52166183202957deba79bde8b8ed795299bc2ed
|
Shell
|
mdryden/config
|
/shell_profiles/.profile
|
UTF-8
| 2,278 | 3.25 | 3 |
[
"Unlicense"
] |
permissive
|
# ~/.profile: executed by the command interpreter for login shells.
# This file is not read by bash(1), if ~/.bash_profile or ~/.bash_login
# exists.
# see /usr/share/doc/bash/examples/startup-files for examples.
# the files are located in the bash-doc package.
# the default umask is set in /etc/profile; for setting the umask
# for ssh logins, install and configure the libpam-umask package.
#umask 022
# if running bash
if [ -n "$BASH_VERSION" ]; then
    # include .bashrc if it exists
    if [ -f "$HOME/.bashrc" ]; then
	. "$HOME/.bashrc"
    fi
fi
# set PATH so it includes user's private bin if it exists
if [ -d "$HOME/bin" ] ; then
    PATH="$HOME/bin:$PATH"
fi
# set PATH so it includes user's private bin if it exists
if [ -d "$HOME/.local/bin" ] ; then
    PATH="$HOME/.local/bin:$PATH"
fi
# PROMPT stuff
# tput-derived color/attribute escape sequences used by the PS1 below.
BLACK=$(tput setaf 0)
BLACK_BG=$(tput setab 0)
RED=$(tput setaf 1)
RED_BG=$(tput setab 1)
GREEN=$(tput setaf 2)
GREEN_BG=$(tput setab 2)
LIME_YELLOW=$(tput setaf 190)
LIME_YELLOW_BG=$(tput setab 190)
YELLOW=$(tput setaf 3)
YELLOW_BG=$(tput setab 3)
POWDER_BLUE=$(tput setaf 153)
POWDER_BLUE_BG=$(tput setab 153)
BLUE=$(tput setaf 4)
BLUE_BG=$(tput setab 4)
MAGENTA=$(tput setaf 5)
MAGENTA_BG=$(tput setab 5)
CYAN=$(tput setaf 6)
CYAN_BG=$(tput setab 6)
WHITE=$(tput setaf 7)
WHITE_BG=$(tput setab 7)
BRIGHT=$(tput bold)
NORMAL=$(tput sgr0)
BLINK=$(tput blink)
REVERSE=$(tput smso)
UNDERLINE=$(tput smul)
# Name of the currently checked-out git branch (empty when the cwd is not
# inside a repository); consumed by the PS1 prompt below.
__git_branch() {
  git branch 2> /dev/null | sed -n 's/^\* //p'
}
# Print the active gcloud configuration name for the prompt. Prints nothing
# — and, unlike the original, no stderr noise — when gcloud has never been
# configured and the active_config file is missing.
__gcloud_ps1() {
  CONTEXT=$(cat ~/.config/gcloud/active_config 2>/dev/null)
  if [ -n "$CONTEXT" ]; then
    echo -e "${CONTEXT}"
  fi
}
# Print the firebase project associated with the current directory, scraped
# from firebase-tools' configstore JSON.
# NOTE(review): this greps raw JSON for a line containing the quoted cwd and
# splits on spaces/quotes — it depends on the exact pretty-printed layout of
# firebase-tools.json and will silently break if that format changes;
# consider jq if available.
__firebase_ps1() {
  CONTEXT=$(grep \"$(pwd)\" ~/.config/configstore/firebase-tools.json | cut -d" " -f2 | cut -d"\"" -f2)
  if [ -n "$CONTEXT" ]; then
    echo "$CONTEXT"
  fi
}
# Emit the current kubectl context (from ~/.kube/config) wrapped in bright
# blinking red styling for the prompt; prints nothing — and no error noise —
# when no config file exists or no context is set.
__kube_ps1()
{
  # Read the context directly (the original piped `cat | grep | sed`) and
  # suppress the error for a missing ~/.kube/config.
  CONTEXT=$(sed -n 's/^current-context: //p' ~/.kube/config 2>/dev/null)
  if [ -n "$CONTEXT" ]; then
    echo -e "${BRIGHT}${WHITE}${RED_BG}${BLINK}${CONTEXT}${NORMAL}"
  fi
}
# Two-line prompt: user@host plus gcloud/firebase/git context, then cwd.
# \[ \] wrappers mark the color escapes as zero-width so line editing stays
# aligned; the $(...) helpers re-run on every prompt.
export PS1='
\[${GREEN}\]\u@\h \[${LIME_YELLOW}\]gcp:$(__gcloud_ps1) \[${CYAN}\]fb:$(__firebase_ps1) \[${POWDER_BLUE}\]git:$(__git_branch)
\[\033[01;34m\]\w\[${NORMAL}\]\$ '
| true |
3f7f038525c721e9ba9a1f1207c0185471649fae
|
Shell
|
GabenGitHub/IdentityServer-for-SPA
|
/dbscript.sh
|
UTF-8
| 336 | 2.5625 | 3 |
[] |
no_license
|
# Colour helpers for progress output; $(...) instead of legacy backticks,
# and tput errors (e.g. no TERM) are silenced so the vars fall back empty.
green=$(tput setaf 2 2>/dev/null)
reset=$(tput sgr0 2>/dev/null)
# Create the initial EF Core migration for ApplicationDbContext, then apply
# it to the database.
echo "${green}Creating initial migration ApplicationDbContext... ${reset}"
dotnet ef migrations add InitApplicationDbContext -c ApplicationDbContext -o Data/Migrations/AppMigrations
echo "${green}Updating database ApplicationDbContext ${reset}"
dotnet ef database update -c ApplicationDbContext
| true |
c3292645f7e161018112a933343d9c3d30523ec4
|
Shell
|
charles-josiah/mineradores
|
/mineradores/build-massivo.sh
|
UTF-8
| 61 | 2.59375 | 3 |
[] |
no_license
|
# Build a docker image per subdirectory, tagged with the directory's name.
# Replaces the original `for a in $(ls)` (which also iterated plain files
# and kept building from the wrong directory whenever a `cd` failed) with a
# directory glob and an isolated subshell per build.
for a in */; do
  a=${a%/}
  ( cd "$a" && docker build -t "$a" . )
done
| true |
b4a6b4f6ba2b436fa54d86a61b0152c28b633c32
|
Shell
|
akbaraziz/docker_scripts
|
/docker_private_registry_security.sh
|
UTF-8
| 1,319 | 3.3125 | 3 |
[] |
no_license
|
#!/bin/bash
# NOTE(review): this file is a lesson transcript, not a runnable script —
# every "<registry public hostname>" placeholder must be substituted first
# (as written, the angle brackets are parsed as shell redirections), and the
# daemon.json snippet below is meant to be typed inside the `vi` session.
set -ex
# How to pull and search for images on DockerHub
docker pull ubuntu
docker search ubuntu
# Attempt to authenticate against the private registry
docker login <registry public hostname>
# Configure Docker to ignore certificate verification when accessing the private registry - THIS IS A BAD THING
sudo vi /etc/docker/daemon.json
{
"insecure-registries" : ["<registry public hostname>"]
}
# Restart Docker
sudo systemctl restart docker
# Try Docker Login again
docker login <registry public hostname>
# Log out of the Private registry
docker logout <registry public hostname>
# Remove insecurity entry from daemon.json
# Restart Docker
sudo systemctl restart docker
# Download Cert Public Key from the Registry and configure Docker to use insecurity
sudo mkdir -p /etc/docker/certs.d/<registry public hostname>
sudo scp cloud_user@<registry public hostname>:/home/cloud_user/registry/certs/domain.crt /etc/docker/certs.d/<registry public hostname>
# Login to Docker
docker login <registry public hostname>
# Push and Pull from Registry
docker pull ubuntu
docker tag ubuntu <registry public hostname>/ubuntu
docker push <registry public hostname>/ubuntu
docker image rm <registry public hostname>/ubuntu
docker image rm ubuntu
docker pull <registry public hostname>/ubuntu
8ec7511f8bb0fb2fa298055944231a8fcf22a5c4
|
Shell
|
JCErasmus/QshOni
|
/build.sh
|
UTF-8
| 3,190 | 3.203125 | 3 |
[] |
no_license
|
#!/QOpenSys/usr/bin/qsh
#----------------------------------------------------------------
# Script name: build.sh
# Author: Richard Schoen
# Purpose: Create QSHONI library, copies source members and compiles objects
#----------------------------------------------------------------
SRCLIB="QSHONI"
SRCLIBTEXT="QShell on IBM i"
SRCFILE="SOURCE"
dashes="---------------------------------------------------------------------------"
function cpy_member
{
# ----------------------------------------------------------------
# Copy source member and set source type
# ----------------------------------------------------------------
SRCMEMBER=`echo "${CURFILE^^}" | cut -d'.' -f1` # Parse PC file name prefix to member name
SRCTYPE=`echo "${CURFILE^^}" | cut -d'.' -f2` # Parse PC file name extenstion to souce type
system -v "CPYFRMSTMF FROMSTMF('${PWD}/${CURFILE}') TOMBR('/QSYS.LIB/${SRCLIB}.LIB/${SRCFILE}.FILE/${SRCMEMBER}.MBR') MBROPT(*REPLACE)"
system -v "CHGPFM FILE(${SRCLIB}/${SRCFILE}) MBR($SRCMEMBER) SRCTYPE(${SRCTYPE}) TEXT('${SRCTEXT}')"
}
echo "$dashes"
echo "Starting Build of ${SRCLIBTEXT} library ${SRCLIB}"
# Create library, clear library and create source file
system -v "CRTLIB ${SRCLIB} TYPE(*PROD) TEXT('${SRCLIBTEXT}')"
system -v "CLRLIB LIB(${SRCLIB})"
system -v "CRTSRCPF FILE(${SRCLIB}/${SRCFILE}) RCDLEN(120)"
# Copy all the source members and set source types
CURFILE="QSHBASH.CMD"
SRCTEXT="Run Bash Command via Qshell"
cpy_member
CURFILE="QSHBASHC.CLLE"
SRCTEXT="Run Bash Command via Qshell"
cpy_member
CURFILE="QSHEXEC.CMD"
SRCTEXT="Run Bash Command via Qshell"
cpy_member
CURFILE="QSHEXECC.CLLE"
SRCTEXT="Run QShell Command Line"
cpy_member
CURFILE="QSHLOGSCAC.CLP"
SRCTEXT="Scan Qshell Log File for Value"
cpy_member
CURFILE="QSHLOGSCAN.CMD"
SRCTEXT="Scan Qshell Log File for Value"
cpy_member
CURFILE="QSHLOGSCAR.RPGLE"
SRCTEXT="Scan Qshell Log File for Value"
cpy_member
CURFILE="QSHPATH.CMD"
SRCTEXT="Set Open Source Package Path Environment Variables"
cpy_member
CURFILE="QSHPATHC.CLLE"
SRCTEXT="Set Open Source Package Path Environment Variables"
cpy_member
CURFILE="QSHSTDOUTR.RPGLE"
SRCTEXT="Read and parse stdout log"
cpy_member
CURFILE="QSHIFSCHKR.RPGLE"
SRCTEXT="Check for IFS File Existence"
cpy_member
CURFILE="QSHIFSCHKC.CLP"
SRCTEXT="Check for IFS File Existence"
cpy_member
CURFILE="QSHIFSCHK.CMD"
SRCTEXT="Check for IFS File Existence"
cpy_member
CURFILE="QSHPYRUN.CMD"
SRCTEXT="Run Python Script via Qshell"
cpy_member
CURFILE="QSHPYRUNC.CLLE"
SRCTEXT="Run Python Script via Qshell"
cpy_member
CURFILE="QSHDEMO01R.RPGLE"
SRCTEXT="Read Outfile STDOUTQSH and display via DSPLY cmd"
cpy_member
CURFILE="SRCBLDC.CLP"
SRCTEXT="Build cmds from QSHONI/SOURCE file"
cpy_member
CURFILE="README.TXT"
SRCTEXT="Read Me Docs on Setup"
cpy_member
CURFILE="VERSION.TXT"
SRCTEXT="Version Notes"
cpy_member
# Create and run build program
system -q "CRTCLPGM PGM(${SRCLIB}/SRCBLDC) SRCFILE(${SRCLIB}/${SRCFILE})"
system -v "CALL PGM(${SRCLIB}/SRCBLDC)"
echo "${SRCLIBTEXT} library ${SRCLIB} was created and programs compiled."
echo "$dashes"
| true |
9778cd6cbd06cb0f878f4b9c908fb4c9ae31033f
|
Shell
|
maanteeamet/pelias-data-container
|
/scripts/nlsfi-loader.sh
|
UTF-8
| 360 | 3.3125 | 3 |
[] |
no_license
|
#!/bin/bash
# Fetch the NLS-FI "paikat" place-name archive and unpack it under
# $DATA/nls-places. A valid MMLAPIKEY env var must be set (it is consumed
# by the URL-resolver script); $SCRIPTS and $DATA must point at the usual
# script/data directories.

# errors should break the execution
set -e

archive=paikat.zip
# The download URL is computed by a small node helper.
download_url=$(node "$SCRIPTS/parse_nlsfi_url.js")

target_dir="$DATA/nls-places"
mkdir -p "$target_dir"
cd "$target_dir"

# Download nls paikat data, unpack it, and drop the archive.
echo 'Loading nlsfi data'
wget -O "$archive" "$download_url"
unzip -o "$archive"
rm "$archive"
echo '##### Loaded nlsfi data'
| true |
b6bc3163fadc73d52a8d91670905dab51fc63cd7
|
Shell
|
phatblat/dotfiles
|
/.dotfiles/intellij/functions.zsh
|
UTF-8
| 1,467 | 3.703125 | 4 |
[
"MIT"
] |
permissive
|
#-------------------------------------------------------------------------------
#
# intellij/functions.zsh
# Command-line functions for IntelliJ
#
#-------------------------------------------------------------------------------
# Launch IntelliJ IDEA, opening the most relevant thing at hand.
# https://gist.github.com/chrisdarroch/7018927
#
# Usage: idea [FILE | DIR]
#   With a file argument, opens that file in IDEA.
#   With a directory argument (or none), looks for a .idea project dir,
#   an *.ipr project file, or a pom.xml -- in that order -- and falls
#   back to simply launching IDEA.
function idea {
    local dir ipr
    # check for where the latest version of IDEA is installed
    # (app bundle names sort by version, so the last one is the newest)
    IDEA=$(ls -1d /Applications/IntelliJ\ * | tail -n1)

    # were we given a directory?
    if [ -d "$1" ]; then
        dir=$1
    fi

    # were we given a file?
    if [ -f "$1" ]; then
        open -a "$IDEA" "$1"
    else
        if [ -n "${dir}" ]; then
            # let's check for stuff in the working directory given
            pushd "${dir}" > /dev/null
        fi

        # First *.ipr project file in the working directory, if any.
        # (The original tested `[ -f *.ipr ]`, which errors out whenever
        # more than one .ipr file is present.)
        ipr=$(ls -1d *.ipr 2>/dev/null | head -n1)

        # does our working dir have an .idea directory?
        if [ -d ".idea" ]; then
            open -a "$IDEA" .
        # is there an IDEA project file?
        elif [ -n "$ipr" ]; then
            open -a "$IDEA" "$ipr"
        # Is there a pom.xml?
        elif [ -f pom.xml ]; then
            open -a "$IDEA" "pom.xml"
        # can't do anything smart; just open IDEA
        else
            open "$IDEA"
        fi

        if [ -n "${dir}" ]; then
            popd > /dev/null
        fi
    fi
}
| true |
990a30349844b7d190a6c0d41027a50e8742502b
|
Shell
|
tbaumeist/FreeNet-Analysis
|
/Scripts/archive/server/MySqlInc.sh
|
UTF-8
| 1,458 | 3.671875 | 4 |
[] |
no_license
|
#!/bin/bash
# Insert N random test words into the Freenet MySQL database. For each
# word, the Freenet node's TMCI telnet console is asked (via expect) for
# the word's CHK URI and routing location, and the triple is stored in
# table RandomData.
_defaultPort=2323  # TMCI telnet port on the calculation host
#===================================================================================================
# Main Entry Point
#===================================================================================================
# parameters
# 1 Configuration file
# 2 Password
source ./../common/parameters.sh
declare password
declare randomCount
declare randomLength
declare calcHost
ParameterScriptWelcome "mysql.sh"
ParameterPassword password $1
ParameterRandomCount randomCount "How many random words to insert? " $2
ParameterEnterHost calcHost "Enter host name to perform location and key calculation: " $3
ParameterScriptWelcomeEnd
#===================================================================================================
for i in `seq $randomCount`
do
word=""
word="testdata$i"
# Ask the node's TMCI console for the CHK of this word; keep only the
# lines carrying the URI and the location ("Double:").
returned=$(expect -c "
spawn telnet $calcHost $_defaultPort
match_max 100000
expect \"*TMCI>*\"
send -- \"GETCHK:$word\r\"
expect eof
send -- \"QUIT\r\"
interact timeout 30 return
" | egrep "URI:|Double:")
# Strip labels/CRs/spaces, leaving "key:location" for the cuts below.
doctored=$(echo $returned | sed -e 's/URI://g' -e 's/Double//g' -e 's/\r//g' -e 's/ //g')
location=$(echo $doctored | cut -d':' -f2)
key=$(echo $doctored | cut -d':' -f1)
echo "Inserting $word $location $key"
# NOTE(review): the password appears on the mysql command line (visible
# in ps) and the SQL is built by interpolation -- acceptable only
# because word/location/key are generated locally, not user input.
mysql -u freenetscript -p$password freenet -Bse "insert into RandomData values (\"$word\",$location,\"$key\");"
done
echo "********** Data Insert Complete ***************"
| true |
7daf0e2d056dc64a44112acdb180c9f9a47feadd
|
Shell
|
stuarteberg/stuart-scripts
|
/create-flyem-env.sh
|
UTF-8
| 5,145 | 2.828125 | 3 |
[] |
no_license
|
#!/bin/bash
#
# 2020-08-14
# Here's how to create my flyem development environment from scratch.
#
# Updated 2022-11
set -x
set -e
# --- Tunables -------------------------------------------------------------
# CONDA_CMD: 'create' builds a fresh env; switch to 'install' to add to an
# existing one. The *_ONLY / CLOUDVOL / DEVELOP_MODE flags are treated as
# booleans: "unset or 0" means off, anything else means on.
WORKSPACE=/Users/bergs/workspace
ENV_NAME=flyem-310
CONDA_CMD=create
#CONDA_CMD=install
DEVELOP_MODE=1
CORE_ONLY=0
CLOUDVOL=1
INSTALLER=mamba
STUART_CREDENTIALS=0 # Non-portable stuff
PYTHON_VERSION=3.10
# Core packages installed in every configuration.
core_conda_pkgs=(
    "python=${PYTHON_VERSION}"
    ipython
    jupyterlab
    nodejs
    matplotlib
    ipywidgets
    bokeh
    selenium # Required for rendering bokeh plot images, also for neuroglancer's video tool
    firefox  # ditto
    geckodriver  # ditto
    datashader
    jupyter_bokeh
    hvplot
    pandas
    feather-format
    pytest
    'vol2mesh>=0.1.post20'
    'libdvid-cpp>=0.4.post21'
    'neuclease>=0.5.post70'
    'flyemflows>=0.5.post.dev523'
    'neuprint-python>=0.4.25'
    lemon
    'dvid>=0.9.17'
    zarr
    matplotlib
    dill
    h5py
    vigra
)
# Extras skipped when CORE_ONLY is enabled.
optional_conda_pkgs=(
    'graph-tool>=2.45'
    umap-learn
    ngspice
    plotly
    line_profiler
    'google-cloud-sdk'
    'google-cloud-bigquery>=1.26.1'
    crcmod  # optional dependency of gsutil rsync, for faster checksums
    pynrrd
    cython
    anytree
    pot
    'gensim>=4.0'
    atomicwrites
    beartype
    brotli
    fastremap  # Cool, there's a conda package for this...
    future
    multiprocess
    orjson
    pathos
    pox
    ppft
    pysimdjson
    zfpy  # This is optional, but if something brings it in via pip, it breaks numcodecs and zarr.
)
# neuroglancer dependencies are all available via conda,
# even though neuroglancer itself isn't.
ng_conda_pkgs=(
    'sockjs-tornado>=1.0.7'
    'tornado>=6'
    'google-apitools'
    nodejs
)
# Some cloudvol dependencies aren't on conda-forge,
# but these ones are
cloudvol_conda_pkgs=(
    boto3
    brotli
    brotlipy
    chardet
    crc32c
    gevent
    google-auth
    google-cloud-core
    'google-cloud-storage>=1.30'
    inflection
    json5
    protobuf
    psutil
    python-dateutil
    tenacity
    zstandard
)
# Build the environment with the package set selected by the flags above.
if [[ ! -z "${CORE_ONLY}" && ${CORE_ONLY} != "0" ]]; then
    ${INSTALLER} ${CONDA_CMD} -y -n ${ENV_NAME} -c flyem-forge -c conda-forge ${core_conda_pkgs[@]}
elif [[ ! -z "${CLOUDVOL}" && ${CLOUDVOL} != "0" ]]; then
    ${INSTALLER} ${CONDA_CMD} -y -n ${ENV_NAME} -c flyem-forge -c conda-forge ${core_conda_pkgs[@]} ${optional_conda_pkgs[@]} ${ng_conda_pkgs[@]} ${cloudvol_conda_pkgs[@]}
else
    ${INSTALLER} ${CONDA_CMD} -y -n ${ENV_NAME} -c flyem-forge -c conda-forge ${core_conda_pkgs[@]} ${optional_conda_pkgs[@]} ${ng_conda_pkgs[@]}
fi
if [[ ! -z "${STUART_CREDENTIALS}" && ${STUART_CREDENTIALS} != "0" ]]; then
    # This is related to my personal credentials files. Not portable!
    ${INSTALLER} install -y -n ${ENV_NAME} $(ls ${WORKSPACE}/stuart-credentials/pkgs/stuart-credentials-*.tar.bz2 | tail -n1)
fi
set +x
# Activate the new env in this (non-interactive) shell.
# https://github.com/conda/conda/issues/7980#issuecomment-492784093
eval "$(conda shell.bash hook)"
conda activate ${ENV_NAME}
set -x
jupyter nbextension enable --py widgetsnbextension
jupyter labextension install @jupyter-widgets/jupyterlab-manager
if [[ ! -z "${CORE_ONLY}" && ${CORE_ONLY} != "0" ]]; then
    echo "Skipping plotly extensions"
else
    # plotly jupyterlab support
    #
    jupyter labextension install jupyterlab-plotly
    jupyter labextension install @jupyter-widgets/jupyterlab-manager plotlywidget
fi
# These would all be pulled in by 'pip install neuroglancer cloud-volume',
# but I'll list the pip dependencies explicitly here for clarity's sake.
pip_pkgs=(
    neuroglancer
    tensorstore
    #'graspologic>=2.0'   # Sadly, not yet available for python-3.10
)
if [[ ! -z "${CLOUDVOL}" && ${CLOUDVOL} != "0" ]]; then
    pip_pkgs+=(
        cloud-volume # 2.0.0
        'cloud-files>=0.9.2'
        'compressed-segmentation>=1.0.0'
        'fastremap>=1.9.2'
        'fpzip>=1.1.3'
        DracoPy
        posix-ipc
        python-jsonschema-objects
    )
fi
if [[ ! -z "${CORE_ONLY}" && ${CORE_ONLY} != "0" ]]; then
    echo "Skipping optional pip installs, including neuroglancer"
else
    pip install ${pip_pkgs[@]}
fi
if [[ ! -z "${DEVELOP_MODE}" && ${DEVELOP_MODE} != "0" ]]; then
    # It is assumed you already have git repos for these in ${WORKSPACE}/
    develop_pkgs=(
        vol2mesh
        neuclease
        flyemflows
        neuprint-python
    )
    # Explicitly install the dependencies,
    # even though they're already installed.
    # This ensures that they get entries in the environment specs,
    # so they don't get automatically removed when we run 'conda update ...'.
    # (conda tends to automatically remove packages that aren't explicitly required by your environment specs.)
    for p in ${develop_pkgs[@]}; do
        ${INSTALLER} install -y -n ${ENV_NAME} --only-deps -c flyem-forge -c conda-forge ${p}
    done
    echo "Uninstalling the following packages and re-installing them in 'develop' mode: ${develop_pkgs[@]}"
    for p in ${develop_pkgs[@]}; do
        rm -rf ${CONDA_PREFIX}/lib/python${PYTHON_VERSION}/site-packages/${p}*
        cd ${WORKSPACE}/${p}
        python setup.py develop
    done
fi
| true |
e2c158313d7ad6004023b7c01c287fa99c2b7ddb
|
Shell
|
SpenserJ/dotfiles
|
/home/.config/zsh/.zshrc
|
UTF-8
| 949 | 2.765625 | 3 |
[
"MIT"
] |
permissive
|
# ~/.zshrc: loads all *.zsh config fragments via the znap plugin manager.
# Load order: path.zsh files first, then plugins.zsh, then everything
# else, and finally completion.zsh.
zstyle ':znap:*' repos-dir ~/.config/zsh/znap
source ~/.config/zsh/znap/zsh-snap/znap.zsh
# uncomment this and the last line for zprof info
# zmodload zsh/zprof
# All of our zsh config files
typeset -U config_files
config_files=($HOME/.config/zsh/**/*.zsh)
# zsh glob-qualifier filtering on the array:
# `${(M)config_files:#*/plugins.zsh}` will return files that match the expression
# `${config_files:#*/plugins.zsh}` will exclude files that match the expression
# Load the path files
for file in ${(M)config_files:#*/path.zsh}; do
  source "$file"
done
# Load the znap plugins
for file in ${(M)config_files:#*/plugins.zsh}; do
  source "$file"
done
# Load everything except the plugins, path, and completion files
# (and anything inside the znap/ checkout directory)
for file in ${config_files:#*/(plugins|znap/**/*|path|completion).zsh}; do
  source "$file"
done
# Load the completion config
for file in ${(M)config_files:#*/completion.zsh}; do
  source "$file"
done
# Clean up any variables that we have set
unset config_files
# zprof
| true |
2962fa678e78abbe43afb1f6dea39c36680b55c1
|
Shell
|
agomezl/.conf
|
/scripts/emacs.sh
|
UTF-8
| 213 | 3.640625 | 4 |
[] |
no_license
|
#!/bin/bash
# Install emacs (Fedora/dnf) and symlink this repository's config
# directory (the parent of this script's directory) to ~/.emacs.

# Resolve the repository root relative to this script's location.
conf_dir=$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)

sudo dnf -y install emacs

# A symlink means a previous run already wired things up; bail out.
if [ -h ~/.emacs ]; then
    echo ".emacs is already in place"
    exit 1
fi

# Replace any plain file or directory left over, then link the config in.
rm -fr ~/.emacs
ln -s "$conf_dir" ~/.emacs
| true |
10d3d060825f982ba6402f917d06521ed5db76fc
|
Shell
|
jasonshih/antikc
|
/bootstrap/scripts/openssl
|
UTF-8
| 870 | 3.5625 | 4 |
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Bootstrap recipe: download, verify, build and install a 32-bit OpenSSL
# into $PREFIX32.
# NOTE(review): this file is sourced by the bootstrap driver, not executed
# directly -- it uses 'return' at top level and relies on the 'openssl'
# array plus helpers (download, check, unpack, startBuild, waitUser, ...)
# defined by the caller; confirm against bootstrap/scripts machinery.
NAME=${openssl[0]}     # package name
PROGRAM=${openssl[3]}  # unpacked source directory name
FILE=${openssl[2]}     # downloaded archive file name
EXT=${openssl[4]}      # archive extension (drives unpack)
DLINK=${openssl[1]}    # download URL
MD5=${openssl[5]}      # expected archive checksum
# Skip the build if the install prefix already exists, unless REBUILD_ALL.
if [ $REBUILD_ALL ]; then
echo Building $PREFIX32/$NAME because REBUILD_ALL=$REBUILD_ALL
elif [ -e $PREFIX32/$NAME ]; then
echo Skipping $PREFIX32/$NAME because we already have it
return
else
echo Building $PREFIX32/$NAME because dont have it yet
fi
download ${DLINK}
waitUser
check ${FILE} ${MD5}
waitUser
unpack ${EXT}
waitUser
startBuild
cd $BUILD_DIR/$PROGRAM
# Configure/build/test/install as a 32-bit target; the whole group is
# tee'd into a per-package build log.
{
setarch i386 ./config -m32 --prefix=$PREFIX32 shared zlib-dynamic $LDFLAGS $CFLAGS || exit 1
StartMake
waitUser
make || exit 1
waitUser
c_MakeTest check || exit 1
StartMakeInstall
waitUser
make install || exit 1
} 2>&1 | tee $BUILD_DIR/LOG_$PROGRAM.log
# $PIPESTATUS without an index is the first pipeline stage (the build
# group), which is the status we care about rather than tee's.
if [ $PIPESTATUS -ne 0 ]
then
echo -e "$L_FAIL_BUIL"
exit 1
fi
waitUser
cd $ROOT
| true |
fa5ac43e7877dd7fd5066e0851e8c5bf6af441bc
|
Shell
|
Myullnir/Aprendiendo-Bash
|
/Tateti.sh
|
UTF-8
| 5,323 | 3.625 | 4 |
[] |
no_license
|
#!/bin/bash
# Interactive tic-tac-toe (ta-te-ti): human player (always X) against a
# machine that picks random empty cells (O). Board cells live in the
# dynamically-named variables P1..P9; the external helpers Vexe.sh and
# Gexe.sh (re)draw the board and evaluate the win conditions, reading the
# exported P* variables. A win writes its message to the file Ganar.vic.
echo Juguemos un Tateti
# A coin flip decides who starts. The human player is always the X.
moneda=$(( $RANDOM % 2 ))
echo Tiramos una moneda para decidir quien empieza
echo "Elegí, ¿Cara o Cruz?"
select resultado in Cara Cruz
do
if [ $resultado = Cara ]
then
resultado=0
if [ $resultado -eq $moneda ]
then
echo Comienza el jugador
Turno=X
break
else
echo Comienza la máquina
Turno=O
break
fi
elif [ $resultado = Cruz ]
then
resultado=1
if [ $resultado -eq $moneda ]
then
echo Comienza el jugador
Turno=X
break
else
echo Comienza la máquina
Turno=O
break
fi
fi
done
##################################################################
# Build the list of cell-variable names (P1..P9) used for the board.
for i in {1..9}
do
Lista[$i]=P$i
done
# If a win-condition file is left over from a previous game, delete it
# so we can play.
if [ -s Ganar.vic ]
then
rm Ganar.vic
fi
##################################################################
# If the machine goes first, it makes one random opening move.
if [ $Turno = O ]
then
j=0
PosibleJugada=( )
# Collect the names of all still-empty cells (indirect expansion).
for i in {1..9}
do
if [ -z ${!Lista[$i]} ]
then
PosibleJugada[$j]=${Lista[$i]}
((j++))
fi
done
largo=${#PosibleJugada[*]}
jugadarand=$(( $RANDOM % $largo))
echo Debería marcar la casilla ${PosibleJugada[$jugadarand]}
echo Mis posibles jugadas son ${PosibleJugada[*]}
echo Los índices de mis jugadas son ${!PosibleJugada[*]}
echo Mi elección es la componente del vector $jugadarand
case ${PosibleJugada[$jugadarand]} in
P1)
P1=O
echo La casilla P1 tiene un $P1
;;
P2)
P2=O
echo La casilla P2 tiene un $P2
;;
P3)
P3=O
echo La casilla P3 tiene un $P3
;;
P4)
P4=O
echo La casilla P4 tiene un $P4
;;
P5)
P5=O
echo La casilla P5 tiene un $P5
;;
P6)
P6=O
echo La casilla P6 tiene un $P6
;;
P7)
P7=O
echo La casilla P7 tiene un $P7
;;
P8)
P8=O
echo La casilla P8 tiene un $P8
;;
P9)
P9=O
echo La casilla P9 tiene un $P9
;;
esac
Turno=X
fi
echo
# Draw the board; ${Pn-n} shows the cell number while the cell is empty.
echo " ${P1-1} | ${P2-2} | ${P3-3} "
echo ------------------------------------------------------
echo " ${P4-4} | ${P5-5} | ${P6-6} "
echo ------------------------------------------------------
echo " ${P7-7} | ${P8-8} | ${P9-9} "
##################################################################
# Main game loop: player picks a cell (or 'Salida' to quit), then the
# machine answers; the board and win check run after each move.
Posiciones="1 2 3 4 5 6 7 8 9 Salida"
PS3="Elegí que posición marcar: "
select jugada in $Posiciones
do
if [ $Turno = X ]
then
if [ $jugada = Salida ]
then
echo Termino en empate
break
fi
# Mark the chosen cell only if it is empty, then hand the turn over.
case $jugada in
1)
if [ -z $P1 ]
then
P1=$Turno
if [ $Turno = X ]
then
Turno=O
else
Turno=X
fi
fi
;;
2)
if [ -z $P2 ]
then
P2=$Turno
if [ $Turno = X ]
then
Turno=O
else
Turno=X
fi
fi
;;
3)
if [ -z $P3 ]
then
P3=$Turno
if [ $Turno = X ]
then
Turno=O
else
Turno=X
fi
fi
;;
4)
if [ -z $P4 ]
then
P4=$Turno
if [ $Turno = X ]
then
Turno=O
else
Turno=X
fi
fi
;;
5)
if [ -z $P5 ]
then
P5=$Turno
if [ $Turno = X ]
then
Turno=O
else
Turno=X
fi
fi
;;
6)
if [ -z $P6 ]
then
P6=$Turno
if [ $Turno = X ]
then
Turno=O
else
Turno=X
fi
fi
;;
7)
if [ -z $P7 ]
then
P7=$Turno
if [ $Turno = X ]
then
Turno=O
else
Turno=X
fi
fi
;;
8)
if [ -z $P8 ]
then
P8=$Turno
if [ $Turno = X ]
then
Turno=O
else
Turno=X
fi
fi
;;
9)
if [ -z $P9 ]
then
P9=$Turno
if [ $Turno = X ]
then
Turno=O
else
Turno=X
fi
fi
;;
esac
fi
# Show the board after the player's move.
export ${!P*}
./Vexe.sh
# Check whether the player has already won.
export ${!P*}
export Turno
./Gexe.sh
if [ -s Ganar.vic ]
then
cat Ganar.vic
break
fi
# Machine's move: pick a random empty cell.
if [ $Turno = O ]
then
j=0
PosibleJugada=( )
for i in {1..9}
do
if [ -z ${!Lista[$i]} ]
then
PosibleJugada[$j]=${Lista[$i]}
((j++))
fi
done
largo=${#PosibleJugada[*]}
jugadarand=$(( $RANDOM % $largo))
# echo Debería marcar la casilla ${PosibleJugada[$jugadarand]}
# echo Mis posibles jugadas son ${PosibleJugada[*]}
# echo Mi elección es la componente del vector $jugadarand
case ${PosibleJugada[$jugadarand]} in
P1)
P1=O
echo La casilla P1 tiene un $P1
;;
P2)
P2=O
echo La casilla P2 tiene un $P2
;;
P3)
P3=O
echo La casilla P3 tiene un $P3
;;
P4)
P4=O
echo La casilla P4 tiene un $P4
;;
P5)
P5=O
echo La casilla P5 tiene un $P5
;;
P6)
P6=O
echo La casilla P6 tiene un $P6
;;
P7)
P7=O
echo La casilla P7 tiene un $P7
;;
P8)
P8=O
echo La casilla P8 tiene un $P8
;;
P9)
P9=O
echo La casilla P9 tiene un $P9
;;
esac
Turno=X
fi
# These two lines call a wrapper program that runs the board-display
# program. An intermediate program is used because the files always have
# to be "renamed" before use, and it makes passing the variables easier.
export ${!P*}
./Vexe.sh
# Evaluate the win conditions.
export ${!P*}
export Turno
./Gexe.sh
if [ -s Ganar.vic ]
then
cat Ganar.vic
break
fi
done
| true |
0abafea17069f5cbd40fae5b2cca9623167edb33
|
Shell
|
skingdev/.dotfiles
|
/.bash_profile
|
UTF-8
| 524 | 2.71875 | 3 |
[] |
no_license
|
# ~/.bash_profile: chain-load .bashrc/.profile, then set up RVM, nvm,
# rebar/Erlang and iTerm2 shell integration.
if [ -f ~/.bashrc ]; then
source ~/.bashrc
fi
[[ -s "$HOME/.profile" ]] && source "$HOME/.profile" # Load the default .profile
[[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm" # Load RVM into a shell session *as a function*
export NVM_DIR="/Users/sking/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && . "$NVM_DIR/nvm.sh" # This loads nvm
# NOTE(review): '~/bin' is inside double quotes, so tilde expansion does
# NOT happen -- PATH ends with a literal '~/bin' entry; probably intended
# to be "$HOME/bin".
export PATH="/Users/sking/rebar:$PATH:~/bin"
. ~/erlang/18.2/activate
test -e "${HOME}/.iterm2_shell_integration.bash" && source "${HOME}/.iterm2_shell_integration.bash"
| true |
aa28de43826d2c95fad74d3a5d45bf04398fc074
|
Shell
|
Keirua/mazes-py
|
/gen_pokemon_mazes.sh
|
UTF-8
| 288 | 2.875 | 3 |
[] |
no_license
|
#!/bin/bash
# Generate a maze mask for each of the selected Pokémon sprite images
# by running mask_from_img.py (inside the poetry venv) once per file.

pokemon_sprites=(
  "003.png"
  "006.png"
  "009.png"
  "025.png"
  "151.png"
  "813.png"
  "831.png"
)

for sprite in "${pokemon_sprites[@]}"; do
  poetry run python mask_from_img.py "$sprite"
done
| true |
7c8e0276e1566a3774f37a1f9ef9b2b75a3b3b00
|
Shell
|
longj6306/hello-world
|
/update-binary
|
UTF-8
| 3,316 | 2.875 | 3 |
[] |
no_license
|
#!/sbin/sh
# Recovery-side installer: stages SuperSU's su binaries, daemon hooks and
# SELinux contexts onto /system from payload files previously placed in
# /cache/recovery. Runs under the recovery ramdisk (busybox paths).

# set_perm UID GID MODE PATH: set ownership (both legacy '.' and ':'
# chown syntaxes, for old toolboxes) and permissions.
set_perm() {
chown $1.$2 $4
chown $1:$2 $4
chmod $3 $4
}
# ch_con PATH: label PATH as a system file (tries toolbox chcon first,
# then standalone chcon -- whichever exists on this device).
ch_con() {
/system/bin/toolbox chcon u:object_r:system_file:s0 $1
chcon u:object_r:system_file:s0 $1
}
# ch_con_ext PATH CONTEXT: like ch_con but with an explicit context.
ch_con_ext() {
/system/bin/toolbox chcon $2 $1
chcon $2 $1
}
/sbin/busybox mount /system
/sbin/busybox mount -o rw,remount /system /system
# Android SDK level, carved out of the build.prop line by byte offset
# (skip=21 lands right after "ro.build.version.sdk=").
API=$(cat /system/build.prop | grep ro.build.version.sdk= | dd bs=1 skip=21 count=2)
SUMOD=06755
SUGOTE=false
MKSH=/system/bin/mksh
# On API > 17, su drops the setuid bit and uses the sugote/daemon scheme.
if [ "$API" -eq "$API" ]; then
if [ "$API" -gt "17" ]; then
SUMOD=0755
SUGOTE=true
fi
fi
if [ ! -f $MKSH ]; then
MKSH=/system/bin/sh
fi
# Remove any previous su installation; clear immutable bits first so the
# old files can actually be deleted (chattr and chattr.pie variants).
/sbin/busybox rm /system/bin/su
/sbin/busybox rm /system/xbin/su
/sbin/busybox rm /system/xbin/sugote
/sbin/busybox rm /system/xbin/daemonsu
/cache/recovery/chattr -i /system/xbin/.su
/cache/recovery/chattr.pie -i /system/xbin/.su
/cache/recovery/chattr -i /system/bin/.ext/.su
/cache/recovery/chattr.pie -i /system/bin/.ext/.su
/cache/recovery/chattr -i /system/xbin/daemonsu
/cache/recovery/chattr.pie -i /system/xbin/daemonsu
/cache/recovery/chattr -i /system/etc/install-recovery.sh
/cache/recovery/chattr.pie -i /system/etc/install-recovery.sh
/sbin/busybox rm /system/bin/.ext/.su
/sbin/busybox rm /system/bin/install-recovery.sh
# Stage the new binaries from the /cache/recovery payload.
/sbin/busybox mkdir /system/bin/.ext
/sbin/busybox chown 0.0 /system/bin/.ext
/sbin/busybox chmod 0777 /system/bin/.ext
/sbin/busybox cat /cache/recovery/su > /system/xbin/su
if ($SUGOTE); then
/sbin/busybox cat /cache/recovery/su > /system/xbin/sugote
/sbin/busybox cat /cache/recovery/mksh > /system/xbin/sugote-mksh
fi
/sbin/busybox cat /cache/recovery/su > /system/bin/.ext/.su
/sbin/busybox cat /cache/recovery/su > /system/xbin/daemonsu
/sbin/busybox cat /cache/recovery/install-recovery.sh > /system/etc/install-recovery.sh
/sbin/busybox ln -s /system/etc/install-recovery.sh /system/bin/install-recovery.sh
/sbin/busybox cat /cache/recovery/99SuperSUDaemon > /system/etc/init.d/99SuperSUDaemon
/sbin/busybox cat /cache/recovery/.installed_su_daemon > /system/etc/.installed_su_daemon
# Permissions: su keeps the setuid mode only on API <= 17 (see SUMOD).
set_perm 0 0 0777 /system/bin/.ext
set_perm 0 0 $SUMOD /system/bin/.ext/.su
set_perm 0 0 $SUMOD /system/xbin/su
if ($SUGOTE); then
set_perm 0 0 0755 /system/xbin/sugote
set_perm 0 0 0755 /system/xbin/sugote-mksh
fi
set_perm 0 0 0755 /system/xbin/daemonsu
set_perm 0 0 0755 /system/etc/install-recovery.sh
set_perm 0 0 0755 /system/etc/init.d/99SuperSUDaemon
set_perm 0 0 0644 /system/etc/.installed_su_daemon
set_perm 0 0 0644 /system/app/Superuser.apk
# SELinux labels; sugote gets the zygote context so it may spawn su.
ch_con /system/bin/.ext/.su
ch_con /system/xbin/su
if ($SUGOTE); then
ch_con_ext /system/xbin/sugote u:object_r:zygote_exec:s0
ch_con /system/xbin/sugote-mksh
fi
ch_con /system/xbin/daemonsu
ch_con /system/etc/install-recovery.sh
ch_con /system/etc/init.d/99SuperSUDaemon
ch_con /system/etc/.installed_su_daemon
ch_con /system/app/Superuser.apk
ch_con /system/app/Maps.apk
ch_con /system/app/GMS_Maps.apk
ch_con /system/app/YouTube.apk
# Let su finish its own post-install steps, then clean up the payload.
/system/xbin/su --install
/sbin/busybox rm /cache/recovery/chattr
/sbin/busybox rm /cache/recovery/chattr.pie
/sbin/busybox rm /cache/recovery/su
/sbin/busybox rm /cache/recovery/mksh
/sbin/busybox rm /cache/recovery/install-recovery.sh
/sbin/busybox rm /cache/recovery/99SuperSUDaemon
/sbin/busybox rm /cache/recovery/.installed_su_daemon
/sbin/busybox sleep 5s
exit 0
| true |
cc464f9d31bb7bcaf73a023b2a7a1ea398f2a7c6
|
Shell
|
luiarthur/cytof5
|
/sims/sim_study/old/sim4.sh
|
UTF-8
| 1,811 | 3.375 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Sweep of MCMC simulations over (seed, N_factor, betaPriorScale, K_MCMC).
# Each combination runs sim.jl in the background via GNU parallel's 'sem'
# (bounded by MAX_CORES), syncs its results to S3, then deletes the local
# output to save cluster disk space.
# Maximum number of cores to use
MAX_CORES=36
# AWS Bucket to store results
AWS_BUCKET="s3://cytof-vary-kmcmc-n1000"
# STAGGER_TIME in seconds. To avoid mass dumping to disk simultaneously.
STAGGER_TIME=100
# Experiment settings
MCMC_ITER=1000
BURN=10000
I=3
J=32
N_factor="100 1000 10000"
K=8
L=4
K_MCMC="6 7 8 9 10"
L_MCMC=5
betaPriorScale="0.01 0.1 5.0"
betaTunerInit="0.1"
RESULTS_DIR="results/sim4/"
SEED="98 64"
# simulation number, just for book keeping. Ignore this.
simNumber=0
for seed in $SEED; do
for nFac in $N_factor; do
for bs in $betaPriorScale; do
for k_mcmc in $K_MCMC; do
# Simulation number
simNumber=$((simNumber + 1))
# Experiment name
exp_name="I${I}_J${J}_N_factor${nFac}_K${K}_L${L}_K_MCMC${k_mcmc}_L_MCMC${L_MCMC}_betaPriorScale${bs}_betaTunerInit${betaTunerInit}_SEED${seed}"
# Output directory
outdir="$RESULTS_DIR/$exp_name/"
mkdir -p $outdir
# Julia Command to run (backslash-newlines inside the quotes are
# line continuations, so $jlCmd is one long single-line command)
jlCmd="julia sim.jl --I=${I} --J=${J} --N_factor=${nFac} --K=${K} \
--L=${L} --K_MCMC=${k_mcmc} --L_MCMC=${L_MCMC} --b0PriorSd=${bs} \
--b1PriorScale=${bs} --SEED=${seed} --RESULTS_DIR=$RESULTS_DIR \
--EXP_NAME=$exp_name --MCMC_ITER=${MCMC_ITER} --BURN=${BURN} \
--b0TunerInit=${betaTunerInit} --b1TunerInit=${betaTunerInit}"
# Sync results to S3
syncToS3="aws s3 sync $outdir $AWS_BUCKET/$exp_name --exclude '*.nfs*'"
# Remove output files to save space on cluster
rmOutput="rm -rf ${outdir}"
cmd="$jlCmd > ${outdir}/log.txt && $syncToS3 && $rmOutput"
# NOTE(review): $cmd is deliberately unquoted so sem receives the words
# of the command; sem re-joins and runs them through a shell, which is
# what makes the embedded '>' and '&&' take effect -- confirm against
# GNU parallel/sem semantics before changing the quoting here.
sem -j $MAX_CORES $cmd
echo $cmd
echo "Results for simulation $simNumber -> $outdir"
sleep $STAGGER_TIME
done
done
done
done
| true |
352f46c4a63138a9c95587f079c5b21e617de101
|
Shell
|
lowrydale/sonicbase
|
/scripts/get-distribution
|
UTF-8
| 330 | 3.5625 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Record the OS distribution in $SONIC_BASE_HOME/tmp/distribution.
#
# Usage: get-distribution <sonicbase-home>
#   A relative <sonicbase-home> is resolved against $HOME.
#
# On macOS (sw_vers present) the sw_vers output is captured; otherwise
# the contents of /etc/*-release are used.

export SONIC_BASE_HOME=$1

# Resolve a relative path against $HOME. (The original tested ${1:0:1}
# unquoted, which produced a test error whenever no argument was given.)
if [ "${SONIC_BASE_HOME:0:1}" != "/" ]
then
export SONIC_BASE_HOME=$HOME/$SONIC_BASE_HOME
fi

mkdir -p "$SONIC_BASE_HOME/tmp"

# 'command -v' is the portable replacement for 'which'; empty on Linux.
export mac=$(command -v sw_vers)
echo "$mac"

if [ "$mac" == '' ]; then
cat /etc/*-release > "$SONIC_BASE_HOME/tmp/distribution"
else
sw_vers > "$SONIC_BASE_HOME/tmp/distribution"
fi
| true |
b530cb069939e627326e379db6ad431e83938c85
|
Shell
|
whigg/scripts
|
/plot/get_coordinates_CRD_HLM
|
UTF-8
| 2,178 | 3.046875 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# For a GNSS site ($1), compare each session's adjusted coordinates
# (Results_F1_$Case/F1_<session>.CRD) against the a-priori coordinates in
# ALP_NET.CRD, and combine them with the Helmert residuals from
# Resudials_$Case/HLM<session>.OUT. Writes a per-site table to
# CRD/$SITE.CRD plus per-component time series (dX/dY/dZ/North/East/Up).
SITE=$1
Case=5
# A-priori XYZ (columns 4-6 of the site's line in ALP_NET.CRD).
CRD_Apriory=($(grep "$SITE" ALP_NET.CRD | awk '{print $4, $5, $6}'))
X_0=${CRD_Apriory[0]}
Y_0=${CRD_Apriory[1]}
Z_0=${CRD_Apriory[2]}
echo "Coorditates at Epoch 2005-01-01"
echo "$X_0 $Y_0 $Z_0"
echo " Epoch Coordinates Translation Resudials "
echo "YYYY-MM-DD X,m Y,m Z,m dX,mm dY,mm dZ,mm N,mm E,mm U,mm"
#echo " Epoch Coordinates Translation Resudials " > CRD/$SITE.CRD
#echo "YYYY-MM-DD X,m Y,m Z,m dX,mm dY,mm dZ,mm N,mm E,mm U,mm" >> CRD/$SITE.CRD
# 2004-01-02 4231162.589480 -332746.701170 4745130.921950 10.48 -19.17 -7.05 -15.15 10.25 -19.28
# Session IDs: characters 16-21 of the matching HLM output lines.
grep $SITE Resudials_$Case/HLM*.OUT | cut -c16-21 > SessionList
# Start the output files from scratch.
# NOTE(review): plain 'rm' (no -f) prints an error and continues if a
# file is missing -- harmless here, but noisy on a first run.
rm CRD/$SITE.CRD
rm dX
rm dY
rm dZ
rm North
rm East
rm Up
while read Session
do {
Epoch=$( grep "EPOCH" Results_F1_$Case/F1_"$Session".CRD | awk '{print $6}')
CRD_XYZ=($(grep "$SITE" Results_F1_$Case/F1_"$Session".CRD | awk '{print $4, $5, $6}'))
Res_NEU=($(grep "$SITE" Resudials_$Case/HLM"$Session".OUT | awk '{print $10, $11, $12}'))
X=${CRD_XYZ[0]}
Y=${CRD_XYZ[1]}
Z=${CRD_XYZ[2]}
# Translations in millimetres relative to the a-priori coordinates (bc
# handles the arbitrary-precision decimal arithmetic).
dX=$(echo $X*1000 - $X_0*1000 | bc)
dY=$(echo $Y*1000 - $Y_0*1000 | bc)
dZ=$(echo $Z*1000 - $Z_0*1000 | bc)
N=${Res_NEU[0]}
E=${Res_NEU[1]}
U=${Res_NEU[2]}
#echo "$Epoch $X $Y $Z $dX $dY $dZ $N $E $U" | awk '{printf("%10s %15.6f %15.6f %15.6f %8.2f %8.2f %8.2f %8.2f %8.2f %8.2f \n", $1, $2, $3, $4, $5, $6, $7, $8, $9, $10)}'
echo "$Epoch $X $Y $Z $dX $dY $dZ $N $E $U" | awk '{printf("%10s %15.6f %15.6f %15.6f %8.2f %8.2f %8.2f %8.2f %8.2f %8.2f \n", $1, $2, $3, $4, $5, $6, $7, $8, $9, $10)}' >> CRD/$SITE.CRD
echo "$Epoch $dX" >> dX
echo "$Epoch $dY" >> dY
echo "$Epoch $dZ" >> dZ
echo "$Epoch $N" >> North
echo "$Epoch $E" >> East
echo "$Epoch $U" >> Up
} done < SessionList
rm SessionList
#rm dX
#rm dY
#rm dZ
#rm North
#rm East
#rm Up
echo 'DONE'
| true |
6a2b7592a2fb3c21f894f1eb2f1220d9caacbcb0
|
Shell
|
fabi92/deeplearning
|
/cnn/environment.sh
|
UTF-8
| 245 | 2.71875 | 3 |
[] |
no_license
|
#!/bin/bash
# Environment setup for the CNN project: pin a UTF-8 locale and put the
# project's tool/base/layer/net packages on PYTHONPATH. The project root
# is resolved from this file's location relative to the current
# working directory (it is meant to be sourced from the repo).

script_dir=$(dirname "$BASH_SOURCE")
echo "$script_dir"

BASEDIR="$(pwd)/$script_dir"

export LC_ALL=en_US.UTF-8
export LANG=en_US.UTF-8
export PYTHONPATH="$BASEDIR/tools:$BASEDIR/base:$BASEDIR/base/layers:$BASEDIR/base/nets:$PYTHONPATH"
| true |
0725efdab82ad3baa2d93dc3112d147b8916ef36
|
Shell
|
mickdupreez/dots
|
/.local/bin/myscripts/bat
|
UTF-8
| 258 | 2.734375 | 3 |
[] |
no_license
|
#!/bin/bash
# Print the BAT0 battery status and charge percentage, e.g. "Charging 87%".

bat=/sys/class/power_supply/BAT0
status=$(cat "$bat/status")
charge_now=$(cat "$bat/charge_now")
charge_full=$(cat "$bat/charge_full")

# Compute the percentage as a float with awk, then strip the decimals.
pct=$(echo "$charge_now" "$charge_full" | awk '{ printf("%f\n", $1/$2 * 100) }')
echo "$status ${pct%.*}%"
| true |
f463f0cd371f452bda4f6a02df3e3d75cdadf66d
|
Shell
|
bartx84/launcher
|
/core/themes.sh
|
UTF-8
| 1,772 | 3.765625 | 4 |
[] |
no_license
|
#!/bin/bash
#universal and modular bash application launcher
#https://github.com/bartx84/launcher
#bartx [at] mail.com
#
#core library
#themes library
#import the global functions library
. $maindir/core/glb_functions.sh
function get_current_theme() {
# Resolve the active theme path into the global $current_theme.
# Reads the theme name from $theme_file; if the file is missing, empty,
# or names a theme that no longer exists under $themes_dir, resets
# current.conf to "base" and falls back to the base theme.
# NOTE(review): only the first whitespace-separated word of $theme_file
# is used (array assignment), and $my_theme_path is tested unquoted --
# theme names containing spaces would misbehave; confirm names are
# always single tokens.
mypth=$(pwd)
if [ -e "$theme_file" ]
then
my_theme_path=( `cat "$theme_file"`)
if [ $my_theme_path != "" ] && [ -e "$themes_dir/$my_theme_path" ]
then
current_theme=$(echo "$themes_dir/$my_theme_path")
else
cd $themes_dir
echo "base" > current.conf
cd $mypth
current_theme=$(echo "$themes_dir/base/")
fi
else
cd $themes_dir
echo "base" > current.conf
cd $mypth
current_theme=$(echo "$themes_dir/base/")
fi
}
function reload_current_theme() {
# Persist the global $current_theme into $themes_dir/current.conf.
# NOTE(review): get_current_theme sets $current_theme to a FULL path,
# while it expects current.conf to contain just a theme NAME (it prepends
# $themes_dir when reading) -- if $current_theme still holds the full
# path here, the next get_current_theme falls back to "base". Verify
# whether callers reassign $current_theme to a name before calling this.
mypth=$(pwd)
cd $themes_dir
echo $current_theme > current.conf
cd $mypth
}
function get_theme_name() {
# Print the last '/'-separated component of $1
# (e.g. a git clone URL -> the repository/theme name).
local -a parts
IFS='/' read -r -a parts <<< "$1"
echo "${parts[${#parts[@]}-1]}"
}
function download_theme() {
# Install a theme from a git URL ($1): clone it into $themes_dir, merge
# the base launcher commands into it, and make it the current theme.
themename=$(get_theme_name $1)
echo "installing $themename theme"
cd $themes_dir
git clone $1
merge_theme $themename
# Record the new theme name as the active one.
echo $themename > current.conf
echo "Theme $themename installed"
}
function merge_theme() {
# Copy the launcher's basic commands from the 'base' theme into theme $1.
# Must be run with $themes_dir as the current directory (callers cd first).
echo -e "${RED}installing launcher basic commands in $1 theme${NC}"
cp -r base/* $1
echo -e "${GREEN}Done${NC}"
}
function merge_all_themes() {
# Re-merge the base launcher commands into every installed theme
# (every directory under $themes_dir except 'base' itself).
echo -e "${RED}UPGRADE LAUNCHER BASIC COMMANNDS IN ALL INSTALLED THEMES${NC}"
cd $themes_dir
for i in $( ls -d */)
do
# Strip the trailing slash from the 'ls -d */' entry.
dirs=( `echo "$i" | tr '/' '\n'`)
dirname=${dirs[0]}
if [ $dirname != "base" ]
then
merge_theme $dirname
fi
done
echo -e "${GREEN}ALL INSTALLED THEMES UPGRADED${NC}"
echo -e ""
echo -e "${YELLOW}PRESS ENTER TO RETURN TO LAUNCHER${NC}"
}
| true |
a1bed11a7f412f3344322b9a33fff78293876ec5
|
Shell
|
Jekotia/.zsh
|
/bin/squash
|
UTF-8
| 3,874 | 4.0625 | 4 |
[] |
no_license
|
#! /bin/bash
# squash: build a squashfs image from a source image, block device, or
# path (see _usage). This preamble defines the working locations and
# verifies the script is run as root with the required tools installed.
mnt=/mnt/squash       # mount point for the source filesystem
tmp=/tmp/squash       # scratch dir (holds the fstab backup)
tmpTab=$tmp/fstab     # where the source's fstab is parked during the build
# check for root/sudo
if [[ ! `id -u` == "0" ]] ; then
echo "This must be run with root/sudo!"
exit 2
fi
# Count missing dependencies so both can be reported before bailing out.
fail=0
# check if squashfs-tools is installed
if ! which mksquashfs > /dev/null 2>&1 ; then
echo "Missing required package: squashfs-tools"
let "fail++"
fi
# check if kpartx is installed
if ! which kpartx > /dev/null 2>&1 ; then
echo "Missing required package: kpartx"
let "fail++"
fi
if [ $fail -gt 0 ] ; then
exit 1
fi
# Create the directories leading up to each given output file, if they
# don't already exist. (Only the parent dirs are created -- the actual
# 'touch' of the file is intentionally left out.)
function ptouch() {
    local out_path
    for out_path in "$@"; do
        mkdir -p -- "$(dirname -- "$out_path")"
    done
}
function _mksquash () {
# Build the squashfs image from the mounted source tree.
# Globals: $mnt (source mount), $destImg (output image path).
# Uses LZO compression and excludes kernel modules and a cached keymap.
# Returns 0 on success, 1 if mksquashfs fails.
ptouch $destImg
if mksquashfs $mnt $destImg -comp lzo -e lib/modules etc/console-setup/cached_UTF-8_del.kmap.gz ; then
return 0
else
return 1
fi
}
# Print the supported invocation forms to stdout.
function _usage () {
    printf 'Usage: %s -i source.img dest.img\n' "$0"
    printf ' %s -d /dev/sdb2 dest.img\n' "$0"
    printf ' %s -c /dev/sdb2\n' "$0"
}
# copies fstab to /tmp/squash, when working with a device as the source
# $1 = path of the fstab to back up. Also records that path in
# $tmpTab.path so _fstab_restore knows where to put it back.
# Returns 0 on success, 1 if the copy fails.
function _fstab_backup () {
if ! cp -a $1 $tmpTab ; then
echo "Failed to backup $1/etc/fstab!"
return 1
else
echo $1 > $tmpTab.path
return 0
fi
}
function _fstab_restore () {
    # Move the backed-up fstab (made by _fstab_backup) back to where it
    # came from, after an interactive confirmation.
    # Globals: $tmpTab (backup file), "$tmpTab.path" (origin path).
    # Returns: 0 on success, 1 if the move failed; exits the whole
    # script if the operator answers "no".
    restorePath=`cat $tmpTab.path`
    echo $restorePath
    # Interactive safety gate: loop until a clear yes/no answer.
    while true; do
        read -p "I'm going to move $tmpTab to $restorePath. Does this look correct?" yn
        case $yn in
            [Yy]* ) break;;
            [Nn]* ) exit;;
            * ) echo "Please answer yes or no.";;
        esac
    done
    if ! mv $tmpTab $restorePath ; then
        echo "Failed to restore $restorePath!"
        return 1
    else
        # Drop the origin marker so the -c recovery mode knows nothing
        # is pending anymore.
        rm -f $tmpTab.path
        return 0
    fi
}
if [ -e $tmpTab ] && [[ "$1" != "-c" ]] ; then
echo "WARNING: It appears this script did not finish last time it ran."
echo " This script will not continue until $tmpTab has been removed."
echo " If you use $0 -c, I will attempt to put $tmpTab back where I found it."
exit 5
fi
if ! mkdir -p $mnt ; then
echo "Could not create $mnt" ; exit 2
fi
if ! mkdir -p $tmp ; then
echo "Could not create $tmp" ; exit 3
fi
case $1 in
# squash -i input.img output.img
-i|image|img)
srcImg=$2
destImg=$3
if [ ! -e $srcImg ] ; then
echo "$srcImg does not exist" ; exit 10
fi
if ! kpartx -av $srcImg | grep loop0p2 ; then
echo "Failed to find loop0p2 for initial mount" ; exit 11
fi
if ! mount /dev/mapper/loop0p2 $mnt ; then
echo "Failed to mount /dev/mapper/loop0p2" ; exit 12
fi
if ! sed -i 's/^\/dev\/mmcblk/#\0/g' $mnt/etc/fstab ; then
echo "Failed to update $mnt/etc/fstab" ; exit 13
fi
_mksquash
umount $mnt
kpartx -d $srcImg
;;
# squash -d /mnt/input output.img
-d|dev|device)
srcDev=$2
destImg=$3
umount $srcDev > /dev/null 2>&1
if ! mount $srcDev $mnt ; then
echo "Could not mount $srcDev" ; exit 21
fi
if ! _fstab_backup $mnt/etc/fstab ; then
echo "Failed to backup $mnt/etc/fstab" ; exit 22
fi
if ! sed -i 's/^\/dev\/mmcblk/#\0/g' $mnt/etc/fstab ; then
echo "Failed to modify $mnt/etc/fstab"
_fstab_restore
exit 23
fi
if ! _mksquash ; then
_fstab_restore
exit 24
fi
_fstab_restore
umount $srcDev
;;
# squash -p /mnt/input/fs/root output.img
-p|path)
srcPath=$2
destImg=$3
if ! _fstab_backup $srcPath/etc/fstab ; then
echo "Failed to backup $srcPath/etc/fstab" ; exit 30
fi
if ! sed -i 's/^\/dev\/mmcblk/#\0/g' $srcPath/etc/fstab ; then
echo "Failed to modify $srcPath/etc/fstab"
_fstab_restore
exit 31
fi
if ! _mksquash ; then
_fstab_restore
exit 32
fi
_fstab_restore
;;
-c)
_fstab_restore
## fstab=$1/etc/fstab
## if [ -e $fstab ] ; then
## mv $tmpTab $fstab
## fi
;;
*)
_usage
exit
;;
esac
| true |
ff68ac5345f991587de27074e06a8c394927073e
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/theyoke-git/PKGBUILD
|
UTF-8
| 784 | 2.609375 | 3 |
[] |
no_license
|
# Maintainer: willemw <[email protected]>
_pkgname=theyoke
pkgname=$_pkgname-git
pkgver=r23.aa04319
pkgrel=1
pkgdesc="Ultra-simple, polite RSS aggregrator for the command line"
arch=('any')
url="http://github.com/mackers/theyoke/"
license=('GPL' 'PerlArtistic')
depends=('perl-libwww' 'perl-html-parser' 'perl-uri' 'perl-xml-feed' 'perl-digest-md5' 'perl-encode' 'perl-term-size' 'perl-html-format')
makedepends=('git')
conflicts=($_pkgname)
provides=($_pkgname)
source=($pkgname::git://github.com/mackers/theyoke.git)
md5sums=('SKIP')
pkgver() {
cd $pkgname
printf "r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
}
package() {
cd $pkgname
install -Dm755 scripts/theyoke "$pkgdir/usr/bin/theyoke"
install -Dm644 README "$pkgdir/usr/share/theyoke/README"
}
| true |
728e9798674f0ca26a192f81368f3226c2fbe93b
|
Shell
|
davidosomething/dotfiles
|
/bin/fzf-audio
|
UTF-8
| 305 | 2.921875 | 3 |
[] |
no_license
|
#!/usr/bin/env zsh
#
# fzf-audio - fzf interface for SwitchAudioSource
#
# Presents the output devices reported by switchaudio-osx in an fzf
# picker and switches the active macOS output to the selection.
__fzf-audio() {
  # -a lists all devices; keep only "(output)" entries, let the user
  # pick one, strip the trailing " (output)" annotation with cut, and
  # trim surrounding whitespace via xargs before handing the bare
  # device name to -s (switch).
  # NOTE(review): xargs would mangle a device name containing quotes —
  # assumed not to occur in practice.
  SwitchAudioSource -s \
    "$(SwitchAudioSource -a | grep "(output)" |\
    fzf-tmux --height=8 --cycle --no-mouse --no-multi |\
    cut -f1 -d'(' | xargs)"
}
# Only attempt the switch when the helper binary is actually installed.
command -v SwitchAudioSource >/dev/null && __fzf-audio
| true |
04500a7404c688570f1a42e8bd88ee638e5efdff
|
Shell
|
ltonetwork/lto-public-chain
|
/e2e/bin/run_public_node
|
UTF-8
| 399 | 2.953125 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Build the LTO public-node Docker image and start a disposable
# container for end-to-end tests (REST API exposed on port 6869,
# API key "open", custom/local network).
set -Cue -o pipefail
PROJECT_DIR="$(cd "$(dirname "${0}")/../.." ; pwd)" # Absolute path to project
(
    cd "$PROJECT_DIR"
    docker build . -t ltonetwork/public-node:dev
    # --rm: the container deletes itself when stopped, so repeated e2e
    # runs never collide with stale containers of the same name.
    docker run -d --rm -p 6869:6869 -e LTO_NETWORK=CUSTOM -e LTO_ENABLE_REST_API=true -e LTO_API_KEY=open --name=lto_public_node_e2e ltonetwork/public-node:dev
    # Give the node a moment to boot, then surface its startup log.
    sleep 5
    docker logs lto_public_node_e2e
)
| true |
c8b3afbfeceaff2a66186076a81536ecd41de895
|
Shell
|
runngezhang/sword_offer
|
/leetcode/python/331. 验证二叉树的前序序列化.py
|
UTF-8
| 1,622 | 2.859375 | 3 |
[] |
no_license
|
#!/bin/bash python
"""
331. 验证二叉树的前序序列化
思路:
递归或者堆栈
"""
class Solution(object):
    def isValidSerialization(self, preorder):
        """
        :type preorder: str
        :rtype: bool
        Mimic rebuilding the tree: the serialization is valid iff
        vec[i:] can be consumed as one binary tree AND the cursor i
        finally reaches len(vec).
        NOTE: this definition is shadowed by the second
        isValidSerialization below; it is kept for reference only.
        """
        i, vec = 0, preorder.split(",")
        # ====================================
        def dfs(vec):
            """
            Can vec[i:] be restored to a binary tree in preorder?
            Advances the enclosing cursor i as tokens are consumed.
            """
            nonlocal i
            if i >= len(vec):  # ran past the end of the token list
                return False
            elif vec[i] == "#":  # in bounds; current node is the null marker
                i += 1
                return True
            else:  # non-null node: consume it, then both subtrees
                i += 1
                return dfs(vec) and dfs(vec)
        # ====================================
        return dfs(vec) and i >= len(vec)

    def isValidSerialization(self, preorder):
        """
        :type preorder: str
        :rtype: bool
        Stack-based approach: whenever the top of the stack looks like
        "x,#,#" (a node with two null children), collapse it to "#" —
        i.e. replace the fully-resolved subtree with a single null
        node. The serialization is valid iff exactly one "#" remains.
        """
        stack, vec = [], preorder.split(",")
        for node in vec:
            stack.append(node)
            while len(stack) >= 3 and stack[-1] == stack[-2] == "#" and stack[-3] != "#":
                stack.pop(), stack.pop(), stack.pop()
                stack.append("#")
        return len(stack) == 1 and stack[0] == "#"
| true |
5a8cd7f968386bdf407b95b045f79aad773e1f40
|
Shell
|
trandoan/appA
|
/bin/appA
|
UTF-8
| 209 | 2.953125 | 3 |
[] |
no_license
|
#!/bin/sh
# Launcher for appA: prefer $JAVA_HOME/bin/java, else fall back to PATH.
# Use JAVA_HOME if set, otherwise look for java in PATH
if [ -n "${JAVA_HOME:-}" ] && [ -x "$JAVA_HOME/bin/java" ]; then
    # The explicit -n check fixes the old unquoted test, which with an
    # unset JAVA_HOME degenerated to `[ -x /bin/java ]` and could pick
    # a wrong binary; quoting also fixes SC2086.
    JAVA="$JAVA_HOME/bin/java"
else
    # 'command -v' is the portable replacement for 'which' (SC2230).
    JAVA=$(command -v java)
fi
JAVA_OPTS=
# $JAVA_OPTS is left unquoted on purpose: it may carry several options
# that must undergo word-splitting.
"$JAVA" ${JAVA_OPTS} -jar appA-*.jar
| true |
25d4a7d746e922095897f5d577f49423451bb051
|
Shell
|
coronalabs/corona
|
/tools/build_output.sh
|
UTF-8
| 943 | 3.78125 | 4 |
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
#!/bin/bash
# builds output.zip for iPhone
# $1 = app name
# $2 = path to template.app
# $3 = path to tmp build dir
# $4 = path to certificate file
# $5 = path to input zip file // main.lu file (or src file used to create message digest)
# $6 = "little" or "big" (endianness of file)
# NOTE(review): $4 and $6 are documented but never used below — confirm
# whether they are consumed by a caller or are vestigial.
# assign tmp dir to $dst
dst="$3"
path=`dirname $0`
#uuid=`uuidgen`
#dst="$3/$uuid"
#mkdir $dst
appName="$1"
# Bundle directory name is the app name with every space removed.
appNameWithoutSpaces=`echo -n "$1" | sed "s/ //g"`
appDir="$appNameWithoutSpaces.app"
appPath="$dst/$appDir"
#echo "Using app dir: $appPath"
# copy template.app into $dst, renaming it to $appDir
cp -pR $2 "$appPath"
# Rename the generic "template" executable inside the bundle to the
# app's own name.
exePath="$appPath/$appNameWithoutSpaces"
echo "$appPath/template $exePath"
mv "$appPath/template" "$exePath"
# Unpack input zip file into the app bundle
unzip -o $5 -d $appPath
# Pack all compiled Lua (.lu) files into resource.car, then remove the
# loose .lu files from the bundle.
$path/car "$appPath/resource.car" "$appPath"/*.lu
rm -f "$appPath"/*.lu
# create output.zip in $dst
cd $dst
zip -rym output.zip "$appDir"
cd -
| true |
2599d0922397d8c736e71340859fc4b4bb798306
|
Shell
|
Krettis/dotfiles
|
/install/.apps
|
UTF-8
| 1,294 | 3.25 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
source ${DOTFILE_DIRECTORY}/log.sh
msg_run "Installing Brew Cask Apps"
# homebrew-cask
if [ ! -d "/usr/local/Library/Taps/caskroom/homebrew-cask" ]
then
brew tap phinze/homebrew-cask
brew install brew-cask
msg_ok "homebrew-cask is ready!"
else
msg_ok "Setup homebrew-cask"
fi
function cask_install(){
    # Install one Homebrew Cask application into /Applications and log it.
    # Arguments: $1 - cask token, $2 - display name for the success line.
    # 'local' fixes the old version, which leaked appUri/appTitle into
    # the global scope on every call.
    local appUri="$1"
    local appTitle="$2"
    # NOTE(review): install errors are deliberately silenced
    # (2>/dev/null), so msg_ok reports success even when the cask
    # failed to install — confirm this best-effort behavior is intended.
    brew cask install "$appUri" --appdir=/Applications 2> /dev/null
    msg_ok "$appTitle"
}
## Browsers
cask_install "firefox" "FireFox"
cask_install "google-chrome" "Google Chrome"
cask_install "chromium" "Chromium"
## Media
cask_install "vlc" "Vlc Media Player"
cask_install "spotify" "Spotify"
## Tools
cask_install "alfred" "Alfred"
cask_install "iterm2" "iTerm2"
## Development
cask_install "vagrant" "Vagrant"
cask_install "virtualbox" "VirtualBox"
cask_install "dockertoolbox" "Docker Toolbox"
cask_install "minikube" "Kubernetes minikube"
cask_install "sourcetree" "Sourcetree"
cask_install "dash" "Dash"
cask_install "sublime-text" "Sublime Text"
## Other
cask_install "transmit" "Transmit"
cask_install "robomongo" "Robomongo"
cask_install "mysqlworkbench" "Mysql Workbench"
cask_install "google-drive" "Google Drive"
cask_install "keka" "Keka"
cask_install "icons8" "icons8"
cask_install "caffeine" "Caffeine"
msg_ok "Done!"
| true |
7b3e2a20e124cabf32e2eb84df2adb9d2c38b9ed
|
Shell
|
seroperson/dotfiles
|
/.config/zsh/.zshrc
|
UTF-8
| 2,069 | 3.109375 | 3 |
[] |
no_license
|
#!/bin/zsh
export PATH="$PATH:$ZDOTDIR/bin"
# {{{ zgen configuration
local zgen_file=$ZDOTDIR/zgen/zgen.zsh
if ! [ -e $zgen_file ]; then
git clone https://github.com/tarjoilija/zgen.git $ZDOTDIR/zgen/
fi
# {{{ z configuration
_Z_DATA=$XDG_DATA_HOME/zsh_z
_Z_CMD=j
# }}}
source $zgen_file
if ! zgen saved; then
# change directory to git repository root directory
zgen load mollifier/cd-gitroot
# the best theme ever
zgen load subnixr/minimal
# jumping around (alternative to fasd)
zgen load rupa/z
# zsh anything.el-like widget
zgen load zsh-users/zaw
zgen save
fi
# {{{ theme configuration
MNML_MAGICENTER=(mnml_me_git)
MNML_USER_CHAR="$"
MNML_INSERT_CHAR=">"
MNML_NORMAL_CHAR="-"
MNML_ELLIPSIS_CHAR="..."
MNML_RPROMPT=('mnml_cwd 2 256' mnml_git git_count_modified_files)
function git_count_modified_files() {
    # Right-prompt segment: emit "+added -deleted" line totals for the
    # working tree, colored with the theme's OK/ERR colors, or nothing
    # when not inside a git repository / nothing is modified.
    local has_git="$(git rev-parse --abbrev-ref HEAD 2> /dev/null)"
    if [ -n "$has_git" ]; then
        local count=$(git diff --numstat)
        if [ "x$count" != "x" ]; then
            # Sum the added/deleted columns across all changed files and
            # print them wrapped in zsh prompt escapes (%{...%}) so the
            # ANSI color codes don't count toward the prompt width.
            echo $count | awk "{add+=\$1; del+=\$2} END {printf \"%%{\\033[3${MNML_OK_COLOR}m%%}+%s %%{\\033[3${MNML_ERR_COLOR}m%%}-%s%%{\\033[0m%%}\", add, del}"
        fi
    fi
}
# }}}
# }}}
# {{{ including sources
case `uname` in
'Linux') OS='lin' ;;
'Darwin') OS='osx' ;;
'FreeBSD') OS='bsd' ;;
*) OS='unk' ;;
esac
# Source a configuration fragment from $ZDOTDIR when it exists; an
# optional second argument names a follow-up fragment loaded the same way
# (used for the OS-specific "*.{lin,osx,bsd}.zsh" companions).
include_source() {
  local cfg="$ZDOTDIR/$1"
  [ -f "$cfg" ] && source "$cfg"
  [ "x$2" != "x" ] && include_source $2
}
include_source "func.zsh" "func.$OS.zsh"
include_source "opt.zsh" "opt.$OS.zsh"
include_source "zstyle.zsh" "zstyle.$OS.zsh"
include_source "alias.zsh" "alias.$OS.zsh"
include_source "bindkey.zsh" "bindkey.$OS.zsh"
# {{{ including soft-based configurations
is_base16_shell_available && source "$ZDOTDIR/soft/base16.zsh"
is_tmux_enabled && source "$ZDOTDIR/soft/tmux.zsh"
# }}}
include_source "machine-based.zsh"
# }}}
# {{{ history
export HISTFILE="$XDG_DATA_HOME/zsh_history"
export HISTSIZE=2147483647 # LONG_MAX
export SAVEHIST=$HISTSIZE
# }}}
rationalize_path path
init_ssh_key >&/dev/null
init_gpg_key >&/dev/null
| true |
361ebdf145af86f03cade4d3e214d724aaf6e536
|
Shell
|
ilkosta/make_lrs
|
/update_repo.sh
|
UTF-8
| 272 | 3.703125 | 4 |
[] |
no_license
|
#!/bin/sh
# Clone a git repository, or pull if a local checkout already exists.
# Usage: update_repo.sh <repo-url> <target-dir>
# NOTE(review): $REPO_URL/$REPO_DIR are expanded unquoted throughout —
# paths containing spaces will break; confirm before hardening.
REPO_URL=$1
REPO_DIR=$2
pwd=$PWD
# Make sure the parent of the target directory exists before cloning.
mkdir -p $(dirname $REPO_DIR)
if [ -d $REPO_DIR ]
then
    # Existing checkout: update in place, then return to where we were.
    echo "updating $REPO_DIR ..." \
    && cd $REPO_DIR \
    && git pull
    cd $pwd
else
    echo "downloading repository $REPO_URL to $REPO_DIR ..."
    git clone $REPO_URL $REPO_DIR
fi
| true |
a7702442693e874a0e68ed468e2593bb1f34c6f8
|
Shell
|
Eoin-ONeill-Yokai/PKGBUILD
|
/00_not_for_me/rs232monitor-git/PKGBUILD
|
UTF-8
| 830 | 2.515625 | 3 |
[] |
no_license
|
# Maintainer:
pkgname=rs232monitor-git
pkgver=r12.cc8077f
pkgrel=1
pkgdesc=""
arch=('x86_64')
url="https://github.com/cdwijs/rs232monitor"
license=()
depends=('qt5-base' 'qt5-serialport')
makedepends=('git')
provides=('rs232monitor')
conflicts=('rs232monitor')
source=("${pkgname}::git+https://github.com/cdwijs/rs232monitor.git")
md5sums=('SKIP')
pkgver() {
cd ${pkgname}
printf "r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
}
build() {
cd "${srcdir}/${pkgname}"
qmake rs232monitor.pro PREFIX=/usr
make
}
package() {
cd ${srcdir}/${pkgname}
# make DESTDIR=${pkgdir} install
make install INSTALL_ROOT="$pkgdir/"
# install -d ${pkgdir}/usr/share/licenses/${pkgname}
# install -d ${pkgdir}/usr/share/doc/${pkgname}
# ln -s /opt/${pkgname}/${pkgname} ${pkgdir}/usr/bin/${pkgname}
}
| true |
c0aa0685dbe478dec9b6296ee04a8b555181d81b
|
Shell
|
LaKing/srvctl
|
/modules/srvctl/completion.sh
|
UTF-8
| 2,732 | 3.65625 | 4 |
[] |
no_license
|
#!/bin/bash
(bash /usr/local/share/srvctl/srvctl.sh complicate &>/dev/null &)
_fedora_srvctl_options() {
local SC_USER
## determine the proper user
if [[ -z $SUDO_USER ]]
then
SC_USER="$USER"
else
SC_USER="$SUDO_USER"
fi
#echo "SRVCTL_OPTIONS $SC_USER"
## these are the current arguments while typing
local curr_arg;
curr_arg=${COMP_WORDS[COMP_CWORD]}
#echo "@ ${#COMP_WORDS[@]} @"
## the list will contain those words we can assume for now to be relevant
local list length command CMD arr argument
list=""
length=${#COMP_WORDS[@]}
sc_user="/var/local/srvctl/completion/$SC_USER"
## the command
if [[ $length == 2 ]]
then
list="$list start stop restart enable disable status kill help complicate"
[[ -f "$sc_user.commands" ]] && list="$list $(cat "$sc_user.commands")"
[[ -f "$sc_user.units" ]] && list="$list $(cat "$sc_user.units")"
[[ -f "$sc_user.VE" ]] && list="$list $(cat "$sc_user.VE")"
fi
if [[ $length == 3 ]]
then
CMD="${COMP_WORDS[1]}"
if [[ $CMD == enable ]] || [[ $CMD == start ]] || [[ $CMD == restart ]] || [[ $CMD == stop ]] || [[ $CMD == status ]] || [[ $CMD == disable ]] || [[ $CMD == kill ]]
then
[[ -f "$sc_user.units" ]] && list="$list $(cat "$sc_user.units")"
[[ -f "$sc_user.VE" ]] && list="$list $(cat "$sc_user.VE")"
else
list="$list start stop restart enable disable status kill help complicate"
fi
fi
## the arguments
if (( length > 2 )) && [[ -f "$sc_user.arguments" ]]
then
## CMD is the srvctl-command we consider now
CMD="${COMP_WORDS[1]}"
command="$(grep "$CMD" "$sc_user".arguments)"
arr=("$command")
argument=${arr[$length-2]}
list="$list $(echo "$argument" | sed 's/[A-Z]//g' | tr '[' ' ' | tr ']' ' ' | tr '|' ' ' )"
if [[ -f "$sc_user.$argument" ]]
then
list="$(cat "$sc_user.$argument")"
fi
fi
## TODO Prefer mapfile or read -a to split command output (or quote to avoid splitting).
# shellcheck disable=SC2207
COMPREPLY=( $(compgen -W "$list" -- "$curr_arg" ) );
}
complete -F _fedora_srvctl_options sc
complete -F _fedora_srvctl_options srvctl
function display() {
    # Print the srvctl completion hints for the invoking user,
    # resolving to the real user when the script runs under sudo.
    local hints
    if [[ -z $SUDO_USER ]]
    then
        hints=/var/local/srvctl/completion/"$USER".hints
    else
        hints=/var/local/srvctl/completion/"$SUDO_USER".hints
    fi
    # The background "complicate" job started at the top of this file
    # may still be generating the hints file; wait one second for it.
    if [[ ! -f $hints ]]
    then
        sleep 1
    fi
    if [[ -f $hints ]]
    then
        cat "$hints"
    else
        echo 'no hints'
    fi
}
# Show the hints immediately when this completion script is sourced.
display
| true |
7bd75e550e9c35927470590968f857313492f4ec
|
Shell
|
mskblackbelt/dotfiles
|
/zsh/omz_custom/plugins/battery/battery.plugin.zsh
|
UTF-8
| 6,033 | 3.390625 | 3 |
[
"MIT"
] |
permissive
|
###########################################
# Battery plugin for oh-my-zsh #
# Original Author: Peter hoeg (peterhoeg) #
# Email: [email protected] #
###########################################
# Author: Sean Jones (neuralsandwich) #
# Email: [email protected] #
# Modified to add support for Apple Mac #
###########################################
function battery_level_gauge() {
local gauge_slots=${BATTERY_GAUGE_SLOTS:-10};
local green_threshold=${BATTERY_GREEN_THRESHOLD:-6};
local yellow_threshold=${BATTERY_YELLOW_THRESHOLD:-4};
local color_green=${BATTERY_COLOR_GREEN:-%F{green}};
local color_yellow=${BATTERY_COLOR_YELLOW:-%F{yellow}};
local color_red=${BATTERY_COLOR_RED:-%F{red}};
local color_reset=${BATTERY_COLOR_RESET:-%{%f%k%b%}};
local battery_prefix=${BATTERY_GAUGE_PREFIX:-'['};
local battery_suffix=${BATTERY_GAUGE_SUFFIX:-']'};
local filled_symbol=${BATTERY_GAUGE_FILLED_SYMBOL:-'❚'};
local empty_symbol=${BATTERY_GAUGE_EMPTY_SYMBOL:-'☐'};
local charging_color=${BATTERY_CHARGING_COLOR:-$color_yellow};
# local charging_symbol=${BATTERY_CHARGING_SYMBOL:-' ⚡️ '};
local charging_symbol=${BATTERY_CHARGING_SYMBOL:-'⚡︎'};
local charged_symbol=${BATTERY_CHARGED_SYMBOL:-' ⏚ '};
local battery_remaining_percentage=$(battery_pct_remaining);
if [[ $battery_remaining_percentage =~ [0-9]+ ]]; then
local filled=$(((( $battery_remaining_percentage + $gauge_slots - 1) / $gauge_slots)));
local empty=$(($gauge_slots - $filled));
if [[ $filled -gt $green_threshold ]]; then
local gauge_color=$color_green;
elif [[ $filled -gt $yellow_threshold ]]; then
local gauge_color=$color_yellow;
else
local gauge_color=$color_red;
fi
else
local filled=$gauge_slots;
local empty=0;
filled_symbol=${BATTERY_UNKNOWN_SYMBOL:-'.'};
fi
local charging=' '
if battery_is_charging; then
charging=$charging_symbol
elif battery_charged; then
charging=$charged_symbol
fi
printf ${charging_color//\%/\%\%}$charging${color_reset//\%/\%\%}${battery_prefix//\%/\%\%}${gauge_color//\%/\%\%}
printf ${filled_symbol//\%/\%\%}'%.0s' {1..$filled}
[[ $filled -lt $gauge_slots ]] && printf ${empty_symbol//\%/\%\%}'%.0s' {1..$empty}
printf ${color_reset//\%/\%\%}${battery_suffix//\%/\%\%}${color_reset//\%/\%\%}
}
if [[ "IS_MAC" -eq 1 ]] && [[ $(pmset -g batt | grep -c "Batt") -gt 0 ]]; then
function battery_pct() {
pmset -g batt | grep -o "\d*%" | tr -d "%"
# local smart_battery_status="$(ioreg -rc "AppleSmartBattery")"
# typeset -F maxcapacity=$(echo $smart_battery_status | grep '^.*"MaxCapacity"\ =\ ' | sed -e 's/^.*"MaxCapacity"\ =\ //')
# typeset -F currentcapacity=$(echo $smart_battery_status | grep '^.*"CurrentCapacity"\ =\ ' | sed -e 's/^.*CurrentCapacity"\ =\ //')
# integer i=$(((currentcapacity/maxcapacity) * 100))
# echo $i
}
function plugged_in() {
[[ $(pmset -g batt | grep -c "Now drawing from 'AC Power'") -eq 1 ]]
}
function battery_is_charging() {
[[ $(ioreg -rc "AppleSmartBattery"| grep '^.*"IsCharging" = ' | sed -e 's/^.*"IsCharging"\ =\ //') == "Yes" ]]
}
function battery_charged() {
[[ $(pmset -g batt | grep -c " charged") -eq 1 ]]
}
function battery_pct_remaining() {
if plugged_in ; then
echo "External Power"
else
battery_pct
fi
}
function battery_time_remaining() {
if [[ $(pmset -g batt | grep -c " discharging") -eq 1 ]] ; then
echo " $(pmset -g batt | grep -o "[0-9]*:[0-9]* remaining" | \
cut -f1 -d ' ')"
else
if [[ $(pmset -g batt | grep -c " charging") -eq 1 ]]; then
echo " $(pmset -g batt | grep -o "[0-9]*:[0-9]* remaining" | \
cut -f1 -d ' ')"
fi
fi
}
function battery_pct_prompt () {
if [[ $(ioreg -rc AppleSmartBattery | grep -c '^.*"ExternalConnected"\ =\ No') -eq 1 ]] ; then
b=$(battery_pct_remaining)
if [ $b -gt 50 ] ; then
color='green'
elif [ $b -gt 20 ] ; then
color='yellow'
else
color='red'
fi
echo "%{$fg[$color]%}[$(battery_pct_remaining)%%]%{$reset_color%}"
else
echo "∞"
fi
}
elif [[ $(uname) == "Linux" ]] && [[ -x $(which upower) ]] && [[ $(upower -e | grep -i -c "battery") -ge 1 ]] ; then
local battery_name=$(upower -e | grep "battery")
local adapter_name=$(upower -e | grep "line_power")
local display_device=$(upower -e | grep "DisplayDevice")
function battery_is_charging() {
[[ $(upower -i $display_device | grep -c ' charging') -gt 0 ]]
}
function battery_is_discharging() {
[[ $(upower -i $display_device | grep -c 'discharging') -gt 0 ]]
}
function battery_charged() {
[[ $(upower -i $display_device | grep "state" | grep -c "fully-charged") -eq 1 ]]
}
function battery_pct() {
upower -i $display_device | grep "percentage" | cut -d ":" -f2 | sed -e 's/^\s*//' | tr -d "%"
}
function battery_pct_remaining() {
if [ ! $(battery_is_charging) ] ; then
battery_pct
else
echo "External Power"
fi
}
function battery_time_remaining() {
if [[ $(upower -i $display_device | grep -c "discharging") -gt 0 ]] ; then
echo "$(upower -i $display_device | grep "time to empty" | cut -d ":" -f2 | sed -e 's/^\s*//')"
fi
}
function battery_pct_prompt() {
b=$(battery_pct_remaining)
if [[ $(battery_is_discharging) ]] ; then
if [ $b -gt 50 ] ; then
color='green'
elif [ $b -gt 20 ] ; then
color='yellow'
else
color='red'
fi
echo "%{$fg[$color]%}[$(battery_pct_remaining)%%]%{$reset_color%}"
else
echo "∞"
fi
}
else
# Empty functions so we don't cause errors in prompts
function battery_pct_remaining() {
}
function battery_time_remaining() {
}
function battery_pct_prompt() {
}
function battery_is_charging() {
}
function battery_level_gauge() {
}
fi
| true |
f9228ec0b57ba2d9970f9f4c2547a9e8ef3530cb
|
Shell
|
NGTS/zlp-qa
|
/scripts/build_science_list.sh
|
UTF-8
| 184 | 3.140625 | 3 |
[] |
no_license
|
#!/bin/bash
set -eu
# List the reduced science frames under the flat-field-optimisation data
# tree. Relies on $NGTS pointing at the pipeline root; the `set -eu`
# at the top of this script aborts if it is unset.
find_files() {
    find ${NGTS}/flat-field-optimisation/data -name 'proc*.fits'
}
# Cap the incoming stream at the first 200 lines.
limit() {
    sed -n '1,200p'
}
# Write the (capped) list of science frames to the file named by $1.
main() {
    find_files | limit > $1
}
main "$@"
| true |
388d6160186583f30241a20ddb1366c24d9f6239
|
Shell
|
wuhlcom/mosquitto
|
/mosquitto.sh
|
UTF-8
| 489 | 2.578125 | 3 |
[] |
no_license
|
#!/bin/bash
# MQTT broker load test: spawn background subscribers with QoS cycling
# 0 -> 1 -> 2, then publish to the shared topic forever (Ctrl-C to stop).
# (Fix: added a shebang; replaced backticked `expr`/`seq` with shell
# arithmetic and a C-style for loop; quoted expansions.)
Number=40000              # highest client index (subscribers 20001..Number)
j=0                       # QoS level assigned to the next subscriber
for ((i = 20001; i <= Number; i++))
do
    mosquitto_sub -t testSubFixTopic -h 192.168.10.8 -p 1883 -q "$j" -i "subCID$i" -k 600 -u mqttclient -P mqttclient &
    j=$(( (j + 1) % 3 ))  # cycle QoS 0,1,2 without the old if/reset dance
done
# Give the subscribers time to connect before publishing starts.
sleep 20
# Publish one message per client index, one per second, forever.
while true
do
    for ((i = 20001; i <= Number; i++))
    do
        mosquitto_pub -t testSubFixTopic -h 192.168.10.8 -p 1883 -q 2 -i pubIDRetain20300 -k 600 -u mqttclient -P mqttclient -m "test$i"
        sleep 1
    done
done
| true |
5d97dc062d5856637a17ebf011fa1f3b08566493
|
Shell
|
yghlc/Landuse_DL
|
/flooding_mapping/package_results_files.sh
|
UTF-8
| 445 | 3.4375 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# bash create new region defined parameter files (ini)
#authors: Huang Lingcao
#email:[email protected]
#add time: 20 June, 2021
# Exit immediately if a command exits with a non-zero status. E: error trace
set -eE -o functrace
# Iterate with a glob instead of parsing `ls` output (SC2045); the -d
# guard skips the literal "*_1" pattern when no such directory exists,
# and quoting protects directory names containing spaces.
for dd in *_1; do
    [ -d "$dd" ] || continue
    echo "$dd"
    out=${dd}.zip
    if [ -f "${out}" ]; then
        echo "${out} exist, skip"
        continue
    fi
    # only add post*.shp and ini files
    zip -r "${out}" "${dd}"/*post* "${dd}"/*.ini
done
| true |
0ab17cdcb6cf5ef462afda55986a0bbeca9cd94b
|
Shell
|
mutek/Antenna
|
/SPAM/UNBOUND/TEST/Unbound.build
|
UTF-8
| 2,083 | 3.40625 | 3 |
[
"Unlicense"
] |
permissive
|
#!/usr/bin/env sh
# Builder per Unbound DNS recursive caching validating server
#
# Luca Cappelletti (2014)
# Public Domain
#
# ricetta per Unbound
# questa ricetta viene eseguita direttamente nella directory corrente
## indagare su hash e command -v in alternativa a which
DEB_PACKAGE="unbound"
DOWNLOAD_URL="http://unbound.net/downloads/unbound-1.4.21.tar.gz"
SOURCE_ARCHIVE="unbound-1.4.21.tar.gz"
BUILD_DIR="unbound-1.4.21"
RELEASE="1.4.21"
APP_NAME="Unbound"
APP_DISTRO="Debian7"
APP_KERNEL="Linux"
APP_PLATFORM="ia32"
# ANDRMD
#PREFIX=/home/$APP_NAME/.App/io$APP_NAME$RELEASE-$APP_DISTRO/$APP_KERNEL-$APP_PLATFORM
# POSIX
BASE_DIR=/opt
PREFIX=$BASE_DIR/$APP_NAME/.App/io$APP_NAME$RELEASE-$APP_DISTRO/$APP_KERNEL-$APP_PLATFORM
for i in "apt-get" "wget" "echo" "sha1sum" "sha256sum" "tar" "make"
do
[ -z $(which $i) ] && echo "il tool: "$i" non è raggiungibile...esco..." && exit 1
done
echo "i tools di base esistono...proseguo"
# root o sudo ma vai in manuale
apt-get -y build-dep $DEB_PACKAGE || exit 1
wget $DOWNLOAD_URL
wait
echo "hash dichiarati: "
echo "SHA1:3ef4ea626e5284368d48ab618fe2207d43f2cee1"
echo "SHA256:502f817a72721f78243923eb1d6187029639f7a8bdcc33a6ce0819bbb2a80970"
echo ""
echo "Calcolo hash..."
sha1sum $SOURCE_ARCHIVE
wait
sha256sum $SOURCE_ARCHIVE
tar -xf $SOURCE_ARCHIVE || exit 1
cd $BUILD_DIR
export LC_ALL=C
./configure --prefix=$PREFIX --with-username=$APP_NAME
make
make install
cd ..
rm --preserve-root -r $BUILD_DIR
mv $BASE_DIR/$APP_NAME/.App App
wait
rm --preserve-root -r $BASE_DIR/$APP_NAME
# ready...
echo $APP_NAME" ready in App folder"
echo "to install just:"
echo "mkdir "$BASE_DIR"/"$APP_NAME
echo "than"
echo "mv App "$BASE_DIR"/"$APP_NAME"/App"
echo "Add a new system username: "$APP_NAME
echo "fine tune unbound.conf..take inspiration from unbound.conf.template and put it into "$PREFIX"/etc/unbound/"
echo "Clean /etc/resolv.conf and put only: 127.0.0.1"
echo "bang Unbound from root with:"
echo "export LD_LIBRARY_PATH="$PREFIX"/lib:\$LD_LIBRARY_PATH"
echo "export PATH="$PREFIX"/sbin:\$PATH"
echo $PREFIX"/sbin/unbound"
| true |
75fe86c33b2ca8798589b3bc9baa6d63eaecc1f8
|
Shell
|
woodruffw/snippets
|
/ffcat/ffcat
|
UTF-8
| 803 | 4.21875 | 4 |
[
"LicenseRef-scancode-proprietary-license",
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# ffcat: Concatenate a bunch of movies/media into a single file
set -eo pipefail
function help {
  # Called only on misuse. Report to stderr and exit non-zero: the old
  # version printed to stdout and exited 0, so scripted callers (and
  # this script's own `|| help` guards under `set -e`) saw success on
  # a usage error.
  >&2 echo "Usage: ffcat <output> <input [input ...]>"
  exit 1
}
# True iff "$1" resolves to an existing executable file on disk
# (shell builtins and aliases therefore do NOT count as installed).
function installed {
  # 'local' fixes the old version's clobbering of a global `cmd`;
  # the trailing `return ${?}` was redundant and has been dropped.
  local resolved
  resolved=$(command -v "${1}")
  [[ -n "${resolved}" && -f "${resolved}" ]]
}
# EXIT-trap handler: remove the temporary inputs list, silently
# tolerating the case where it was never created.
function cleanup {
  rm -- "${inputs_file}" 2>/dev/null
}
trap cleanup EXIT
inputs_file=$(mktemp -u ffcat_inputs_XXXXX.txt)
[[ "${#}" -ge 2 ]] || help
output="${1}" && shift
[[ ! -f "${output}" ]] || help
for input in "${@}"; do
[[ -f "${input}" ]] || help
echo "file '${input}'" >> "${inputs_file}"
done
installed ffmpeg || { >&2 echo "Missing ffmpeg."; exit 1; }
# you'd better hope your stream supports the `copy' encoder
ffmpeg -f concat -safe 0 -i "${inputs_file}" -c copy "${output}"
| true |
53f53be21706cee24e14d678312c68812ec76634
|
Shell
|
jsmentch/scripts
|
/convertbin.sh
|
UTF-8
| 848 | 3.328125 | 3 |
[] |
no_license
|
#!/bin/bash
# Convert MNI-space ROI masks in .hdr format to binarized NIFTI files in
# the group template space, via the mni2tmpl transform matrix.
# (Fix: the shebang must be the FIRST line of the file — a comment
# previously preceded it, so the kernel ignored it entirely.)
echo "ROI conversion log" > log.txt
# Iterate with a glob instead of parsing `ls` output (SC2045), quoting
# filenames so names with spaces survive.
for l in *.hdr; do
    [ -e "$l" ] || continue
    fsl5.0-fslchfiletype NIFTI_GZ "$l"
    echo "$l converted to NIFTI at $(date)" >> log.txt
done
for n in *.nii.gz; do
    [ -e "$n" ] || continue
    fsl5.0-fslmaths "$n" -bin "$n"
    echo "$n binarized at $(date)" >> log.txt
    # NOTE(review): the -ref path uses ".../Task002_Pandora/..." while
    # the -init path uses ".../Task002_Pandoras/..." — one of these is
    # probably a typo; confirm against the real directory layout.
    fsl5.0-flirt -in "$n" -ref ~/Documents/JeffOpenFMRI_Task002_Pandora/templates/grpbold7Tp1/from_mni/avg152T1_brain.nii.gz -out "$n" -init ~/Documents/JeffOpenFMRI_Task002_Pandoras/templates/grpbold7Tp1/xfm/mni2tmpl_12dof.mat -applyxfm
    echo "$n transformed to group template at $(date)" >> log.txt
    # Re-binarize after interpolation introduced fractional values.
    fsl5.0-fslmaths "$n" -bin "$n"
    echo "$n binarized post xfm at $(date)" >> log.txt
done
| true |
74368438a82654fb326968b4d1b18fb0686bc2b7
|
Shell
|
IAmNotMahd/LaneDetector
|
/env_setup.sh
|
UTF-8
| 5,213 | 2.84375 | 3 |
[
"MIT"
] |
permissive
|
# ***************
# INSTALL OPENCV
# ***************
# FIX: this file contained an unresolved git merge conflict
# (<<<<<<< HEAD / ======= / >>>>>>> markers) whose two sides were
# byte-identical; the markers have been removed so the script parses.

# VERSION TO BE INSTALLED
OPENCV_VERSION='3.4.2'
# 1. KEEP UBUNTU OR DEBIAN UP TO DATE
sudo apt-get -y update
sudo apt-get -y upgrade
sudo apt-get -y dist-upgrade
sudo apt-get -y autoremove
# 2. INSTALL THE DEPENDENCIES
# Build tools:
sudo apt-get install -y build-essential cmake
# GUI (if you want to use GTK instead of Qt, replace 'qt5-default' with 'libgtkglext1-dev' and remove '-DWITH_QT=ON' option in CMake):
sudo apt-get install -y qt5-default libvtk6-dev
# Media I/O:
sudo apt-get install -y zlib1g-dev libjpeg-dev libwebp-dev libpng-dev libtiff5-dev libjasper-dev libopenexr-dev libgdal-dev
# Video I/O:
sudo apt-get install -y libdc1394-22-dev libavcodec-dev libavformat-dev libswscale-dev libtheora-dev libvorbis-dev libxvidcore-dev libx264-dev yasm libopencore-amrnb-dev libopencore-amrwb-dev libv4l-dev libxine2-dev
# Parallelism and linear algebra libraries:
sudo apt-get install -y libtbb-dev libeigen3-dev
# Python:
sudo apt-get install -y python-dev python-tk python-numpy python3-dev python3-tk python3-numpy
# Java:
sudo apt-get install -y ant default-jdk
# Documentation:
sudo apt-get install -y doxygen
# 3. INSTALL THE LIBRARY
sudo apt-get install -y unzip wget
wget https://github.com/opencv/opencv/archive/${OPENCV_VERSION}.zip
unzip ${OPENCV_VERSION}.zip
rm ${OPENCV_VERSION}.zip
mv opencv-${OPENCV_VERSION} OpenCV
cd OpenCV
mkdir build
cd build
cmake -DWITH_QT=ON -DWITH_OPENGL=ON -DFORCE_VTK=ON -DWITH_TBB=ON -DWITH_GDAL=ON -DWITH_XINE=ON -DBUILD_EXAMPLES=ON -DENABLE_PRECOMPILED_HEADERS=OFF ..
make -j4
sudo make install
sudo ldconfig
# 4. EXECUTE SOME OPENCV EXAMPLES AND COMPILE A DEMONSTRATION
# To complete this step, please visit 'http://milq.github.io/install-opencv-ubuntu-debian'.
# ***************
# INSTALL CUDA Toolkit
# ***************
# 1. Get the package from Nvidia
# http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1404/x86_64/
wget http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1404/x86_64/cuda-repo-ubuntu1404_7.5-18_amd64.deb
sudo dpkg -i cuda-repo-ubuntu1404_7.5-18_amd64.deb
rm cuda-repo-ubuntu1404_7.5-18_amd64.deb
# 2. Set environment variables
echo 'export CUDA_HOME=/usr/local/cuda
export CUDA_ROOT=/usr/local/cuda
export PATH=$PATH:$CUDA_ROOT/bin:$HOME/bin
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$CUDA_ROOT/lib64
' >> ~/.bashrc
# 3. Let terminal know of the changes to the .bashrc file
# FIX: the script is inside OpenCV/build at this point, so the old
# `source .bashrc` looked in the wrong directory; use the home path.
source ~/.bashrc
sudo apt-get update
sudo apt-get install -y cuda
# 4. Check if installation is successful by running the next line
# nvcc -V
| true |
1b324075dc56d75009c6c871733b0782a73cedd8
|
Shell
|
msys2/MINGW-packages
|
/mingw-w64-lua-mpack/PKGBUILD
|
UTF-8
| 2,197 | 2.953125 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
# Maintainer: Rui Abreu Ferreira <[email protected]>
# MSYS2/mingw-w64 PKGBUILD: builds the Lua "mpack" (MessagePack) binding
# twice — once against the current system Lua and once against Lua 5.1 —
# and splits the results into two packages.
_realname=mpack
pkgbase=mingw-w64-lua-${_realname}
pkgname=("${MINGW_PACKAGE_PREFIX}-lua51-${_realname}"
"${MINGW_PACKAGE_PREFIX}-lua-${_realname}")
pkgver=1.0.10
pkgrel=2
arch=('any')
mingw_arch=('mingw32' 'mingw64' 'ucrt64' 'clang64' 'clang32')
url='https://github.com/libmpack/libmpack'
license=('MIT')
makedepends=("${MINGW_PACKAGE_PREFIX}-lua51"
"${MINGW_PACKAGE_PREFIX}-lua"
"${MINGW_PACKAGE_PREFIX}-cc"
"${MINGW_PACKAGE_PREFIX}-libmpack"
"${MINGW_PACKAGE_PREFIX}-pkg-config")
source=("${_realname}-${pkgver}.tar.gz"::"https://github.com/libmpack/libmpack-lua/archive/${pkgver}.tar.gz")
sha256sums=('b58a25a0b48e1ec584beee6c80e0e31182dad7a45005ce58e88e6cfff3a3f3e4')
build() {
cd libmpack-lua-${pkgver}
# -DMPACK_USE_SYSTEM links against the shared libmpack instead of the
# bundled sources; pkg-config supplies the per-Lua-version flags.
# First build: current Lua -> mpack.dll; second: Lua 5.1 -> mpack.dll.5.1.
${CC} -O2 -fPIC -DMPACK_USE_SYSTEM $(pkg-config --cflags lua) -shared -o mpack.dll lmpack.c $(pkg-config --libs lua) -lmpack
${CC} -O2 -fPIC -DMPACK_USE_SYSTEM $(pkg-config --cflags lua5.1) -shared -o mpack.dll.5.1 lmpack.c $(pkg-config --libs lua5.1) -lmpack
}
package_lua-mpack() {
pkgdesc='Msgpack serialization library for Lua (mingw-w64)'
depends=("${MINGW_PACKAGE_PREFIX}-lua" "${MINGW_PACKAGE_PREFIX}-libmpack")
cd libmpack-lua-${pkgver}
# Reduce "X.Y.Z" to "X.Y" so the module lands in lib/lua/<major.minor>/.
local luaver=$(pkg-config lua --modversion | sed -r 's/^([0-9]+[.][0-9]+)[.][0-9]+$/\1/')
install -Dm755 mpack.dll "${pkgdir}${MINGW_PREFIX}/lib/lua/${luaver}/mpack.dll"
install -Dm644 LICENSE-MIT "${pkgdir}${MINGW_PREFIX}/share/licenses/lua-lmpack/LICENSE"
}
package_lua51-mpack() {
pkgdesc='Msgpack serialization library for Lua 5.1 (mingw-w64)'
depends=("${MINGW_PACKAGE_PREFIX}-lua51" "${MINGW_PACKAGE_PREFIX}-libmpack")
cd libmpack-lua-${pkgver}
# Note: the 5.1 build artifact is named mpack.dll.5.1; install it as mpack.dll.
install -Dm755 mpack.dll.5.1 "${pkgdir}${MINGW_PREFIX}/lib/lua/5.1/mpack.dll"
install -Dm644 LICENSE-MIT "${pkgdir}${MINGW_PREFIX}/share/licenses/lua51-mpack/LICENSE"
}
# template start; name=mingw-w64-splitpkg-wrappers; version=1.0;
# vim: set ft=bash :
# generate wrappers
for _name in "${pkgname[@]}"; do
_short="package_${_name#${MINGW_PACKAGE_PREFIX}-}"
_func="$(declare -f "${_short}")"
eval "${_func/#${_short}/package_${_name}}"
done
# template end;
| true |
3e7ce9f0acc6cdd9d65600bda186213f2897513b
|
Shell
|
NicolasBernaerts/munin-plugin
|
/dns325_temperature
|
UTF-8
| 4,300 | 3.625 | 4 |
[] |
no_license
|
#!/ffp/bin/bash
# ----------------------------------------------
# Muninlite plugin for DNS-325
#
# Used to supervise :
#  * fan speed
#  * case temperature
#  * hdd temperature
#
# This script needs the following utilities to run :
#  * fan_control
#  * smartctl
#
# To install muninlite on DNS-325, please check :
# http://bernaerts.dyndns.org/nas/71-dns325-ffp07/319-dns325-dns323-ffp7-munin-node-muninlite
#
# 28/04/2015, V1.0 - Creation by N. Bernaerts
# 02/05/2015, V1.1 - Handle error where temperature case is read as 0
# ----------------------------------------------
# -------------------------------------------------------
#   Initialisation
# -------------------------------------------------------
# define warning and critical temperature levels
CASE_WARNING="45"
CASE_CRITICAL="50"
DISK_WARNING="45"
DISK_CRITICAL="50"
# define display value for 3 fan states (stop, low & high)
# (fake "temperature" values so fan speed can share the temperature graph)
FAN_STOP="20"
FAN_LOW="24"
FAN_HIGH="28"
# get nas name
NAS_IP=$(hostname)
# declaration of disks
ARR_DISK=("sda" "sdb")
# -------------------------------------------------------
#   Configuration call
# -------------------------------------------------------
# Munin calls the plugin with "config" to get graph metadata,
# and with no argument to get the actual values (else branch below).
if [ "$1" = "config" ]; then
# -------------------------------
#   NAS general infos
# -------------------------------
echo "host_name $NAS_IP"
echo "graph_title Temperature & Fan speed"
echo "graph_vlabel Degree Celsius"
echo "graph_category temperature"
echo "graph_info Temperature Levels & Fan speed"
echo "graph_scale no"
echo "graph_args --lower-limit $FAN_STOP --upper-limit ${DISK_CRITICAL} --rigid"
# -------------------------------
#   Fan speed
# -------------------------------
echo "fan.label Fan"
echo "fan.info Fan speed (off:$FAN_STOP, low:$FAN_LOW, high:$FAN_HIGH)"
echo "fan.colour c0c0c0"
echo "fan.draw AREA"
# -------------------------------
#   Case internal temperature
# -------------------------------
echo "case.label Internal case"
echo "case.info Internal Case Sensor"
echo "case.colour 00ff00"
echo "case.warning :${CASE_WARNING}"
echo "case.critical :${CASE_CRITICAL}"
# -------------------------------
#   Hard drives temperature
# -------------------------------
for DISK in "${ARR_DISK[@]}"
do
# get disk size (in Gb)
# NOTE(review): --device=marvell is DNS-325 specific (Marvell SATA bridge);
# the sed chain strips commas/spaces then drops the last 9 digits (bytes -> Gb).
DISK_INFO="No disk"
DISK_SIZE=$(smartctl -i --device=marvell /dev/${DISK} | grep "^User Capacity" | sed 's/^.*:\([0-9 ,]*\)bytes.*$/\1/' | tr -d " ," | sed 's/\(.*\).\{9\}/\1/')
# if disk is present, get its full characteristics
if [ "${DISK_SIZE}" != "" ]
then
DISK_MODEL=$(smartctl -i --device=marvell /dev/${DISK} | grep "^Device Model" | sed 's/^.*:[ ]*\(.*\)$/\1/')
DISK_SERIAL=$(smartctl -i --device=marvell /dev/${DISK} | grep "^Serial Number" | sed 's/^.*:[ ]*\(.*\)$/\1/')
DISK_INFO="${DISK_SIZE} Gb, model ${DISK_MODEL}, s/n ${DISK_SERIAL}"
fi
# display disk1 configuration
echo "${DISK}.label /dev/${DISK}"
echo "${DISK}.info ${DISK_INFO}"
echo "${DISK}.warning :${DISK_WARNING}"
echo "${DISK}.critical :${DISK_CRITICAL}"
done
# -------------------------------------------------------
#   Normal call : Read Data
# -------------------------------------------------------
else
# read internal case temperature (sometimes reads 0 ...)
# NOTE(review): 'temperature' is a D-Link firmware utility; "U" tells
# munin the value is Unknown, which keeps a bogus 0 out of the graph.
TEMP_CASE=$(temperature g 0 | sed 's/^.* = \([0-9]*\).*$/\1/g')
[ "${TEMP_CASE}" = "0" ] && TEMP_CASE="U"
# publish case temperature
echo "case.value ${TEMP_CASE}"
# retrieve fan status from fanspeed command
FAN_STATE=$(fanspeed g)
# if fan is supposed to rotate, retrieve last fan speed change in /var/log/user.log
[ "$FAN_STATE" != "stop" ] && FAN_STATE=$(cat /var/log/user.log | grep "Set Fan Speed" | tail -n 1 | sed 's/^.*"\([^"]*\)".*$/\1/g' | tr '[:upper:]' '[:lower:]')
# publish fan speed
case ${FAN_STATE} in
"stop") echo "fan.value ${FAN_STOP}" ;;
"low") echo "fan.value ${FAN_LOW}" ;;
"high") echo "fan.value ${FAN_HIGH}" ;;
*) echo "fan.value U" ;;
esac
# loop thru both disks
for DISK in "${ARR_DISK[@]}"
do
# read hard disk 1 temperature
# SMART attribute 194 is Temperature_Celsius; the sed keeps the raw value.
TEMP_DISK=$(smartctl -A --device=marvell /dev/${DISK} | grep "^194.*" | sed 's/^.*- *\([0-9]*\).*$/\1/g')
# publish disk temperature
echo "${DISK}.value ${TEMP_DISK}"
done
fi
| true |
b15fae98064e238195c0fabf6a7424ad9d856257
|
Shell
|
ryanschulze/bash_functions
|
/notify.sh
|
UTF-8
| 1,186 | 3.75 | 4 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#=== FUNCTION ================================================================
#          NAME: notify
#   DESCRIPTION: Thin wrapper around notify-send with sane defaults.
#    PARAMETERS: [ -t <topic> ] [ -t <timeout in seconds> ] [ -i <icon> ]
#                -t does double duty: a purely numeric value is taken as the
#                timeout in seconds, anything else becomes the topic.
#===============================================================================
notify() {
  local delay=30
  local subject='Notify'
  local symbol='info'
  # Consume leading -t/-i option pairs; whatever remains is the message body.
  while [[ ${1} =~ ^-[ti]$ ]]; do
    case ${1} in
      -t)
        if [[ ${2} =~ ^[0-9]+$ ]]; then
          delay=${2}
        else
          subject=${2}
        fi
        ;;
      -i)
        symbol=${2}
        ;;
    esac
    shift;shift;
  done
  # notify-send expects the timeout in milliseconds.
  notify-send -i "${symbol}" -t "$((delay * 1000))" "${subject}" " ${*}"
} # end of function notify

#-------------------------------------------------------------------------------
# Example usage
#-------------------------------------------------------------------------------
#
# notify test
# notify -t test                    the topic is test
# notify -t 10                      message with a 10 second timeout
# notify -t 10 -t test              a 10 second timeout and topic test
# notify -t 10 -i error -t test     an error icon, 10 second timeout, topic test
| true |
d53e756b2f2e3ed69935874cec87b210bb31afe7
|
Shell
|
victorkp/tomcat-tools
|
/local-deploy.sh
|
UTF-8
| 381 | 3.65625 | 4 |
[] |
no_license
|
#!/bin/bash
# Deploys a WAR file into the local Tomcat webapps directory, replacing any
# previously deployed copy with the same name.
# Usage: ./local-deploy APP.war

if [ "$#" -ne 1 ]; then
    # No Arguments
    echo "You need to specify the web archive file"
    echo "Usage: ./local-deploy APP.war"
    # NOTE: kept as a plain 'exit' (status 0) to preserve the original
    # script's observable exit behavior for existing callers.
    exit
fi

warname=$1
# Strip any directory part and the .war suffix to get the webapp name.
filename=$(basename "${warname}" .war)

echo
echo "Removing old WAR"
# BUG FIX: the original expanded $(unknown) here — a command substitution of a
# nonexistent command — instead of the computed (and otherwise unused)
# ${filename}, so the target path was never built correctly.
sudo rm -f "/usr/share/tomcat/webapps/${filename}.war"
echo
echo "Copying new WAR"
sudo cp "$warname" "/usr/share/tomcat/webapps/${filename}.war"
echo
echo "Done!"
echo
| true |
80fd871c8be90a3d9f1fdec32d5dc8a379fb41c4
|
Shell
|
echken/packages
|
/docker_apps/mongodb/remove.sh
|
UTF-8
| 384 | 3.203125 | 3 |
[] |
no_license
|
#!/bin/bash
#author: Huang Jinhua
#date: 01/05/2017
#description: To remove docker container and images

# Step 1: Bail out early unless a docker process appears in the process list;
# without the daemon there is nothing to remove.
docker_procs=$(ps -aux | grep docker | grep -v grep)
if [ -z "$docker_procs" ]; then
    echo "Docker is not running, so not necessary to remove docker images"
    exit 0
fi

# Step 2: Delete the mongodb container, then its backing image.
docker rm mongodb
docker rmi openestuary/mongodb:3.4
| true |
a7381bfb8bff612bd7b58f9ec11c7b2636252db6
|
Shell
|
fauxserve/Casper-Scripts
|
/Reporting Scripts/objects_scoped_to_named_computer_group.sh
|
UTF-8
| 9,635 | 3.3125 | 3 |
[] |
no_license
|
#!/bin/sh
# v0 - in progress
# set -x   (uncomment for shell execution tracing while debugging)
# Clear the terminal so the interactive prompts below start on a clean screen.
reset
####################################################################################################
#
# Copyright (c) 2015, JAMF Software, LLC. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the JAMF Software, LLC nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY JAMF SOFTWARE, LLC "AS IS" AND ANY
# EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL JAMF SOFTWARE, LLC BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
####################################################################################################
########## Description ##########
##
#
##
########## End Description ##########
########## Usage ##########
##
#
# This script has three options for providing the required variables:
# 1: Pass the variables in as shell parameters in line. (This requires quotes)
# 2: Be prompted for the variables during shell execution (Do not include quotes)
# 3: Hard code variables into this script file
#
#
# How To Use:
# 1) To pass the variables in with script parameters in one line, keep the variables below blank.
# Then, provide the path to the script with the URL, user & resource in one line separated by spaces. If the Group contains spaces, please include the whole group in quotes.
# Example:
# ./objects_scoped_to_named_computer_group.sh https://jamf.company.com:8443 ladmin policies "All Managed Clients"
# For security reasons, the password will need to be provided separately.
#
# 2) To be prompted for the variables, just run the unmodified script by itself and the shell will do the rest.
#
# 3) To hard code variables you can fill them in under the Customization section.
# This option is not recommended.
#
##
########## End Usage ##########
### If the values are left blank here, they will be prompted for during execution, or can be passed in as parameters.
### This option is not recommended. See Usage above.
# JSS URL, e.g. https://jamf.company.com:8443
jssUrl=""
# Full URL and credentials to the JSS.
apiUser=""
apiPass=""
# Resource - needs to match how JSS spells the resource.
# e.g. policies, osxconfigurationprofiles, macapplications, restrictedsoftware
apiResource=""
# Computer Group - What we are reporting on. These groups should not have commas in the name.
apiComputerGroup=""
##
########## End Customization ##########
########## BODY OF SCRIPT - DO NOT MODIFY BELOW ##########
##
# Identify the console (logged-in) user so report files land on their Desktop.
CurrentUser=$(/bin/ls -l /dev/console | /usr/bin/awk '{ print $3 }')
CurrentUserHome=$(dscl . -read /Users/$CurrentUser NFSHomeDirectory 2>/dev/null | awk '{ print $2 }')
echo ""
# read parameters
# Positional parameters only fill variables that were not hard-coded above.
if [[ $jssUrl == "" ]]; then
jssUrl=$1
fi
if [[ $apiUser == "" ]]; then
apiUser=$2
fi
if [[ $apiResource == "" ]]; then
apiResource=$3
fi
if [[ $apiComputerGroup == "" ]]; then
apiComputerGroup=$4
fi
# Anything still unset is prompted for interactively.
if [[ $jssUrl == "" ]]; then
echo "Enter your full Jamf Pro Server address:"
echo "   Example: https://jamf.company.com:8443"
echo "   Example: https://company.jamfcloud.com"
read jssUrl
fi
if [[ $apiUser == "" ]]; then
echo "Enter valid API username:"
read apiUser
fi
if [[ $apiPass == "" ]]; then
echo ""
echo "Enter valid API user password:"
echo "(silent input)"
read -s apiPass
fi
if [[ $apiResource == "" ]]; then
echo "What API resource do you want to report on?"
echo "Example:"
echo "   macapplications"
echo "   osxconfigurationprofiles"
echo "   policies"
echo "   restrictedsoftware"
read apiResource
fi
if [[ $apiComputerGroup == "" ]]; then
echo "What JSS Computer Group do you want to report on?"
echo "   Note: This must be spelled exactly as it is in the JSS."
echo "   Note: Do not include quotes"
echo "   Example: All Managed Clients"
read apiComputerGroup
fi
echo ""
echo ""
# Timestamp components used in the report filename.
theDATE=$(date "+%Y_%m_%d")
theTIME=$(date "+%H_%M")
echo "The Date is: $theDATE"
echo "The Time is: $theTIME"
echo ""
echo "Working parameters:"
echo " local user: $CurrentUser"
echo " local user home: $CurrentUserHome"
echo " JSS URL: $jssUrl"
echo " API username: $apiUser"
echo " JSS Object: $apiResource"
echo " JSS Computer Group: $apiComputerGroup"
echo ""
echo ""
echo ""
# File paths - Please do not modify
folderPath="$CurrentUserHome/Desktop/apiScript"
scratchFolderPath="$folderPath/Scratch"
file1="$folderPath/peripheralName.xml" # File used to create computer name variables
csvFile="$folderPath/Report_${apiResource}_Scoped_To_Computer_Group_${apiComputerGroup}--${theDATE}--${theTIME}.csv" # CSV file used as our counter and computer name variable for our CURL loop
xmlPath="$folderPath/apiGET.xml" # File path to write our API GET. This will be the computer information by ID
file2="$folderPath/report.txt" # Path to our final report. Use txt as the file format please
# Start from a clean scratch area on every run.
rm -rf $scratchFolderPath
if [ ! -e "$scratchFolderPath" ]; then
mkdir -p "$scratchFolderPath" # sleep 2
fi
### This is the kicker here. Generates a space separated array of every Peripheral ID to be queried. Thanks to Chris Shasse on the proper formatting.
# NOTE(review): relies on 'tidy' being available to normalize the XML — confirm on target Macs.
apiResourceIdList=$(/usr/bin/curl -sk -u $apiUser:$apiPass -H "Accept: application/xml" $jssUrl/JSSResource/$apiResource | tidy -xml 2>/dev/null | grep '<id>' | sed -n 's|<id>\(.*\)</id>|\1|p' | sort -n)
# echo "apiResourceIdList is: $apiResourceIdList"
# How to parse data from API output (XML)
# https://jamfsoftware.atlassian.net/wiki/display/SW/Pasing+XML+Data+from+the+API
###
# Each variable to be generated into the csv will have an initial value so the first dump generates the csv headers on row 1. Each variable will be cleared and regenerated below.
valueApiResourceID="$apiResource ID"
valueApiResourceName="Name"
valueApiResourceScopedToGroup="Scoped To $apiComputerGroup"
########### HERE THERE BE FUNCTIONS ###########
# CsvDump: append one comma-separated row (ID, name, scoped-group) to the CSV.
# Reads the three valueApiResource* globals set by the caller.
function CsvDump {
csvDumpContent="${valueApiResourceID}, ${valueApiResourceName}, ${valueApiResourceScopedToGroup}"
# echo $csvDumpContent
echo $csvDumpContent>>$csvFile
}
CsvDump # Put in the column headers
#########
function ApiResourceLoop {
# sleep 2
echo " Working with File ID: $id "
tempFile="$folderPath/Scratch/file$id.xml"
# echo "tempFile is \"$tempFile\""
# /usr/bin/curl -sk -u $apiUser:$apiPass -H "Accept: application/xml" $jssUrl/JSSResource/$apiResource/id/$id/subset/general%26scope | xmllint --format - --xpath /name > $tempFile
/usr/bin/curl -sk -u $apiUser:$apiPass -H "Accept: application/xml" $jssUrl/JSSResource/$apiResource/id/$id | xmllint --format - --xpath /name > $tempFile
# cat $tempFile
# Each variable to be generated into the CSV file will be cleared and then re-populated by parsing the $tempFile. This ensures that the CSV headers are populated only once, as well as the row in the CSV for each $id has unique/new data.
# 2>&1
valueApiResourceName=""
valueApiResourceName=$(cat $tempFile | xpath //general/name 2>/dev/null | sed -e 's/\<name>//g; s/\<\/name>//g')
echo " valueApiResourceName is \"$valueApiResourceName\""
valueApiResourceID=""
## Even though the ID is read from the array, I'm checking the local cache file to verify that all of the data is consistent.
valueApiResourceID=$(cat $tempFile | xpath //general/id 2>/dev/null | sed -e 's/\<id>//g; s/\<\/id>//g')
echo " valueApiResourceID is \"$valueApiResourceID\""
valueApiResourceScopedToGroup=""
valueApiResourceScopedToGroup=$(cat $tempFile | xpath //scope/computer_groups/computer_group/name 2>/dev/null | sed -e 's/\<name>//g; s/\<\/name>//g')
echo " valueApiResourceScopedToGroup is \"$valueApiResourceScopedToGroup\""
# within one function, call another function to output the data into the CSV file
if [[ "$valueApiResourceScopedToGroup" == *$apiComputerGroup* ]]; then
echo " valueApiResourceScopedToGroup MATCHES apiComputerGroup = writing $apiComputerGroup to file "
CsvDump
else
echo " valueApiResourceScopedToGroup DOES NOT match apiComputerGroup "
echo " $valueApiResourceScopedToGroup != $apiComputerGroup "
fi
# rm $tempFile
}
function EchoLoop {
# This is for testing that the array is being read in the proper order.
echo $id
}
######### Run the actual program #########
for id in ${apiResourceIdList}; do
# EchoLoop
ApiResourceLoop
done
# cp $csvFile $csvFileExport
# open -a /Applications/Microsoft\ Excel.app $csvFileExport
#
# qlmanage -R "$csvFile"
echo "Done!
Please check out the report file located at:
$csvFile"
| true |
a23cb09eda39adb362b16127599b32111799e84a
|
Shell
|
gainge/RepoTracker
|
/data/clean_extract.sh
|
UTF-8
| 556 | 3.6875 | 4 |
[] |
no_license
|
#!/bin/bash
# Extracts the Java sources from a submission zip into data/extracted/<name>,
# replacing any previous extraction, then deletes the original zip.
# Usage: clean_extract.sh <zipfile> <name>

if [ "$#" != "2" ]; then
    echo "Invalid Argument Count!"
    # NOTE: kept as a plain 'exit' (status 0) to preserve the original
    # script's exit behavior for existing callers.
    exit
fi

FILE="$1"
EXTRACTED_DIR="data/extracted"

# Create the shared extraction root on first use.
if [ ! -d "$EXTRACTED_DIR" ]; then
    mkdir -p "$EXTRACTED_DIR"
fi

SOURCE_DIR="$EXTRACTED_DIR/$2"

# Remove existing files, if applicable
if [ -d "$SOURCE_DIR" ]; then
    rm -r "$SOURCE_DIR"
fi
mkdir "$SOURCE_DIR"

echo "Unzipping File..."
# Original unzip from honor checker
# BUG FIX: the exclude list contained ".class" (an exact filename that never
# matches compiled classes); "*.class" is the pattern clearly intended
# alongside "*.jar" and "*.zip".
unzip "$FILE" "*.java" -d "$SOURCE_DIR" -x "*.jar" "*.class" "libs/*" "*.zip" "build/*" "app/src/main/res/*" "app/build/*" &>/dev/null

# Clean up the original zip file
rm "$FILE"
| true |
4af20f9a1b9b238bf4e35d7b1bd62b608b4b12d4
|
Shell
|
drn/dots
|
/git/functions/git-pullme
|
UTF-8
| 998 | 4.09375 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Opens your browser to the github pull request creation page for the current
# commit against the source branch.
# NOTE(review): 'git path', 'git me', 'git remote-contains' and 'git author'
# look like custom aliases/helpers from this dotfiles repo — confirm they are
# installed before relying on this script.
# abort if not in git directory
if ! git rev-parse --git-dir > /dev/null 2>&1; then
echo "Aborting. Not in a git repository."
exit 1
fi
# specify the root url
root="https://github.com"
# author/repo of the default remote
path="$(git path)"
# get current branch
current_branch="$(git me)"
# determine the remote that contains the current branch
current_remote="$(git remote-contains $current_branch)"
# determine the author of the current remote
current_author="$(git author $current_remote)"
# "author:branch" — GitHub's compare-URL syntax for the head side.
current="$current_author:$current_branch"
# determine source author and branch
# default to master, but prefer main when the upstream remote has it
source_branch="master"
if [ ! -z "$(git branch --remote | grep upstream/main$)" ]; then
source_branch="main"
fi
source_author="$(git author)"
source="$source_author:$source_branch"
# open the web browser at the computed pull request endpoint
open "$root/$path/compare/$source...$current"
| true |
d00258aeff34ce76f778379105ec0521a2a9b11c
|
Shell
|
adlerj/OS_Design
|
/Project_1/Stuff/testscript_p2.sh
|
UTF-8
| 222 | 3.25 | 3 |
[] |
no_license
|
#!/bin/bash
# For every *.txt input in the current directory, run ./wtc_btproc against it
# 5 times, appending the banner and all program output to a per-file log.
total=5
for input in *.txt; do
    log="runtimes_wtc_btproc_${input}.output"
    echo "WTC_BTPROC " $input >> "$log"
    run=1
    while [ "$run" -le "$total" ]; do
        ./wtc_btproc $input >> "$log"
        run=$((run + 1))
    done
done
| true |
f5f4342400ade7c6a956c000373fde12f9319dd9
|
Shell
|
v3netcz/vmanager
|
/tests/run-tests.sh
|
UTF-8
| 452 | 3.8125 | 4 |
[] |
no_license
|
#!/bin/sh
# Locate a PHP interpreter (preferring php-cgi over plain php) and launch the
# project's test runner with it, forwarding any extra arguments.

# BUG FIX: the original parsed `whereis` output, which prints "php-cgi:" even
# when nothing is installed, so the emptiness check never fired and the junk
# string "php-cgi:" was executed. `command -v` is the portable existence test.
# Also: `[ x == y ]` is a bashism that breaks under dash; POSIX test uses `=`
# (expressed here as -z).
PHPBIN=$(command -v php-cgi)
if [ -z "$PHPBIN" ]; then
    PHPBIN=$(command -v php)
    if [ -z "$PHPBIN" ]; then
        echo "Error. PHP binary not found"
        exit 1
    fi
fi

# saves the path to this script's directory
dir=$(dirname "$0")

# absolutizes the path if necessary
case $dir in
    /*) ;;                 # already absolute
    *) dir=$(pwd)/$dir ;;  # prefix relative paths with the current directory
esac

# run tests, forwarding all arguments individually ("$@", not $*)
"$PHPBIN" "$dir/scripts/runtests.php" -p "$PHPBIN" "$@"
| true |
608bb1ea454bb09f59c0a136308c17ae9a998e33
|
Shell
|
simhaonline/kvm-scripts-2
|
/clone.sh
|
UTF-8
| 1,136 | 3.109375 | 3 |
[] |
no_license
|
#!/bin/bash
# Clone the archlinux-base libvirt domain into a new VM, sysprep it, boot it,
# and configure its hostname and DNS entries.
# Usage: clone.sh <new-vm-name>

_base=archlinux-base
_new_name=$1

# Refuse to run without a name: every path below embeds ${_new_name}.
if [ -z "${_new_name}" ]; then
    echo "Usage: $0 <new-vm-name>" >&2
    exit 1
fi

# Log everything (stdout+stderr) to a per-VM file.
# BUG FIX: the original created and redirected to ".logs/${_new_name}.log"
# *before* _new_name was assigned, so all output went to ".logs/.log"; the
# assignments were moved above the logging setup.
mkdir -p .logs
touch ".logs/${_new_name}.log"
exec > >(tee -a ".logs/${_new_name}.log") 2>&1

source ./utils.sh

# Clone the disk image and domain definition from the base VM.
virt-clone \
    --original ${_base} \
    --name "${_new_name}" \
    -f "/var/lib/libvirt/images/${_new_name}.qcow2" || exit 1

# Strip machine-specific state (ssh host keys, machine-id, logs, ...).
sudo virt-sysprep -d "${_new_name}" \
    --operations bash-history,dhcp-client-state,machine-id,lvm-uuids,logfiles,ssh-hostkeys,customize || exit 1

virsh_start_vm_sync "${_new_name}"

# Wait until libvirt reports an IP for the new domain.
until
    _ip=$(virsh_get_ip "${_new_name}") > /dev/null 2>&1
do
    echo "Waiting for domain IP"
    sleep 2
done

# Wait until sshd answers on that IP.
until
    nc -z "${_ip}" 22 > /dev/null 2>&1
do
    echo "Waiting for ssh service"
    sleep 2
done

ssh-keyscan "${_ip}" >> ~/.ssh/known_hosts
ssh "${_ip}" "sudo hostnamectl set-hostname '${_new_name}.kvm.bbrain.io'"
ssh "${_ip}" "sudo systemctl restart systemd-{networkd,resolved}"

# Give the FQDN up to ~5 tries to resolve before moving on anyway.
c=0
until
    ping -c 1 "${_new_name}.kvm.bbrain.io" > /dev/null 2>&1
do
    sleep 1; ((c++))
    [ $c -gt 5 ] && break
    echo "Waiting for fqdn"
done

ssh-keyscan "${_new_name}.kvm.bbrain.io" >> ~/.ssh/known_hosts
virsh_add_dhcp_host "${_new_name}" default
| true |
d9447b2c0398a53f9e7173c16926353e1556149f
|
Shell
|
msuess/dotfiles
|
/zsh/zsh-config/func/TS
|
UTF-8
| 552 | 3.4375 | 3 |
[] |
no_license
|
#!/bin/sh
#
# func/TS
#
# a convenient way to create an environment for collaborative testing
#
# Copyright © 1994–2008 martin f. krafft <[email protected]>
# Released under the terms of the Artistic Licence 2.0
#
# Source repository: git://git.madduck.net/etc/zsh.git
#
# NOTE: despite the /bin/sh shebang this is a zsh autoload function file —
# it uses zsh-only parameter expansion and 'local' at file scope.
# topic: all arguments joined with "_" — ${(j:_:)@} is zsh "join" syntax.
local topic filename ret
topic="${(j:_:)@}"
# Typescript path: $TMPDIR (or /tmp) / script.<topic or username>.<pid>.<timestamp>
filename="${TMPDIR:-/tmp}/script.${topic:-$LOGNAME}.$$.$(date +%Y%m%d.%H%M)"
echo "I: writing typescript to $filename ..." >&2
# Record a fresh zsh session (zsh -f: skip rc files) with a minimal prompt.
# NOTE(review): 'script -c ... -f' matches util-linux script(1); BSD/macOS
# script takes different flags — confirm on non-Linux hosts.
PS1="%# " PS2= RPS1= script -c "zsh -f" -f -q "$filename"
ret=$?
echo "I: typescript is in $filename ."
# 'return' fires when this file is sourced as an autoloaded function;
# 'exit' is the fallback when it is executed directly as a script.
return $ret
exit $ret
| true |
aed6eb3bc7273cc4f9498a2d95881c8b774c2465
|
Shell
|
acumos/system-integration
|
/AIO/nexus/setup_nexus.sh
|
UTF-8
| 7,764 | 3.703125 | 4 |
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# ===============LICENSE_START=======================================================
# Acumos Apache-2.0
# ===================================================================================
# Copyright (C) 2018 AT&T Intellectual Property. All rights reserved.
# ===================================================================================
# This Acumos software file is distributed by AT&T
# under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============LICENSE_END=========================================================
#
# What this is: script to setup Acumos Nexus repos
#
# Prerequisites:
# - acumos_env.sh script prepared through oneclick_deploy.sh or manually, to
# set install options (e.g. docker/k8s)
# - If you want to specify environment values, set and export them prior
# to running this script, e.g. by creating a script named mariadb_env.sh.
# See setup_nexus_env.sh for the default values.
# - If you are deploying Nexus in standalone mode (i.e. running this script
# directly), create a nexus_env.sh file including at least a value for
# export ACUMOS_NEXUS_DOMAIN=<exernally-resolvable domain name>
# export ACUMOS_NEXUS_HOST=<internally-resolvable domain name>
# - Additionally, for k8s:
# - Available PVs with at least 10GiB disk and default storage class
#
# Usage:
# For docker-based deployments, run this script on the AIO host.
# For k8s-based deployment, run this script on the AIO host or a workstation
# connected to the k8s cluster via kubectl (e.g. via tools/setup_kubectl.sh)
#
# $ bash setup_nexus.sh <clean|prep|setup|all>
# clean|prep|setup|all: action to execute
#
# Prepare the host/cluster for Nexus: verify the OS, create the namespace,
# optionally pre-create the data PV, and apply the OpenShift privilege
# workaround. Helpers (fail, log, create_namespace, ...) come from utils.sh.
function nexus_prep() {
trap 'fail' ERR
verify_ubuntu_or_centos
create_namespace $ACUMOS_NEXUS_NAMESPACE
# Pre-create the PV only when both PV creation and PVC->PV binding are enabled.
if [[ "$ACUMOS_CREATE_PVS" == "true" && "$ACUMOS_PVC_TO_PV_BINDING" == "true" ]]; then
bash $AIO_ROOT/../tools/setup_pv.sh all /mnt/$ACUMOS_NEXUS_NAMESPACE \
$ACUMOS_NEXUS_DATA_PV_NAME $ACUMOS_NEXUS_DATA_PV_SIZE \
"200:$ACUMOS_HOST_USER"
fi
if [[ "$K8S_DIST" == "openshift" ]]; then
log "Workaround: Acumos AIO requires privilege to set PV permissions"
oc adm policy add-scc-to-user anyuid -z default -n $ACUMOS_NEXUS_NAMESPACE
fi
}
# Tear down any existing Nexus deployment (docker or k8s) including the PVC.
function nexus_clean() {
trap 'fail' ERR
if [[ "$DEPLOYED_UNDER" == "docker" ]]; then
log "Stop any existing docker based components for nexus-service"
bash docker_compose.sh down
else
log "Stop any existing k8s based components for nexus-service"
# Render the templates first if they don't exist yet (e.g. clean is being
# run before any setup), so stop_service has a yaml file to act on.
if [[ ! -e deploy/nexus-service.yaml ]]; then
mkdir -p deploy
cp -r kubernetes/* deploy/.
replace_env deploy
fi
stop_service deploy/nexus-service.yaml
stop_deployment deploy/nexus-deployment.yaml
log "Remove PVC for nexus-service"
delete_pvc $ACUMOS_NEXUS_NAMESPACE $ACUMOS_NEXUS_DATA_PVC_NAME
fi
}
# Render and create the k8s Service for Nexus, then record the resulting
# externally reachable address (LoadBalancer IP or assigned NodePorts) back
# into the nexus environment via update_nexus_env.
function deploy_nexus_service() {
trap 'fail' ERR
log "Update the nexus-service template and deploy the service"
mkdir -p deploy
cp -r kubernetes/nexus-service.yaml deploy/.
if [[ "$ACUMOS_INGRESS_LOADBALANCER" == "true" ]]; then
log "Update Nexus service template to use LoadBalancer service type"
sedi 's/type: NodePort/type: LoadBalancer/' deploy/nexus-service.yaml
sedi '/nodePort/d' deploy/nexus-service.yaml
else
# Use dynamically assigned nodeports if port values are the default for docker
if [[ "$ACUMOS_NEXUS_API_PORT" == "8081" ]]; then
ACUMOS_NEXUS_API_PORT=
fi
if [[ "$ACUMOS_DOCKER_MODEL_PORT" == "8082" ]]; then
ACUMOS_DOCKER_MODEL_PORT=
fi
fi
replace_env deploy/nexus-service.yaml
start_service deploy/nexus-service.yaml
if [[ "$ACUMOS_INGRESS_LOADBALANCER" == "true" ]]; then
# Poll until the LoadBalancer IP leaves "pending", bounded by the
# configured wait time (10s per iteration).
local t=0
while [[ $(kubectl get svc nexus-service | grep -c "pending") -gt 0 ]]; do
t=$((t+10))
if [[ $t -eq $ACUMOS_SUCCESS_WAIT_TIME ]]; then
fail "Nexus loadbalancer IP not assigned in $ACUMOS_SUCCESS_WAIT_TIME seconds"
fi
sleep 10
done
ACUMOS_NEXUS_HOST_IP=$(kubectl get svc -n $ACUMOS_NEXUS_NAMESPACE nexus-service -o json | jq -r '.status.loadBalancer.ingress[0].ip')
update_nexus_env ACUMOS_NEXUS_HOST_IP $ACUMOS_NEXUS_HOST_IP force
else
# Read back the nodePorts k8s actually assigned and persist them.
ACUMOS_NEXUS_API_PORT=$(kubectl get services -n $ACUMOS_NEXUS_NAMESPACE nexus-service -o json | jq -r '.spec.ports[0].nodePort')
update_nexus_env ACUMOS_NEXUS_API_PORT $ACUMOS_NEXUS_API_PORT force
ACUMOS_DOCKER_MODEL_PORT=$(kubectl get services -n $ACUMOS_NEXUS_NAMESPACE nexus-service -o json | jq -r '.spec.ports[1].nodePort')
update_nexus_env ACUMOS_DOCKER_MODEL_PORT $ACUMOS_DOCKER_MODEL_PORT force
fi
}
# Deploy Nexus (docker-compose or k8s PVC+service+deployment) and block until
# its REST API answers, bounded by ACUMOS_SUCCESS_WAIT_TIME.
function nexus_setup() {
trap 'fail' ERR
if [[ "$DEPLOYED_UNDER" == "docker" ]]; then
# If not set explicitly, the default value will be for k8s based deployment...
if [[ "$ACUMOS_NEXUS_HOST" == "$ACUMOS_INTERNAL_NEXUS_HOST" ]]; then
update_nexus_env ACUMOS_NEXUS_HOST $ACUMOS_DOMAIN force
update_nexus_env ACUMOS_DOCKER_REGISTRY_HOST $ACUMOS_DOMAIN force
fi
bash docker_compose.sh up -d --build --force-recreate
wait_running nexus-service
else
log "Setup the nexus-data PVC"
setup_pvc $ACUMOS_NEXUS_NAMESPACE $ACUMOS_NEXUS_DATA_PVC_NAME \
$ACUMOS_NEXUS_DATA_PV_NAME $ACUMOS_NEXUS_DATA_PV_SIZE \
$ACUMOS_NEXUS_DATA_PV_CLASSNAME
# Create the Service only if it doesn't already exist.
if [[ "$(kubectl get service -n $ACUMOS_NEXUS_NAMESPACE nexus-service)" == "" ]]; then
deploy_nexus_service
fi
log "Update the nexus deployment template and deploy it"
mkdir -p deploy
cp -r kubernetes/nexus-deployment.yaml deploy/.
replace_env deploy/nexus-deployment.yaml
start_deployment deploy/nexus-deployment.yaml
wait_running nexus $ACUMOS_NEXUS_NAMESPACE
fi
# Prefer the in-cluster service name when it resolves; fall back to the
# external domain + API port otherwise (check_name_resolves sets NAME_RESOLVES).
check_name_resolves nexus-service
if [[ $NAME_RESOLVES == "true" ]]; then
host=nexus-service
port=8081
else
host=$ACUMOS_NEXUS_DOMAIN
port=$ACUMOS_NEXUS_API_PORT
fi
# Add -m 10 since for some reason curl seems to hang waiting for a response
cmd="curl -v -m 10 \
-u $ACUMOS_NEXUS_ADMIN_USERNAME:$ACUMOS_NEXUS_ADMIN_PASSWORD \
http://$host:$port/service/rest/v1/script"
# Poll the script API until it returns a non-empty body, 10s per try.
local i=0
while [[ ! $($cmd) ]]; do
log "Nexus API is not ready... waiting 10 seconds"
sleep 10
i=$((i+10))
if [[ $i -eq $ACUMOS_SUCCESS_WAIT_TIME ]]; then
fail "Nexus API failed to respond"
fi
done
}
# --- entry point: validate args, load environment, dispatch the action ---
if [[ $# -lt 1 ]]; then
cat <<'EOF'
Usage:
For docker-based deployments, run this script on the AIO host.
For k8s-based deployment, run this script on the AIO host or a workstation
connected to the k8s cluster via kubectl (e.g. via tools/setup_kubectl.sh)
$ bash setup_nexus.sh <clean|prep|setup|all> <nexus_host>
clean|prep|setup|all: action to execute
EOF
echo "All parameters not provided"
exit 1
fi
set -x
trap 'fail' ERR
# Run from the script's own directory; restore the caller's cwd at the end.
WORK_DIR=$(pwd)
cd $(dirname "$0")
if [[ -z "$AIO_ROOT" ]]; then export AIO_ROOT="$(cd ..; pwd -P)"; fi
source $AIO_ROOT/utils.sh
source $AIO_ROOT/acumos_env.sh
# A user-prepared nexus_env.sh (if present) overrides the defaults below.
if [[ -e nexus_env.sh ]]; then
log "Using prepared nexus_env.sh for customized environment values"
source nexus_env.sh
fi
source setup_nexus_env.sh
cp nexus_env.sh $AIO_ROOT/.
# Dispatch: "all" runs clean, prep, then setup in order.
action=$1
if [[ "$action" == "clean" || "$action" == "all" ]]; then nexus_clean; fi
if [[ "$action" == "prep" || "$action" == "all" ]]; then nexus_prep; fi
if [[ "$action" == "setup" || "$action" == "all" ]]; then nexus_setup; fi
cd $WORK_DIR
| true |
6a0a8b5cbe65623434e83ed49e653b31c6445eae
|
Shell
|
hvvka/bash-scripts
|
/src/learning/s9/attempt0.sh
|
UTF-8
| 340 | 2.984375 | 3 |
[] |
no_license
|
#!/bin/bash
# Deletes every empty (zero-size) regular file in the directory given as $1.
# Optionally appends the names of deleted files to the log file given as $2.
# Symbolic links are never examined or deleted.
# (Translated intent from the author's note: remove all zero-size files in
# the given directory, record the deleted files in a given file, and do not
# analyze symbolic links.)

delete_empty_files() {
    local dir="$1"
    local log="${2:-}"
    local file
    for file in "$dir"/*; do
        # Skip symlinks entirely (even a symlink pointing at an empty file).
        [ -L "$file" ] && continue
        # BUG FIX: the original tested `[ -s $file ]` (true for NON-empty
        # files), which deleted exactly the wrong files; `! -s` on a regular
        # file matches the stated intent of removing zero-size files.
        if [ -f "$file" ] && [ ! -s "$file" ]; then
            rm -f -- "$file"
            # Record the deleted name when a log file was requested.
            if [ -n "$log" ]; then
                printf '%s\n' "$file" >> "$log"
            fi
        fi
    done
}

# Run only when a directory argument was supplied (keeps the script inert
# when sourced or invoked without arguments).
if [ -n "${1:-}" ]; then
    delete_empty_files "$1" "${2:-}"
fi
| true |
265a229305fe20f598182711a69adc12f7657cef
|
Shell
|
mattjmorrison/polyglot
|
/run/colors.sh
|
UTF-8
| 223 | 3.171875 | 3 |
[] |
no_license
|
function language() {
echo -e "\033[0;36m\t"
echo "***** $1 *****"
echo -e "\033[0m"
}
function section() {
echo -e "\033[0;33m\t\n== $1 ==\033[0m"
}
# Print a single step name in bold red, resetting the color afterwards.
# %b keeps the original echo -e behavior of expanding backslash escapes
# embedded in the argument.
step() {
  printf '\033[1;31m%b\033[0m\n' "$1"
}
| true |
8ca407f1ba530961c6fa873dd8f07d0a226f3e77
|
Shell
|
tenfourty/LinodeEnterpriseLinuxStackScripts
|
/StackScripts/CentOS-StaticIP.sh
|
UTF-8
| 1,809 | 3.0625 | 3 |
[] |
no_license
|
#!/bin/bash
# Linode StackScript: basic CentOS setup plus static public/private IP
# configuration. Runs once at Linode deployment time.
#
# The "<udf .../>" comment tags below are parsed by Linode to build the
# deployment form; each "name" attribute becomes an UPPERCASE environment
# variable used at the bottom of this script, so do not rename them.
# <udf name="machinename" label="Hostname - make sure reverse DNS is setup" example="hostname"/>
# <udf name="publicip" label="Linode Public IP" example="178.79.134.167"/>
# <udf name="publicnetmask" label="Netmask" default="255.255.255.0" example="255.255.255.0"/>
# <udf name="publicgateway" label="Gateway" example="178.79.134.1"/>
# <udf name="dnsresolver1" label="DNS Resolver 1" default="109.74.192.20" example="109.74.192.20"/>
# <udf name="dnsresolver2" label="DNS Resolver 2" default="109.74.193.20" example="109.74.193.20"/>
# <udf name="dnsresolver3" label="DNS Resolver 3" default="109.74.194.20" example="109.74.194.20"/>
# <udf name="privateip" label="Linode Private IP" example="192.168.154.122"/>
# <udf name="privatenetmask" label="Private Netmask" default="255.255.128.0" example="255.255.128.0"/>
# "<ssinclude ...>" is StackScript templating expanded server-side by Linode
# before execution — these lines are not valid standalone shell.
# NOTE(review): the helper functions called below (system_update,
# set_public_ip, ...) are presumed to come from these two included
# StackScripts — confirm against StackScripts 20 and 1641.
source <ssinclude StackScriptID="20">
source <ssinclude StackScriptID="1641">
###########################################################
# CentOS-StaticIP - StackScripts id 1642
# http://www.linode.com/stackscripts/view/?StackScriptID=1642
#
# This script does the same tasks as our Basic setup script but also sets static public and private ips.
# This script is based on the Fedora/CentOS Basics (http://www.linode.com/stackscripts/view/?StackScriptID=52) with my own extensions.
#
###########################################################

###########################################################
# Start Script
###########################################################
# update and install our stuff
system_update
install_basics
install_private
# Hostname and timezone from the deployment form.
set_hostname $MACHINENAME
set_timezone
# Static addressing: configure public then private interface values, then
# bounce networking so they take effect.
set_public_ip $PUBLICIP $PUBLICNETMASK $PUBLICGATEWAY
set_private_ip $PRIVATEIP $PRIVATENETMASK
restart_networking
# DNS resolvers are applied after the first restart, then networking is
# restarted again so the resolver change is picked up.
set_dns_resolver $DNSRESOLVER1 $DNSRESOLVER2 $DNSRESOLVER3
restart_networking
| true |
0b3dacdbb3cd8d6cdb88a781a0dd8e2766e8d73c
|
Shell
|
limite01/testeicasei
|
/bkp_rotina.sh
|
UTF-8
| 861 | 3.171875 | 3 |
[] |
no_license
|
#!/bin/bash
# Daily backup routine: dumps the WordPress database, archives it together
# with service configuration trees and admin scripts, then prunes archives
# older than 5 days.
#
# Fixes vs. original: the shebang was "#/bin/bash" (missing "!", so the file
# was not guaranteed to run under bash); the dump result was not checked; and
# the pruning step ran "rm -rf" on anything under (and including) $ROOT_DIR
# older than 5 days instead of only this script's own tarballs.

# Date stamp for the archive name (YYYY-MM-DD).
DATA=$(date +%F)

# Directories / files to back up
ROOT_DIR=/backup
TMP_ROOT_DIR=/backup/tmp
BKP_CONF_APACHE=/etc/apache2
BKP_CONF_MUNIN=/etc/munin
BKP_CONF_FAIL2BAN=/etc/fail2ban
BKP_WORDPRESS=/sites/wordpress
BKP_CONF_FIREWALL=/etc/init.d/start_firewall.sh
BKP_SCRIPTS=/root/scripts

# Commands
MYSQLDUMP=/usr/bin/mysqldump
TAR=/bin/tar
RM=/bin/rm
FIND=/usr/bin/find
CHMOD=/bin/chmod

# Dump the database; abort on failure so a truncated/empty SQL file is
# never silently archived.
# SECURITY: the password is hardcoded and visible in `ps` output while
# mysqldump runs — prefer ~/.my.cnf or --defaults-extra-file instead.
$MYSQLDUMP -u userwp -p123456 dbwordpress > "$TMP_ROOT_DIR/bkp_dbwordpress.sql" || exit 1

# Archive the dump plus the configuration trees, drop the plain-text dump,
# and restrict the archive to root-only access.
$TAR -czf "$ROOT_DIR/bkp_confs-$DATA.tar.gz" "$TMP_ROOT_DIR/bkp_dbwordpress.sql" \
    "$BKP_CONF_APACHE" "$BKP_CONF_MUNIN" "$BKP_CONF_FAIL2BAN" "$BKP_WORDPRESS" \
    "$BKP_CONF_FIREWALL" "$BKP_SCRIPTS"
$RM "$TMP_ROOT_DIR/bkp_dbwordpress.sql"
$CHMOD 600 "$ROOT_DIR/bkp_confs-$DATA.tar.gz"

# Delete archives older than 5 days — restricted to this script's own
# tarballs at the top level, so $ROOT_DIR itself and unrelated files are
# never removed recursively.
$FIND "$ROOT_DIR" -maxdepth 1 -type f -name 'bkp_confs-*.tar.gz' -ctime +5 -exec $RM -f {} \;
| true |
b47785672a599fed2abbd0eb0bb4c3240d34e044
|
Shell
|
ilventu/aur-mirror
|
/bzip2-tools/PKGBUILD
|
UTF-8
| 696 | 2.71875 | 3 |
[] |
no_license
|
# Maintainer: TDY <[email protected]>
# Contributor: Christoph Zeiler <rabyte__gmail>
# Contributor: Judd <[email protected]>

# PKGBUILD for the bzip2 wrapper scripts (bzdiff, bzgrep, bzmore), which the
# main bzip2 package does not ship.
pkgname=bzip2-tools
pkgver=1.0.5
pkgrel=1
pkgdesc="Supplementary wrappers for bzip2 (bzdiff, bzgrep, and bzmore)"
arch=('i686' 'x86_64')
license=('custom')
url="http://sources.redhat.com/bzip2/"
depends=('bash' 'bzip2')
source=(http://www.bzip.org/$pkgver/bzip2-$pkgver.tar.gz)
md5sums=('3c15a0c8d1d3ee1c46a1634d00617b1a')

build() {
  # Quoted $srcdir/$pkgdir: makepkg build roots may contain spaces, and the
  # original unquoted expansions would word-split there.
  cd "$srcdir/bzip2-$pkgver"

  # Create the target directories, then install the wrapper scripts and
  # their man pages; the license goes to the standard custom-license path.
  install -dm755 "$pkgdir"/usr/{bin,share/man/man1}
  install -m755 bz{diff,grep,more} "$pkgdir/usr/bin/"
  install -m644 bz{diff,grep,more}.1 "$pkgdir/usr/share/man/man1"
  install -Dm644 LICENSE "$pkgdir/usr/share/licenses/$pkgname/LICENSE"
}
| true |
3a36db46198f21269e57040ed0ce0d32c285f968
|
Shell
|
taitpthomas/firmware
|
/linuxcnc/src/emc/usr_intf/renaconf/build.sh
|
UTF-8
| 973 | 3.109375 | 3 |
[] |
no_license
|
#!/bin/bash
# Build (or clean) the renaconf Debian package.
#   build.sh        - stage glade/python files and build the .deb
#   build.sh clean  - remove generated artifacts and the bundled binaries only

D=Debian/renaconf/files

# Remove artifacts from any previous build.
echo "remove old package files"
rm -f ./Debian/*.deb \
      ./Debian/renaconf/debian/renaconf/usr/bin/AN1310cl \
      ./Debian/renaconf/debian/renaconf/usr/bin/wch6474 \
      ./Debian/renaconf/debian/renaconf/DEBIAN/control \
      ./Debian/renaconf/debian/renaconf/DEBIAN/md5sums \
      ./Debian/renaconf/debian/renaconf/DEBIAN/postinst \
      ./Debian/renaconf_1.0.0.dsc \
      ./Debian/renaconf_1.0.0_i386.changes \
      ./Debian/renaconf_1.0.0.tar.gz

# clean only: also drop the staged binaries, then stop before building.
if [ "${1:-}" = "clean" ]; then
    rm -f ./Debian/renaconf/files/usr/bin/AN1310cl \
          ./Debian/renaconf/files/usr/bin/wch6474
    echo "clean only"
    exit
fi

# Stage UI definitions and python sources into the package file tree.
echo "copying glade files"
cp *.glade "${D}/usr/share/linuxcnc/renaconf"
echo "copying py scripts"
cp renaconf.py "${D}/usr/bin/renaconf"
chmod +x "${D}/usr/bin/renaconf"
cp *.py "${D}/usr/share/pyshared/renaconf/"

# Abort if the packaging directory is missing; the original ran
# dpkg-buildpackage in whatever directory we happened to be in when cd failed.
cd ./Debian/renaconf || exit 1
echo "build debian package: use sudo dpkg -i <packagename>"
dpkg-buildpackage
| true |
8f8a6d6ed95c3a0d3eb17157cfbe01137477e0f3
|
Shell
|
bugryder/mmpublish
|
/go.sh
|
UTF-8
| 1,917 | 3.625 | 4 |
[] |
no_license
|
#!/bin/bash
#
# 繁星网站发版工具
#
# Copyright © 2016 kugou.com
#
# Authors:
#   mmfei <[email protected]>
#

# Working directory — the script expects to be launched from its own directory.
# workDir=$(dirname $0)
# workDir=$(readlink -f $workDir)
workDir=$(pwd)

# IP address of the SSH client running this deploy session.
ip=${SSH_CLIENT%% *}

echo "$workDir"

# Shared helpers (msg_* / error_exit).
# NOTE(review): common.sh also appears to be what defines $toolsDir, which is
# used below but never assigned in this file — confirm against common.sh.
source "$workDir/common.sh"

# Path to the ini-file parsing helper.
# Bug fixed: this assignment originally ran BEFORE common.sh was sourced,
# while $toolsDir was still empty, producing "/ini.sh"; it now runs after
# the source so $toolsDir is populated.
iniTool="${toolsDir}/ini.sh"

# Per-project configuration directory.
projectsDir=$workDir/projectConfig
mkdir -p "$toolsDir" "$projectsDir"

echo
msg_green '#### 欢迎使用繁星网站发布系统 ####'
echo

#
# Project selection
#
msg_white '#### 选择发布项目 ####'
project=''
declare -a allProjects

# Strip leading whitespace from every config file before parsing; guarded so
# sed is not invoked (and does not error) when no config file exists.
if ls "$projectsDir"/*.config.ini >/dev/null 2>&1; then
    sed -i 's/^[ \t]*//g' "$projectsDir"/*.config.ini
fi

# Build the selection menu as "<display name>__<project id>" entries,
# iterating the glob directly instead of parsing `ls` output.
i=0
for iniFile in "$projectsDir"/*.config.ini; do
    [ -e "$iniFile" ] || continue
    project=$(basename "${iniFile%%.config.ini}")
    name=$("$iniTool" "$iniFile" base name)
    name=$(echo "$name" | tr -d ' ')
    allProjects[$i]="${name}__$project"
    i=$((i + 1))
done
allProjects[$i]="取消本次发布!"

echo
PS3=$(echo -e "\n请输入将要发布的项目编号:")
select project in "${allProjects[@]}"; do
    break
done

# Resolve the chosen project; bail out on the cancel entry or a bad config.
echo "$project" | grep -q '取消' && error_exit
project=$(echo "$project" | awk -F'__' '{print $2}')
iniFile=$projectsDir/${project}.config.ini
[ -f "$iniFile" ] || error_exit
projectName=$("$iniTool" "$iniFile" base name)

## Repository settings for the project
repoType=$("$iniTool" "$iniFile" repository 'type')            # VCS type
repoHost=$("$iniTool" "$iniFile" repository host)              # source repository URL
repoName=$(basename "$repoHost")
repoBranch=$("$iniTool" "$iniFile" repository branch)          # release branch
repoBranchGray=$("$iniTool" "$iniFile" repository grayBranch)  # gray/canary branch
isMerge=$("$iniTool" "$iniFile" repository merge)              # merge back to source?
mergeBack=$("$iniTool" "$iniFile" repository mergeBack)        # branch to merge back into
# Defaults when the config omits a key.
[ "x$repoType" = 'x' ] && repoType='git'
[ "x$repoBranch" = 'x' ] && repoBranch='master'
[ "x$isMerge" = 'x' ] && isMerge='0'
[ "x$mergeBack" = 'x' ] && mergeBack='develop'
| true |
bf37e661a3e49685b172e94f9749beab0c526d53
|
Shell
|
tysm/cpsols
|
/codeforces/edu/ITMO Academy: pilot course/Binary Search/step 5/check.sh
|
UTF-8
| 242 | 2.703125 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Stress tester: repeatedly generate a random case (gen.py seeded with the
# iteration number), run the candidate (./a) and the reference solution
# (sol.py), and stop at the first case where their outputs differ.
# NOTE(review): "cp-gcc" is assumed to be a local compiler wrapper/alias —
# confirm it exists in the environment.
cp-gcc c.cpp -o a || exit 1

for ((i = 1; ; i++)); do
    echo "$i"
    python3 gen.py "$i" > in
    ./a < in > o1
    python3 sol.py < in > o2
    # Compare the outputs already captured above. The original additionally
    # re-ran BOTH programs inside `diff <(./a < in) <(python3 sol.py < in)`,
    # doubling the work per iteration (and risking a mismatch between the
    # diffed run and the saved o1/o2 for nondeterministic solutions).
    diff o1 o2 || break
done

# Show the failing test case.
cat in
# sed -i 's/\r$//' filename ----- removes \r from the file (strip CRLF endings)
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.