blob_id | language | repo_name | path | src_encoding | length_bytes | score | int_score | detected_licenses | license_type | text | download_success
---|---|---|---|---|---|---|---|---|---|---|---
6de512e59d96cc9e6188ee6769fab6a5f0e4636a | Shell | span999/build-scripts | /google/emulater/android-run.sh | UTF-8 | 1,196 | 3.09375 | 3 | ["Apache-2.0"] | permissive |
#!/bin/sh
#
#
# http://dl.google.com/android/android-sdk_r22.0.5-linux.tgz
#
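# Assumes the SDK tarball above has been unpacked into BIN_PATH below;
# adjust BIN_PATH if your SDK lives elsewhere.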
ROT_PATH=`pwd`
BIN_PATH=~/workshop/bin/android-sdk
IMG_PATH=${ROT_PATH}/out
TOOLS_PATH=`find ${BIN_PATH}/ -name tools -type d -not -path '*com*' -not -path '*docs*'`
EMULATOR=`find ${BIN_PATH}/ -name emulator -executable -type f -path '*bin*'`
ANDROID=`find ${BIN_PATH}/ -name android -executable -type f -path '*bin*'`
RAMDISKIMG=`find ${IMG_PATH}/ -name ramdisk.img -type f`
SYSTEMIMG=`find ${IMG_PATH}/ -name system.img -type f -not -path '*obj*'`
USERDATAIMG=`find ${IMG_PATH}/ -name userdata.img -type f`
echo "tools path=${TOOLS_PATH}"
echo "emulator=${EMULATOR}"
echo "android=${ANDROID}"
echo "ramdisk.img=${RAMDISKIMG}"
echo "system.img=${SYSTEMIMG}"
echo "userdata.img=${USERDATAIMG}"
echo "you have download tools & API within android device manager ..."
#cd ${TOOLS_PATH}
#echo current path=`pwd`
#./android
#cd ${ROT_PATH}
echo "create AVD on android GUI or CLI ... "
cd ${TOOLS_PATH}
echo current path=`pwd`
./android list avd
cd ${ROT_PATH}
cd ${TOOLS_PATH}
echo current path=`pwd`
./emulator -avd AVD_for_my_Nexus_7 -system ${SYSTEMIMG} -ramdisk ${RAMDISKIMG} -data ${USERDATAIMG}
cd ${ROT_PATH}
| true |
7fb99d28b1bd3c86aface94df76c3a3b067ef8a9 | Shell | couchbase/build-infra | /terraform/gerrit/files/scripts/gerrit-backup | UTF-8 | 3,456 | 3.8125 | 4 | ["Apache-2.0"] | permissive |
#!/bin/bash -ex
tag=$(date +%Y%m%d)
function error() {
echo "$@"
exit 1
}
if [[ -z "${REGION}" || -z "${VOLUME_ID}" ]]
then
error "REGION and VOLUME_ID must be present in the environment"
fi
# If an existing volume is present, we want to fail up front
echo "Checking to ensure no backup volume is present"
if [ "$(aws ec2 describe-volumes --region ${REGION} --filters Name=tag:Name,Values=gerrit-backup-volume | jq -r '.Volumes[0]')" != "null" ]
then
error "Backup volume already exists"
fi
# We'll trap EXIT and clean up the volume if it's been created
function cleanup() {
for volume in "$@"
do
echo "Cleaning up ${volume}"
sleep 1
umount /mnt/{backup,scratch} &>/dev/null || true
aws ec2 detach-volume --volume-id ${volume} --force --region ${REGION} || true
sleep 5
aws ec2 wait volume-available --volume-ids ${volume} --region ${REGION}
aws ec2 delete-volume --volume-id ${volume} --region ${REGION}
done
}
echo "Creating volumes"
export snapshot=$(aws ec2 describe-snapshots --filters "Name=volume-id,Values=${VOLUME_ID}" "Name=status,Values=completed" --region ${REGION} | jq -r '.[]|max_by(.StartTime)|.SnapshotId')
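# A sketch of what the jq filter above selects, on hypothetical data:
#   {"Snapshots":[{"SnapshotId":"snap-a","StartTime":"2021-01-01T00:00:00Z"},
#                 {"SnapshotId":"snap-b","StartTime":"2021-02-01T00:00:00Z"}]}
# .[] yields the Snapshots array, max_by(.StartTime) picks the newest entry
# (snap-b here), and .SnapshotId extracts its id.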
export backup_volume=$(/usr/local/bin/aws ec2 create-volume \
--region ${REGION} \
--volume-type gp3 \
--snapshot-id ${snapshot} \
--tag-specifications 'ResourceType=volume,Tags=[{Key=Name,Value=gerrit-backup-volume},{Key=Owner,Value=build-team},{Key=Project,Value=gerrit},{Key=Purpose,Value=backup-restore}]' \
--availability-zone ${REGION}a | jq -r '.VolumeId')
export scratch_volume=$(/usr/local/bin/aws ec2 create-volume \
--region ${REGION} \
--volume-type gp3 \
--size 120 \
--tag-specifications 'ResourceType=volume,Tags=[{Key=Name,Value=gerrit-scratch-volume},{Key=Owner,Value=build-team},{Key=Project,Value=gerrit},{Key=Purpose,Value=backup-restore}]' \
--availability-zone ${REGION}a | jq -r '.VolumeId')
trap "cleanup ${backup_volume} ${scratch_volume}" EXIT
# Check volumes
if [[ "${backup_volume}" = "" || "${scratch_volume}" = "" ]]
then
error "Volume creation failed"
fi
echo "Attaching volumes"
aws ec2 wait volume-available --volume-ids ${backup_volume} --region ${REGION}
aws ec2 wait volume-available --volume-ids ${scratch_volume} --region ${REGION}
aws ec2 attach-volume --volume-id ${backup_volume} --instance-id `cat /var/lib/cloud/data/instance-id` --device /dev/sdf --region ${REGION}
aws ec2 attach-volume --volume-id ${scratch_volume} --instance-id `cat /var/lib/cloud/data/instance-id` --device /dev/sdg --region ${REGION}
aws ec2 wait volume-in-use --volume-ids ${backup_volume} --region ${REGION}
aws ec2 wait volume-in-use --volume-ids ${scratch_volume} --region ${REGION}
sleep 10
sudo mkfs -t ext4 /dev/sdg
for vol in backup scratch
do
if ! grep -qs "${vol}" /proc/mounts
then
sudo mount /mnt/${vol}
fi
done
cd /mnt/backup
echo "Backing up"
if ! sudo tar -czf /mnt/scratch/backup-${tag}.tgz \
"cache" \
"data" \
"db" \
"etc" \
"git" \
"index" \
"lib" \
"logs" \
"plugins" \
"static"
then
error "Couldn't create tarball"
else
aws s3 cp /mnt/scratch/backup-${tag}.tgz s3://cb-gerrit.backups/ || \
error "S3 upload failed"
fi
sudo rm -rf /mnt/scratch/backup-${tag}.tgz
| true |
ff7b6bd4a1e03b8179c03840baeb9bb57e6dd734 | Shell | CamilaGalvan/ISU | /tercera_entrega/Giuliana/SO/Shells/bajaGrupo.sh | UTF-8 | 771 | 3.296875 | 3 | [] | no_license |
#!/bin/bash
DnGroupp=0
while [ $DnGroupp -eq 0 ]
do
clear
tput cup 3 3; echo "Group name or GID to delete"
tput cup 4 3; read GIDD
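# First try to match by group name; if that fails, fall back to matching
# the numeric GID in field 3 of /etc/group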
busqueda=$(cut -d: -f1 /etc/group |grep "$GIDD")
if [ -z "$busqueda" ]
then
busqueda=$(cut -d: -f3 /etc/group | grep -n "$GIDD" | cut -d: -f1)
if [ -z "$busqueda" ]
then
tput cup 2 3; tput setaf 1; read -p "Ingrese un grupo valido" aux; tput setaf 0
else
Groupp=$(head -$busqueda /etc/group | tail -1 | cut -d: -f1 )
DnGroupp=1
fi
else
Groupp=$GIDD
DnGroupp=1
fi
if [ $DnGroupp -eq 1 ]
then
if sudo groupdel $Groupp
then
tput cup 2 3; tput setaf 2; read -p "Group $Groupp deleted" aux; tput setaf 0
DnGroupp=1
else
tput cup 2 3; tput setaf 1; read -p "Error" aux; tput setaf
fi
fi
done
| true |
19d065e9809e8108dbf2fd8c9a264711e60fe9cb | Shell | learningclouddex/devops | /Scripting/loopcontrol.sh | UTF-8 | 469 | 3.328125 | 3 | [] | no_license |
#!/bin/bash
# Example1
i=0
while [[ $i -lt 5 ]]
do
echo "Number: $i"
((i++))
if [[ $i -eq 2 ]]; then
break
fi
done
echo 'All Done!'
# Example2
for i in {1..3}; do
for j in {1..3}; do
if [[ $j -eq 2 ]]; then
break
fi
echo "j: $j"
done
echo "i: $i"
done
echo 'All Done!'
## Continue
# Example1
i=0
while [[ $i -lt 5 ]]; do
((i++))
if [[ "$i" == '2' ]]; then
continue
fi
echo "Number: $i"
done
echo 'All Done!'
| true |
5eaed328fbdd087cb075234f382d0e5c52d1c757 | Shell | mepowerleo10/dotfiles | /.bin/select_window.sh | UTF-8 | 4,671 | 2.984375 | 3 | [] | no_license |
#! /bin/bash
# -*- mode: sh -*-
KEY=$RANDOM
res1=$(mktemp --tmpdir term-tab1.XXXXXXXX)
res2=$(mktemp --tmpdir term-tab2.XXXXXXXX)
res3=$(mktemp --tmpdir term-tab3.XXXXXXXX)
out=$(mktemp --tmpdir term-out.XXXXXXXX)
# cleanup
trap "rm -f $res1 $res2 $res3 $out" EXIT
export YAD_OPTIONS="--bool-fmt=t --separator='\n' --quoted-output"
rc_file="${1:-$HOME/.Xresources}"
# parse rc file
while read ln; do
case $ln in
*allow_bold:*) bold=$(echo ${ln#*:}) ;;
*font:*) font=$(echo ${ln#*:}) ;;
*scrollBar:*) sb=$(echo ${ln#*:}) ;;
*loginShell:*) ls=$(echo ${ln#*:}) ;;
*title:*) title=$(echo ${ln#*:}) ;;
*termName:*) term=$(echo ${ln#*:}) ;;
*geometry:*) geom=$(echo ${ln#*:}) ;;
*foreground:*) fg=$(echo ${ln#*:}) ;;
*background:*) bg=$(echo ${ln#*:}) ;;
*highlightColor:*) hl=$(echo ${ln#*:}) ;;
*highlightTextColor:*) hlt=$(echo ${ln#*:}) ;;
*color0:*) cl0=$(echo ${ln#*:}) ;;
*color1:*) cl1=$(echo ${ln#*:}) ;;
*color2:*) cl2=$(echo ${ln#*:}) ;;
*color3:*) cl3=$(echo ${ln#*:}) ;;
*color4:*) cl4=$(echo ${ln#*:}) ;;
*color5:*) cl5=$(echo ${ln#*:}) ;;
*color6:*) cl6=$(echo ${ln#*:}) ;;
*color7:*) cl7=$(echo ${ln#*:}) ;;
*color8:*) cl8=$(echo ${ln#*:}) ;;
*color9:*) cl9=$(echo ${ln#*:}) ;;
*color10:*) cl10=$(echo ${ln#*:}) ;;
*color11:*) cl11=$(echo ${ln#*:}) ;;
*color12:*) cl12=$(echo ${ln#*:}) ;;
*color13:*) cl13=$(echo ${ln#*:}) ;;
*color14:*) cl14=$(echo ${ln#*:}) ;;
*color15:*) cl15=$(echo ${ln#*:}) ;;
!*) ;; # skip comments
"") ;; # skip empty lines
*) misc+=$(echo "$ln\n") ;;
esac
done < <(xrdb -query | grep -i rxvt)
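# Example (hypothetical xrdb line): "URxvt.font: xft:Monospace:size=11"
# matches the *font:* pattern above; ${ln#*:} strips through the first ':',
# leaving " xft:Monospace:size=11", and the unquoted echo trims the leading space.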
width=${geom%%x*}
height=${geom##*x}
fn=$(pfd -p -- "$font")
echo $font
echo $fn
# main page
yad --plug=$KEY --tabnum=1 --form \
--field=$"Title:" "${title:-Terminal}" \
--field=$"Width::num" ${width:-80} \
--field=$"Height::num" ${height:-25} \
--field=$"Font::fn" "${fn:-Monospace}" \
--field=$"Term:" "${term:-rxvt-256color}" \
--field=$"Enable login shell:chk" ${ls:-false} \
--field=$"Enable scrollbars:chk" ${sb:-false} \
--field=$"Use bold font:chk" ${bold:-false} \
--field=":lbl" "" \
--field=$"Foreground::clr" ${fg:-#ffffff} \
--field=$"Background::clr" ${bg:-#000000} \
--field=$"Highlight::clr" ${hl:-#0000f0} \
--field=$"Highlight text::clr" ${hlt:-#ffffff} > $res1 &
# palette page
yad --plug=$KEY --tabnum=2 --form --columns=2 \
--field=$"Black::clr" ${cl0:-#2e3436} \
--field=$"Red::clr" ${cl1:-#cc0000} \
--field=$"Green::clr" ${cl2:-#4e9a06} \
--field=$"Brown::clr" ${cl3:-#c4a000} \
--field=$"Blue::clr" ${cl4:-#3465a4} \
--field=$"Magenta::clr" ${cl5:-#75507b} \
--field=$"Cyan::clr" ${cl6:-#06989a} \
--field=$"Light gray::clr" ${cl7:-#d3d7cf} \
--field=$"Gray::clr" ${cl8:-#555753} \
--field=$"Light red::clr" ${cl9:-#ef2929} \
--field=$"Light green::clr" ${cl10:-#8ae234} \
--field=$"Yellow::clr" ${cl11:-#fce94f} \
--field=$"Light blue::clr" ${cl12:-#729fcf} \
--field=$"Light magenta::clr" ${cl13:-#ad7fa8} \
--field=$"Light cyan::clr" ${cl14:-#34e2e2} \
--field=$"White::clr" ${cl15:-#eeeeec} > $res2 &
# misc page
echo -e $misc | yad --plug=$KEY --tabnum=3 --text-info --editable > $res3 &
# main dialog
yad --window-icon=utilities-terminal \
--notebook --key=$KEY --tab=$"Main" --tab=$"Palette" --tab=$"Misc" \
--title=$"Terminal settings" --image=utilities-terminal \
--width=400 --image-on-top --text=$"Terminal settings (URxvt)"
# recreate rc file
if [[ $? -eq 0 ]]; then
mkdir -p ${rc_file%/*}
eval TAB1=($(< $res1))
eval TAB2=($(< $res2))
echo -e "! urxvt settings\n" > $out
# add main
cat <<EOF >> $out
URxvt.title: ${TAB1[0]}
URxvt.geometry: ${TAB1[1]}x${TAB1[2]}
URxvt.font: $(pfd "${TAB1[3]}")
URxvt.termName: ${TAB1[4]}
URxvt.loginShell: ${TAB1[5]}
URxvt.scrollBar: ${TAB1[6]}
URxvt.allow_bold: ${TAB1[7]}
URxvt.foreground: ${TAB1[9]}
URxvt.background: ${TAB1[10]}
URxvt.highlightColor: ${TAB1[11]}
URxvt.highlightTextColor: ${TAB1[12]}
EOF
# add palette
echo >> $out
for i in {0..15}; do
echo "URxvt.color$i: ${TAB2[$i]}" >> $out
done
echo >> $out
# add misc
cat $res3 >> $out
echo >> $out
# load new settings
#xrdb -merge $out
if [[ $rc_file == $HOME/.Xresources ]]; then
[[ -e $rc_file ]] && sed -i "/^URxvt.*/d" $rc_file
cat $out >> $rc_file
else
mv -f $out $rc_file
fi
fi
| true |
2ced3d14b879aed674226c65e7c748c4ef5af18e | Shell | p3430233/gaffer-docker | /cd/publish_images.sh | UTF-8 | 2,222 | 3.65625 | 4 | ["Apache-2.0", "LicenseRef-scancode-proprietary-license"] | permissive |
#!/bin/bash
# Copyright 2020 Crown Copyright
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Script for publishing to DockerHub
# Requires the following environment variables to be set:
# APP_VERSION - the release name
# DOCKER_USERNAME - the dockerhub username
# DOCKER_PASSWORD - the docker password
# Gets project root directory by calling two nested "dirname" commands on this file
getRootDirectory() {
echo "$( cd $(dirname $(dirname $0)) > /dev/null 2>&1 && pwd )"
}
# Pushes Tags to Dockerhub
pushTags() {
name=$1
version=$2
app_version=$3
tags="$(echo ${version} | sed -e "s|\(.*\)\.\(.*\)\..*|${name}:${version}_build.${app_version} ${name}:${version} ${name}:\1.\2 ${name}:\1 ${name}:latest|")"
IFS=' '
read -a tagArray <<< "${tags}"
for tag in "${tagArray[@]}"; do
docker tag "${name}:${version}" "${tag}"
docker push "${tag}"
done
}
ROOT_DIR="$(getRootDirectory)"
# This sets the values for:
# HADOOP_VERSION
# GAFFER_VERSION
# ACCUMULO_VERSION
# GAFFER_TOOLS_VERSION
source "${ROOT_DIR}"/docker/gaffer/.env
# Log in to Dockerhub
docker login -u "${DOCKER_USERNAME}" -p "${DOCKER_PASSWORD}"
# Push images to Dockerhub
pushTags gchq/hdfs "${HADOOP_VERSION}" "${APP_VERSION}"
pushTags gchq/accumulo "${ACCUMULO_VERSION}" "${APP_VERSION}"
pushTags gchq/gaffer "${GAFFER_VERSION}" "${APP_VERSION}"
pushTags gchq/gaffer-rest "${GAFFER_VERSION}" "${APP_VERSION}"
pushTags gchq/gaffer-ui "${GAFFER_TOOLS_VERSION}" "${APP_VERSION}"
pushTags gchq/gaffer-road-traffic-loader "${GAFFER_VERSION}" "${APP_VERSION}"
pushTags gchq/gaffer-operation-runner "${GAFFER_VERSION}" "${APP_VERSION}"
pushTags gchq/gaffer-federated-store "${GAFFER_VERSION}" "${APP_VERSION}"
| true |
3aaa24e71548b80796dabbc932f412f9a84c1963 | Shell | DSC-SPIDAL/harpgbdt | /run/16.Final-Sync/yfccdense/standard-block-yfccdense-sync.sh | UTF-8 | 2,433 | 2.515625 | 3 | [] | no_license |
#!/bin/bash
export _gbtproject_=`pwd`
export LD_LIBRARY_PATH=/opt/Anaconda3-5.0.1/lib
bin=../bin/xgboost-g++-omp-dense-halftrick-short-splitonnode-fitmem-release
hist=../bin/xgb-latest
tagname=`basename $bin`
echo "run speedup test with tagname=$tagname"
if [ ! -f $bin ]; then
echo "Usage: run-speedup.sh <bin>"
echo "$bin not exist, quit"
exit -1
fi
tag=`date +%m%d%H%M%S`
#
# d6
#
export RUNID=$tag-d6test
#../bin/xgb-convergence.sh ${bin} yfccmeta 300 6 lossguide 32 500000 32 0 4 lossguide data_parallelism=0 group_parallel_cnt=32 topk=4 async_mixmode=2 loadmeta=yfccmeta missing_value=0
#../bin/xgb-convergence.sh ${bin} yfccmeta 300 6 lossguide 32 500000 32 0 32 depth data_parallelism=0 group_parallel_cnt=32 topk=4 async_mixmode=2 loadmeta=yfccmeta missing_value=0
../bin/xgb-convergence.sh ${hist} yfcc 300 6 hist 32 500000 32 0 4 lossguide data_parallelism=0 group_parallel_cnt=32 topk=4 async_mixmode=2 loadmeta=yfccmeta missing_value=0
../bin/xgb-convergence.sh ${hist} yfcc 300 6 hist 32 500000 32 0 32 depth data_parallelism=0 group_parallel_cnt=32 topk=4 async_mixmode=2 loadmeta=yfccmeta missing_value=0
#../bin/lightgbm-convergence.sh ../bin/lightgbm yfcc 300 6 feature 32
##
## standard dp1f0k1n1
##
#export RUNID=$tag-dp1f0k1n1
#../bin/xgb-convergence.sh ${bin} yfccmeta 300 8 lossguide 32 500000 32 0 1 lossguide data_parallelism=1 group_parallel_cnt=32 topk=1 async_mixmode=2 loadmeta=yfccmeta missing_value=0
#../bin/xgb-convergence.sh ${bin} yfccmeta 300 9 lossguide 32 500000 32 0 1 lossguide data_parallelism=1 group_parallel_cnt=32 topk=1 async_mixmode=2 loadmeta=yfccmeta missing_value=0
#../bin/xgb-convergence.sh ${bin} yfccmeta 300 10 lossguide 32 500000 32 0 1 lossguide data_parallelism=1 group_parallel_cnt=32 topk=1 async_mixmode=2 loadmeta=yfccmeta missing_value=0
#
##
## standard dp0f1k1n1
##
#export RUNID=$tag-dp0f1k1n1
#../bin/xgb-convergence.sh ${bin} yfccmeta 300 8 lossguide 32 500000 1 0 1 lossguide data_parallelism=0 group_parallel_cnt=32 topk=1 async_mixmode=2 loadmeta=yfccmeta missing_value=0
#../bin/xgb-convergence.sh ${bin} yfccmeta 300 9 lossguide 32 500000 1 0 1 lossguide data_parallelism=0 group_parallel_cnt=32 topk=1 async_mixmode=2 loadmeta=yfccmeta missing_value=0
#../bin/xgb-convergence.sh ${bin} yfccmeta 300 10 lossguide 32 500000 1 0 1 lossguide data_parallelism=0 group_parallel_cnt=32 topk=1 async_mixmode=2 loadmeta=yfccmeta missing_value=0
#
#
| true |
b721f0b338a3b6eff823392ffd7bbf091e87f021 | Shell | jkarsrud/dotfiles | /.bash_profile | UTF-8 | 314 | 2.921875 | 3 | [] | no_license |
# Load ~/.bash_prompt, ~/.exports, ~/.aliases, ~/.functions, ~/.git-completion
for file in ~/.{bash_prompt,exports,aliases,functions,git-completion}; do
[ -r "$file" ] && source "$file"
done
unset file
# Case-insensitive globbing (used in pathname expansion)
shopt -s nocaseglob
export LC_ALL=en_US.UTF-8
export LANG=en_US.UTF-8
| true |
a7a764cbe2319c999252b68f02a42698de0e13b0 | Shell | ravitomar2010/hello-world | /a2i-devops/stage/terraform/resources/nifi/prepare_final_yaml.sh | UTF-8 | 693 | 3.078125 | 3 | [] | no_license |
#!/bin/sh
filename='./nifi/node_list.txt'
cat ./nifi.yaml > tmp_nifi.yaml
node_count=1
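# For each node listed in node_list.txt, rewrite that node's numbered
# placeholders (e.g. dns_name_of_server_nifi_1) into the generic keys
# expected downstream.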
while read line; do
sed "s/dns_name_of_server_nifi_$node_count/dns_name_of_server/g" tmp_nifi.yaml > tmp_nifi1.yaml
sed "s/service_name_nifi_$node_count/service_name/g" tmp_nifi1.yaml > tmp_nifi2.yaml
sed "s/hostname_nifi_$node_count/hostname/g" tmp_nifi2.yaml > tmp_nifi3.yaml
sed "s/iscleanupneeded_nifi_$node_count/iscleanupneeded/g" tmp_nifi3.yaml > tmp_nifi4.yaml
node_count=$((node_count+1))
cat tmp_nifi4.yaml > tmp_nifi.yaml
done < $filename
cat tmp_nifi.yaml > ./nifi.yaml
##cleanup
rm -rf tmp_nifi.yaml
rm -rf tmp_nifi1.yaml
rm -rf tmp_nifi2.yaml
rm -rf tmp_nifi3.yaml
rm -rf tmp_nifi4.yaml
| true |
80a3c2155210192fb5bda9de53ed0bf9ae0a3b5a | Shell | DeprecatedCode/nate-profile | /index.sh | UTF-8 | 1,022 | 2.65625 | 3 | [] | no_license |
# Nate Ferrero
# nateferrero.com
# This Document
alias n='cd ~/nate-profile && clear && git status'
alias pr='nano ~/nate-profile/index.sh'
alias ra='source ~/nate-profile/index.sh'
# Nginx
alias nns='sudo /etc/init.d/nginx start'
alias nnp='sudo /etc/init.d/nginx stop'
alias nnr='sudo /etc/init.d/nginx reload'
# Git
alias pull='git pull --rebase'
alias push='git push'
alias sync='git pull --rebase && git push'
alias gs='git status'
# Custom - VPS Status
alias vps='nodemon --no-stdin ~/nate-vps-system/app.js &'
alias guide='nodemon --no-stdin ~/snapplab-guide/app.js &'
# Software
alias s='cd ~/software && clear && ls'
# Gallery
alias eg='cd ~/software/nateferrero/gallery && . ./venv/bin/activate'
alias xg='sudo /home/nate/software/nateferrero/gallery/venv/bin/uwsgi --master --emperor /etc/uwsgi/apps-enabled --die-on-term --uid www-data --gid www-data --logto /var/log/uwsgi/emperor.log & sudo chmod 777 /var/www/run/nateferrero.gallery.sock;sudo chown www-data:www-data /var/www/run/nateferrero.gallery.sock'
| true |
b073fce057b4ec8b85fcc0fc188ba68616dc7e56 | Shell | sohalartz/feralfilehosting | /Feral Wiki/HTTP/Redirecting HTTP to HTTPS/scripts/nginxhttps.sh | UTF-8 | 4,627 | 3.953125 | 4 | ["CC-BY-4.0"] | permissive |
#!/bin/bash
# nginx force https on Dual URL
scriptversion="1.0.2"
scriptname="nginxhttps"
#
# wget -qO ~/nginxhttps.sh http://git.io/A34SpA && bash ~/nginxhttps.sh
#
############################
## Version History Starts ##
############################
#
# v1.0.2 template updated
#
############################
### Version History Ends ###
############################
#
############################
###### Variable Start ######
############################
#
scripturl="https://raw.github.com/feralhosting/feralfilehosting/master/Feral%20Wiki/HTTP/Redirecting%20HTTP%20to%20HTTPS/scripts/nginxhttps.sh"
#
############################
####### Variable End #######
############################
#
############################
#### Self Updater Start ####
############################
#
mkdir -p "$HOME/bin"
#
if [[ ! -f "$HOME/$scriptname.sh" ]]
then
wget -qO "$HOME/$scriptname.sh" "$scripturl"
fi
if [[ ! -f "$HOME/bin/$scriptname" ]]
then
wget -qO "$HOME/bin/$scriptname" "$scripturl"
fi
#
wget -qO "$HOME/000$scriptname.sh" "$scripturl"
#
if ! diff -q "$HOME/000$scriptname.sh" "$HOME/$scriptname.sh" > /dev/null 2>&1
then
echo '#!/bin/bash
scriptname="'"$scriptname"'"
wget -qO "$HOME/$scriptname.sh" "'"$scripturl"'"
wget -qO "$HOME/bin/$scriptname" "'"$scripturl"'"
bash "$HOME/$scriptname.sh"
exit 1' > "$HOME/111$scriptname.sh"
bash "$HOME/111$scriptname.sh"
exit 1
fi
if ! diff -q "$HOME/000$scriptname.sh" "$HOME/bin/$scriptname" > /dev/null 2>&1
then
echo '#!/bin/bash
scriptname="'"$scriptname"'"
wget -qO "$HOME/$scriptname.sh" "'"$scripturl"'"
wget -qO "$HOME/bin/$scriptname" "'"$scripturl"'"
bash "$HOME/$scriptname.sh"
exit 1' > "$HOME/222$scriptname.sh"
bash "$HOME/222$scriptname.sh"
exit 1
fi
cd && rm -f {000,111,222}"$scriptname.sh"
chmod -f 700 "$HOME/bin/$scriptname"
#
############################
##### Self Updater End #####
############################
#
############################
#### Core Script Starts ####
############################
#
echo
echo -e "Hello $(whoami), you have the latest version of the" "\033[36m""$scriptname""\e[0m" "script. This script version is:" "\033[31m""$scriptversion""\e[0m"
echo
read -ep "The scripts have been updated, do you wish to continue [y] or exit now [q] : " updatestatus
echo
if [[ "$updatestatus" =~ ^[Yy]$ ]]
then
#
############################
#### User Script Starts ####
############################
#
echo "1: Creating the conf file"
echo
#
echo -n "# Set the variable so we can also redirect to server.feralhosting.com/username/
set \$feraluser \"$(whoami)\";
# Checks if https is NOT on and then SETs part one of our custom variable.
if (\$http_x_forwarded_proto != https ) {
set \$forceh A;
}
# Check for the URL format username.server.feralhosting.com and SETs part 2 of our custom variable.
if (\$http_x_host = \$http_host) {
set \$forceh \"\${forceh}1\";
}
# Check for the URL format server.feralhosting.com/username and SETs part 2 of our custom variable.
if (\$http_x_host != \$http_host) {
set \$forceh \"\${forceh}2\";
}
# When x_host matches host, also check that it is Feral-specific, so this does not apply to a user's own VHost domains.
if (\$http_x_host ~* ^([0-9a-zA-Z-]+)\.([0-9a-zA-Z-]+)\.feralhosting\.com\$) {
set \$forceh \"\${forceh}3\";
}
# When x_host does not match host, also check that it is Feral-specific, so this does not apply to a user's own VHost domains.
if (\$http_x_host ~* ^([0-9a-zA-Z-]+)\.feralhosting\.com\$) {
set \$forceh \"\${forceh}4\";
}
# Combines the SET options to rewrite this URL format: username.server.feralhosting.com
if (\$forceh = A13) {
rewrite ^ https://\$http_x_host\$request_uri? permanent;
}
# Combines the SET options to rewrite this URL format: server.feralhosting.com/username
if (\$forceh = A24) {
rewrite ^ https://\$http_x_host/\$feraluser\$request_uri? permanent;
}" > ~/.nginx/conf.d/000-default-server.d/https.conf
#
echo "2: Reloading the nginx configuration"
echo
/usr/sbin/nginx -s reload -c ~/.nginx/nginx.conf > /dev/null 2>&1
echo "3: Done. You may need to clear your browser cache to see the changes."
echo
exit 1
#
############################
##### User Script End #####
############################
#
else
echo -e "You chose to exit after updating the scripts."
echo
cd && bash
exit 1
fi
#
############################
##### Core Script Ends #####
############################
#
| true |
adba549ebce4b018267a0654f3a3cff25223c9c2 | Shell | Phillip-a-richmond/AnnotateVariants | /Cloud/PullTrioCallVariants.sh | UTF-8 | 6,852 | 3.171875 | 3 | ["MIT"] | permissive |
#!/bin/bash
#SBATCH [email protected]
#SBATCH --mail-type=ALL
## CPU Usage
#SBATCH --nodes=1
#SBATCH --cpus-per-task=8
## Output and Stderr
#SBATCH --output=%x-%j.out
#SBATCH --error=%x-%j.error
##########
# Set up #
##########
# open up scratch
sudo chmod ugo=rwx -R /scratch/
##########
# Part 1 #
##########
# Here we will arrange which trio sample we are working on
# Running for this trio stored in a ped file. All the ped files are stored in that directory, which we will loop through in the next script
# Setting this for dev before running the array
SLURM_ARRAY_TASK_ID=1
# Found this online. real useful
# https://stackoverflow.com/questions/21668471/bash-script-create-array-of-all-files-in-a-directory
shopt -s nullglob
# Pulling from our list of ped files here
pedfiles=(/shared/AnnotateVariants/Cloud/1kG_Data/*ped)
pedfile=${pedfiles[$SLURM_ARRAY_TASK_ID]}
echo $pedfile
# Use awk to get the sample ids. I know that the father is line 2 (NR==2) and is the first column ($1)
# mother is line 3, child is line 4
fatherid=$( awk 'NR==2 {print $1}' $pedfile)
motherid=$( awk 'NR==3 {print $1}' $pedfile)
childid=$( awk 'NR==4 {print $1}' $pedfile)
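# Hypothetical ped layout consistent with the awk extraction above (the
# actual column layout of these 1kG ped files may differ):
#   line 1 = header, line 2 = father, line 3 = mother, line 4 = child,
#   column 1 = sample id, e.g.:
#     sample_id ...
#     HG00731 ...
#     HG00732 ...
#     HG00733 ...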
# Check the ids
echo $fatherid
echo $motherid
echo $childid
### Set working space
# move this back to scratch in production
Working_Dir=/scratch/$childid
mkdir -p $Working_Dir
cd $Working_Dir
##########
# Part 2 #
##########
# Download the data for father, mother, child from s3, and rename it
# download father data
aws s3 cp --dryrun --recursive --no-sign-request s3://1000genomes/data/$fatherid/alignment/ .
aws s3 cp --recursive --no-sign-request s3://1000genomes/data/$fatherid/alignment/ .
# rename it
mv $fatherid*cram $fatherid.cram
mv $fatherid*cram.crai $fatherid.cram.crai
# download mother data
aws s3 cp --dryrun --recursive --no-sign-request s3://1000genomes/data/$motherid/alignment/ .
aws s3 cp --recursive --no-sign-request s3://1000genomes/data/$motherid/alignment/ .
# rename it
mv $motherid*cram $motherid.cram
mv $motherid*cram.crai $motherid.cram.crai
# download child data
aws s3 cp --recursive --no-sign-request --dryrun s3://1000genomes/1000G_2504_high_coverage/additional_698_related/data/ ./ --exclude "*" --include "*$childid*"
aws s3 cp --recursive --no-sign-request s3://1000genomes/1000G_2504_high_coverage/additional_698_related/data/ ./ --exclude "*" --include "*$childid*"
mv ./ERR*/$childid*cram $childid.cram
mv ./ERR*/$childid*cram.crai $childid.cram.crai
# Where we pull the reads to, this is on the scratch space (onboard NVMe)
CRAM_Dir=$Working_Dir
Output_Dir=$Working_Dir/Variants/
mkdir -p $Output_Dir
### For final output
Final_Dir=/shared/DeepVariantOutput/
#### GRCh38 ####
echo "GRCh38 genome"
Genome=GRCh38
Seq_Type=WGS
Fasta_Dir=/shared/AnnotateVariants/Cloud/Genomes/
Fasta_File=GRCh38_full_analysis_set_plus_decoy_hla.fa
##########
# Part 3 #
##########
# here I will get docker, then run deepvariant separately for each sample
# Get Docker
BIN_VERSION="1.3.0"
sudo apt -y update
sudo apt-get -y install docker.io
sudo docker pull google/deepvariant:"${BIN_VERSION}"
# Father Call variants
sudo docker run \
-v "${CRAM_Dir}":"/cramdir" \
-v "${Fasta_Dir}":"/genomedir" \
-v "${Output_Dir}":"/output" \
google/deepvariant:"${BIN_VERSION}" \
/opt/deepvariant/bin/run_deepvariant \
--model_type=${Seq_Type} \
--ref="/genomedir/$Fasta_File" \
--intermediate_results_dir="/output/intermediate_results_dir" \
--reads="/cramdir/$fatherid.cram" \
--output_vcf="/output/$fatherid.deepvariant.$BIN_VERSION.vcf.gz" \
--output_gvcf="/output/$fatherid.deepvariant.$BIN_VERSION.gvcf.gz" \
--num_shards=16
# Mother Call variants
sudo docker run \
-v "${CRAM_Dir}":"/cramdir" \
-v "${Fasta_Dir}":"/genomedir" \
-v "${Output_Dir}":"/output" \
google/deepvariant:"${BIN_VERSION}" \
/opt/deepvariant/bin/run_deepvariant \
--model_type=${Seq_Type} \
--ref="/genomedir/$Fasta_File" \
--intermediate_results_dir="/output/intermediate_results_dir" \
--reads="/cramdir/$motherid.cram" \
--output_vcf="/output/$motherid.deepvariant.$BIN_VERSION.vcf.gz" \
--output_gvcf="/output/$motherid.deepvariant.$BIN_VERSION.gvcf.gz" \
--num_shards=16
# Child Call variants
sudo docker run \
-v "${CRAM_Dir}":"/cramdir" \
-v "${Fasta_Dir}":"/genomedir" \
-v "${Output_Dir}":"/output" \
google/deepvariant:"${BIN_VERSION}" \
/opt/deepvariant/bin/run_deepvariant \
--model_type=${Seq_Type} \
--ref="/genomedir/$Fasta_File" \
--intermediate_results_dir="/output/intermediate_results_dir" \
--reads="/cramdir/$childid.cram" \
--output_vcf="/output/$childid.deepvariant.$BIN_VERSION.vcf.gz" \
--output_gvcf="/output/$childid.deepvariant.$BIN_VERSION.gvcf.gz" \
--num_shards=16
##########
# Part 4 #
##########
# Run deepTrio
sudo docker pull google/deepvariant:deeptrio-"${BIN_VERSION}"
sudo docker run \
-v "${CRAM_Dir}":"/cramdir" \
-v "${Fasta_Dir}":"/genomedir" \
-v "${Output_Dir}":"/output" \
google/deepvariant:deeptrio-"${BIN_VERSION}" \
/opt/deepvariant/bin/deeptrio/run_deeptrio \
--model_type=WGS \
--intermediate_results_dir="/output/intermediate_results_dir" \
--ref="/genomedir/$Fasta_File" \
--sample_name_child "$childid" \
--sample_name_parent1 "$fatherid" \
--sample_name_parent2 "$motherid" \
--reads_child=/cramdir/$childid.cram \
--reads_parent1=/cramdir/$fatherid.cram \
--reads_parent2=/cramdir/$motherid.cram \
--output_vcf_child /output/$childid.deeptrio.$BIN_VERSION.vcf.gz \
--output_vcf_parent1 /output/$fatherid.deeptrio.$BIN_VERSION.vcf.gz \
--output_vcf_parent2 /output/$motherid.deeptrio.$BIN_VERSION.vcf.gz \
--output_gvcf_child /output/$childid.deeptrio.$BIN_VERSION.gvcf.gz \
--output_gvcf_parent1 /output/$fatherid.deeptrio.$BIN_VERSION.gvcf.gz \
--output_gvcf_parent2 /output/$motherid.deeptrio.$BIN_VERSION.gvcf.gz \
--num_shards 16
##########
# Part 5 #
##########
# Copy Variant data back to /shared/
cp $Output_Dir/*vcf.gz $Final_Dir
cp $Output_Dir/*gvcf.gz $Final_Dir
cp $Output_Dir/*tbi $Final_Dir
cp $Output_Dir/*html $Final_Dir
exit
##########
# Part 6 #
##########
# Get discordant reads in bed file
## Get tool
wget -O excord https://github.com/brentp/excord/releases/download/v0.2.2/excord_linux64
chmod +x ./excord
## Activate miniconda environment
source /shared/miniconda3/etc/profile.d/conda.sh
conda activate /shared/miniconda3/envs/Mamba/envs/SeqTools
# Make a bam file
samtools view -@ 8 -b $CRAM_Dir/$Sample_CRAM -o $CRAM_Dir/$Sample_ID.bam
samtools view -b $CRAM_Dir/$Sample_ID.bam |
./excord \
--discordantdistance 500 \
--fasta $Fasta_Dir/$Fasta_File \
/dev/stdin \
| LC_ALL=C sort --buffer-size 2G -k1,1 -k2,2n -k3,3n \
| bgzip -c > $Output_Dir/$Sample_ID.bed.gz
cp $Sample_ID.bed.gz $Final_Dir
exit
| true |
3e7e51e6791c70a8514907c368502257493821cf | Shell | amennen/rtAttenPenn_analysis | /qsub_fmriprep.sh | UTF-8 | 850 | 2.765625 | 3 | [] | no_license |
#!/bin/bash
# Run within BIDS code/ directory:
# qsub qsub_fmriprep.sh
# Set current working directory
#$ -wd '/data/jux/cnds/amennen/rtAttenPenn/fmridata/Nifti/derivatives/logs/'
# #$ -cwd
#$ -t 12
#$ -j y
#$ -m ea
#$ -M [email protected]
#$ -N fmriprep
#$ -w e
#$ -binding linear:4
#$ -pe unihost 4
#$ -l h_rt=72:00:00
#$ -l h_vmem=5.5G
#$ -l s_vmem=5G
##### #$ -l h_data=30G
echo SGE_TASK_ID: $SGE_TASK_ID
date
# Set subject ID based on array index
subject_vector=( 1 2 3 4 5 6 7 8 9 10 11 12 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 )
subject=${subject_vector[$SGE_TASK_ID - 1]}
printf -v subj "%03d" $subject
# Run fMRIPrep script with participant argument
echo "Running FMRIPREP on sub-$subj"
bash /data/jux/cnds/amennen/rtAttenPenn/fmridata/Nifti/code/run_fmriprep.sh $subj
echo "Finished running FMRIPREP on sub-$subj"
date
| true |
cca12fbbf805cc94795d832a9c75e4c6b9a95964 | Shell | mikenowak/docker-nrpe | /check_time_skew | UTF-8 | 1,848 | 4.5625 | 5 | [] | no_license |
#!/bin/sh
# Check for clock skew on hosts not running ntpd
# v1.0 (C) 2008 Stuart Teasdale <[email protected]>
program=/usr/sbin/ntpdate
program_opts="-q pool.ntp.org"
warntime=1
crittime=2
# helper functions
. /usr/lib/nagios/plugins/utils.sh
usage()
{
cat <<EOF
Usage: `basename $0` [-w <seconds>] [-c <seconds>] [-n <ntpserver>]
Check the time of the machine compared to an ntp server (pool.ntp.org
by default) and report a warn or error if it is outside of specified
boundaries
EOF
exit 1
}
main()
{
parse_arguments "$@"
timecheck
}
# convert exit code to Nagios output
exitcode_to_state()
{
if [ "$#" != 1 ]; then
echo "Warning - exitcode_to_state called incorrectly ($@)"
return 1
fi
local exitcode="$1"
case "$exitcode" in
$STATE_OK) echo "OK";;
$STATE_WARNING) echo "WARNING";;
$STATE_CRITICAL) echo "CRITICAL";;
$STATE_UNKNOWN) echo "UNKNOWN";;
$STATE_DEPENDENT) echo "DEPENDENT";;
*) echo "exitcode_to_state called with bad exitcode \"$exitcode\"" >&2;
echo "UNKNOWN";;
esac
}
timecheck()
{
rc=$STATE_OK
msg=""
skew=`$program $program_opts 2>/dev/null | grep ntpdate |awk '{ print $10}'`
if [ -z "$skew" ]; then
msg="Problem executing $program";
rc=$STATE_UNKNOWN
else
msg="$msg clock skew of ${skew}s - Machine time is `date`"
if [ `echo "$skew < 0"|bc` = 1 ]; then skew=`echo "0 - $skew"|bc` ; fi
if [ `echo "$skew > $warntime"|bc` = 1 ]; then
rc=$STATE_WARNING
fi
if [ `echo "$skew > $crittime"|bc` = 1 ]; then
rc=$STATE_CRITICAL
fi
fi
echo "TIME `exitcode_to_state $rc` - $msg"
return $rc
}
parse_arguments()
{
while getopts "hw:c:n:" opt; do
case "$opt" in
h) usage;;
w) warntime="$OPTARG";;
c) crittime="$OPTARG";;
n) program_opts="-q $OPTARG";;
esac
done
}
main "$@"
| true |
05a368c412f9fd9e836d1437cae13794b9aaf3db | Shell | daniel-vera-g/dotfiles | /linux/shell/scripts/backup/uni/backup-uni-docs.sh | UTF-8 | 1,288 | 3.640625 | 4 | ["MIT"] | permissive |
#!/bin/bash
currDate=$(date +"%Y-%m-%d")
toBackupPath="/home/dvg/workspace/edu/uni"
backupPath="/home/dvg/sync/notebook/projects/education/uni/repo-backup"
backupCount=$(ls ~/sync/notebook/projects/education/uni/repo-backup/ | wc -l)
# Create destination directory
echo "----------------------------------------------------------"
echo "Creating directory for uni backup: $backupPath/$currDate"
mkdir $backupPath/$currDate
echo "----------------------------------------------------------"
for I in $(ls ~/workspace/edu/uni/); do
# Create backup
echo "----------------------------------------------------------"
echo "Creating backup for $toBackupPath/$I"
echo "tar cJvf $backupPath/$currDate/$I.tar.xz $toBackupPath/$I"
tar -cJvf $backupPath/$currDate/$I.tar.xz $toBackupPath/$I
echo "----------------------------------------------------------"
done
# Remove the oldest backup once more than 5 exist
echo $backupCount
if (($backupCount > 5)); then
	# Get the oldest backup item and remove it
	echo "----------------------------------------------------------"
	echo "Removing $(ls ~/sync/notebook/projects/education/uni/repo-backup/ | head -n 1)"
	rm -ivrf $backupPath/$(ls $backupPath | head -n 1)
echo "----------------------------------------------------------"
fi
| true |
9f493f1a042217df6ee9b28d376eadeb86c41002 | Shell | mclaugsf/nps | /nps_check.sh | UTF-8 | 17,192 | 3.328125 | 3 | [] | no_license |
#! /bin/bash
if [ $# -lt 2 ]; then
echo "Usage: nps_check.sh nps_command ..."
echo "nps_command:"
echo " stdgt traindir traintag"
echo " init workdir"
echo " last workdir winshift1 winshift2 ..."
echo " score workdir valdir valtag winshift1 winshift2 ..."
echo ""
echo "Note: \"nps_check.sh last\" will figure out the following checks by itself:"
echo " decor workdir winshift1 winshift2 ..."
echo " prune workdir winshift1 winshift2 ..."
echo " gwassig workdir winshift1 winshift2 ..."
echo " prep_part workdir winshift1 winshift2 ..."
echo " part workdir winshift1 winshift2 ..."
echo " weight workdir winshift1 winshift2 ..."
echo " back2snpeff workdir winshift1 winshift2 ..."
exit 1
fi
step=$1
status=0
# Check Rscript and R version
if ! command -v Rscript > /dev/null; then
echo "ERROR: cannot run Rscript"
exit 2
fi
Rver=`Rscript -e 'cat(":NPS:\t", version$major, "\n", sep='')' | grep -F ':NPS:' | cut -f2`
Rver_string=`Rscript -e 'cat(":NPS:\t", version$version.string, "\n", sep='')' | grep -F ':NPS:' | cut -f2`
if [ $Rver -lt 3 ]; then
echo "ERROR: R-3.0 or later is required: $Rver_string"
exit 2
fi
if [ $step == "last" ]; then
workdir=$2
echo "NPS data directory: $workdir"
# back2snpeff
auto=`ls -t $workdir/args.RDS $workdir/*.Q.RDS $workdir/*.pruned.table $workdir/*.pruned.tailfix.table $workdir/*trPT* $workdir/*.adjbetahat.* $workdir/*part.RDS $workdir/*PTwt.RDS 2> /dev/null | head -n 1 | grep adjbetahat | wc -l `
if [ $auto != 0 ]; then
cmdargs=( $@ )
cmdargs=("${cmdargs[@]:1}")
./nps_check.sh back2snpeff ${cmdargs[@]}
exit $?
fi
# weight
auto=`ls -t $workdir/args.RDS $workdir/*.Q.RDS $workdir/*.pruned.table $workdir/*.pruned.tailfix.table $workdir/*trPT* $workdir/*.adjbetahat.* $workdir/*part.RDS $workdir/*PTwt.RDS 2> /dev/null | head -n 1 | grep PTwt | grep -v tail | wc -l `
if [ $auto != 0 ]; then
cmdargs=( $@ )
cmdargs=("${cmdargs[@]:1}")
./nps_check.sh weight ${cmdargs[@]}
exit $?
fi
# part
auto=`ls -t $workdir/args.RDS $workdir/*.Q.RDS $workdir/*.pruned.table $workdir/*.pruned.tailfix.table $workdir/*trPT* $workdir/*.adjbetahat.* $workdir/*part.RDS $workdir/*PTwt.RDS 2> /dev/null | head -n 1 | grep trPT | grep -v tail | wc -l `
if [ $auto != 0 ]; then
cmdargs=( $@ )
cmdargs=("${cmdargs[@]:1}")
./nps_check.sh part ${cmdargs[@]}
exit $?
fi
# prep_part
auto=`ls -t $workdir/args.RDS $workdir/*.Q.RDS $workdir/*.pruned.table $workdir/*.pruned.tailfix.table $workdir/*trPT* $workdir/*.adjbetahat.* $workdir/*part.RDS $workdir/*PTwt.RDS 2> /dev/null | head -n 1 | grep part.RDS | wc -l `
if [ $auto != 0 ]; then
cmdargs=( $@ )
cmdargs=("${cmdargs[@]:1}")
./nps_check.sh prep_part ${cmdargs[@]}
exit $?
fi
# gwassig
auto=`ls -t $workdir/args.RDS $workdir/*.Q.RDS $workdir/*.pruned.table $workdir/*.pruned.tailfix.table $workdir/*trPT* $workdir/*.adjbetahat.* $workdir/*part.RDS $workdir/*PTwt.RDS 2> /dev/null | head -n 1 | grep pruned.tailfix.table | wc -l `
if [ $auto != 0 ]; then
cmdargs=( $@ )
cmdargs=("${cmdargs[@]:1}")
# always check prune and decor as well
./nps_check.sh decor ${cmdargs[@]}
if [ $? != 0 ]; then
exit $?
fi
./nps_check.sh prune ${cmdargs[@]}
if [ $? != 0 ]; then
exit $?
fi
./nps_check.sh gwassig ${cmdargs[@]}
exit $?
fi
# prune
auto=`ls -t $workdir/args.RDS $workdir/*.Q.RDS $workdir/*.pruned.table $workdir/*.pruned.tailfix.table $workdir/*trPT* $workdir/*.adjbetahat.* $workdir/*part.RDS $workdir/*PTwt.RDS 2> /dev/null | head -n 1 | grep pruned.table | wc -l `
if [ $auto != 0 ]; then
cmdargs=( $@ )
cmdargs=("${cmdargs[@]:1}")
./nps_check.sh prune ${cmdargs[@]}
exit $?
fi
# decor
auto=`ls -t $workdir/args.RDS $workdir/*.Q.RDS $workdir/*.pruned.table $workdir/*.pruned.tailfix.table $workdir/*trPT* $workdir/*.adjbetahat.* $workdir/*part.RDS $workdir/*PTwt.RDS 2> /dev/null | head -n 1 | grep Q.RDS | wc -l `
if [ $auto != 0 ]; then
cmdargs=( $@ )
cmdargs=("${cmdargs[@]:1}")
./nps_check.sh decor ${cmdargs[@]}
exit $?
fi
# init
auto=`ls -t $workdir/args.RDS $workdir/*.Q.RDS $workdir/*.pruned.table $workdir/*.pruned.tailfix.table $workdir/*trPT* $workdir/*.adjbetahat.* $workdir/*part.RDS $workdir/*PTwt.RDS 2> /dev/null | head -n 1 | grep args.RDS | wc -l `
if [ $auto != 0 ]; then
cmdargs=( $@ )
cmdargs=("${cmdargs[@]:1}")
./nps_check.sh init $workdir
exit $?
fi
echo "ERROR: cannot automatically figure out the previous step"
exit 1
fi
if [ $step == "stdgt" ]; then
echo "Verifying nps_stdgt:"
if [ $# -ne 3 ]; then
echo "Usage: nps_check.sh stdgt traindir traintag"
exit 1
fi
traindir=$2
traintag=$3
for chrom in `seq 1 22`
do
filepre="$traindir/chrom$chrom.$traintag"
echo -n "Checking $filepre ..."
if [ ! -s $filepre.meandos ]; then
echo "FAIL: .meandos missing or empty"
status=1
continue
fi
if [ ! -s $filepre.snpinfo ]; then
echo "FAIL: .snpinfo missing or empty"
status=1
continue
fi
if [ ! -s $filepre.stdgt.gz ]; then
echo "FAIL: .stdgt.gz missing or empty"
status=1
continue
fi
gzip -t $filepre.stdgt.gz
if [ $? != 0 ]; then
echo "FAIL: .stdgt.gz broken"
status=1
continue
fi
echo "OK"
done
if [ $status != 0 ]; then
echo "FAILED"
fi
exit $status
elif [ $step == "init" ]; then
echo "Verifying nps_$step:"
if [ $# -ne 2 ]; then
echo "Usage: nps_check.sh init workdir"
exit 1
fi
workdir=$2
echo -n "Checking $workdir/args.RDS ..."
if [ ! -f $workdir/args.RDS ]; then
echo "FAIL"
exit 1
fi
ver=`Rscript -e "args <- readRDS(\"$workdir/args.RDS\"); cat(\":NPS:\\t\", args[[\"VERSION\"]], sep='');" | grep -F ':NPS:' | cut -f2 `
echo "OK (version $ver)"
echo -n "Checking $workdir/log ..."
if [ ! -d $workdir/log ]; then
echo "FAIL"
exit 1
fi
echo "OK"
# older log files
outdated=`find $workdir/log/ -name "*.Rout.*" ! -newer "$workdir/args.RDS" | wc -l`
if [ $outdated -gt 0 ]; then
echo "WARNING: Outdated log files in $workdir/log/"
echo "$outdated Rout files are found older than $workdir/args.RDS"
fi
elif [ $step == "decor" ] || [ $step == "prune" ] || [ $step == "gwassig" ] || [ $step == "part" ] || [ $step == "back2snpeff" ]; then
echo "Verifying nps_$step:"
if [ $# -lt 3 ]; then
echo "Usage: nps_check.sh $step workdir winshift1 wishift2 ..."
exit 1
fi
workdir=$2
cmdargs=( $@ )
argslen=${#cmdargs[@]}
for (( k=2; k<argslen; k++ ))
do
winshift=${cmdargs[$k]}
echo "----- Shifted by $winshift -----"
for chrom in `seq 1 22`
do
logfile="$workdir/log/nps_$step.Rout.$winshift.$chrom"
echo -n "Checking $logfile ..."
if [ ! -f $logfile ]; then
echo "FAIL (missing)"
status=1
continue
fi
last=`grep -w Done $logfile | tail -n 1`
if [ "$last" != "Done" ]; then
echo "FAIL (incomplete)"
status=1
continue
fi
echo "OK"
done
if [ $status != 0 ]; then
echo "FAILED"
exit $status
fi
if [ $step == "prune" ]; then
echo -n "Checking window count..."
if [ $winshift == 0 ]; then
win1=`ls -l $workdir/win.*.Q.RDS | wc -l`
win2=`ls -l $workdir/win.*.pruned.table | wc -l`
if [ $win1 != $win2 ]; then
echo "FAIL ($win1 != $win2)"
exit 1
else
echo "OK ($win1 windows)"
fi
echo -n "Checking timestamp..."
for chrom in `seq 1 22`
do
decorfile=`ls -t $workdir/win.$chrom.*.Q.RDS | head -n 1`
outdated=`find $workdir/ -name "win.$chrom.*.pruned.table" ! -newer "$decorfile" | wc -l`
if [ $outdated != 0 ]; then
echo "FAIL (outdated pruning data: chr$chrom)"
exit 1
fi
done
echo "OK"
else
win1=`ls -l $workdir/win_$winshift.*.Q.RDS | wc -l`
win2=`ls -l $workdir/win_$winshift.*.pruned.table | wc -l`
if [ $win1 != $win2 ]; then
echo "FAIL ($win1 != $win2)"
exit 1
else
echo "OK ($win1 windows)"
fi
echo -n "Checking timestamp..."
for chrom in `seq 1 22`
do
decorfile=`ls -t $workdir/win_$winshift.$chrom.*.Q.RDS | head -n 1`
outdated=`find $workdir/ -name "win_$winshift.$chrom.*.pruned.table" ! -newer "$decorfile" | wc -l`
if [ $outdated != 0 ]; then
echo "FAIL (outdated pruning data)"
exit 1
fi
done
echo "OK"
fi
elif [ $step == "gwassig" ]; then
traintag=`Rscript -e "args <- readRDS(\"$workdir/args.RDS\"); cat(\":NPS:\\t\", args[[\"traintag\"]], sep='');" | grep -F ':NPS:' | cut -f2 `
traindir=`Rscript -e "args <- readRDS(\"$workdir/args.RDS\"); cat(\":NPS:\\t\", args[[\"traindir\"]], sep='');" | grep -F ':NPS:' | cut -f2 `
# check tail_betahat files
for chrom in `seq 1 22`
do
tailbetahatfile="$workdir/tail_betahat.$chrom.table"
echo -n "Checking $tailbetahatfile ..."
if [ ! -s $tailbetahatfile ]; then
echo "FAIL (missing or empty)"
status=1
continue
fi
M1=`tail -n +2 $traindir/chrom$chrom.$traintag.snpinfo | wc -l`
M2=`cat $tailbetahatfile | wc -l`
if [ $M1 != $M2 ]; then
echo "FAIL (incomplete)"
status=1
continue
fi
echo "OK"
done
if [ $status != 0 ]; then
echo "FAILED"
exit $status
fi
# check timestamp
echo -n "Checking timestamp..."
if [ $winshift == 0 ]; then
for chrom in `seq 1 22`
do
prunefile=`ls -t $workdir/win.$chrom.*.pruned.table | head -n 1`
outdated=`find $workdir/ -name "win.$chrom.*.pruned.tailfix.table" ! -newer "$prunefile" | wc -l`
if [ $outdated != 0 ]; then
echo "FAIL (outdated gwassig data)"
exit 1
fi
outdated=`find $workdir/ -name "tail_betahat.$chrom.table" ! -newer "$prunefile" | wc -l`
if [ $outdated != 0 ]; then
echo "FAIL (outdated gwassig data)"
exit 1
fi
if [ -f "$workdir/trPT.$chrom.tail.RDS" ]; then
outdated=`find $workdir/ -name "trPT.$chrom.tail.RDS" ! -newer "$prunefile" | wc -l`
if [ $outdated != 0 ]; then
echo "FAIL (outdated gwassig data)"
exit 1
fi
fi
done
else
for chrom in `seq 1 22`
do
prunefile=`ls -t $workdir/win_$winshift.$chrom.*.pruned.table | head -n 1`
outdated=`find $workdir/ -name "win_$winshift.$chrom.*.pruned.tailfix.table" ! -newer "$prunefile" | wc -l`
if [ $outdated != 0 ]; then
echo "FAIL (outdated gwassig data)"
exit 1
fi
done
fi
echo "OK"
elif [ $step == "part" ]; then
for chrom in `seq 1 22`
do
if [ $winshift == 0 ]; then
trPT="$workdir/trPT.$chrom.RDS"
else
trPT="$workdir/win_$winshift.trPT.$chrom.RDS"
fi
echo -n "Checking $trPT ..."
if [ ! -s $trPT ]; then
echo "FAIL (missing or empty)"
status=1
continue
fi
dim=`Rscript -e "trPT <- readRDS(\"$trPT\"); cat(\":NPS:\\t\", paste(dim(trPT), collapse=' x '), sep='');" | grep -F ':NPS:' | cut -f2 `
echo "OK ($dim)"
done
if [ $status != 0 ]; then
echo "FAILED"
exit $status
fi
echo -n "Checking timestamp ..."
if [ $winshift == 0 ]; then
outdated=`find $workdir/ -name "trPT.*.RDS" ! -newer "$workdir/part.RDS" | grep -v tail.RDS | wc -l`
else
outdated=`find $workdir/ -name "win_$winshift.trPT.*.RDS" ! -newer "$workdir/win_$winshift.part.RDS" | grep -v tail.RDS | wc -l`
fi
if [ $outdated != 0 ]; then
echo "FAIL (outdated trPT data)"
exit 1
fi
echo "OK"
elif [ $step == "back2snpeff" ]; then
traintag=`Rscript -e "args <- readRDS(\"$workdir/args.RDS\"); cat(\":NPS:\\t\", args[[\"traintag\"]], sep='');" | grep -F ':NPS:' | cut -f2 `
traindir=`Rscript -e "args <- readRDS(\"$workdir/args.RDS\"); cat(\":NPS:\\t\", args[[\"traindir\"]], sep='');" | grep -F ':NPS:' | cut -f2 `
for chrom in `seq 1 22`
do
if [ $winshift == 0 ]; then
snpeff="$workdir/$traintag.adjbetahat.chrom$chrom.txt"
else
snpeff="$workdir/$traintag.win_$winshift.adjbetahat.chrom$chrom.txt"
fi
echo -n "Checking $snpeff ..."
if [ ! -s $snpeff ]; then
echo "FAIL (missing or empty)"
status=1
continue
fi
M1=`tail -n +2 $traindir/chrom$chrom.$traintag.snpinfo | wc -l`
M2=`cat $snpeff | wc -l`
if [ $M1 != $M2 ]; then
echo "FAIL (marker count mismatch: $M1 != $M2)"
status=1
continue
fi
echo "OK"
done
if [ $status != 0 ]; then
echo "FAILED"
exit $status
fi
echo -n "Checking timestamp ..."
if [ $winshift == 0 ]; then
outdated=`find $workdir/ -name "$traintag.adjbetahat.chrom*.txt" ! -newer "$workdir/PTwt.RDS" | wc -l`
else
outdated=`find $workdir/ -name "$traintag.win_$winshift.adjbetahat.chrom*.txt" ! -newer "$workdir/win_$winshift.PTwt.RDS" | wc -l`
fi
if [ $outdated != 0 ]; then
echo "FAIL (outdated snpeff data)"
exit 1
fi
echo "OK"
fi
done
elif [ $step == "prep_part" ]; then
echo "Verifying nps_$step:"
if [ $# -lt 3 ]; then
echo "Usage: nps_check.sh $step workdir winshift1 wishift2 ..."
exit 1
fi
workdir=$2
cmdargs=( $@ )
argslen=${#cmdargs[@]}
for (( k=2; k<argslen; k++ ))
do
winshift=${cmdargs[$k]}
echo "----- Shifted by $winshift -----"
if [ $winshift == 0 ]; then
partfile="part.RDS"
prevfile=`ls -t $workdir/win.*.pruned.table $workdir/win.*.pruned.tailfix.table | head -n 1`
else
partfile="win_$winshift.part.RDS"
prevfile=`ls -t $workdir/win_$winshift.*.pruned.table $workdir/win_$winshift.*.pruned.tailfix.table | head -n 1`
fi
echo -n "Checking $workdir/$partfile ..."
if [ ! -s $workdir/$partfile ]; then
echo "FAIL (missing or empty)"
exit 1
fi
echo "OK"
echo -n "Checking timestamp ..."
outdated=`find $workdir/ -name "$partfile" ! -newer "$prevfile" | wc -l`
if [ $outdated != 0 ]; then
echo "FAIL (outdated partition files)"
exit 1
fi
echo "OK"
done
elif [ $step == "weight" ]; then
echo "Verifying nps_$step:"
if [ $# -lt 3 ]; then
echo "Usage: nps_check.sh $step workdir winshift1 wishift2 ..."
exit 1
fi
workdir=$2
cmdargs=( $@ )
argslen=${#cmdargs[@]}
for (( k=2; k<argslen; k++ ))
do
winshift=${cmdargs[$k]}
echo "----- Shifted by $winshift -----"
if [ $winshift == 0 ]; then
echo -n "Checking S0 weight ..."
if [ ! -s "$workdir/PTwt.tail.RDS" ]; then
echo "FAIL (missing or empty $workdir/PTwt.tail.RDS)"
exit 1
fi
echo "OK"
ptwtfile="$workdir/PTwt.RDS"
else
ptwtfile="$workdir/win_$winshift.PTwt.RDS"
fi
echo -n "Checking partition weights ..."
if [ ! -s "$ptwtfile" ]; then
echo "FAIL (missing or empty)"
exit 1
fi
dim=`Rscript -e "PTwt <- readRDS(\"$ptwtfile\"); cat(\":NPS:\\t\", paste(dim(PTwt), collapse=' x '), sep='')" | grep -F ':NPS:' | cut -f2 `
echo "OK ($dim)"
echo -n "Checking timestamp ..."
if [ $winshift == 0 ]; then
prevfile=`ls -t $workdir/trPT.*.RDS | head -n 1`
outdated=`find $workdir/ -name "PTwt*.RDS" ! -newer "$prevfile" | wc -l`
else
prevfile=`ls -t $workdir/win_$winshift.trPT.*.RDS | head -n 1`
outdated=`find $workdir/ -name "win_$winshift.PTwt.RDS" ! -newer "$prevfile" | wc -l`
fi
if [ $outdated != 0 ]; then
echo "FAIL (outdated PTwt data)"
exit 1
fi
echo "OK"
done
elif [ $step == "score" ]; then
echo "Verifying nps_$step:"
if [ $# -lt 5 ]; then
echo "Usage: nps_check.sh $step workdir valdir valtag winshift1 wishift2 ..."
exit 1
fi
workdir=$2
valdir=$3
valtag=$4
cmdargs=( $@ )
argslen=${#cmdargs[@]}
for (( k=4; k<argslen; k++ ))
do
winshift=${cmdargs[$k]}
echo "----- Shifted by $winshift -----"
traintag=`Rscript -e "args <- readRDS(\"$workdir/args.RDS\"); cat(\":NPS:\\t\", args[[\"traintag\"]], sep='');" | grep -F ':NPS:' | cut -f2 `
if [ $winshift == 0 ]; then
modtag=$traintag
else
modtag="$traintag.win_${winshift}"
fi
for chrom in `seq 1 22`
do
scorefile="$valdir/$modtag.predY.chrom$chrom.txt"
echo -n "Checking $scorefile ..."
if [ ! -s $scorefile ]; then
echo "FAIL (missing or empty)"
status=1
continue
fi
# check line number
N=`zcat $valdir/chrom${chrom}.${valtag}.dosage.gz | head -n 1 | tr " " "\n" | tail -n +7 | wc -l`
N0=`cat $scorefile | wc -l`
if [ $N != $N0 ]; then
echo "FAIL (incomplete)"
status=1
continue
fi
echo "OK (N=$N)"
done
if [ $status != 0 ]; then
echo "FAILED"
exit $status
fi
echo -n "Checking timestamp ..."
prevfile=`ls -t $workdir/$modtag.adjbetahat.chrom*.txt | head -n 1`
outdated=`find $valdir/ -name "$modtag.predY.chrom*.txt" ! -newer "$prevfile" | wc -l`
if [ $outdated != 0 ]; then
echo "FAIL (outdated score data)"
exit 1
fi
echo "OK"
done
else
echo "ERROR: unknown NPS step: $step"
exit 1
fi
exit $status
| true |
5aa334881dbbf545160b95a0d1805d217b033e99 | Shell | Nycto/ExtraNimble | /travis-setup.sh | UTF-8 | 869 | 3.65625 | 4 | [] | no_license |
#!/bin/bash
set -e
set -o pipefail
set -o xtrace
export NIM_ROOT=$HOME/Nim
compile() {
./bin/nim c koch
./koch boot -d:release
./koch nimble
}
# If Nim and nimble are still cached from the last time
if [ -x "$NIM_ROOT/bin/nim" ]; then
# Only refresh if the Nim cache is older than 7 days ("-exec false" makes find fail when the directory matches)
if ! find "$NIM_ROOT" -maxdepth 0 -type d -ctime +7 -exec false {} +; then
# Now check to see if there is a new revision
cd "$NIM_ROOT"
git fetch origin
test "$(git rev-parse HEAD)" == "$(git rev-parse @{u})" || compile
fi
# Download nim from scratch and compile it
else
git clone -b devel --depth 1 git://github.com/nim-lang/nim "$NIM_ROOT"
cd "$NIM_ROOT"
git clone --depth 1 git://github.com/nim-lang/csources csources
(cd csources && sh build.sh)
rm -rf csources
compile
fi
ls "$NIM_ROOT/bin"
| true |
af027ab75366e9590d4ec7bc71cbfd395d463fa8 | Shell | davidshue/vbc-cloud | /node/run.sh | UTF-8 | 538 | 3.078125 | 3 | [] | no_license |
#!/bin/bash
CONFIG_HOST=${CONFIG_HOST:=vbc-config}
CONFIG_PORT=${CONFIG_PORT:=8888}
JAVA_OPTS=${JAVA_OPTS:-"-server -Xms64m -Xmx128m -XX:+UseParallelOldGC"}
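# Wait for the config server to accept connections (up to 20 attempts, 5s apart)
# before launching the node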
a=0;
while ! nc -z ${CONFIG_HOST} ${CONFIG_PORT};
do
a=$(($a+1));
if [ "$a" -gt 20 ]
then
echo startup failed!
exit 1 # startup failed
fi
echo sleeping $a;
sleep 5;
done;
PROFILE=${VBC_PROFILE:-"default"}
cd /opt
java $JAVA_OPTS -jar -Dspring.profiles.active=$PROFILE $ENVIRONMENT_ARGS node-*.jar
| true |
738b4d8a279a78522a3df021663ac32ba76a417f | Shell | zhangheting407051/scripts | /dailybuild/DailyBuild_A158.sh | GB18030 | 4,147 | 3.3125 | 3 | [] | no_license |
#!/bin/bash
############################### User settings - start ############################################
DailyBuildDir=/home1/zhangheting/DailyBuildSW1
CodeDir=$DailyBuildDir/Code
DAILYBUILDLOG=$DailyBuildDir/DailyBuild_A158.log
VersionDIR=$DailyBuildDir/Version
Date=$(date +%y%m%d)
############################### User settings - end ############################################
############################### Script body - start ############################################
LastDate=2000-01-01
function run_script()
{
cd $CodeDir/$FOLDER_NAME
./Auto_build_A158.sh<< EOF
1 2
EOF
echo " start pickup modify details" >> $DAILYBUILDLOG
pickup_modify
Copy_files
cd ..
}
function pickup_modify()
{
CodeDIR=$CodeDir/$FOLDER_NAME
Modify_File=$CodeDIR/modify.txt
Tmp1_File=$CodeDIR/modify_tmp1.txt
Tmp2_File=$CodeDIR/modify_tmp2.txt
Tmp3_File=$CodeDIR/modify_tmp3.txt
Tmp4_File=$CodeDIR/modify_tmp4.txt
final_file=$CodeDIR/ModifyDetails_$(date +%y%m%d).txt
cd wind
git log --since="27 hours" >> $Modify_File
echo "$Modify_File"
cd $CodeDIR
sed '/^$/d' $Modify_File > $Tmp1_File
grep -v "commit\|Author\|Date\|Change-Id\|Merge\|Revert" $Tmp1_File > $Tmp2_File
sed -n '/Subject/,/Ripple Effect/{/Subject/n;/Ripple Effect/b;p}' $Tmp2_File > $Tmp3_File
grep -v "Bug Number" $Tmp3_File > $Tmp4_File
count=0
flag=0
FLAG=0;
echo "$Tmp3_File"
cat $Tmp4_File | while read LINE
do
((count+=1))
if !((count%2)) ;then
((flag+=1))
BugID=$LINE
echo "$flag. $BugID $Details " >> $final_file
else
Details=$LINE
fi
done
echo "$final_file is build!" >>$DAILYBUILDLOG
rm -rf $Modify_File $Tmp1_File $Tmp2_File $Tmp3_File $Tmp4_File
chmod 777 $final_file
cd $CodeDIR
}
function Copy_files(){
USER=`whoami`
Date=$(date +%y%m%d)
cd $CodeDir/$FOLDER_NAME
echo "curent pwd in Copy_files is `pwd`" >> $DAILYBUILDLOG
WIND_SW1_IN_VERSION_NUMBER=`awk -F = 'NR==1 {printf $2}' version`
if [ "$WIND_SW1_IN_VERSION_NUMBER" = "" ] ; then
WIND_SW1_IN_VERSION_NUMBER=$FOLDER_NAME
fi
BUILDTYPE=`grep -n 'ro.build.type' out/target/product/A158/system/build.prop | cut -d '=' -f 2`
echo "'$BUILDTYPE' = $BUILDTYPE" >> $DAILYBUILDLOG
cd $DailyBuildDir
if [ ! -d Version ] ;then
mkdir Version
fi
cd Version
VersionZipName=${WIND_SW1_IN_VERSION_NUMBER}_DBSW1_${Date}_${BUILDTYPE}
mkdir $VersionZipName
Folder_CP_name=$VersionDIR/$VersionZipName
cp $final_file $Folder_CP_name
cd $Folder_CP_name
cp -a $CodeDir/${FOLDER_NAME}/version_package/* $Folder_CP_name
echo "cp -a $CodeDir/${FOLDER_NAME}/version_package/* $Folder_CP_name" >> $DAILYBUILDLOG
filelist=`ls -a $Folder_CP_name`
echo "$filelist" >> $DAILYBUILDLOG
for filename in $filelist
do
if [ x"$filename" == x"system-sign.img" ] ;then
rm -rf boot.img cache.img lk.bin logo.bin recovery.img secro.img system.img trustzone.bin userdata.img
fi
done
cd $VersionDIR
zip -rq $VersionZipName.zip $VersionZipName
cp $VersionZipName.zip /data/mine/test/MT6572/$USER/$VersionZipName.zip
echo "cp $VersionZipName.zip /data/mine/test/MT6572/$USER/$VersionZipName.zip" >> $DAILYBUILDLOG
cd $VersionDIR
rm -rf *
}
function create_product()
{
cd $CodeDir/
rm -rf ${FOLDER_NAME}
mkdir ${FOLDER_NAME}
cp $DailyBuildDir/Auto_build_A158.sh ./${FOLDER_NAME}/
run_script
}
function main()
{
echo "$(date +%Y-%m-%d_%H:%M:%S) begining!!!" >> $DAILYBUILDLOG
cd $DailyBuildDir
export USER=`whoami`
Project_info=`grep BUILD_PROJECT= ./Auto_build_A158.sh`
echo "$Project_info" >> $DAILYBUILDLOG
export FOLDER_NAME=${Project_info:14:30}_${Date}
echo "FOLDER_NAME = $FOLDER_NAME" >> $DAILYBUILDLOG
if [ ! -d Code ] ;then
mkdir Code
fi
cd Code
echo "Currentdir is `pwd`" >> $DAILYBUILDLOG
case $1 in
all)
echo "start create product " >>$DAILYBUILDLOG
create_product
;;
*)
;;
esac
}
main $1
############################### Script body - end ############################################
| true |
f5b5353e27d1aba910fad4c9b01c59fecd6daa41 | Shell | hmcts/cnp-aks-rbac | /create-ad-app.sh | UTF-8 | 3,220 | 3.46875 | 3 | [] | no_license |
#!/bin/bash
BASE_NAME="${1}"
SERVER_APP_NAME="${BASE_NAME}-server"
CLIENT_APP_NAME="${BASE_NAME}-client"
function usage() {
echo "usage: ./create-ad-app.sh <app-name>"
}
if [ -z "${BASE_NAME}" ] ; then
usage
exit 1
fi
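# Derive a random alphanumeric password: base64 output reduced to one
# alphanumeric character per line, then joined back together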
SERVER_APP_PASSWORD=$(openssl rand -base64 32 | grep -o '[[:alnum:]]' | tr -d '\n')
export SERVER_APP_ID=$(az ad app create --display-name ${SERVER_APP_NAME} --required-resource-accesses @server-manifest.json --identifier-uri http://AKSAADServer-${SERVER_APP_NAME} --password ${SERVER_APP_PASSWORD} --query appId -o tsv)
echo "Ignore the warning about \"Property 'groupMembershipClaims' not found on root\""
az ad app update --id ${SERVER_APP_ID} --set groupMembershipClaims=All
envsubst < client-manifest.template.json > client-manifest.json
while true; do
read -p "You now need to go to the portal, Azure AD -> app registrations -> ${SERVER_APP_NAME} -> settings -> required permissions, click grant permissions, after complete type (done)? " answer
case $answer in
[dD]* ) break;;
* ) echo "Please answer with 'done'";;
esac
done
CLIENT_APP_ID=$(az ad app create --display-name ${CLIENT_APP_NAME} --native-app --reply-urls http://localhost/client --required-resource-accesses @client-manifest.json --query appId -o tsv)
CLIENT_SP_OBJECT_ID=$(az ad sp create --id ${CLIENT_APP_ID} --query objectId -o tsv)
# without the sleep I was getting:
# Operation failed with status: 'Bad Request'. Details: 400 Client Error: Bad Request for url: https://graph.windows.net/a0d77fc4-df1e-4b0d-8e35-46750ca5a672/oauth2PermissionGrants?api-version=1.6
sleep 5
# You can only add delegated permissions via the CLI
# see support case: 119011625000863
az ad app permission grant --id ${CLIENT_SP_OBJECT_ID} --api ${SERVER_APP_ID}
az group create --name ${BASE_NAME} --location uksouth
SUBSCRIPTION_ID=$(az account show --query id -o tsv)
VNET_RG=core-infra-${BASE_NAME}
VNET_NAME=${BASE_NAME}
AKS_SP=$(az ad sp create-for-rbac --name http://${BASE_NAME} \
--role contributor \
--scopes /subscriptions/${SUBSCRIPTION_ID}/resourceGroups/${BASE_NAME} /subscriptions/${SUBSCRIPTION_ID}/resourceGroups/${VNET_RG}/providers/Microsoft.Network/virtualNetworks/${VNET_NAME})
AKS_SP_APP_ID=$(echo ${AKS_SP} | jq -r .appId)
AKS_SP_APP_PASSWORD=$(echo ${AKS_SP} | jq -r .password)
CLUSTER_ADMINS_GROUP_NAME="${BASE_NAME}-cluster-admins"
CLUSTER_ADMIN_GROUP=$(az ad group list --query "[?displayName=='${CLUSTER_ADMINS_GROUP_NAME}'].objectId" -o tsv)
if [ -z "${CLUSTER_ADMIN_GROUP}" ]; then
echo "Cluster admin group doesn't exist, creating"
CLUSTER_ADMIN_GROUP=$(az ad group create --display-name ${CLUSTER_ADMINS_GROUP_NAME} --mail-nickname ${CLUSTER_ADMINS_GROUP_NAME} --query objectId -o tsv)
fi
echo "Server app ID: ${SERVER_APP_ID}"
echo "Server app password: ${SERVER_APP_PASSWORD}"
echo "Server app display name: ${SERVER_APP_NAME}"
echo "Client app ID: ${CLIENT_APP_ID}"
echo "Client app display name: ${CLIENT_APP_NAME}"
echo "AKS SP client id: ${AKS_SP_APP_ID}"
echo "AKS SP client secret: ${AKS_SP_APP_PASSWORD}"
./create-aks.sh ${BASE_NAME} ${SERVER_APP_ID} ${SERVER_APP_PASSWORD} ${CLIENT_APP_ID} ${AKS_SP_APP_ID} ${AKS_SP_APP_PASSWORD}
| true | 954ceaae82beaf6a920a458359b39053a408cac3 | Shell | altnight/individual-sandbox | /diary/gen.sh | UTF-8 | 116 | 2.765625 | 3 | ["Apache-2.0"] | permissive |
#!/bin/bash
name=`date +%Y%m%d`
test -d ${name} || mkdir -p ${name}
cd ${name}
test -f README.md || touch README.md
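# Worked example (added): run on 2024-01-05 this creates ./20240105/README.md;
# re-running the same day is a no-op thanks to the "test -d"/"test -f" guards.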
| true | fdfa72df64fee3c0af51fea0b3108db09c0cb425 | Shell | 821-N/holberton-system_engineering-devops | /0x04-loops_conditions_and_parsing/7-clock | UTF-8 | 207 | 3.375 | 3 | [] | no_license |
#!/usr/bin/env bash
# prints hours and minutes
hour=0
while [ $hour -le 12 ]
do
echo "Hour: $hour"
minute=1
while [ $minute -lt 60 ]
do
echo $minute
minute=$((minute+1))
done
hour=$((hour+1))
done
| true | bcd180bbe01369e77346f7ea08176432c2426865 | Shell | grzebiel/dotfiles | /misc/install | UTF-8 | 869 | 3.015625 | 3 | [] | no_license |
#!/bin/bash
# install usb notifications
sudo cp 99-usb.rules /etc/udev/rules.d/
sudo cp usbup /usr/local/bin
# let the user modify the backlight on hel/frigg
if [ $HOSTNAME == "hel" ] || [ $HOSTNAME == "frigg" ]
then
#groupadd backlight
sudo usermod -aG video grzebiel
#enable battery notifications
systemctl --user enable battery-deamon.service
fi
# set screenlock on suspend
sudo cp sleep_lock.service /etc/systemd/system
sudo systemctl daemon-reload
sudo systemctl enable sleep_lock.service
# enable the ssh-agent
systemctl enable --user ssh-agent.service
# allow user to activate existing wifi profile
sudo cp switch_wifi_to /usr/local/bin
sudo bash -c 'echo "grzebiel ALL = NOPASSWD : /usr/local/bin/switch_wifi_to" > /etc/sudoers.d/10_switch_wifi_to'
# add modification rights brightness to video group via udev
sudo cp backlight.rules /etc/udev/rules.d
| true | 46f0e0ffc4ece946732d7ca9f3f07bea8dfc38cd | Shell | djdagovs/autosshvpn | /file/vpnfix.sh | UTF-8 | 1,397 | 3.046875 | 3 | [] | no_license |
#!/bin/bash
if [[ `ifconfig -a | grep "venet0"` ]]
then
cekvirt='OpenVZ'
elif [[ `ifconfig -a | grep "eth0"` ]]
then
cekvirt='KVM'
fi
if [ "$cekvirt" = 'KVM' ]; then
iptables -t nat -I POSTROUTING -s 10.8.0.0/24 -o eth0 -j MASQUERADE
iptables -t nat -I POSTROUTING -s 10.9.0.0/24 -o eth0 -j MASQUERADE
elif [ "$cekvirt" = 'OpenVZ' ]; then
iptables -t nat -I POSTROUTING -s 10.8.0.0/24 -o venet0 -j MASQUERADE
iptables -t nat -I POSTROUTING -s 10.9.0.0/24 -o venet0 -j MASQUERADE
fi
if [[ -e /etc/pptpd.conf ]]; then
if [ "$cekvirt" = 'KVM' ]; then
iptables -I INPUT -s 10.1.0.1/8 -i ppp0 -j ACCEPT
iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
iptables -t nat -A POSTROUTING -s 10.1.0.1/24 -o eth0 -j MASQUERADE
elif [ "$cekvirt" = 'OpenVZ' ]; then
iptables -I INPUT -s 10.1.0.1/8 -i ppp0 -j ACCEPT
iptables -t nat -A POSTROUTING -o venet0 -j MASQUERADE
iptables -t nat -A POSTROUTING -s 10.1.0.1/24 -o venet0 -j MASQUERADE
fi
else
echo "PPTP not installed"
fi
echo "DONE!!"
| true | b611a923d97c928243ae111134fc34ad383caa9b | Shell | amannocci/logbulk | /core/build/build-docker-image.sh | UTF-8 | 595 | 3.390625 | 3 | ["MIT"] | permissive |
#!/bin/bash
# Correct path
cd "$(dirname "$0")"
BASE_DIR=$PWD
BASE_PROJECT=$(dirname "$BASE_DIR")
# Load common
source ${BASE_DIR}/common.sh
info "Loading common"
# Check needed
if [ "$(is_install mvn)" == "1" ]
then
error "Please install maven to continue"
exit 1
fi
if [ "$(is_install docker)" == "1" ]
then
error "Please install docker to continue"
exit 1
fi
# Extract version
cd ${BASE_PROJECT}
get_version
# Build logbulk before anything
mvn clean install package
# Build all plugins
${BASE_DIR}/build-plugins.sh
# Build logbulk
docker build -t amannocci/logbulk:${VERSION} .
| true | eb295e0d38c5476aa9717a8c1585af604da283ba | Shell | khankawais/Prometheus-Installation-Scripts | /install_mysql_exporter.sh | UTF-8 | 3,118 | 3.46875 | 3 | [] | no_license |
#!/bin/bash
################################################################################
# Author : Awais Khan #
# Use this script as a root user. #
# This script is used to create a MySQL exporter service for prometheus #
# the service will use port 9104 by default #
################################################################################
export DEBIAN_FRONTEND=noninteractive
apt update
which mysql
if [ $? == 1 ];then
printf "\n Please Install Mysql Server before installing the Exporter . \n\n"
else
printf "\n Installed : \n\n"
which curl wget tar
if [ $? == 1 ];then
printf "\n Make sure that you have these dependencies installed
curl , wget , tar \n\n
"
else
curl -s https://api.github.com/repos/prometheus/mysqld_exporter/releases/latest | grep browser_download_url | grep linux-amd64 | cut -d '"' -f 4 | wget -i -
printf "\n\n ------------------------ Extracting Files ------------------------\n"
tar xvf mysqld_exporter*.tar.gz
mv mysqld_exporter-*.linux-amd64/mysqld_exporter /usr/local/bin/
chmod +x /usr/local/bin/mysqld_exporter
groupadd --system mysql_exporter
useradd -s /sbin/nologin --system -g mysql_exporter mysql_exporter
printf "\n\n ------------------------ Please Provide the following Things ------------------------\n\n"
cat << EOF > /tmp/commands.sql
CREATE USER 'mysqld_exporter'@'localhost' IDENTIFIED BY 'prometheus' WITH MAX_USER_CONNECTIONS 2;
GRANT PROCESS, REPLICATION CLIENT, SELECT ON *.* TO 'mysqld_exporter'@'localhost';
FLUSH PRIVILEGES;
EXIT
EOF
read -p "Enter Username of Your MySQL database > " Mysql_username
read -p "Enter Password of Your MySQL database > " Mysql_password
mysql -u $Mysql_username --password=$Mysql_password < /tmp/commands.sql
rm -rf /tmp/commands.sql
cat << EOF > /etc/.mysqld_exporter.cnf
[client]
user=mysqld_exporter
password=prometheus
host=localhost
EOF
chown root:mysql_exporter /etc/.mysqld_exporter.cnf
cat << EOF > /etc/systemd/system/mysql_exporter.service
[Unit]
Description=Prometheus MySQL Exporter
After=network.target

[Service]
User=mysql_exporter
Group=mysql_exporter
Type=simple
Restart=always
ExecStart=/usr/local/bin/mysqld_exporter \
--config.my-cnf /etc/.mysqld_exporter.cnf \
--collect.global_status \
--collect.info_schema.innodb_metrics \
--collect.auto_increment.columns \
--collect.info_schema.processlist \
--collect.binlog_size \
--collect.info_schema.tablestats \
--collect.global_variables \
--collect.info_schema.query_response_time \
--collect.info_schema.userstats \
--collect.info_schema.tables \
--collect.perf_schema.tablelocks \
--collect.perf_schema.file_events \
--collect.perf_schema.eventswaits \
--collect.perf_schema.indexiowaits \
--collect.perf_schema.tableiowaits \
--collect.slave_status \
--web.listen-address=0.0.0.0:9104
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable mysql_exporter
systemctl start mysql_exporter
fi
fi
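# Verification sketch (added, not part of the original script): standard
# systemd and curl checks once the service is up.
#   systemctl status mysql_exporter --no-pager
#   curl -s http://localhost:9104/metrics | head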
| true | 16885add627edf15236d5baf738313b00fb74123 | Shell | s20024/app-stdlog | /stdlog.sh | UTF-8 | 1,996 | 3.75 | 4 | [] | no_license |
#!/bin/sh
# Get the current directory path
path=`pwd`
# Show the application description
eog 1.png
# Error handling for when this is run outside Ubuntu (e.g. on Windows)
if [ $? -ne 0 ]
then
echo "This was not run on Ubuntu."
echo "Please read the image files (1 to 5) in the current directory, then continue below."
# Give the reader time to go through the message above
sleep 20
fi
# Ask whether it is OK to proceed
read -p "Proceed? yes(y)/no(n):" judge
if [ "$judge" = "y" ]; then
# Get today's date
today=`date "+%y:%m:%d"`
# Create a hidden directory under the home directory and move into it
mkdir -p ~/.stdlog
cd ~/.stdlog
# Download the shell scripts from GitHub
git clone --depth 1 https://github.com/s20024/stdlog.git
# Move the downloaded files here
mv stdlog/* .
# Remove the now-unneeded directory and README.md
rm -rf stdlog README.md
# Create the required files
touch stdlog_breaktime stdlog_in
# Create the "today" file
echo "$today" > stdlog_today
# Create the stdlog directory under the home directory and move into it
cd ~/
mkdir -p stdlog
cd stdlog
# Create data.csv and write its header row
touch data.csv
echo "year_month_day,study_time_hour,study_time_minute" > data.csv
# Record today's data (just in case)
echo "$today,0,0" >> data.csv
# Append to .bashrc (it is read on shell startup, so the aliases persist)
echo "
alias in=\". ~/.stdlog/in.sh\"
alias out=\". ~/.stdlog/out.sh\"
alias breaktime=\". ~/.stdlog/breaktime.sh\"
" >> ~/.bashrc
# Reload .bashrc, just in case
. ~/.bashrc
# Return to the starting directory
cd $path
# Remove this directory, which is no longer needed
cd ..
rm -rf app-stdlog
fi
| true | 3bf4270f4c60dac819effc5ad081159bf69c801c | Shell | latifkabir/n3He_Soft | /Watchdog/bin/transferFile.sh | UTF-8 | 537 | 3.171875 | 3 | [] | no_license |
#!/bin/bash
RUN=$1
LAST=$2
while [ $RUN -ne $LAST ]
do
if [ -f /home/daq/DATA/run-${RUN}data-21 ] && [ -f /home/daq/DATA/run-${RUN}data-22 ] && [ -f /home/daq/DATA/run-${RUN}data-23 ] && [ -f /home/daq/DATA/run-${RUN}data-24 ] && [ -f /home/daq/DATA/run-${RUN}data-30 ]
then
scp -p /home/daq/DATA/run-${RUN}data-{21,22,23,24,30} basestar:/mnt/idata02/data/.
else
echo "FAILED to transfer run ${RUN}. Files do NOT exist."
echo "run:${RUN} " >> /home/daq/Watchdog/bin/failedRuns.txt
fi
RUN=`expr $RUN + 1`
done
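# Usage sketch (added): transfers runs RUN through LAST-1, e.g.
#   ./transferFile.sh 1000 1010   # attempts runs 1000..1009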
| true | eb7ed881263b53c2573f2e2d6c0cfcdaa6eb5670 | Shell | wildgarden/nvml-docker | /check-images.sh | UTF-8 | 571 | 3.421875 | 3 | ["BSD-3-Clause"] | permissive |
#!/bin/bash
[[ $TRAVIS_REPO_SLUG == "wojtuss/nvml-docker" && $TRAVIS_BRANCH == "master" ]] || exit 0
commitRange=$([[ -n "$TRAVIS_COMMIT_RANGE" ]] && echo ${TRAVIS_COMMIT_RANGE/\.+/ } || echo $TRAVIS_COMMIT)
files=$(git diff-tree --no-commit-id --name-only -r $commitRange)
base_dir=testdir
for file in $files; do
if [[ $file =~ ^($base_dir)\/Dockerfile\.($OS)-($OS_VER)$ ]] \
|| [[ $file =~ ^($base_dir)\/.*\.sh$ ]]
then
cd testdir && ./build-image.sh $OS:$OS_VER
if [[ $TRAVIS_EVENT_TYPE != "pull_request" ]]; then
echo 1
fi
exit 0
fi
done
| true | 2d7bf76ac5fe7e65c4fa5ceb7f5eebdffc241cc0 | Shell | cloudfoundry-incubator/kubo-ci | /scripts/lib/semver.sh | UTF-8 | 758 | 3.953125 | 4 | [] | no_license |
#!/bin/bash
SEMVER_REGEX="^(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(\.(0|[1-9][0-9]*))?$"
semver_arr() {
version=$1
if [[ "$version" =~ $SEMVER_REGEX ]]; then
local major=${BASH_REMATCH[1]}
local minor=${BASH_REMATCH[2]}
local patch=${BASH_REMATCH[3]}
if [ ! -z $patch ]; then
patch=$(echo $patch | cut -c 2-)
fi
eval "$2=(\"$major\" \"$minor\" \"$patch\")"
fi
}
compare_semvers() {
semver_arr $1 a
semver_arr $2 b
for i in 0 1 2; do
local x=${a[$i]}
local y=${b[$i]}
if [ -z $x ]; then
x=0
fi
if [ -z $y ]; then
y=0
fi
local diff=$(($x - $y))
if [[ $diff -lt 0 ]]; then
echo -1; return 0
elif [[ $diff -gt 0 ]]; then
echo 1; return 0
fi
done
echo 0
}
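# Usage sketch (added; the source path is an assumption):
#   . scripts/lib/semver.sh
#   compare_semvers "1.2.3" "1.10.0"   # prints -1: minor 2 < 10, compared numerically
#   compare_semvers "2.0" "2.0.0"      # prints 0: a missing patch component defaults to 0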
| true | 73894a92197acd1eab9572d1ba2880127f249b3f | Shell | chenliru/cweb.io | /shell/sys_script/crontabs/root/tuxLogClean.ksh | UTF-8 | 1,175 | 3.40625 | 3 | ["MIT"] | permissive |
#!/bin/ksh
###############################################################################
#
# Name: tuxLogClean.ksh
#
# Reference: n/a
#
# Description: backup locus tuxedo instance ULOG file and achive
#
# Parameters: None
#
# Modification History:
#
# Date Name Description
# -----------------------------------------------------------
# 2002-10-22 Bob Chong Original
# 2006-08-01 Denny Guo Modified
#
################################################################################
set -v
set -x
TuxDir=/usr/apps/ipg/ver001/srv
LocusInstance=$TuxDir/locus
clean(){
set -x
integer n=$2
dir=$1
ar=ULOGS
Logs=ULOG.*
cd $dir
[ -L $ar ] || exit 1
set `ls -t $Logs`
shift
[ $1 ] && mv $* $ar
cd $ar
set `ls -t $Logs`
until (((n-=1)<0))
do
[ $1 ] && shift
done
rm -f $*
compress $Logs >/dev/null 2>&1
}
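# Worked example (added) for the call below: with n=3 the newest ULOG.* stays
# in the instance directory, the rest are moved into ULOGS, only the 3 newest
# of those are kept, and the survivors are compressed.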
clean $LocusInstance 3
#
InsightDir=/usr/apps/ipg/ver001/srv/insight
BdsDir=/usr/apps/ipg/ver001/srv/bds/pgm/ip_0p
find $InsightDir -name "ULOG.*" -type f -mtime +4 -exec /usr/bin/rm -f {} \;
find $BdsDir -name "ULOG.*" -type f -mtime +4 -exec /usr/bin/rm -f {} \;
exit 0
| true | 692b4e44ba6aa0275f78887e7b085f390109c602 | Shell | plamolinux/qbilinux | /qbilinux/02_x11/xfplamoconfig/PackageBuild.xfplamoconfig | UTF-8 | 1,077 | 3.046875 | 3 | [] | no_license |
#!/bin/sh
######################################################################
pkgbase=xfplamoconfig
vers=1.5
url=""
srcfiles="xfplamoconfig xfplamoconfig.sh"
apply_arch="noarch"
arch=noarch
build=1
src=$pkgbase
patchfiles=""
OPT_CONFIG=""
DOCS=""
compress=txz
SRC_URL="https://qbilinux.org/pub/source/"
SRC_DIR="/home/archives/source/"
######################################################################
source /usr/src/qbilinux/PackageBuild.def
do_prepare() {
echo "nothing to do in prepare."
}
do_config() {
if [ -d ${B[$1]} ] ; then rm -rf ${B[$1]} ; fi
mkdir ${B[$1]}
cd ${B[$1]}
}
do_build() {
cd ${B[$1]}
}
do_install() {
cd ${B[$1]}
# add extra func
install -d $P/sbin
for i in $srcfiles ; do
install $W/$i $P/sbin
done
install -d $docdir/$src
for i in $srcfiles ; do
cp $W/$i $docdir/$src/$i
gzip $docdir/$src/$i
done
}
do_package() {
for i in $pkgbase ; do
cd $P
/sbin/makepkg $W/$pkg.$compress <<EOF
y
1
EOF
done
}
source /usr/src/qbilinux/PackageBuild.func
| true | ad2604a3f5254bbc319de824762c83acba562a32 | Shell | supreem61/journals | /duplexer.sh | UTF-8 | 358 | 3.296875 | 3 | [] | no_license |
#!/bin/bash
echo 'Enter suffix including .'
read string
count=1
for odd in `ls odd* |sort -V`
do
mv $odd page-"$count$string"
count=`expr $count + 2`
done
count=0
for even in `ls even*|sort -V`
do
count=`expr $count + 2`
done
for even in `ls even*|sort -V`
do
mv $even page-"$count$string"
count=`expr $count - 2`
done
read string
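# Worked example (added): with odd1..odd3 and even1..even3 scanned duplex-style,
# odds become page-1/3/5 and evens become page-6/4/2, i.e. the evens are assumed
# to have been scanned in reverse order.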
| true | 95feb1d39fa08e5de9bf5193683ac39383c53b2c | Shell | Geek-Chic/EasyLinux | /Module/Centos/install/update.zz | UTF-8 | 879 | 2.859375 | 3 | [] | no_license |
#!/bin/bash
# File Name: update.zz
# Description: update functions
# @Author: evil
# Created Time: Mon 26 Jan 2015 21:05:06 PM CST
# Update pip
update_pip(){
pip install -U pip
}
# Upgrade gcc to 4.8
update_gcc(){
# Single quotes keep $releasever and $basearch literal, so yum expands them
# instead of the current shell.
sudo sh -c 'cat >> /etc/yum.repos.d/DevToolset.repo << "EOF"
[DevToolset-2]
name=RedHat DevToolset v2 $releasever - $basearch
baseurl=http://puias.princeton.edu/data/puias/DevToolset/$releasever/$basearch/
enabled=1
gpgcheck=0
EOF'
sudo yum install devtoolset-2-gcc-4.8.1 devtoolset-2-gcc-c++-4.8.1
sudo ln -s /opt/rh/devtoolset-2/root/usr/bin/* /usr/local/bin/
hash -r
gcc --version
}
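# Usage sketch (added): this file only defines functions; source it, then call one.
#   . ./update.zz && update_gcc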
| true | b95869f1740a7d8b9888aedb8b2009c0fd433e8a | Shell | evolarjun/amrfindertest | /ubuntu_test/80_source_v2.t | UTF-8 | 756 | 2.953125 | 3 | [] | no_license |
#!/bin/bash
echo 1..3 # for TAP
set -e
>&2 echo "Testing assembly of v2 branch (used for development)"
if [ -d "amr-2" ]
then
rm -rf amr-2
fi
curl -fsSL https://github.com/ncbi/amr/archive/v2.tar.gz | tar xz
cd amr-2
make
./amrfinder -u
./amrfinder --plus -p test_prot.fa -g test_prot.gff -O Escherichia > test_prot.got
diff test_prot.expected test_prot.got
echo "ok 1 v2 branch protein test"
./amrfinder --plus -n test_dna.fa -O Escherichia > test_dna.got
diff test_dna.expected test_dna.got
echo "ok 2 v2 branch nucleotide test"
./amrfinder --plus -n test_dna.fa -p test_prot.fa -g test_prot.gff -O Escherichia > test_both.got
diff test_both.expected test_both.got
echo "ok 3 v2 branch combined test"
| true | da4b513ce6eccb9d52c47a4d519519f44559a27b | Shell | Chris24680/sustainable-rails-docker | /bin/vars | UTF-8 | 712 | 2.609375 | 3 | [] | no_license |
# Set this to the port in the Docker container you want exposed
EXPOSE=3000
# Set this to the port on your localhost you want to use to access
# the EXPOSE port above
LOCAL_PORT=9999
# Docker/Docker Hub setup. This is here to allow pushing a built
# image to Docker Hub and to ensure proper namespace isolation
# of the image that's built by this repo
#
# Set this to your account name on Docker Hub (required)
ACCOUNT=your-account
# Set this to the repo name on Docker Hub (required)
REPO=rails-dev-environment
# Set this to the tag name to use (required)
TAG=some-tag
# Set this to the directory inside the Docker image you want to mirror
# your project's root directory
WORKDIR=/root/work
# vim: ft=bash
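# Usage sketch (added; the consuming command is an assumption -- the other bin/
# scripts presumably source this file):
#   . "$(dirname "$0")/vars"
#   docker run -v "$PWD:$WORKDIR" -p "$LOCAL_PORT:$EXPOSE" "$ACCOUNT/$REPO:$TAG"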
| true | 637f81807a88cd043de1efa6955fd039f9435784 | Shell | fnfly2005/public_work | /mtmm_code/05tmp/wl01.sh | UTF-8 | 2,334 | 2.6875 | 3 | [] | no_license |
#!/bin/bash
clock=0
t1=${1:-`date -d "yesterday ${clock}" +"%Y-%m-%d %T"`}
t2=${2:-`date -d "${t1% *} -1 days ago ${clock}" +"%Y-%m-%d %T"`}
t3=`date -d "${t2% *}" +"%Y%m%d%H%M%S"`
path="/data/fannian/"
fut() {
echo `grep -iv "\-time" ${path}sql/${1}.sql`
}
w=`fut warehouse`
sso=`fut sale_sub_order`
file="wl01"
attach="${path}00output/${file}.csv"
presto_e="/opt/presto/bin/presto --server hc:9980 --catalog hive --execute "
se="set session optimize_hash_generation=true;"
${presto_e}"
${se}
with w as (
${w}
where
sp_id=0
),
sso as (
${sso}
dt>='${t1% *}'
and dt<'${t2% *}'
and supplier_id=0
and order_status<>0
),
sss as (
select
sso.supplier_id,
supplier_name,
warehouse_name,
sub_order_id,
approve_time,
case when delivered_time is null then '${t2}'
else delivered_time end delivered_time,
case when express_time is null then '${t3}'
when length(express_time)<>14 then null
else express_time end express_time
from
sso
join w using(warehouse_id)
where
approve_time is not null
),
sw as (
select
supplier_id,
supplier_name,
warehouse_name,
sub_order_id,
date_parse(approve_time,'%Y%m%d%H%i%S') approve_time,
date_parse(delivered_time,'%Y-%m-%d %H:%i:%S') delivered_time,
date_parse(express_time,'%Y%m%d%H%i%S') express_time
from
sss
),
s1 as (
select
supplier_id,
supplier_name,
warehouse_name,
sub_order_id,
case when delivered_time is null then null
else date_diff('hour',approve_time,delivered_time) end ad,
case when express_time is null then null
else date_diff('hour',approve_time,express_time) end ae
from
sw
),
temp as (select 1)
select
'${t1% *}' dt,
supplier_id,
supplier_name,
warehouse_name,
count(distinct sub_order_id) all_sub,
count(distinct case when ad<24 then sub_order_id end) ad24_sub,
count(distinct case when ae<24 then sub_order_id end) ae24_sub,
count(distinct case when ae<48 then sub_order_id end) ae48_sub,
count(distinct case when ae<72 then sub_order_id end) ae72_sub,
count(distinct case when ae>=72 then sub_order_id end) ae72o_sub,
count(distinct case when ae is null or ad is null then sub_order_id end) aenull_sub
from
s1
group by
1,2,3,4
"|grep -iv "SET">>${attach}
| true | 12068fde812396be51dc285ab47d0173818d3dec | Shell | ring-lang/ring | /extensions/android/ringlibsdl/project/jni/libpng-1.6.2/tests/pngstest | UTF-8 | 335 | 3.640625 | 4 | ["LicenseRef-scancode-free-unknown", "MIT", "Libpng", "LicenseRef-scancode-unknown-license-reference", "LicenseRef-scancode-warranty-disclaimer"] | permissive |
#!/bin/sh
#
# Usage:
#
# tests/pngstest pattern
#
# Runs pngstest on all the contrib/pngsuite/[^x]*${pattern}.png files
# NOTE: pattern is used to name the temporary files pngstest generates
#
pattern="$1"
shift
exec ./pngstest --strict --tmpfile "${pattern}" --log ${1+"$@"}\
"${srcdir}/contrib/pngsuite/"[a-wyz]*${pattern}".png"
| true | 222c1de2f987e0058168f74ac14c03ac9bf74eee | Shell | kevfield/AdventOfCode2019 | /day1/day1p1.sh | UTF-8 | 270 | 3.140625 | 3 | [] | no_license |
#!/bin/bash
#
# number divide by 3
# round it down
# subtract 2
#
####################################
for i in $(cat /tmp/fuelinput)
do
divided=$(($i/3))
subtracted=$(($divided-2))
echo $subtracted >> /tmp/newfuelinput
done
paste -sd+ /tmp/newfuelinput | bc
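# Worked example (added): a module of mass 12 needs 12/3 - 2 = 2 fuel;
# mass 1969 needs floor(1969/3) - 2 = 654.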
| true | d74a91b66eb8e23dd03dfa428dd6c277d781585b | Shell | kobehaha/Afs | /scripts/dataServer.sh | UTF-8 | 411 | 2.65625 | 3 | ["MIT"] | permissive |
#!/usr/bin/env bash
export LISTEN_ADDRESS=127.0.0.1:7070
export RABBITMQ_SERVER=amqp://test:[email protected]:15672
export STORAGE_ROOT=/var/
export LOG_DIR=/var/logs/afs/
export LOG_LEVEL=DEBUG
if [ ! -d $LOG_DIR ];then
mkdir -p $LOG_DIR
touch $LOG_DIR/afs.log
fi
if [ ! -d $STORAGE_ROOT ];then
mkdir -p $STORAGE_ROOT
fi
go run ../main/dataServer.go
| true | b0716798dd2735dfc81253430b0fe40c1dbb1d88 | Shell | webon100/openshift-origin-weblogic-cartridge | /bin/install | UTF-8 | 4,980 | 3.265625 | 3 | ["Apache-2.0"] | permissive |
#!/bin/bash -e
echo ''
echo '######################################################################'
echo '# INSTALL #'
echo '######################################################################'
echo ''
source $OPENSHIFT_CARTRIDGE_SDK_BASH
case "$1" in
-v|--version)
version="$2"
esac
echo "OPENSHIFT_WEBLOGIC_HOME=${OPENSHIFT_WEBLOGIC_HOME}"
echo "$version" > "$OPENSHIFT_WEBLOGIC_DIR/env/OPENSHIFT_WEBLOGIC_VERSION"
echo "/etc/alternatives/jre/" > "$OPENSHIFT_WEBLOGIC_DIR/env/JAVA_HOME"
export JAVA_HOME="/etc/alternatives/jre/"
export ANT_HOME="/usr/share/ant/"
export PATH="$PATH:$JAVA_HOME/bin"
echo 'Generating username and password'
mkdir -p ${OPENSHIFT_WEBLOGIC_DIR}user_projects/domains/base_domain/servers/AdminServer/security
domainname="server1"
username="weblogic"
#password=$(generate_password "9" "a-zA-Z0-9")
#password="${password}1"
password="Welcome1"
admin_console_url="http://${OPENSHIFT_GEAR_DNS}/console"
echo "$username" > ${OPENSHIFT_WEBLOGIC_DIR}/env/OPENSHIFT_WEBLOGIC_ADMIN_USERNAME
echo "$password" > ${OPENSHIFT_WEBLOGIC_DIR}/env/OPENSHIFT_WEBLOGIC_ADMIN_PASSWORD
echo "$admin_console_url" > ${OPENSHIFT_WEBLOGIC_DIR}/env/OPENSHIFT_WEBLOGIC_ADMIN_CONSOLE_URL
OPENSHIFT_WEBLOGIC_ADMIN_PASSWORD=${password}
export OPENSHIFT_WEBLOGIC_ADMIN_PASSWORD
OPENSHIFT_WEBLOGIC_ADMIN_USERNAME=${username}
export OPENSHIFT_WEBLOGIC_ADMIN_USERNAME
OPENSHIFT_WEBLOGIC_DOMAIN_DIRECTORY="${OPENSHIFT_WEBLOGIC_DIR}user_projects/domains/${OPENSHIFT_APP_NAME}"
echo "${OPENSHIFT_WEBLOGIC_DOMAIN_DIRECTORY}" > ${OPENSHIFT_WEBLOGIC_DIR}/env/OPENSHIFT_WEBLOGIC_DOMAIN_DIRECTORY
export OPENSHIFT_WEBLOGIC_DOMAIN_DIRECTORY
echo "OPENSHIFT_WEBLOGIC_ADMIN_USERNAME=${OPENSHIFT_WEBLOGIC_ADMIN_USERNAME}"
echo "OPENSHIFT_WEBLOGIC_ADMIN_PASSWORD=${OPENSHIFT_WEBLOGIC_ADMIN_PASSWORD}"
echo "OPENSHIFT_WEBLOGIC_DOMAIN_DIRECTORY=${OPENSHIFT_WEBLOGIC_DOMAIN_DIRECTORY}"
# Create Weblogic Profile
creation_result=`${OPENSHIFT_WEBLOGIC_HOME}wlserver/common/bin/wlst.sh -skipWLSModuleScanning ${OPENSHIFT_WEBLOGIC_DIR}container-scripts/create-wls-domain.py`
echo "username=${OPENSHIFT_WEBLOGIC_ADMIN_USERNAME}" > ${OPENSHIFT_WEBLOGIC_DOMAIN_DIRECTORY}/servers/AdminServer/security/boot.properties
echo "password=${OPENSHIFT_WEBLOGIC_ADMIN_PASSWORD}" >> ${OPENSHIFT_WEBLOGIC_DOMAIN_DIRECTORY}/servers/AdminServer/security/boot.properties
echo ". ${OPENSHIFT_WEBLOGIC_DOMAIN_DIRECTORY}/bin/setDomainEnv.sh" >> ${OPENSHIFT_WEBLOGIC_DIR}.bashrc
echo "export PATH=$PATH:${OPENSHIFT_WEBLOGIC_HOME}wlserver/common/bin:${OPENSHIFT_WEBLOGIC_DOMAIN_DIRECTORY}/bin" >> ${OPENSHIFT_WEBLOGIC_DIR}.bashrc
# Create softlink to Weblogic binary installation
ln -s ${OPENSHIFT_WEBLOGIC_HOME} ${OPENSHIFT_WEBLOGIC_DIR}/install
# Create Hot Deployment Dir
echo "${OPENSHIFT_WEBLOGIC_DOMAIN_DIRECTORY}/autodeploy" > ${OPENSHIFT_WEBLOGIC_DIR}/env/OPENSHIFT_WEBLOGIC_DEPLOYMENT_DIR
OPENSHIFT_WEBLOGIC_DEPLOYMENT_DIR="${OPENSHIFT_WEBLOGIC_DOMAIN_DIRECTORY}/autodeploy"
export OPENSHIFT_WEBLOGIC_DEPLOYMENT_DIR
# Setup logging
ln -s ${OPENSHIFT_WEBLOGIC_DOMAIN_DIRECTORY}/servers/AdminServer/logs/AdminServer.log ${OPENSHIFT_LOG_DIR}/AdminServer.log
# Prepare Build and Deploy for Sample Code
OPENSHIFT_WEBLOGIC_MAVEN_DIR="${OPENSHIFT_DATA_DIR}/maven"
OPENSHIFT_WEBLOGIC_MAVEN_REPO_DIR="${OPENSHIFT_WEBLOGIC_MAVEN_DIR}/repository"
mkdir ${OPENSHIFT_WEBLOGIC_MAVEN_DIR}
mkdir ${OPENSHIFT_WEBLOGIC_MAVEN_REPO_DIR}
cp ${OPENSHIFT_WEBLOGIC_DIR}/usr/settings.base.xml ${OPENSHIFT_WEBLOGIC_MAVEN_DIR}
cp ${OPENSHIFT_WEBLOGIC_DIR}/usr/settings.rhcloud.xml ${OPENSHIFT_WEBLOGIC_MAVEN_DIR}
sed -i "s|{MAVEN_REPO_PATH}|${OPENSHIFT_WEBLOGIC_MAVEN_REPO_DIR}|g" ${OPENSHIFT_WEBLOGIC_MAVEN_DIR}/settings.base.xml
sed -i "s|{MAVEN_REPO_PATH}|${OPENSHIFT_WEBLOGIC_MAVEN_REPO_DIR}|g" ${OPENSHIFT_WEBLOGIC_MAVEN_DIR}/settings.rhcloud.xml
sed -i "s/{APP_NAME}/${OPENSHIFT_APP_NAME}/g" ${OPENSHIFT_WEBLOGIC_DIR}/template/pom.xml
sed -i "s|{WEBLOGIC_ADMIN_CONSOLE_URL}|${admin_console_url}|g" ${OPENSHIFT_WEBLOGIC_DIR}/template/src/main/webapp/index.html
# Create initial deployment
pushd ${OPENSHIFT_WEBLOGIC_DIR}/template/src/main/webapp 1>/dev/null
jar cvf ${OPENSHIFT_WEBLOGIC_DEPLOYMENT_DIR}/ROOT.war ./*
popd 1> /dev/null
# Finalize
client_result "######################################################################"
client_result "$creation_result"
client_result "######################################################################"
echo ''
echo '######################################################################'
echo '######################################################################'
echo ''
client_result ""
client_result "Weblogic version: ${version} created. Please make note of these Administrative User credentials:"
client_result ""
client_result " Username: $username"
client_result " Password: $password"
client_result " Admin Console: $admin_console_url"
client_result ""
client_result ""
| true | 2b16c3bb1cd41c8d4984470019e04b5a63d0e876 | Shell | sumatej/nuage-kubernetes | /demo-scripts/demo3.sh | UTF-8 | 5,390 | 3.15625 | 3 | [] | permissive |
#!/bin/bash
########################
# include the magic
########################
. demo-magic.sh
# hide the evidence
clear
get_pods()
{
pe " kubectl get pods"
}
get_pods_2()
{
y=`kubectl get pods $(kubectl get pods | grep -v NAME | awk '{ print $1}') -o 'jsonpath={.items[*].status.phase}'`
while [ "$y" != "Running Running Running" ]
do
sleep 5
echo -e "${CYAN}All pods are still not in running state. Waiting..."
y=`kubectl get pods $(kubectl get pods | grep -v NAME | awk '{ print $1}') -o 'jsonpath={.items[*].status.phase}'`
done
rm -rf pods
x=`kubectl get pods | grep -v NAME | awk '{ print $1}'`
for i in $x
do
echo "pod $i info" >> pods
echo "status = $(kubectl get pod $i -o 'jsonpath={.status.phase}')" >> pods
echo "name = $(kubectl get pod $i -o 'jsonpath={.metadata.name}')" >> pods
echo "hostIP = $(kubectl get pod $i -o 'jsonpath={.status.hostIP}')" >> pods
echo "podIP = $(kubectl get pod $i -o 'jsonpath={.status.podIP}')" >> pods
done
sleep 1
cat pods
}
get_pods_3()
{
y=`kubectl get pods $(kubectl get pods --namespace=guestbook | grep -v NAME | awk '{ print $1}') --namespace=guestbook -o 'jsonpath={.items[*].status.phase}'`
while [ "$y" != "Running Running Running Running Running Running" ]
do
sleep 5
echo -e "${CYAN}All pods are still not in running state. Waiting..."
y=`kubectl get pods $(kubectl get pods --namespace=guestbook | grep -v NAME | awk '{ print $1}') --namespace=guestbook -o 'jsonpath={.items[*].status.phase}'`
done
rm -rf pods
x=`kubectl get pods --namespace=guestbook | grep -v NAME | awk '{ print $1}'`
for i in $x
do
echo "pod $i info" >> pods
echo "status = $(kubectl get pod $i --namespace=guestbook -o 'jsonpath={.status.phase}')" >> pods
echo "name = $(kubectl get pod $i --namespace=guestbook -o 'jsonpath={.metadata.name}')" >> pods
echo "hostIP = $(kubectl get pod $i --namespace=guestbook -o 'jsonpath={.status.hostIP}')" >> pods
echo "podIP = $(kubectl get pod $i --namespace=guestbook -o 'jsonpath={.status.podIP}')" >> pods
done
sleep 1
cat pods
}
get_pods_4()
{
y=`kubectl get pods $(kubectl get pods --namespace=demo | grep -v NAME | awk '{ print $1}') --namespace=demo -o 'jsonpath={.items[*].status.phase}'`
while [ "$y" != "Running Running" ]
do
sleep 5
echo -e "${CYAN} All pods are still not in running state. Waiting..."
y=`kubectl get pods $(kubectl get pods --namespace=demo | grep -v NAME | awk '{ print $1}') --namespace=demo -o 'jsonpath={.items[*].status.phase}'`
done
rm -rf pods
x=`kubectl get pods --namespace=demo | grep -v NAME | awk '{ print $1}'`
for i in $x
do
echo "pod $i info" >> pods
echo "status = $(kubectl get pod $i --namespace=demo -o 'jsonpath={.status.phase}')" >> pods
echo "name = $(kubectl get pod $i --namespace=demo -o 'jsonpath={.metadata.name}')" >> pods
echo "hostIP = $(kubectl get pod $i --namespace=demo -o 'jsonpath={.status.hostIP}')" >> pods
echo "podIP = $(kubectl get pod $i --namespace=demo -o 'jsonpath={.status.podIP}')" >> pods
done
sleep 1
cat pods
}
echo -e "${CYAN} Welcome again to the Nuage Kubernetes Integration demo"
echo -e "${CYAN} Let's start with a same cluster for Operations workflow"
echo -e "${CYAN} Creating Zone, Subnet and corresponding ACLs on the Nuage VSD for Operational workflow"
p " python nuage-vsdk.py"
echo -e "${CYAN} Creating pod which has nuage labels as shown"
pe " cat kubernetes/docs/user-guide/new-nginx-deployment.yaml"
pe " kubectl create -f kubernetes/docs/user-guide/new-nginx-deployment.yaml"
get_pods_2
pe " kubectl get deployments"
pe " kubectl create -f kubernetes/docs/user-guide/new-nginx-service.yaml"
echo -e "${CYAN} Creating another nginx pod in a different Nuage Zone"
pe " cat kubernetes/docs/user-guide/pod.yaml "
kubectl create namespace demo
pe " kubectl create -f kubernetes/docs/user-guide/pod.yaml --namespace=demo"
pe " kubectl create -f kubernetes/docs/user-guide/pod-no-pg.yaml --namespace=demo"
get_pods_4
pe " kubectl get services "
echo -e "${CYAN} Let's see if the pod placed in Policy group demo can reach the service IP in Zone tfd"
pe " kubectl exec --namespace=demo $(kubectl get pod --namespace=demo $(kubectl get pods --namespace=demo | grep -v NAME | awk 'NR==1{ print $1}') -o 'jsonpath={.metadata.name}') -- curl $(kubectl get services | grep -i $(kubectl get deployments | grep -v NAME | awk 'NR==1{ print $1}') | awk 'NR==1{print $2}') "
p " ##################################################"
p " Pod 2 (not in Policy Group) --> Service IP"
p " ##################################################"
pe " kubectl exec --namespace=demo $(kubectl get pod --namespace=demo $(kubectl get pods --namespace=demo | grep -v NAME | awk 'NR==2{ print $1}') -o 'jsonpath={.metadata.name}') -- curl --connect-timeout 5 $(kubectl get services | grep -i $(kubectl get deployments | grep -v NAME | awk 'NR==1{ print $1}') | awk 'NR==1{print $2}') "
echo -e "${CYAN} Cleaning up pods and deployments"
pe " kubectl delete service $(kubectl get services | grep -v NAME | awk 'NR==2{ print $1}') "
pe " kubectl delete deployments $(kubectl get deployments | grep -v NAME | awk 'NR==1{ print $1}') "
pe " kubectl delete pods nginx nginx-2 --namespace=demo"
pe " kubectl delete namespace demo "
| true | e663653cba1bff83e901856d41e1f929fc3d1080 | Shell | grisov/pi-radio | /start-radio | UTF-8 | 301 | 2.890625 | 3 | [] | no_license |
#!/bin/bash
#startup script, no params needed
ROOTDIR=`dirname $0`
cd $ROOTDIR
mkdir -p logs
pid=${ROOTDIR}/pi-radio-mpd.pid
kill `cat $pid` 1>/dev/null 2>/dev/null
kill -9 `cat $pid` 1>/dev/null 2>/dev/null
$ROOTDIR/pi-radio-mpd.py 1>logs/pi-radio.out 2>logs/pi-radio.err &
echo $! > $pid
| true | 0594dc08419593c489ff4ec6b10f5ed084b694d0 | Shell | nobodyme/linux-automated-setup | /setup.sh | UTF-8 | 4,971 | 2.703125 | 3 | [] | no_license |
#!/bin/bash
install_googlechrome() {
wget -N https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb -P ~/
dpkg -i --force-depends ~/google-chrome-stable_current_amd64.deb
apt-get -f install -y
rm ~/google-chrome-stable_current_amd64.deb
}
install_git() {
apt install -y git
git config --global user.name "nobodyme"
git config --global user.email "[email protected]"
}
install_fontrendering() {
# for better font rendering
add-apt-repository ppa:no1wantdthisname/ppa
apt-get update
apt-get install -y fontconfig-infinality
# Infinality config here
bash /etc/fonts/infinality/infctl.sh setstyle linux
chmod a+w /etc/profile.d/infinality-settings.sh
# search for USE_STYLE in the below file to set it
xdg-open /etc/profile.d/infinality-settings.sh
}
install_java() {
apt install -y openjdk-9-jre-headless openjdk-9-jdk-headless
}
install_sublime() {
wget -qO - https://download.sublimetext.com/sublimehq-pub.gpg | apt-key add -
echo "deb https://download.sublimetext.com/ apt/stable/" | tee /etc/apt/sources.list.d/sublime-text.list
apt-get update
apt-get install sublime-text
}
install_python() {
# Might wanna look at this as well for future tasks https://askubuntu.com/questions/244641/how-to-set-up-and-use-a-virtual-python-environment-in-ubuntu
apt install -y virtualenv
apt install -y python3-pip
apt install -y python-pip
# useful for vim plugins as well
apt-get install -y build-essential cmake
apt-get install -y python3-dev
apt-get install -y python3-setuptools
# mpg123 dependencies -- used in a pet project
apt-get install -y libasound-dev portaudio19-dev libportaudio2 libportaudiocpp0
}
install_vim() {
# vim
apt install -y vim
# downloads vundle plugin manager for vim
git clone https://github.com/VundleVim/Vundle.vim.git ~/.vim/bundle/Vundle.vim > /dev/null 2>&1
# fetch uploaded .vimrc
cp .vimrc ~/.vimrc
# install all plugins
vim +PluginInstall +qall
# vim plugins requirements
install_python
# install youcomplete me plugin syntax highlighter
cd ~/.vim/bundle/YouCompleteMe
./install.py --all
}
install_cplus() {
apt-get install -y g++
# for C++ to work
apt-get install -y libc6-dev
}
install_chromedriverselenium() {
# Install ChromeDriver.
wget -N http://chromedriver.storage.googleapis.com/2.33/chromedriver_linux64.zip -P ~/
unzip ~/chromedriver_linux64.zip -d ~/
rm ~/chromedriver_linux64.zip
mv -f ~/chromedriver /usr/local/bin/chromedriver
chown root:root /usr/local/bin/chromedriver
chmod 0755 /usr/local/bin/chromedriver
# Install Selenium.
wget -N http://selenium-release.storage.googleapis.com/3.4/selenium-server-standalone-3.4.0.jar -P ~/
mv -f ~/selenium-server-standalone-3.4.0.jar /usr/local/bin/selenium-server-standalone.jar
chown root:root /usr/local/bin/selenium-server-standalone.jar
chmod 0755 /usr/local/bin/selenium-server-standalone.jar
}
install_tmux() {
apt-get install -y tmux
cp .tmux.conf ~/.tmux.conf
source ~/.bashrc
}
install_mactheme() {
# source - http://www.linuxandubuntu.com/home/macbuntu-transform-ubuntu-1604-to-mac-os-x
add-apt-repository -y ppa:noobslab/macbuntu
apt-get update
apt-get install -y macbuntu-os-icons-lts-v7
apt-get install -y macbuntu-os-ithemes-lts-v7
# mac type launcher
# apt-get install slingscold
apt-get install -y plank
apt-get install -y macbuntu-os-plank-theme-lts-v7
# changing desktop name
cd && wget -O Mac.po http://drive.noobslab.com/data/Mac/change-name-on-panel/mac.po
cd /usr/share/locale/en/LC_MESSAGES;
msgfmt -o unity.mo ~/Mac.po;rm ~/Mac.po;cd
# changing side bar icon
wget -O launcher_bfb.png http://drive.noobslab.com/data/Mac/launcher-logo/apple/launcher_bfb.png
mv /usr/share/unity/icons/launcher_bfb.png /usr/share/unity/icons/launcher_bfb_ubuntu.png
mv launcher_bfb.png /usr/share/unity/icons/
apt-get install unity-tweak-tool compiz compizconfig-settings-manager
}
install_all() {
install_googlechrome
install_sublime
install_git
install_vim
install_java
install_chromedriverselenium
install_cplus
install_tmux
install_mactheme
install_fontrendering
}
install_minimal() {
install_git
install_sublime
install_python
install_cplus
install_tmux
}
for i in "$@"
do
case $i in
-minimal) install_minimal;;
-chrome) install_googlechrome;;
-git) install_git;;
-sublime) install_sublime;;
-c++) install_cplus;;
-vim) install_vim;;
-java) install_java;;
-python) install_python;;
-better-font) install_fontrendering;;
-mac-theme) install_mactheme;;
-chromedriverselenium) install_chromedriverselenium;;
-all) install_all;;
*) echo "wrong argument"; exit 1;;
esac
done
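# Usage sketch (added): run as root since apt is called directly; flags combine.
#   sudo bash setup.sh -git -python -tmux
#   sudo bash setup.sh -all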
| true | 6d197af76c497784ca41a2ce3c692be9a6b1f3d3 | Shell | sanguineSnail/ys17 | /set_ap.sh | UTF-8 | 943 | 3.21875 | 3 | ["MIT"] | permissive |
#!/bin/bash
clear
# ARG 1 sets Rpi to be WiFi access host
# ARG 0 return to original set
if [ "$1" = 1 ]
then
echo "WiFi HotSpot is being created"
sudo cp /etc/network/interfaces.hostapd /etc/network/interfaces
sudo cp /etc/sysctl.conf.hostapd /etc/sysctl.conf
sudo cp /etc/dhcpcd.conf.hostapd /etc/dhcpcd.conf
sudo cp /etc/default/hostapd.zz.hostapd /etc/default/hostapd
sudo service hostapd start
sudo service dnsmasq start
sudo ifdown wlan0
sudo ifup wlan0
else
echo "WiFi HotSpot is being replaced with orig WiFi"
sudo cp /etc/network/interfaces.orig /etc/network/interfaces
sudo cp /etc/sysctl.conf.orig /etc/sysctl.conf
sudo cp /etc/dhcpcd.conf.orig /etc/dhcpcd.conf
sudo cp /etc/default/hostapd.orig /etc/default/hostapd
sudo service hostapd stop
sudo service dnsmasq stop
sudo ifdown wlan0
sudo ifup wlan0
fi
| true | 32e8682f834435f38f28e6bc4c5d8ef7414f6d1d | Shell | fanhongtao/utils | /shell/test/endswith_test.sh | UTF-8 | 565 | 2.90625 | 3 | [] | no_license |
#!/bin/sh
# set -x
. ../string.sh
. ./test_utils.sh
# -------------------------------
test_endswith_true() {
printf "\"$1\" endswith \"$2\""
assertTrue $(endswith $1 $2)
}
test_endswith_false() {
printf "\"$1\" endswith \"$2\""
assertFalse $(endswith $1 $2)
}
test_endswith_false "abcdef" ""
test_endswith_true "abcdef" "f"
test_endswith_true "abcdef" "ef"
test_endswith_true "abcdef" "def"
test_endswith_true "abcdef" "cdef"
test_endswith_true "abcdef" "bcdef"
test_endswith_true "abcdef" "abcdef"
test_endswith_false "abcdef" "abcdefg"
| true | 654209fa4a3f04e07bf6190b89f5ffac5c3e7153 | Shell | InvokIT/livelife-containers | /transcoder/hls-transcode.sh | UTF-8 | 1,114 | 3.328125 | 3 | [] | no_license |
#!/bin/bash
# CLI arguments:
# 1 Address to the origin RTMP server (rtmp://[x.x.x.x]/ingest)
# 2 The stream name (rtmp://x.x.x.x/ingest/[stream name])
# 3 The destination file prefix
# Uses the following environment variables:
# HLS_OUT Where to store HLS output files
# LOG_PATH where to log to
set -e
RTMPSERVER=$1
STREAM=$2
DEST_PATH=/sink/hls
DEST_PREFIX=$3
AUDIO_BASEOPTIONS="-c:a libfdk_aac"
VIDEO_BASEOPTIONS="-c:v libx264 -preset ultrafast -profile:v baseline -g 40 -r 20"
ENCODER_OPTIONS="-hls_time 2 -hls_list_size 10"
FILENAME=${STREAM}
sed s/{{STREAMNAME}}/${FILENAME}/ /etc/hls-index-template.m3u8 > ${DEST_PATH}/${FILENAME}.m3u8
COMMAND="/usr/local/bin/avconv -loglevel warning -i ${RTMPSERVER}/${STREAM} \
$AUDIO_BASEOPTIONS -b:a 128k $VIDEO_BASEOPTIONS -b:v 512k $ENCODER_OPTIONS -f hls ${DEST_PATH}/${FILENAME}_high.m3u8 \
$AUDIO_BASEOPTIONS -b:a 64k $VIDEO_BASEOPTIONS -b:v 256k $ENCODER_OPTIONS -f hls ${DEST_PATH}/${FILENAME}_med.m3u8 \
$AUDIO_BASEOPTIONS -b:a 32k -ac 1 $VIDEO_BASEOPTIONS -b:v 128K $ENCODER_OPTIONS -f hls ${DEST_PATH}/${FILENAME}_low.m3u8"
echo $COMMAND
exec $COMMAND
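# Usage sketch (added; host, stream and prefix values are examples only):
#   HLS_OUT=/sink/hls ./hls-transcode.sh rtmp://10.0.0.5/ingest mystream live
# Note: DEST_PREFIX is captured from $3, but the playlist names are built from
# the stream name.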
| true | 2e587cf93378d1a083bb65a98cfe1fd521c6a11a | Shell | cyu0913/kaldi-recipe-extention | /aurora4/_s5multi/local/score_ubm_iv.TRAIN.sh | UTF-8 | 4,301 | 3.1875 | 3 | [] | no_license |
#!/bin/bash
log_start(){
echo "#####################################################################"
echo "Spawning *** $1 *** on" `date` `hostname`
echo ---------------------------------------------------------------------
}
log_end(){
echo ---------------------------------------------------------------------
echo "Done *** $1 *** on" `date` `hostname`
echo "#####################################################################"
}
rm -f ./path.sh; cp ./path.cluster.sh ./path.sh;
. cmd.sh
. path.sh
#set -e # exit on error
mkdir -p exp/trials.TR;
mkdir -p exp/score.ubm exp/score.tra; rm -rf exp/score.ubm/* exp/score.tra/*
make_trial(){
rm -f exp/trials.TR/trial.utt2utt; rm -f exp/trials.TR/trial.utt2utt.keys; rm -f exp/trials.TR/trial.utt2utt.keys;
# Check similarity of all i-vectors belong to two desired speakers. Code below compare speaker '01jo' vs '40po'
for i in `cat data/test_eval92.iv/ivector.scp | awk '{print $1}' | awk 'NR == 1 || NR % 20 == 0'`;do
for j in `cat data/test_eval92.iv/ivector.scp | awk '{print $1}' | awk 'NR == 1 || NR % 30 == 0'`;do
si=`grep $i data/test_eval92/utt2spk | awk '{print $2}'`
sj=`grep $j data/test_eval92/utt2spk | awk '{print $2}'`
exclude_i=`grep 'Did not' exp/tri2b_multi_ali_eval92/log/* | awk '{print $8}' | awk -F',' '{print $1}' | egrep $i `;
exclude_j=`grep 'Did not' exp/tri2b_multi_ali_eval92/log/* | awk '{print $8}' | awk -F',' '{print $1}' | grep $j`;
if [ -z $exclude_i ] && [ -z $exclude_j ]; then
echo "$i $j" >> exp/trials.TR/trial.utt2utt
if [ "$si" == "$sj" ]; then
echo "target" >> exp/trials.TR/trial.utt2utt.keys
else
echo "nontarget" >> exp/trials.TR/trial.utt2utt.keys
fi
fi
done
done
}
make_trial
trials=exp/trials.TR/trial.utt2utt
trials_key=exp/trials.TR/trial.utt2utt.keys
run_cds_score_UBM-IV(){
cat $trials | awk '{print $1, $2}' | \
ivector-compute-dot-products - \
scp:data/test_eval92.iv/ivector.scp \
scp:data/test_eval92.iv/ivector.scp \
exp/score.ubm/cds.output 2> exp/score.ubm/cds.log
awk '{print $3}' exp/score.ubm/cds.output > exp/score.ubm/cds.score
paste exp/score.ubm/cds.score $trials_key > exp/score.ubm/cds.score.key
echo "CDS EER : `compute-eer exp/score.ubm/cds.score.key 2> exp/score.ubm/cds_EER`"
}
run_cds_score_UBM-IV
run_lda_plda(){
mkdir -p exp/score.ubm/ivector_plda; rm -rf exp/score.ubm/ivector_plda/*
ivector-compute-lda --dim=50 --total-covariance-factor=0.1 \
'ark:ivector-normalize-length scp:data/train_si84_multi.iv/ivector.scp ark:- |' \
ark:data/train_si84_multi/utt2spk \
exp/score.ubm/ivector_plda/lda_transform.mat 2> exp/score.ubm/ivector_plda/lda.log
ivector-compute-plda ark:data/train_si84_multi/spk2utt \
'ark:ivector-transform exp/score.ubm/ivector_plda/lda_transform.mat scp:data/train_si84_multi.iv/ivector.scp ark:- | ivector-normalize-length ark:- ark:- |' \
exp/score.ubm/ivector_plda/plda 2>exp/score.ubm/ivector_plda/plda.log
ivector-plda-scoring \
"ivector-copy-plda --smoothing=0.0 exp/score.ubm/ivector_plda/plda - |" \
"ark:ivector-transform exp/score.ubm/ivector_plda/lda_transform.mat scp:data/test_eval92.iv/ivector.scp ark:- | ivector-subtract-global-mean ark:- ark:- |" \
"ark:ivector-transform exp/score.ubm/ivector_plda/lda_transform.mat scp:data/test_eval92.iv/ivector.scp ark:- | ivector-subtract-global-mean ark:- ark:- |" \
"cat '$trials' | awk '{print \$1, \$2}' |" exp/score.ubm/ivector_plda/plda.output 2> exp/score.ubm/ivector_plda/plda.log
awk '{print $3}' exp/score.ubm/ivector_plda/plda.output > exp/score.ubm/ivector_plda/plda.score
paste exp/score.ubm/ivector_plda/plda.score $trials_key > exp/score.ubm/ivector_plda/plda.score.key
echo "PLDA EER : `compute-eer exp/score.ubm/ivector_plda/plda.score.key 2> exp/score.ubm/ivector_plda/plda_EER`"
}
run_lda_plda
| true | 4553f0c55f706929c1cc2e8cbd972d88bef33aba | Shell | redhat-developer/service-binding-operator | /hack/upgrade-sbo-env.sh | UTF-8 | 1,028 | 2.53125 | 3 | ["Apache-2.0"] | permissive |
#!/bin/bash -x
OUTPUT_DIR=${OUTPUT_DIR:-out}
mkdir -p $OUTPUT_DIR
export TEST_ACCEPTANCE_START_SBO=scenarios
export TEST_OPERATOR_INDEX_IMAGE=${OPERATOR_INDEX_IMAGE_REF:-quay.io/redhat-developer/servicebinding-operator:index}
export TEST_OPERATOR_CHANNEL=candidate
operator_index_yaml=$OUTPUT_DIR/operator-index.yaml
opm render ${TEST_OPERATOR_INDEX_IMAGE} -o yaml > $operator_index_yaml
yq_exp='select(.schema=="olm.channel") | select(.name=="'${TEST_OPERATOR_CHANNEL}'").entries[] | select(.replaces == null).name'
export TEST_OPERATOR_PRE_LATEST_CSV=$(yq eval "$yq_exp" "$operator_index_yaml")
yq_exp='select(.schema=="olm.channel") | select(.name=="'${TEST_OPERATOR_CHANNEL}'").entries[] | select(.replaces == "'${TEST_OPERATOR_PRE_LATEST_CSV}'").name'
export TEST_OPERATOR_LATEST_CSV=$(yq eval "$yq_exp" "$operator_index_yaml")
yq_exp='select(.schema=="olm.channel") | select(.name=="'${TEST_OPERATOR_CHANNEL}'").package'
export TEST_OPERATOR_PACKAGE=$(yq eval "$yq_exp" "$operator_index_yaml")
env | grep TEST_OPERATOR
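# Worked example (added, illustrative channel data): given rendered entries
#   - name: service-binding-operator.v1.3.0            (no "replaces" key)
#   - name: service-binding-operator.v1.3.1
#     replaces: service-binding-operator.v1.3.0
# the first yq query selects v1.3.0 (PRE_LATEST_CSV) and the second selects
# the entry replacing it, v1.3.1 (LATEST_CSV).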
| true | 60883c85661fed783e77b1b37b4d4d79bf2945be | Shell | dungltr/hanoi-phap | /start.sh | UTF-8 | 181 | 2.734375 | 3 | [] | no_license |
#!/bin/bash
if [ $# -gt 0 ];then
exec "$@"
else
tmux new-session -d -x "23" -y "80" -s "skuska" "echo start;sleep 60;echo stop" && \
tmux set -g status off && tmux attach
fi
| true | a5f45d0f8b354189b8a1c55e711296c1156da310 | Shell | kelleyrw/pac | /analysis/ewkino2012/scripts/merge_signal.sh | UTF-8 | 1,599 | 2.96875 | 3 | [] | no_license |
#!/bin/bash
verbose=0
tag=v10
ana_type=ss
path=/nfs-7/userdata/${USER}/babies/ewkino2012/${ana_type}/$tag
# make the output dirs
mkdir -p logs
# do the merging
function post_process
{
mkdir -p $output_path
mkdir -p logs/${tag}
local at=$1
local sample=$2
local input=$3
local br=${4:-1.}
cmd="ewkino2012_postprocess_signal_baby --sample $sample --input \"$input\" --output \"${output_path}/${sample}.root\" --br $br"
echo $cmd > logs/${tag}/${sample}_${tag}_baby_merge.log 2>&1
eval $cmd >> logs/${tag}/${sample}_${tag}_baby_merge.log 2>&1 &
}
input_path=/home/users/fgolf/pac/analysis/ewkino2012/crab/test_crab/
#input_path=/nfs-7/userdata/fgolf/babies/ewkino2012/ss/v9-20-10/signal/unmerged/test_crab
#input_path=/hadoop/cms/store/user/fgolf/babies/ewkino2012/ss/v10/mc/WinoNLSP_WinoNLSP/
output_path=$path/signal
input_files="${input_path}/SMS-TChiWH_WlnuHWW_2J_mChargino-130to225_mLSP-1to50_TuneZ2star_8TeV-madgraph-tauola_Summer12-START53_V7C_FSIM-v1/res/*.root"
input_files="${input_files},${input_path}/SMS-TChiWH_WlnuHWW_2J_mChargino-200to500_mLSP-170to370_TuneZ2star_8TeV-madgraph-tauola_Summer12-START53_V7C_FSIM-v1/res/*.root"
input_files="${input_files},${input_path}/SMS-TChiWH_WlnuHWW_2J_mChargino-250to500_mLSP-1to50_TuneZ2star_8TeV-madgraph-tauola_Summer12-START53_V7C_FSIM-v1/res/*.root"
input_files="${input_files},${input_path}/SMS-TChiWH_WlnuHWW_2J_mChargino-200to500_mLSP-70to150_TuneZ2star_8TeV-madgraph-tauola_Summer12-START53_V7C_FSIM-v1/res/*.root"
#input_files="${input_path}/*.root"
post_process ss tchiwh "${input_files}" 0.07577
| true | ea9906e17b40e3fbfe5cdf169114951d9749f457 | Shell | nfitch/minecrab | /bin/minecrab-annihilate | UTF-8 | 853 | 3.90625 | 4 | ["MIT", "LicenseRef-scancode-unknown-license-reference"] | permissive |
#!/usr/bin/env bash
# -*- mode: shell-script; fill-column: 80; -*-
#
# Copyright (c) 2013 Joyent Inc., All rights reserved.
#
source $(dirname $0)/common.sh
usage() { echo "Usage: $0 <server-name>" 1>&2; exit 1; }
if [ "$#" -ne 1 ]; then
usage
fi
SERVER_NAME=$1
echo "Checking to see if server is still running..."
find_server $SERVER_NAME
if [[ $? -ne 0 ]]; then
fatal "Failed to check if the server is running. Try again later :("
fi
if [ ! -z "$IP" ]; then
fatal "Seems $SERVER_NAME is still running. Please shut it down first."
fi
echo
echo "WARNING: It *really* will become lost forever..."
read -p "Are you sure you want to annihilate $SERVER_NAME? " -n 1 -r
echo
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
exit 1
fi
echo
echo -n "Annihilating world (please be patient)... "
mrm -r $SERVERS_LOCATION/$SERVER_NAME
echo "Done!"
| true | 57b414b773b5707134d1c07740f3fa8c6dbd653e | Shell | abdessamadcdg/Docker-Cluster | /Dockerfiles/S1-deploy/tools/wait_for_SportsOne | UTF-8 | 1,273 | 3.359375 | 3 | [] | no_license |
#!/bin/sh
export USER=${USER:=SYSTEM}
export PASSWORD=Toor1234
export HOST=${HOST:=sports-hana.mo.lmy.corp}
export PORT=${PORT:=30827}
export CHECK="#0"
export MAXTIME=${MAXTIME:=3600}
export SLEEPTIME=${SLEEPTIME:=10}
export IS_CLOUD=$(echo $HOST | grep 'hana.ondemand.com')
if [ -z "$IS_CLOUD" ]; then
export IS_CLOUD=$(echo $HOST | grep 'itc.lmy.com')
fi
if [ "$PORT" = "443" ]; then
export INSTANCE=00
export PROTOCOL=https
else
export INSTANCE=${PORT: -2:2}
if [ "$PORT" = "$INSTANCE" ]; then
export INSTANCE=00 #fallback if port has just 2 chars
fi
export PROTOCOL=http
fi
echo "Waiting till SportsOne is launched..."
num_try=0
nb_try=`expr $MAXTIME / $SLEEPTIME`
while [ $num_try -lt $nb_try ]
do
export SportsOne=`curl -u ${USER:=SYSTEM}:${PASSWORD:=Toor1234} -s -L -w "%{http_code}" "${PROTOCOL:=http}://${HOST:=sports-hana.mo.lmy.corp}:${PORT:=30827}" | grep 'name: "ui.login"' | sed -e s/^[[:space:]]*// | cut -c 1-5`
if [ "$SportsOne" = "name:" ]; then
echo "SportsOne is launched"
break
else
echo "waited `expr $num_try \* $SLEEPTIME` seconds"
num_try=`expr $num_try + 1`
sleep $SLEEPTIME
fi
echo "SportsOne is launching..."
done
if [ $num_try -ge $nb_try ]; then
echo "SportsOne did not respond before the timeout, stopping!"
sh /tools/export_hana_logs
exit 1
fi
| true | 85b45d692f9ba970cd9ad0209bf0418fd6c4bf8d | Shell | gogenesis/6824-final-proj | /src/test/run_perf_tests.sh | UTF-8 | 13,522 | 2.9375 | 3 | [] | no_license |
#!/usr/bin/env bash
export JENKINS_FAIL=0
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
mkdir -p $SCRIPT_DIR/outfiles
if [ -z $JENKINS ]; then
rm $SCRIPT_DIR/outfiles/*
fi
touch $SCRIPT_DIR/outfiles/log
export DFS_DISABLE_ASSERTIONS="true"
export DFS_DEFAULT_DEBUG_LEVEL=0
iter() {
testname="$1"
index="$2"
outfile="$SCRIPT_DIR/outfiles/${testname}.${index}.out"
touch "$outfile"
#echo "running ${testname}.${index}"
if [ -z $JENKINS ]; then
go test -run "$testname" > "$outfile" 2>&1
exit_code=$?
else
$GOBIN test -run "$testname" > "$outfile" 2>&1
exit_code=$?
fi
if [ $exit_code == 0 ]; then
echo "${testname} success ${index}" | tee -a $SCRIPT_DIR/outfiles/log
rm "$outfile"
else
echo "${testname} FAIL ${index}!" | tee -a $SCRIPT_DIR/outfiles/log
export JENKINS_FAIL=1
fi
}
run_test() {
testname="$1"
quantity="$2"
let n=0
#echo "running $testname $quantity times"
while [ $n -lt ${quantity} ]; do
let n++
time iter $testname $n
done
}
cd $SCRIPT_DIR/../memoryFS
echo Begin Core MemoryFS Tests
run_test "TestMemoryFS_TestOpenCloseDeleteRoot" 0
run_test "TestMemoryFS_TestOpenBlockNoContention" 0
run_test "TestMemoryFS_TestWriteClosedFile" 0
run_test "TestMemoryFS_TestWrite1MBytes" 0
run_test "TestMemoryFS_TestMkdirTree" 0
run_test "TestMemoryFS_TestOpenOpened" 0
run_test "TestMemoryFS_TestOpenRWClose64" 0
run_test "TestMemoryFS_TestOpenOffsetEqualsZero" 0
run_test "TestMemoryFS_TestWriteSomeButNotAll" 0
run_test "TestMemoryFS_TestRndWriteReadVerfiyHoleExpansion" 0
run_test "TestMemoryFS_TestBasicOpenClose" 0
run_test "TestMemoryFS_TestDeleteNotFound" 0
run_test "TestMemoryFS_TestOpenTruncate" 0
run_test "TestMemoryFS_TestOpenCloseLeastFD" 0
run_test "TestMemoryFS_TestWrite1Byte" 0
run_test "TestMemoryFS_TestOpenRWClose4" 0
run_test "TestMemoryFS_TestWrite8Bytes" 0
run_test "TestMemoryFS_TestDeleteCannotDeleteRootDir" 0
run_test "TestMemoryFS_TestWrite10MBytes512Kx20" 0
run_test "TestMemoryFS_TestRndWriteRead8BytesIter64" 0
run_test "TestMemoryFS_TestRndWriteRead6400BytesIter64K" 0
run_test "TestMemoryFS_TestSeekErrorBadFD" 0
run_test "TestMemoryFS_TestSeekOffEOF" 0
run_test "TestMemoryFS_TestWriteReadBasic" 0
run_test "TestMemoryFS_TestOpenRWClose" 0
run_test "TestMemoryFS_TestWrite10MBytes128Kx80" 0
run_test "TestMemoryFS_TestOpenCloseDeleteAcrossDirectories" 0
run_test "TestMemoryFS_TestWrite10MBytes10Mx1" 0
run_test "TestMemoryFS_TestMkdirNotFound" 0
run_test "TestMemoryFS_TestOpenROClose4" 0
run_test "TestMemoryFS_TestRndWriteRead1ByteSimple" 0
run_test "TestMemoryFS_TestRndWriteRead8BytesSimple" 0
run_test "TestMemoryFS_TestOpenNotFound" 0
run_test "TestMemoryFS_TestOpenAppend" 0
run_test "TestMemoryFS_TestOpenBlockOnlyOne" 0
run_test "TestMemoryFS_TestWriteReadBasic4" 0
run_test "TestMemoryFS_TestWrite10MBytes64Kx160" 0
run_test "TestMemoryFS_TestRndWriteRead512KBIter1MB" 0
run_test "TestMemoryFS_TestRndWriteRead128KBIter10MB" 0
run_test "TestMemoryFS_TestCannotReadFromWriteOnly" 0
run_test "TestMemoryFS_TestOpenROClose64" 0
run_test "TestMemoryFS_TestOpenCloseDeleteMaxFD" 0
run_test "TestMemoryFS_TestOpenBlockMultipleWaiting" 0
run_test "TestMemoryFS_TestWrite10MBytes256Kx40" 0
run_test "TestMemoryFS_TestReadClosedFile" 0
run_test "TestMemoryFS_TestRndWriteRead8BytesIter8" 0
run_test "TestMemoryFS_TestCloseClosed" 0
run_test "TestMemoryFS_TestOpenCloseDeleteRootMax" 0
run_test "TestMemoryFS_TestSeekErrorBadOffsetOperation" 0
run_test "TestMemoryFS_TestOpenBlockOneWaiting" 0
run_test "TestMemoryFS_TestWrite1KBytes" 0
run_test "TestMemoryFS_TestMkdirAlreadyExists" 0
run_test "TestMemoryFS_TestMkdir" 0
run_test "TestMemoryFS_TestOpenROClose" 0
run_test "TestMemoryFS_TestCannotWriteToReadOnly" 0
run_test "TestMemoryFS_TestWrite10MBytes1Mx10" 0
run_test "TestMemoryFS_TestOpenAlreadyExists" 0
run_test "TestMemoryFS_TestRndWriteRead64BytesSimple" 0
cd $SCRIPT_DIR/../fsraft
echo Begin Raft Difficulty 1 Tests - Reliable Network - Clerk_OneClerkThreeServersNoErrors Tests
run_test "TestClerk_OneClerkThreeServersNoErrors_TestOpenCloseLeastFD" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestWrite1Byte" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestOpenRWClose4" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestWrite8Bytes" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestDeleteCannotDeleteRootDir" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestSeekErrorBadFD" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestSeekOffEOF" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestWriteReadBasic" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestWrite10MBytes512Kx20" 1
run_test "TestClerk_OneClerkThreeServersNoErrors_TestRndWriteRead8BytesIter64" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestRndWriteRead6400BytesIter64K" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestOpenRWClose" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestWrite10MBytes128Kx80" 1
run_test "TestClerk_OneClerkThreeServersNoErrors_TestOpenCloseDeleteAcrossDirectories" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestWrite10MBytes10Mx1" 1
run_test "TestClerk_OneClerkThreeServersNoErrors_TestMkdirNotFound" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestOpenROClose4" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestRndWriteRead1ByteSimple" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestRndWriteRead8BytesSimple" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestOpenNotFound" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestOpenAppend" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestOpenBlockOnlyOne" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestWriteReadBasic4" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestWrite10MBytes64Kx160" 1
run_test "TestClerk_OneClerkThreeServersNoErrors_TestRndWriteRead512KBIter1MB" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestRndWriteRead128KBIter10MB" 1
run_test "TestClerk_OneClerkThreeServersNoErrors_TestOpenROClose64" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestOpenCloseDeleteMaxFD" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestOpenBlockMultipleWaiting" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestCannotReadFromWriteOnly" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestCloseClosed" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestOpenCloseDeleteRootMax" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestSeekErrorBadOffsetOperation" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestWrite10MBytes256Kx40" 1
run_test "TestClerk_OneClerkThreeServersNoErrors_TestReadClosedFile" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestRndWriteRead8BytesIter8" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestOpenBlockOneWaiting" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestWrite1KBytes" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestMkdirAlreadyExists" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestOpenROClose" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestCannotWriteToReadOnly" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestWrite10MBytes1Mx10" 1
run_test "TestClerk_OneClerkThreeServersNoErrors_TestMkdir" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestOpenAlreadyExists" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestRndWriteRead64BytesSimple" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestWrite1MBytes" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestMkdirTree" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestOpenOpened" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestOpenRWClose64" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestOpenOffsetEqualsZero" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestOpenCloseDeleteRoot" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestOpenBlockNoContention" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestWriteClosedFile" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestBasicOpenClose" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestDeleteNotFound" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestOpenTruncate" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestWriteSomeButNotAll" 0
run_test "TestClerk_OneClerkThreeServersNoErrors_TestRndWriteReadVerfiyHoleExpansion" 0
echo Begin Raft Difficulty 2 Tests - Lossy Network - Clerk_OneClerkFiveServersUnreliableNet Tests
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestWrite10MBytes10Mx1" 1
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestMkdirNotFound" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestOpenROClose4" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestRndWriteRead1ByteSimple" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestRndWriteRead8BytesSimple" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestOpenNotFound" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestOpenAppend" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestOpenBlockOnlyOne" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestWriteReadBasic4" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestWrite10MBytes64Kx160" 1
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestRndWriteRead512KBIter1MB" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestRndWriteRead128KBIter10MB" 1
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestOpenROClose64" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestOpenCloseDeleteMaxFD" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestOpenBlockMultipleWaiting" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestCannotReadFromWriteOnly" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestReadClosedFile" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestRndWriteRead8BytesIter8" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestCloseClosed" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestOpenCloseDeleteRootMax" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestSeekErrorBadOffsetOperation" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestWrite10MBytes256Kx40" 1
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestOpenBlockOneWaiting" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestWrite1KBytes" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestMkdirAlreadyExists" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestOpenROClose" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestCannotWriteToReadOnly" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestWrite10MBytes1Mx10" 1
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestMkdir" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestOpenAlreadyExists" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestRndWriteRead64BytesSimple" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestOpenBlockNoContention" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestWriteClosedFile" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestWrite1MBytes" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestMkdirTree" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestOpenOpened" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestOpenRWClose64" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestOpenOffsetEqualsZero" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestOpenCloseDeleteRoot" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestRndWriteReadVerfiyHoleExpansion" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestBasicOpenClose" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestDeleteNotFound" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestOpenTruncate" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestWriteSomeButNotAll" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestOpenCloseLeastFD" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestWrite1Byte" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestOpenRWClose4" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestWrite8Bytes" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestDeleteCannotDeleteRootDir" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestRndWriteRead8BytesIter64" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestRndWriteRead6400BytesIter64K" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestSeekErrorBadFD" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestSeekOffEOF" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestWriteReadBasic" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestWrite10MBytes512Kx20" 1
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestOpenRWClose" 0
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestWrite10MBytes128Kx80" 1
run_test "TestClerk_OneClerkFiveServersUnreliableNet_TestOpenCloseDeleteAcrossDirectories" 0
echo "Begin Singleton Tests"
run_test "TestOneClerkFiveServersPartition" 0
run_test "TestKVBasic" 0
if [ -n "$JENKINS" ]; then
exit $JENKINS_FAIL
fi
| true |
77f70c6aa435df2f7025e9c5b00ff3a6d29905d4
|
Shell
|
travi/dotfiles
|
/osx/environment.sh
|
UTF-8
| 15,723 | 3.171875 | 3 |
[] |
no_license
|
#!/bin/bash
# OSX-only stuff. Abort if not OSX.
[[ "$OSTYPE" == darwin* ]] || return 1
heading 'Configuring OS X'
# Ask for the administrator password upfront
sudo -v
# Keep-alive: update existing `sudo` time stamp until `.osx` has finished
while true; do sudo -n true; sleep 60; kill -0 "$$" || exit; done 2>/dev/null &
###############################################################################
# Bash #
###############################################################################
# Change the default shell to bash v4
if [ -e "$(brew --prefix bash)/bin/bash" ] && ! grep "$(brew --prefix bash)/bin/bash" /etc/shells; then
echo "$(brew --prefix bash)/bin/bash" | sudo tee -a /etc/shells;
chsh -s "$(brew --prefix bash)/bin/bash";
info 'Default shell changed to bash v4'
fi;
###############################################################################
# General UI/UX #
###############################################################################
# Enable subpixel font rendering on non-Apple LCDs
defaults write NSGlobalDomain AppleFontSmoothing -int 2
# Set appearance to dark
defaults write NSGlobalDomain AppleInterfaceStyle -string "Dark"
# Set UI Chrome Appearance to Graphite
/usr/bin/defaults write -g 'AppleAquaColorVariant' -int 6
# Set highlight color to graphite
defaults write NSGlobalDomain AppleHighlightColor -string "0.780400 0.815700 0.858800"
# Set highlight color to green
#defaults write NSGlobalDomain AppleHighlightColor -string "0.764700 0.976500 0.568600"
# Expand save panel by default
defaults write NSGlobalDomain NSNavPanelExpandedStateForSaveMode -bool true
defaults write NSGlobalDomain NSNavPanelExpandedStateForSaveMode2 -bool true
# Expand print panel by default
defaults write NSGlobalDomain PMPrintingExpandedStateForPrint -bool true
defaults write NSGlobalDomain PMPrintingExpandedStateForPrint2 -bool true
# Automatically quit printer app once the print jobs complete
defaults write com.apple.print.PrintingPrefs "Quit When Finished" -bool true
# Menu bar: hide the Time Machine, Volume, User, and Bluetooth icons
for domain in ~/Library/Preferences/ByHost/com.apple.systemuiserver.*; do
defaults write "${domain}" dontAutoLoad -array \
"/System/Library/CoreServices/Menu Extras/TimeMachine.menu" \
"/System/Library/CoreServices/Menu Extras/Volume.menu" \
"/System/Library/CoreServices/Menu Extras/User.menu"
done
defaults write com.apple.systemuiserver menuExtras -array \
"/System/Library/CoreServices/Menu Extras/Bluetooth.menu" \
"/System/Library/CoreServices/Menu Extras/AirPort.menu" \
"/System/Library/CoreServices/Menu Extras/Battery.menu" \
"/System/Library/CoreServices/Menu Extras/Clock.menu"
# Require password immediately after sleep or screen saver begins
defaults write com.apple.screensaver askForPassword -int 1
defaults write com.apple.screensaver askForPasswordDelay -int 0
# Save screenshots to the desktop
defaults write com.apple.screencapture location -string "$HOME/Desktop"
# Set language and text formats
defaults write NSGlobalDomain AppleLanguages -array "en" "us"
defaults write NSGlobalDomain AppleLocale -string "en_US@currency=USD"
defaults write NSGlobalDomain AppleMeasurementUnits -string "Inches"
defaults write NSGlobalDomain AppleMetricUnits -bool false
# Set the timezone; see `systemsetup -listtimezones` for other values
systemsetup -settimezone "America/Chicago" > /dev/null
# Disable auto-correct
defaults write NSGlobalDomain NSAutomaticSpellingCorrectionEnabled -bool false
###############################################################################
# Users #
###############################################################################
# Hide puppet from the login screen
sudo defaults write /Library/Preferences/com.apple.loginwindow HiddenUsersList -array-add puppet
###############################################################################
# Trackpad, mouse, keyboard, Bluetooth accessories, and input #
###############################################################################
# Enable tap to click for this user and for the login screen
defaults write com.apple.driver.AppleBluetoothMultitouch.trackpad Clicking -bool true
defaults -currentHost write NSGlobalDomain com.apple.mouse.tapBehavior -int 1
defaults write NSGlobalDomain com.apple.mouse.tapBehavior -int 1
defaults write com.apple.AppleMultitouchTrackpad Clicking -bool true
# Map `click or tap with two fingers` to the secondary click
defaults write com.apple.driver.AppleBluetoothMultitouch.trackpad TrackpadRightClick -bool true
defaults -currentHost write NSGlobalDomain com.apple.trackpad.enableSecondaryClick -bool true
defaults write com.apple.driver.AppleBluetoothMultitouch.trackpad TrackpadCornerSecondaryClick -int 0
defaults -currentHost write NSGlobalDomain com.apple.trackpad.trackpadCornerClickBehavior -int 0
# Enable 3-finger drag. (Moving with 3 fingers in any window "chrome" moves the window.)
defaults write com.apple.driver.AppleBluetoothMultitouch.trackpad TrackpadThreeFingerDrag -bool true
defaults write com.apple.AppleMultitouchTrackpad TrackpadThreeFingerDrag -bool true
### Move content in the direction of finger movement when scrolling or navigating (natural)
defaults write -g com.apple.swipescrolldirection -bool true
# Always show scrollbars
defaults write NSGlobalDomain AppleShowScrollBars -string "Always"
# Possible values: `WhenScrolling`, `Automatic` and `Always`
# Enable dragging a window by clicking anywhere within it while holding ctrl-opt-cmd: https://twitter.com/nibroc/status/963088893758259200
defaults write -g NSWindowShouldDragOnGesture YES
# Use all F1, F2, etc. keys as standard function keys
defaults write -g com.apple.keyboard.fnState -bool true
### Launchpad: no
/usr/bin/defaults write com.apple.dock 'showLaunchpadGestureEnabled' -bool false
### Show Desktop: yes
/usr/bin/defaults write com.apple.dock 'showDesktopGestureEnabled' -bool true
# Enable full keyboard access for all controls
# (e.g. enable Tab in modal dialogs)
defaults write NSGlobalDomain AppleKeyboardUIMode -int 3
# Disable smart quotes
defaults write NSGlobalDomain NSAutomaticQuoteSubstitutionEnabled -bool false
# Disable smart dashes
defaults write NSGlobalDomain NSAutomaticDashSubstitutionEnabled -bool false
### Modifier Keys… > Apple Internal Keyboard / Trackpad > Caps Lock ( ⇪) Key: No Action
defaults -currentHost write -g 'com.apple.keyboard.modifiermapping.1452-566-0' -array '<dict><key>HIDKeyboardModifierMappingDst</key><integer>-1</integer><key>HIDKeyboardModifierMappingSrc</key><integer>0</integer></dict>'
### Modifier Keys… > Apple Keyboard [External] > Caps Lock ( ⇪) Key: No Action
defaults -currentHost write -g 'com.apple.keyboard.modifiermapping.1452-544-0' -array '<dict><key>HIDKeyboardModifierMappingDst</key><integer>-1</integer><key>HIDKeyboardModifierMappingSrc</key><integer>0</integer></dict>'
# Disable Mission Control Keyboard Shortcut since it conflicts with WebStorm (ctrl + up)
defaults write com.apple.symbolichotkeys.plist AppleSymbolicHotKeys -dict-add 32 "<dict><key>enabled</key><false/></dict>"
defaults write com.apple.symbolichotkeys.plist AppleSymbolicHotKeys -dict-add 34 "<dict><key>enabled</key><false/></dict>"
# Disable Application Windows Keyboard Shortcut since it conflicts with WebStorm (ctrl + down)
defaults write com.apple.symbolichotkeys.plist AppleSymbolicHotKeys -dict-add 33 "<dict><key>enabled</key><false/></dict>"
defaults write com.apple.symbolichotkeys.plist AppleSymbolicHotKeys -dict-add 35 "<dict><key>enabled</key><false/></dict>"
###############################################################################
# Finder #
###############################################################################
# Enable snap-to-grid for icons on the desktop and in other icon views
/usr/libexec/PlistBuddy -c "Set :DesktopViewSettings:IconViewSettings:arrangeBy grid" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Set :FK_StandardViewSettings:IconViewSettings:arrangeBy grid" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Set :StandardViewSettings:IconViewSettings:arrangeBy grid" ~/Library/Preferences/com.apple.finder.plist
# Select text in a Quick Look preview window
defaults write com.apple.finder QLEnableTextSelection -bool TRUE
# Avoid creating .DS_Store files on network volumes
defaults write com.apple.desktopservices DSDontWriteNetworkStores -bool true
###############################################################################
# Dock, Dashboard, and hot corners #
###############################################################################
# Wipe all (default) app icons from the Dock
# This is only really useful when setting up a new Mac, or if you don’t use
# the Dock to launch apps.
defaults write com.apple.dock persistent-apps -array ""
# Don’t automatically rearrange Spaces based on most recent use
defaults write com.apple.dock mru-spaces -bool false
# Automatically hide and show the Dock
defaults write com.apple.dock autohide -bool true
# Make Dock icons of hidden applications translucent
defaults write com.apple.dock showhidden -bool true
# disable Dashboard
defaults write com.apple.dashboard mcx-disabled -bool true
### Don't Show Dashboard as a space
/usr/bin/defaults write com.apple.dock 'dashboard-in-overlay' -bool false
###############################################################################
# Safari & WebKit #
###############################################################################
# Allow hitting the Backspace key to go to the previous page in history
defaults write com.apple.Safari com.apple.Safari.ContentPageGroupIdentifier.WebKit2BackspaceKeyNavigationEnabled -bool true
# Enable Safari’s debug menu
defaults write com.apple.Safari IncludeInternalDebugMenu -bool true
# Make Safari’s search banners default to Contains instead of Starts With
defaults write com.apple.Safari FindOnPageMatchesWordStartsOnly -bool false
# Remove useless icons from Safari’s bookmarks bar
defaults write com.apple.Safari ProxiesInBookmarksBar "()"
# Enable the Develop menu and the Web Inspector in Safari
defaults write com.apple.Safari IncludeDevelopMenu -bool true
defaults write com.apple.Safari WebKitDeveloperExtrasEnabledPreferenceKey -bool true
defaults write com.apple.Safari com.apple.Safari.ContentPageGroupIdentifier.WebKit2DeveloperExtrasEnabled -bool true
# Add a context menu item for showing the Web Inspector in web views
defaults write NSGlobalDomain WebKitDeveloperExtras -bool true
###############################################################################
# Google Chrome & Google Chrome Canary #
###############################################################################
# Disable two-finger swipe gestures
defaults write com.google.Chrome.plist AppleEnableSwipeNavigateWithScrolls -bool FALSE
###############################################################################
# iTunes #
###############################################################################
# Stop Apple Photos from Auto Launching when a phone is plugged into USB
defaults -currentHost write com.apple.ImageCapture disableHotPlug -bool YES
###############################################################################
# Git #
###############################################################################
# Make the diff-highlight script available for diffs, etc
ln -sf "$(brew --prefix git)/share/git-core/contrib/diff-highlight/diff-highlight" ~/bin/diff-highlight
###############################################################################
# Terminal & iTerm 2 #
###############################################################################
# Only use UTF-8 in Terminal.app
defaults write com.apple.terminal StringEncodings -array 4
#TERM_PROFILE='Travi xterm-256color';
#CURRENT_PROFILE="$(defaults read com.apple.terminal 'Default Window Settings')";
#if [ "${CURRENT_PROFILE}" != "${TERM_PROFILE}" ]; then
#open "$HOME/.files/osx/Travi.terminal";
#sleep 1; # Wait a bit to make sure the theme is loaded
#defaults write com.apple.terminal 'Default Window Settings' -string "${TERM_PROFILE}";
#defaults write com.apple.terminal 'Startup Window Settings' -string "${TERM_PROFILE}";
#killall "Terminal"
#fi
osascript <<EOD
tell application "Terminal"
local allOpenedWindows
local initialOpenedWindows
local windowID
set themeName to "Travi"
(* Store the IDs of all the open terminal windows. *)
set initialOpenedWindows to id of every window
(* Open the custom theme so that it gets added to the list
of available terminal themes (note: this will open two
additional terminal windows). *)
do shell script "open '$HOME/.files/osx/" & themeName & ".terminal'"
(* Wait a little bit to ensure that the custom theme is added. *)
delay 1
(* Set the custom theme as the default terminal theme. *)
set default settings to settings set themeName
(* Get the IDs of all the currently opened terminal windows. *)
set allOpenedWindows to id of every window
repeat with windowID in allOpenedWindows
(* Close the additional windows that were opened in order
to add the custom theme to the list of terminal themes. *)
if initialOpenedWindows does not contain windowID then
close (every window whose id is windowID)
(* Change the theme for the initial opened terminal windows
to remove the need to close them in order for the custom
theme to be applied. *)
else
set current settings of tabs of (every window whose id is windowID) to settings set themeName
end if
end repeat
end tell
EOD
###############################################################################
# Transmission #
###############################################################################
# Use `~/Documents/Torrents` to store incomplete downloads
defaults write org.m0k.transmission UseIncompleteDownloadFolder -bool true
defaults write org.m0k.transmission IncompleteDownloadFolder -string "${HOME}/Documents/Torrents"
# Don’t prompt for confirmation before downloading
defaults write org.m0k.transmission DownloadAsk -bool false
# Trash original torrent files
defaults write org.m0k.transmission DeleteOriginalTorrent -bool true
# Hide the donate message
defaults write org.m0k.transmission WarningDonate -bool false
# Hide the legal disclaimer
defaults write org.m0k.transmission WarningLegal -bool false
###############################################################################
# Kill affected applications #
###############################################################################
for app in "Dock" "Finder" "Safari" "SystemUIServer" "cfprefsd"; do
killall "${app}" > /dev/null 2>&1
done
echo "Done. Note that some of these changes require a logout/restart to take effect."
| true |
3446aac1cda24c5c1c2c176710b65eaf2d120e8a
|
Shell
|
nileshgr/utilities
|
/admin/ipfw.rules.sh
|
UTF-8
| 3,589 | 2.90625 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
tcp_service_ports="22 80 443"
udp_service_ports=""
out_tcp_ports="22 25 53 80 443 465 587 993 9418"
out_udp_ports="53 123"
allow_ftp=false
# define nat IP address
nat_ip=
ipfw -f flush
ipfw -f table all flush
table=0
if [ -r /etc/ipfw.own_networks ]; then
for ip in $(cat /etc/ipfw.own_networks); do
ipfw table $table add $ip
done
ipfw add 200 allow all from "table($table)" to me in
ipfw add 201 allow all from me to "table($table)" out
table=$((table+1))
fi
ipfw nat 123 config ip $nat_ip same_ports unreg_only
index=1
ipfw add $index allow ip from 127.0.0.0/8 to 127.0.0.0/8 via lo0
ipfw add $index allow ip from ::1 to ::1 via lo0
ipfw add $index allow log ip from 10.0.0.0/8 to 127.0.0.1
ipfw add $index allow log ip from 127.0.0.1 to 10.0.0.0/8
ipfw add $index allow ip from fc00::/7 to ::1
ipfw add $index allow ip from ::1 to fc00::/7
index=$((index+1))
ipfw add $index allow ip from 10.0.0.0/24 to 10.0.0.0/24
ipfw add $index allow ip from fc00::1:0/112 to fc00::1:0/112 # 1:0 because 0:0 is an invalid ipv6 addr
ipfw add $((index+1)) allow ip from 10.0.1.0/24 to 10.0.1.0/24
ipfw add $((index+1)) allow ip from fc00::2:0/112 to fc00::2:0/112
ipfw add $((index+2)) allow ip from 10.0.2.0/24 to 10.0.2.0/24
ipfw add $((index+2)) allow ip from fc00::3:0/112 to fc00::3:0/112
index=$((index+5))
ipfw add $((index+1)) allow ipv6-icmp from :: to ff02::/16
ipfw add $index allow ipv6-icmp from fe80::/10 to fe80::/10
ipfw add $((index+1)) allow ipv6-icmp from fe80::/10 to ff02::/16
ipfw add $((index+1)) allow ipv6-icmp from any to any ip6 icmp6types 1
ipfw add $((index+1)) allow ipv6-icmp from any to any ip6 icmp6types 2,135,136
index=100
ipfw add $index nat 123 ip from any to $nat_ip in
ipfw add $((index+1)) check-state
ipfw add $((index+1)) allow icmp from me to any keep-state
ipfw add $index allow ipv6-icmp from me to any keep-state
ipfw add $((index+1)) allow icmp from any to any icmptypes 8
ipfw add $index allow ipv6-icmp from any to any ip6 icmp6types 128,129
ipfw add $((index+1)) allow icmp from any to any icmptypes 3,4,11
ipfw add $index allow ipv6-icmp from any to any ip6 icmp6types 3
index=199
# add port redirection (ipfw add $index fwd) between the last rule above and before rule 200
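# e.g. (hypothetical backend address) to forward inbound HTTP to a jail:
#   ipfw add 199 fwd 10.0.0.10,80 tcp from any to me 80 in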
index=200
for port in $tcp_service_ports; do
ipfw add $index allow tcp from any to me $port in
ipfw add $index allow tcp from me $port to any out
index=$((index+1))
done
index=300
for port in $udp_service_ports; do
ipfw add $index allow udp from any to me $port in
ipfw add $index allow udp from me $port to any out
index=$((index+1))
done
index=400
for port in $out_tcp_ports; do
ipfw add $index skipto 700 tcp from 10.0.0.0/8 to any $port out setup keep-state
ipfw add $index allow tcp from me to any $port out setup keep-state
index=$((index+1))
done
index=500
for port in $out_udp_ports; do
ipfw add $index skipto 700 udp from 10.0.0.0/8 to any $port out keep-state
ipfw add $index allow udp from me to any $port out keep-state
index=$((index+1))
done
index=600
# add some other stuff here, like permitting special outgoing ports from specific IPs
# if you have multiple IPs
index=700
ipfw add $index nat 123 ip4 from 10.0.0.0/8 to not 10.0.0.0/8 out
if $allow_ftp; then
index=65000
# FTP control port
ipfw add $index allow tcp from me to any 21 out setup keep-state
# Active FTP
ipfw add $((index+1)) allow tcp from any 20 to me 1024-65535 in setup keep-state
# Passive FTP
ipfw add $((index+2)) allow tcp from me 1024-65535 to any 1024-65535 out setup keep-state
fi
ipfw add 65534 deny all from any to any
| true |
325b2e25a901762841ddf5827960095a485edaca
|
Shell
|
Jiansen/Rejuvenation
|
/experiment/convert_date.sh
|
UTF-8
| 214 | 2.78125 | 3 |
[] |
no_license
|
# original unit 10 minutes
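# 6 samples/hour * 24 h = 144 samples/day, * 30 = 4320 samples/month;
# day_of_year below spans one 360-day year of samples (the +1 presumably
# accounts for the leading header line, which the awk /#/ filters skip).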
declare -i day_of_year
day_of_year=1+6*24*12*30
awk '!/#/ {$1= $1/(6 * 24 * 30);} 1' $1.dat > $1.month.dat
head -n $day_of_year $1.dat | awk '!/#/ {$1= $1/(6 * 24);} 1' > $1.day.dat
| true |
b9b2d861aca3bb0ee84348fe36e80bca4b22750a
|
Shell
|
ahurta92/cpp_starter_project
|
/BashScript/test.sh
|
UTF-8
| 1,215 | 4.0625 | 4 |
[] |
no_license
|
#!/bin/bash
# testing and control flow with if, [ and [[, and/or
NUM_REQUIRED_ARGS=2
if [[ $# -lt $NUM_REQUIRED_ARGS ]]; then
echo "Not enough arguments; usage: ${0} <name> <number>"
exit 1
fi
## helpers
# && and
# || or
## in your terminal
echo "hi." || echo "This won't happen."
$(ls nonexistantfile) || echo "This happens if the first thing fails"
echo $(pwd) && echo "This also happens!"
## Strings
str1="a"
str2="b"
if [[ "$str1" = "$str2" ]]; then
echo "${str1} is equal to ${str2}!"
fi
if [[ "$str1" != "$str2" ]]; then
echo "${str1} is not equal to ${str2}!"
fi
# Not Null (-n) or Zero Length (-z)
notnully="This is something (not nothing)"
nully=""
if [ -n "$notnully" ]; then
echo "This is not nully..."
fi
if [ -z "$nully"]; then
echo "nully/zeroooo (length)"
fi
# Integers
int1=4
int2=7
if [ $int1 -eq $int2 ]; then
echo "equal"
fi
if [ $int1 -ne $int2 ]; then
echo "${int1} is NOT equal to ${int2}."
fi
if [ $int1 -gt $int2 ]; then
echo "greater than"
fi
if [ $int1 -le $int2 ]; then
echo "less than"
fi
# string comparison operators can be used with double parentheses
if (( $int1 == $int2 )); then
echo "${int1} is equal to ${int2}."
fi
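# (( )) also accepts the other C-style comparison operators directly, e.g.:
if (( int1 < int2 )); then
echo "${int1} is less than ${int2}."
fi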
| true |
daa4dfac6bf412ee0baf9788ce4435ed6d4fb419
|
Shell
|
miaoyuanxi/aswf-docker
|
/scripts/base/build_cppunit.sh
|
UTF-8
| 559 | 2.890625 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Copyright (c) Contributors to the aswf-docker Project. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
set -ex
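# The surrounding aswf-docker build image is expected to provide these
# variables; illustrative values only (not pinned by this script):
#   DOWNLOADS_DIR=/tmp/downloads  CPPUNIT_VERSION=1.15.1  ASWF_INSTALL_PREFIX=/usr/local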
if [ ! -f $DOWNLOADS_DIR/cppunit-${CPPUNIT_VERSION}.tar.gz ]; then
curl --location http://dev-www.libreoffice.org/src/cppunit-${CPPUNIT_VERSION}.tar.gz -o $DOWNLOADS_DIR/cppunit-${CPPUNIT_VERSION}.tar.gz
fi
tar xf $DOWNLOADS_DIR/cppunit-${CPPUNIT_VERSION}.tar.gz
cd cppunit-${CPPUNIT_VERSION}
./configure --prefix=${ASWF_INSTALL_PREFIX}
make -j$(nproc)
make install
cd ..
rm -rf cppunit-${CPPUNIT_VERSION}
| true |
14ddb5b110a601dbfa8017588cda7c60a153ac8b
|
Shell
|
delphix/dx-workshops
|
/scripts/tests/provision_vdb.sh
|
UTF-8
| 9,411 | 3.265625 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
echo "###Validate MASKED VDB###"
set -e
DEIP="virtualizationengine"
USER="delphix_admin"
PASS="Landshark-12"
function JOB_WAIT {
JOBID=${1}
until [[ "$(curl -s "http://${DEIP}/resources/json/delphix/job/${JOBID}" \
-b ~/cookies.txt -H "Content-Type: application/json" \
| jq -r .result.jobState)" != "RUNNING" ]]; do
echo "Waiting for ${JOBID}"
sleep 3
done
JOB_STATE=$(curl -s "http://${DEIP}/resources/json/delphix/job/${JOBID}" \
-b ~/cookies.txt -H "Content-Type: application/json" \
| jq -r .result.jobState)
echo "${JOBID} ${JOB_STATE}"
if [[ "${JOB_STATE}" != "COMPLETED" ]]; then
exit 1
fi
}
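# Usage: JOB_WAIT <job-ref>  -- polls every 3s until the job leaves RUNNING,
# then aborts the script unless the job finished COMPLETED.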
# 1) Create Delphix API Session
echo -e "\n\nCreating Session\n"
curl -sX POST -k --data @- http://${DEIP}/resources/json/delphix/session \
-c ~/cookies.txt -H "Content-Type: application/json" <<EOF
{
"type": "APISession",
"version": {
"type": "APIVersion",
"major": 1,
"minor": 9,
"micro": 0
}
}
EOF
# 2) Delphix Login
echo -e "\n\nLogging in\n"
curl -sX POST -k --data @- http://${DEIP}/resources/json/delphix/login \
-b ~/cookies.txt -c ~/cookies.txt -H "Content-Type: application/json" <<EOF
{
"type": "LoginRequest",
"username": "${USER}",
"password": "${PASS}"
}
EOF
# echo "Refreshing Environments"
# ENVJOBID1=$(curl -sX POST \
# "http://${DEIP}/resources/json/delphix/environment/UNIX_HOST_ENVIRONMENT-1/refresh" \
# -b ~/cookies.txt -H 'Content-Type: application/json'| jq -r .job)
# ENVJOBID2=$(curl -sX POST \
# "http://${DEIP}/resources/json/delphix/environment/UNIX_HOST_ENVIRONMENT-2/refresh" \
# -b ~/cookies.txt -H 'Content-Type: application/json'| jq -r .job)
# JOB_WAIT $ENVJOBID1
# JOB_WAIT $ENVJOBID2
ENVUSER=$(curl -s "http://${DEIP}/resources/json/delphix/environment" \
-b ~/cookies.txt -H 'Content-Type: application/json' | \
jq -r ".result[]| select(.name==\"proddb\").primaryUser")
echo $ENVUSER
ENVREF=$(curl -s "http://${DEIP}/resources/json/delphix/environment" \
-b ~/cookies.txt -H 'Content-Type: application/json' | \
jq -r ".result[]| select(.name==\"proddb\").reference")
echo $ENVREF
if [[ $(ssh centos@proddb sudo -i -u oracle whoami) ]]; then
echo -e "\nOracle it is\n"
REPOREF=$(curl -s "http://${DEIP}/resources/json/delphix/sourceconfig?environment=${ENVREF}" \
-b ~/cookies.txt -H 'Content-Type: application/json' | \
jq -r ".result[]| select(.name==\"PATMM\").repository")
echo ${REPOREF}
CDBCONFIG=$(curl -s "http://${DEIP}/resources/json/delphix/sourceconfig?environment=${ENVREF}" \
-b ~/cookies.txt -H 'Content-Type: application/json' | \
jq -r ".result[]| select(.name==\"PATMM\").cdbConfig")
echo ${CDBCONFIG}
VDBJOB=$(curl -sX POST \
"http://${DEIP}/resources/json/delphix/database/provision" \
-b ~/cookies.txt -H 'Content-Type: application/json' \
-d @- <<-EOF
{
"sourceConfig": {
"databaseName": "test",
"cdbConfig": "${CDBCONFIG}",
"nonSysUser": null,
"nonSysCredentials": null,
"linkingEnabled": true,
"environmentUser": "${ENVUSER}",
"repository": "${REPOREF}",
"type": "OraclePDBConfig"
},
"source": {
"operations": {
"configureClone": [
],
"preRefresh": [
],
"postRefresh": [
],
"preRollback": [
],
"postRollback": [
],
"preSnapshot": [
],
"postSnapshot": [
],
"preStart": [
],
"postStart": [
],
"preStop": [
],
"postStop": [
],
"type": "VirtualSourceOperations"
},
"customEnvVars": [
],
"allowAutoVDBRestartOnHostReboot": false,
"mountBase": "/mnt/provision",
"logCollectionEnabled": false,
"name": "test",
"type": "OracleVirtualPdbSource"
},
"container": {
"diagnoseNoLoggingFaults": true,
"preProvisioningEnabled": false,
"sourcingPolicy": {
"logsyncMode": "UNDEFINED",
"logsyncInterval": 5,
"logsyncEnabled": false,
"type": "OracleSourcingPolicy"
},
"performanceMode": "DISABLED",
"group": "GROUP-4",
"name": "test",
"type": "OracleDatabaseContainer"
},
"timeflowPointParameters": {
"type": "TimeflowPointSemantic",
"location": "LATEST_SNAPSHOT",
"container": "ORACLE_DB_CONTAINER-4"
},
"masked": false,
"type": "OracleMultitenantProvisionParameters"
}
EOF
)
elif [[ $(ssh centos@proddb sudo -i -u postgres whoami) ]]; then
echo -e "\nPostgres it is\n"
REPOREF=$(curl -s "http://${DEIP}/resources/json/delphix/sourceconfig?environment=${ENVREF}" \
-b ~/cookies.txt -H 'Content-Type: application/json' | \
jq -r ".result[]| select(.path==\"/mnt/provision/patmm\").repository")
echo ${REPOREF}
VDBJOB=$(curl -sX POST \
"http://${DEIP}/resources/json/delphix/database/provision" \
-b ~/cookies.txt -H 'Content-Type: application/json' \
-d @- <<-EOF
{
"container": {
"sourcingPolicy": {
"logsyncEnabled": false,
"type": "SourcingPolicy"
},
"group": "GROUP-4",
"name": "test",
"type": "AppDataContainer"
},
"source": {
"operations": {
"configureClone": [
],
"preRefresh": [
],
"postRefresh": [
],
"preRollback": [
],
"postRollback": [
],
"preSnapshot": [
],
"postSnapshot": [
],
"preStart": [
],
"postStart": [
],
"preStop": [
],
"postStop": [
],
"type": "VirtualSourceOperations"
},
"parameters": {
"postgresPort": 5477,
"configSettingsStg": [
{
"propertyName": "listen_addresses",
"value": "*"
}
]
},
"additionalMountPoints": [
],
"allowAutoVDBRestartOnHostReboot": false,
"logCollectionEnabled": false,
"name": "test",
"type": "AppDataVirtualSource"
},
"sourceConfig": {
"path": "/mnt/provision/test",
"name": "test",
"repository": "${REPOREF}",
"linkingEnabled": true,
"environmentUser": "${ENVUSER}",
"type": "AppDataDirectSourceConfig"
},
"timeflowPointParameters": {
"type": "TimeflowPointSemantic",
"location": "LATEST_SNAPSHOT",
"container": "APPDATA_CONTAINER-2"
},
"masked": false,
"type": "AppDataProvisionParameters"
}
EOF
)
else
echo "Nether expected user found. Exiting"
exit 1
fi
echo ""
VDBJOBID=$(echo $VDBJOB | jq -r .job)
if [[ "$VDBJOBID" == "null" ]]; then
echo "VDB wasn't created. Failing"
echo $VDBJOB
exit 1
fi
JOB_WAIT $VDBJOBID
echo -e "\nValidating Data\n"
if [[ $(ssh centos@proddb sudo -i -u oracle whoami) ]]; then
echo -e "\nOracle it is\n"
OUTPUT=$(ssh centos@proddb sudo -i -u oracle sqlplus -s delphixdb/delphixdb@localhost:1521/test <<-EOF
set pagesize 0 feedback off verify off heading off echo off;
select lastname,city
from patients
where id = ( select max(id) from patients );
quit;
EOF
)
if [[ "$(echo $OUTPUT| awk '{print $1}')" == "Bowen" || "$(echo $OUTPUT| awk '{print $2}')" != "Funkytown" ]]; then
echo "Unexpected data"
echo $OUTPUT
exit 1
fi
elif [[ $(ssh centos@proddb sudo -i -u postgres whoami) ]]; then
echo -e "\Postgres it is\n"
OUTPUT=$(psql -h proddb -p 5477 -U delphixdb dafdb -t -c 'select lastname,city from patients where id = ( select max(id) from patients );')
if [[ "$(echo $OUTPUT| awk '{print $1}')" == "Bowen" || "$(echo $OUTPUT| awk '{print $3}')" != "Funkytown" ]]; then
echo "Unexpected data"
echo $OUTPUT
exit 1
fi
fi
echo -e "\nData validated\n"
| true |
aa26912039af5c88a20059cde4a67913a672cc6f
|
Shell
|
chechiachang/bash-scripts
|
/uptime.sh
|
UTF-8
| 1,968 | 3.390625 | 3 |
[] |
no_license
|
#!/bin/bash
#
# Library for uptime robot api
# https://uptimerobot.com/api
# UPTIME_API_KEY=<your-uptime-api-key>
uptime::curl(){
local http_method=$1; shift;
local uptime_method=$1; shift;
local body=$*
if [[ -z ${UPTIME_API_KEY} ]]; then
echo "UPTIME_API_KEY is required but not found."
return 1
fi
curl -X ${http_method} \
-H "Cache-Control: no-cache" \
-H "Content-Type: application/x-www-form-urlencoded" \
-d "${body}" "https://api.uptimerobot.com/v2/${uptime_method}"
}
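# Usage sketch (assumes UPTIME_API_KEY is exported), as the wrappers below do:
#   uptime::curl POST getMonitors "api_key=${UPTIME_API_KEY}&format=json"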
uptime::get_account_details(){
uptime::curl POST getAccountDetails "api_key=${UPTIME_API_KEY}&format=json"
}
uptime::get_monitors(){
uptime::curl POST getMonitors "api_key=${UPTIME_API_KEY}&format=json"
}
uptime::get_monitors::aurora_dev(){
uptime::curl POST getMonitors "api_key=${UPTIME_API_KEY}&search=aurora-dev&format=json"
}
uptime::get_monitors::aurora_staging(){
uptime::curl POST getMonitors "api_key=${UPTIME_API_KEY}&search=aurora-staging&format=json"
}
uptime::stop_aurora_dev(){
monitor_id=$(uptime::get_monitors::aurora_dev | jq .monitors[0].id)
key_values="status=0"
uptime::curl POST editMonitor "api_key=${UPTIME_API_KEY}&id=${monitor_id}&${key_values}&format=json"
}
uptime::start_aurora_dev(){
monitor_id=$(uptime::get_monitors::aurora_dev | jq .monitors[0].id)
key_values="status=1"
uptime::curl POST editMonitor "api_key=${UPTIME_API_KEY}&id=${monitor_id}&${key_values}&format=json"
}
uptime::stop_aurora_staging(){
monitor_id=$(uptime::get_monitors::aurora_staging | jq .monitors[0].id)
key_values="status=0"
uptime::curl POST editMonitor "api_key=${UPTIME_API_KEY}&id=${monitor_id}&${key_values}&format=json"
}
uptime::start_aurora_staging(){
monitor_id=$(uptime::get_monitors::aurora_staging | jq .monitors[0].id)
key_values="status=1"
uptime::curl POST editMonitor "api_key=${UPTIME_API_KEY}&id=${monitor_id}&${key_values}&format=json"
}
| true |
33adf777d0df62c798f03dfd562bccd16c9d7afd
|
Shell
|
elerch/vcsh_i3config
|
/.local/bin/cache
|
UTF-8
| 1,081 | 3.984375 | 4 |
[] |
no_license
|
#!/bin/sh
if [ "$#" -lt 2 ]; then
echo 'usage: cache [-q] <seconds> <command>'
exit 1
fi
if [ "$1" = "-q" ]; then
shift
else
echo 'WARNING: this utility only caches stdout of successful commands' >&2
fi
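# Example: `cache 60 date` runs `date` at most once per 60 seconds and
# replays the cached stdout on later calls within that window.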
uid=$(id -u)
cachedir="${XDG_RUNTIME_DIR:-/run/user}"
[ "$cachedir" = "/run/user" ] && cachedir="${cachedir}/${uid}/"
cachedir="${cachedir}/commandcache"
mkdir -p "$cachedir"
seconds="$1"
shift
cmdsha="$(echo "$@" | shasum|cut -d\ -f1)"
filename="${cachedir}/${cmdsha}-out.cache"
time="${cachedir}/${cmdsha}-time.cache"
# POSIX way of getting seconds
secondssinceepoch=$(PATH=$(getconf PATH) awk 'BEGIN{srand();print srand()}')
if [ -r "$time" ]; then
commandtime="$(cat "$time")"
if [ "$(( commandtime + seconds ))" -lt "$secondssinceepoch" ]; then
# cache expired
rm "$filename"
rm "$time"
fi
fi
if [ ! -r "$filename" ]; then
(exec "$@") 2>/dev/null > "$filename"
rc=$?
if [ $rc -ne 0 ]; then
rm "$filename"
echo "ERROR: Command not successful" >&2
exit $rc
fi
printf "%s" "$secondssinceepoch" > "$time"
fi
cat "$filename"
| true |
48380498342ab7d2c3b84ae86a1c4462392635ec
|
Shell
|
daniarherikurniawan/Cassandra-QoE
|
/CassandraCodes/cassandra_trace.sh
|
UTF-8
| 259 | 2.578125 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
mkdir results_cass_scan
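# Sweep the client parameter t from 20 to 70 in steps of 5; each run emits
# cass_read_latency.txt, renamed per setting below.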
for ((t=20; t<=70; t+=5))
do
python3 cassandraclient.py $t
mv cass_read_latency.txt 'results_cass_scan/cass_read_latency_'$t.txt
done
echo 'Collection is OK'
zip -r results_cass_scan.zip results_cass_scan
| true |
3a5ac4e614c247e6d4f77d0120fd4793302f129c
|
Shell
|
julienbenjamin/administration-scripts
|
/debian-fresh-install-setup.sh
|
UTF-8
| 3,158 | 4.3125 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Setup a fresh install of Debian - Must be run as root
if (( $# != 0 )); then
echo "Illegal number of parameters"
fi
# Trap SIGINT
trap ctrl_c INT
function ctrl_c() {
echo "Script interrupted."
}
echo "---- Fresh install setup for Debian ----"
if [ $(id -u) -eq 0 ]; then
# Select options for setup
while true; do
read -p "Do you wish to grant sudo privileges to a user? [Y/N] " yn
case $yn in
[Yy]* ) GRANT_SUDO=true; break;;
[Nn]* ) GRANT_SUDO=false; break;;
* ) echo "Type Y for yes or N for no.";;
esac
done
while true; do
read -p "Do you want to install NVIDIA GPU driver? [Y/N] " yn
case $yn in
[Yy]* ) INSTALL_NVIDIA_DRIVER=true; break;;
[Nn]* ) INSTALL_NVIDIA_DRIVER=false; break;;
* ) echo "Type Y for yes or N for no.";;
esac
done
while true; do
read -p "Do you want to install NVIDIA CUDA toolkit? [Y/N] " yn
case $yn in
[Yy]* ) INSTALL_CUDA=true; break;;
[Nn]* ) INSTALL_CUDA=false; break;;
* ) echo "Type Y for yes or N for no.";;
esac
done
## Create a new "sources.list" to enable contrib and non-free repositories
sh ./debian-enable-contrib-and-non-free-repositories.sh
## Update and upgrade already installed packages
echo "-> Updating packages cache"
apt update
echo "-> Upgrading packages"
apt upgrade
## Install various packages
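# packages-list is assumed to be a whitespace-separated file of package names
# shipped next to this script, e.g.: git curl vim htop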
echo "-> Installing packages"
apt -y install $(cat packages-list)
## Granting sudo privileges to a specific user if necessary
if [ "$GRANT_SUDO" = true ] ; then
echo "-> Granting sudo privileges for a user"
sh ./grant_sudo_privileges.sh
fi
## Install NVIDIA driver/tools
if [ "$INSTALL_NVIDIA_DRIVER" = true ] ; then
apt -y install nvidia-driver nvidia-smi
## Create a configuration file for Xorg server
mkdir /etc/X11/xorg.conf.d
echo -e 'Section "Device"\n\tIdentifier "My GPU"\n\tDriver "nvidia"\nEndSection' > /etc/X11/xorg.conf.d/20-nvidia.conf
echo "Driver installed. Applying modifications and blacklisting Nouveau will require a reboot."
fi
if [ "$INSTALL_CUDA" = true ] && [ $INSTALL_NVIDIA_DRIVER = true ]; then
echo "-> Installing CUDA"
apt -y install nvidia-cuda-dev nvidia-cuda-toolkit
# create a symlink for convenience
ln -sf /usr/lib/nvidia-cuda-toolkit /usr/local/cuda
fi
## Restart services
echo "-> Restarting services to apply modifications. "
if [ "$GRANT_SUDO" = true ] ; then
/etc/init.d/sudo restart
fi
#service lightdm restart
echo "-> Services restarted"
echo "-> Setup finished!"
else
echo "This script must be run as root"
exit 2
fi
| true |
fe1719f6c545f93e3d7216bc7d04f3cd5e80cc76
|
Shell
|
WMP/wseifree
|
/livecd/livechraptsource.sh
|
UTF-8
| 566 | 2.96875 | 3 |
[] |
no_license
|
#!/bin/bash
export HOME=/root
export LC_ALL=C
# change the repositories
echo "Copying sources.list"
cp /etc/apt/sources.list /etc/apt/sources.list_bak
# new repositories
echo "Copying the new sources.list"
cp /root/etc/apt/sources.list /etc/apt/sources.list
apt-get update
# fetch the missing repository keys
echo 'Please wait...' && sudo apt-get update 2> klucze > /dev/null && sed -i '/NO_PUBKEY/!d;s/.*NO_PUBKEY //' klucze && gpg --keyserver keyserver.ubuntu.com --recv-keys $(cat klucze) && gpg --export --armor $(cat klucze) | sudo apt-key add - && rm -f klucze
| true |
086bb7d4183ac40b05d6f41eece372791482d2af
|
Shell
|
xtremespb/zoia
|
/teamcity/remove_container.sh
|
UTF-8
| 501 | 3.625 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
CONTAINER=$1
echo ":: Container to remove $CONTAINER"
if [ "$(docker ps -qa -f name=^$CONTAINER$)" ]; then
echo ":: Found container - $CONTAINER"
if [ "$(docker ps -q -f name=^$CONTAINER$)" ]; then
echo ":: Stopping running container - $CONTAINER"
docker stop $CONTAINER;
else
echo ":: The found container is not running"
fi
echo ":: Removing stopped container - $CONTAINER"
docker rm $CONTAINER;
else
echo ":: Container $CONTAINER not found"
fi
| true |
9f36dbc64c9d71ed59c9c1511b823d9039463ab2
|
Shell
|
anthonygtellez/splunk_deployment_automation
|
/rhlinux/firewalld/splunk-core-service.sh
|
UTF-8
| 1,484 | 3.390625 | 3 |
[] |
no_license
|
#!/bin/bash
#
# ===========================================================
# Purpose: This script will create a custom splunkd service for firewalld
# Parameters: None
# Example usage: $ bash splunk-core-service.sh
#
# Privileges: Must be run as root
# Author: Anthony Tellez
#
# Notes: You can change the ports/protocol by modifying the XML syntax in the echo for example:
# <port protocol="tcp" port="12345"/> You should only run this script once. Running it again will append
# to the same file: /etc/firewalld/services/splunkd.xml and will break the service!
#
# Revision: Last change: 03/01/2016 by AT :: Added details about script
# ===========================================================
touch /etc/firewalld/services/splunkd.xml
cat >/etc/firewalld/services/splunkd.xml <<EOF
<?xml version="1.0" encoding="utf-8"?>
<service>
<short>splunkd</short>
<description>Splunkd service for rest and communication.</description>
<port protocol="tcp" port="8089"/>
<port protocol="tcp" port="8080"/>
<port protocol="tcp" port="9997"/>
<port protocol="tcp" port="8000"/>
<port protocol="tcp" port="8191"/>
<port protocol="tcp" port="8065"/>
</service>
EOF
restorecon /etc/firewalld/services/splunkd.xml
chmod 640 /etc/firewalld/services/splunkd.xml
firewall-cmd --reload
echo "set selinux permissions"
firewall-cmd --permanent --add-service=splunkd
firewall-cmd --reload
firewall-cmd --list-service
echo "done."
| true |
6aeaf73795b462a3142c5d6a916863d568fc0d0a
|
Shell
|
zatax/spirit
|
/test.sh
|
UTF-8
| 792 | 3.546875 | 4 |
[] |
no_license
|
#!/bin/bash
test2 () {
filename="serv.txt"
while read -r line
do
serv=$line
echo "$serv"
done < "$filename"
}
serveur () {
PS3='Choix du serveur: '
options=("freppau-lpegf01" "freppau-lpegf02" "freppau-lpegf011" "Quit")
select opt in "${options[@]}"
do
case $opt in
"freppau-lpegf01")
srv="freppau-lpegf01"
echo "vous avez choisi $srv"
break
;;
"freppau-lpegf02")
srv="reppau-lpegf02"
echo "$srv"
break
;;
"freppau-lpegf011")
echo "choix 3"
srv="reppau-lpegf011"
break
;;
"Quit")
break
;;
*) echo invalid option;;
esac
done
}
test2
| true |
d0fe89cd1de3d5105ab9a2717f3d40775d71405c
|
Shell
|
maxlorenz/cros-auto-backlight
|
/backlightd
|
UTF-8
| 1,494 | 3.734375 | 4 |
[
"WTFPL"
] |
permissive
|
#!/bin/bash
# Max brightness value from /sys/class/backlight/*/brightness
MAX=937
# array to store light values for averaging
# TODO: this should be extended such that an arbitrarily-
# sized array can be generated
LIGHT_ARRAY=(100 100 100 100)
# polling rate in seconds (interval = rate * arraylen)
POLLING_RATE="1"
# Fitted function constants for auto-brightness
source "./blsettings"
# updates the ALS (ambient light sensor) data array
ambient_light() {
# Raw IR values from 0 (dark) to something really big...
light=$(cat /sys/bus/iio/devices/iio:device0/in_intensity_ir_raw)
LIGHT_ARRAY=(${LIGHT_ARRAY[@]:1:3} $light)
}
# calculates the target brightness level (with cutoff)
calc_bright() {
# Compute average of LIGHT_ARRAY
# TODO: this is ugly, probably inefficient, and should be improved
average=$(($((${LIGHT_ARRAY[0]}+${LIGHT_ARRAY[1]}+${LIGHT_ARRAY[2]}+${LIGHT_ARRAY[3]}))/4))
#echo "AVG: $average"
# Brightness based on raw values
rawbright="($M*$average)+$B"
# Brightness normalized to a percentage
bright=$(echo "scale=10;($rawbright)/$MAX*100" | bc)
}
# adjusts brightness
adj_bright() {
#echo "BRIGHT: $bright"
xbacklight -set $bright
}
renice 19 -p $$ >/dev/null 2>&1
# adjustment loop
while true; do
# loop fully updates LIGHT_ARRAY
# could poll and update every time instead...
for i in {1..4}; do
ambient_light
sleep $POLLING_RATE
done
calc_bright
adj_bright
done
| true |
48cc1ffd9409546a9c4dfea88e74d1081aed830e
|
Shell
|
ubaceslab/swenv
|
/build_scripts/build_anaconda.sh
|
UTF-8
| 1,788 | 3.8125 | 4 |
[] |
no_license
|
#!/bin/bash
set -e
echo "Building Anaconda for ${UBCESLAB_SYSTEMTYPE:?undefined}"
CURRENT_DIR=$PWD
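# ANACONDA_VERSION is referenced by the modulefile generation below but was
# never set here; pinning it to the Miniconda release this script installs
# (an assumption -- adjust if a different installer is used).
ANACONDA_VERSION=3.8.3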
mkdir -p ${UBCESLAB_SWENV_PREFIX:?undefined}/sourcesdir/anaconda
(cd $UBCESLAB_SWENV_PREFIX/sourcesdir/anaconda
if [ ! -f Miniconda-3.8.3-Linux-x86_64.sh ]; then
wget http://repo.continuum.io/miniconda/Miniconda-3.8.3-Linux-x86_64.sh
fi
)
ANACONDA_DIR=${UBCESLAB_SWENV_PREFIX:?undefined}/apps/anaconda
# Clear out old installation. Anaconda's installer will error if directory is there.
rm -rf $ANACONDA_DIR
# For the anaconda, we run the bash script, then install the packages we want
# We'll try to remind the user what they'll need to do.
echo "ATTENTION: You will need to agree to the license."
echo "ATTENTION: Then, you need to set the install location to:"
echo "ATTENTION: $ANACONDA_DIR"
echo "ATTENTION: Then, this script will set packages to install."
echo "ATTENTION: You will need to agree that these packages get installed."
echo ""
echo "Press any key to continue with installation."
read -n 1 -s
# Run Anaconda installer
bash $UBCESLAB_SWENV_PREFIX/sourcesdir/anaconda/Miniconda-3.8.3-Linux-x86_64.sh
export PATH=$ANACONDA_DIR/bin:$PATH
# Now install desired packages
conda install numpy scipy h5py sympy matplotlib
cd $CURRENT_DIR
MODULEDIR=$UBCESLAB_SWENV_PREFIX/apps/lmod/modulefiles/anaconda
mkdir -p $MODULEDIR
echo "local version = \"$ANACONDA_VERSION\"" > $MODULEDIR/anaconda.lua
echo "local apps_dir = \"$UBCESLAB_SWENV_PREFIX/apps\"" >> $MODULEDIR/anaconda.lua
cat ../modulefiles/anaconda.lua >> $MODULEDIR/anaconda.lua
| true |
4934fbdfee5302823185c16e21b725f03fdb9624
|
Shell
|
chris-ritsen/read_book
|
/read_book_wrapper.zsh
|
UTF-8
| 1,344 | 3.96875 | 4 |
[] |
no_license
|
function session() {
local TMUX_SESSION_DIR="${XDG_CONFIG_HOME}"'/tmux/sessions/'
local args="${*}"
local name=''
if [[ -z $args ]]; then
name=$(ls "${TMUX_SESSION_DIR}" | fzf | sed '/^$/d')  # fzf reads candidates from stdin, not a path argument
else
name="${1}"
fi
local filename="${TMUX_SESSION_DIR}""${name}"
if [[ ! -f "${filename}" ]]; then
echo "${filename}" 'not found'
return 1
fi
local tmp_dir="${XDG_RUNTIME}"'/tmux/'
local socket="${tmp_dir}"'tmux-'"${UID}"'/'"${name}"
local has_session=$(tmux -S "${socket}" has-session -t "${name}" 2> /dev/null; echo "${?}")
if [[ "${has_session}" == '0' ]]; then
local has_attached_clients=$(tmux -S "${socket}" ls -F '#{session_attached}' 2> /dev/null | tail -n1)
tmux -S "${socket}" attach 2>/dev/null
else
mkdir -p "${tmp_dir}"'tmux-'"${UID}"'/'
tmux -S "${socket}" -f "${filename}" attach 2>/dev/null
fi
}
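# Usage: `session` picks a session file interactively with fzf; `session work`
# attaches to (or starts) the session defined in $XDG_CONFIG_HOME/tmux/sessions/work.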
function read_books() {
function get_rate() {
local default_rate=$(redis-cli get reading_speed)
if [[ -z $default_rate ]]; then
default_rate=1.0
fi
echo $default_rate
}
local rate
if [[ $1 ]]; then
rate=$1
if [[ $rate -gt 0 ]]; then
rate=$((1.0 / rate))
else
rate=$(get_rate)
fi
redis-cli set reading_speed $rate 1> /dev/null
else
rate=$(get_rate)
fi
systemctl --user restart read_book
}
| true |
afe6c5fb95135ddbacd8f4884a76207a98dc3c3f
|
Shell
|
freedragon/az-hpcapps
|
/apps/pamcrash/run_case.sh
|
UTF-8
| 3,424 | 3.71875 | 4 |
[] |
no_license
|
#!/bin/bash
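# Usage: run_case.sh <case-archive(.gz|.zip)> [threads=1] [mpi=impi|pmpi]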
pam_case=$1
threads=$2
mpi=$3
VERSION=2018
if [ -z "$mpi" ]; then
mpi="impi"
fi
if [ -z "$threads" ]; then
threads="1"
fi
start_time=$SECONDS
extension="${pam_case##*.}"
case_name="${pam_case%%.*}"
echo "downloading case ${pam_case}..."
wget -q "${STORAGE_ENDPOINT}/pamcrash-benchmarks/${pam_case}?${SAS_KEY}" -O ${pam_case}
case "$extension" in
gz)
gunzip ${pam_case}
;;
zip)
unzip ${pam_case}
;;
esac
# if there is a directory named with the case name, move all files one level up
if [ -d $case_name ]; then
mv -f ./$case_name/* .
fi
end_time=$SECONDS
download_time=$(($end_time - $start_time))
echo "Download time is ${download_time}"
source /etc/profile # so we can load modules
export PAMHOME=/opt/esi-group
export PAM_LMD_LICENSE_FILE=27000@${LICENSE_SERVER}
case "$VERSION" in
2017)
export PAMCRASH=$PAMHOME/pamcrash_safe/2017.0.2/pamcrash
;;
2018)
export PAMCRASH=$PAMHOME/pamcrash_safe/2018.01/Linux_x86_64/bin/pamcrash
;;
*)
echo "unknown version $VERSION"
exit 1
;;
esac
function setup_impi()
{
# setup Intel MPI environment for Infiniband
module load mpi/impi-2018.4.274
source $MPI_BIN/mpivars.sh
export MPI_DIR=$MPI_BIN
PAM_MPI=impi-5.1.3
case "$INTERCONNECT" in
ib)
export MPI_OPTIONS="-hosts $MPI_HOSTLIST -perhost ${PPN} -genv I_MPI_FABRICS shm:dapl -genv I_MPI_DAPL_PROVIDER ofa-v2-ib0 -genv I_MPI_DYNAMIC_CONNECTION 0 -genv I_MPI_FALLBACK_DEVICE 0"
;;
sriov)
# I_MPI_DEBUG=4
export MPI_OPTIONS="-hosts $MPI_HOSTLIST -perhost ${PPN} -genv I_MPI_FABRICS shm:ofa -genv I_MPI_DYNAMIC_CONNECTION 0 -genv I_MPI_FALLBACK_DEVICE 0 $impi_options"
;;
tcp)
;;
esac
}
function setup_pmpi()
{
module load mpi/pmpi
export MPI_DIR=$MPI_BIN
PAM_MPI=platform-9.1.4
case "$INTERCONNECT" in
ib)
;;
sriov)
export MPI_OPTIONS="-hostfile $MPI_MACHINEFILE -prot -aff -affopt=vv -e MPI_IB_PKEY=$IB_PKEY"
;;
tcp)
;;
esac
}
case "$mpi" in
impi)
setup_impi
;;
pmpi)
setup_pmpi
;;
*)
setup_impi
;;
esac
$PAMCRASH -np ${CORES} \
-nt $threads \
-lic CRASHSAF \
-mpi $PAM_MPI \
-mpiexe mpirun \
-mpidir $MPI_DIR \
-mpiext '$MPI_OPTIONS' \
${case_name}.pc
end_time=$SECONDS
task_time=$(($end_time - $start_time))
# extract telemetry
case_output=../stdout.txt
if [ -f "${case_output}" ]; then
line=$(grep " ELAPSED TIME" ${case_output} | tail -1)
printf -v elapsed_time '%.2f' $(echo $line | cut -d' ' -f3)
line=$(grep " CPU TIME" ${case_output} | tail -1)
printf -v cpu_time '%.2f' $(echo $line | cut -d' ' -f3)
version=$(grep "| Version" ${case_output} | cut -d':' -f2| cut -d'|' -f1 | xargs)
precision=$(grep "| Precision" ${case_output} | cut -d':' -f2| cut -d'|' -f1 | xargs)
mpi_version=$(mpirun -version | head -n 1)
cat <<EOF >$APPLICATION.json
{
"version": "$version",
"mpi_version": "$mpi_version",
"pam_mpi": "$PAM_MPI",
"model": "$case_name",
"precision": "$precision",
"elapsed_time": $elapsed_time,
"cpu_time": $cpu_time,
"task_time": $task_time,
"download_time": $download_time
}
EOF
fi
| true |
184e249ff80b596a81be05bc9025654512634c2e
|
Shell
|
kedii/configs
|
/conf/bash/zsh/amer-widgets.zsh
|
UTF-8
| 2,270 | 3.03125 | 3 |
[
"MIT"
] |
permissive
|
# bindkey '^Y' yank-last-arg # No such func
zle -N yank-current yank_current
bindkey "^Y" yank-current
bindkey -a "^Y" yank-current
function yank_current() {
if ! [ "$BUFFER" ]; then
BUFFER="$(fc -ln -1)"
fi
printf "$BUFFER" | xsel --input --clipboard
}
zle -N prepend-sudo prepend_sudo
bindkey "^S" prepend-sudo
function prepend_sudo() {
if ! [ "$BUFFER" ]; then
BUFFER="$(fc -ln -1)"
fi
if [ "$BUFFER" != "${BUFFER#'sudo '}" ]; then
BUFFER="${BUFFER#'sudo '}"
else
BUFFER="sudo $BUFFER"
CURSOR=$(($CURSOR+5))
fi
}
zle -N synchro-dir-push synchro_dir_push
function synchro_dir_push {
curr="$(pwd)"
if [ "$curr" != ~ ]; then
printf "$curr\n" > /tmp/zsh_chosedir
fi
}
zle -N synchro-dir-pop synchro_dir_pop
function synchro_dir_pop {
if [ -f /tmp/zsh_chosedir ]; then
curr="$(cat /tmp/zsh_chosedir)"
if [ "$(pwd)" != "$curr" ]; then
# Change directories and redisplay the prompt
# (Still don't fully understand this magic combination of commands)
# [[ -o zle ]] && zle -R && cd "$(cat ~/.pwd)" && precmd && zle reset-prompt 2>/dev/null
cd "$curr" && zle reset-prompt
fi
fi
}
bindkey -a "^O" synchro-dir-pop
bindkey "^O" synchro-dir-push
### ----------- Untested
# autoload smart-insert-last-word
# bindkey "\e." smart-insert-last-word-wrapper
# bindkey "\e," smart-insert-prev-word
# zle -N smart-insert-last-word-wrapper
# zle -N smart-insert-prev-word
# function smart-insert-last-word-wrapper() {
# _altdot_reset=1
# smart-insert-last-word
# }
# function smart-insert-prev-word() {
# if (( _altdot_reset )); then
# _altdot_histno=$HISTNO
# (( _altdot_line=-_ilw_count ))
# _altdot_reset=0
# _altdot_word=-2
# elif (( _altdot_histno != HISTNO || _ilw_cursor != $CURSOR )); then
# _altdot_histno=$HISTNO
# _altdot_word=-1
# _altdot_line=-1
# else
# _altdot_word=$((_altdot_word-1))
# fi
# smart-insert-last-word $_altdot_line $_altdot_word 1
# if [[ $? -gt 0 ]]; then
# _altdot_word=-1
# _altdot_line=$((_altdot_line-1))
# smart-insert-last-word $_altdot_line $_altdot_word 1
# fi
# }
### -----------
| true |
24784e281285e781f2ad0f43744d90ff87ee021c
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/dovecot2-antispam-git/PKGBUILD
|
UTF-8
| 860 | 2.640625 | 3 |
[] |
no_license
|
# Maintainer: Carsten Feuls <[email protected]>
pkgname=dovecot2-antispam-git
pkgver=v2.0.11.g963c046
pkgrel=1
pkgdesc="Integrates DSPAM into dovecot IMAP server. Git Version for dovecot >= 2.1"
arch=( 'i686' 'x86_64' )
url="http://hg.dovecot.org/dovecot-antispam-plugin/"
license=('GPL')
depends=('dovecot>=2.2.0')
makedepends=('git')
conflicts=(dovecot-antispam)
provides=(dovecot-antispam)
source=('dovecot2-antispam-git::git+http://git.sipsolutions.net/dovecot-antispam.git/')
md5sums=('SKIP')
pkgver() {
cd ${srcdir}/${pkgname}
git describe --always | sed 's|-|.|g'
}
build() {
cd ${srcdir}/${pkgname}
msg "Starting make..."
make
#./configure --prefix=/usr --with-dovecot=/usr/lib/dovecot
}
package() {
cd "${srcdir}/${pkgname}"
mkdir -p "${pkgdir}/usr/lib/dovecot/modules"
make INSTALLDIR="${pkgdir}/usr/lib/dovecot/modules" install
}
| true |
da1211be5b8c7d7817dcb57f52a24f580b0961c2
|
Shell
|
xwiz4rd/Shellscripts
|
/reading.sh
|
UTF-8
| 116 | 2.640625 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
read -p "Digite seu nome: " nome
read -p "Digite sua idade: " idade
echo "$nome você tem $idade anos"
| true |
2a60c3f0d28911fc45dd569dbff18a0b034ffb8a
|
Shell
|
niXman/mingw-builds
|
/clean
|
UTF-8
| 5,464 | 3.5625 | 4 |
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
#
# The BSD 3-Clause License. http://www.opensource.org/licenses/BSD-3-Clause
#
# This file is part of MinGW-W64(mingw-builds: https://github.com/niXman/mingw-builds) project.
# Copyright (c) 2011-2023 by niXman (i dotty nixman doggy gmail dotty com)
# Copyright (c) 2012-2015 by Alexpux (alexpux doggy gmail dotty com)
# All rights reserved.
#
# Project: MinGW-Builds ( https://github.com/niXman/mingw-builds )
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the distribution.
# - Neither the name of the 'MinGW-W64' nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# **************************************************************************
CLEAN_INSTALLED=no
CLEAN_RUNTIME=no
CLEAN_PREREQS=no
CLEAN_EXTRACTED_SOURCES=no
CLEAN_ARCHIVES=no
CLEAN_ALL_SOURCES=no
PRINT_VERBOSE_ARG=
# **************************************************************************
[[ $# == 0 || ( $# == 1 && $1 == --help ) ]] && {
echo "usage:"
echo " ./${0##*/} [OPTIONS]"
echo " help:"
echo " --buildroot=<path> - specifies the build root directory"
echo " --installed - cleans the prerequisite installs"
echo " --sources - deletes the extracted sources"
echo " --archives - cleans the archives directory"
echo " --generated - cleans everything except the compressed sources"
echo " --full - remove the buildroot folder itself"
exit 0
}
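# Example invocation (hypothetical paths):
#   ./clean --buildroot=/home/user/mingw-buildroot --sources --archives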
# **************************************************************************
while [[ $# > 0 ]]; do
case $1 in
--buildroot=*)
ROOT_DIR=${1/--buildroot=/}
ROOT_DIR=${ROOT_DIR//:/:\/}
ROOT_DIR=${ROOT_DIR//\/\//\/}
mkdir -p ${ROOT_DIR} || die "incorrect buildroot directory: \"${ROOT_DIR}\". terminate."
pushd ${ROOT_DIR} > /dev/null
ROOT_DIR=$PWD
popd > /dev/null
;;
--installed) CLEAN_INSTALLED=yes ;;
--prereqs) CLEAN_PREREQS=yes ;;
--sources) CLEAN_EXTRACTED_SOURCES=yes ;;
--archives) CLEAN_ARCHIVES=yes ;;
--generated)
CLEAN_OUTPUT=yes
CLEAN_INSTALLED=yes
CLEAN_RUNTIME=yes
CLEAN_PREREQS=yes
CLEAN_EXTRACTED_SOURCES=yes
CLEAN_ARCHIVES=yes
;;
--full)
CLEAN_OUTPUT=yes
CLEAN_INSTALLED=yes
CLEAN_RUNTIME=yes
CLEAN_PREREQS=yes
CLEAN_ALL_SOURCES=yes
CLEAN_ARCHIVES=yes
;;
--verbose)
PRINT_VERBOSE_ARG=-print
;;
*)
>&2 echo "bad command line: \"$1\". terminate."
exit 1
;;
esac
shift
done
# **************************************************************************
readonly PREREQ_DIR=$ROOT_DIR/prerequisites
readonly RUNTIME_DIR=$ROOT_DIR/runtime
readonly ARCHIVES_DIR=$ROOT_DIR/archives
readonly PREREQ_BUILD_DIR=$ROOT_DIR/prerequisites-build
readonly PREREQ_LOGS_DIR=$ROOT_DIR/prerequisites-logs
readonly SRCS_DIR=$ROOT_DIR/src
readonly MARKERS_DIR=$SRCS_DIR/MARKERS
# **************************************************************************
[[ $CLEAN_OUTPUT == "yes" ]] && {
echo "Deleting main output directories"
find $ROOT_DIR -mindepth 1 -maxdepth 1 -type d -name i686* $PRINT_VERBOSE_ARG -exec rm -rf {} \;
find $ROOT_DIR -mindepth 1 -maxdepth 1 -type d -name x86_64* $PRINT_VERBOSE_ARG -exec rm -rf {} \;
}
[[ $CLEAN_RUNTIME == "yes" ]] && {
echo "Deleting runtime directory"
rm -Rf $RUNTIME_DIR
}
[[ $CLEAN_INSTALLED == "yes" ]] && {
echo "Deleting _installed.marker files"
[[ -d $PREREQ_BUILD_DIR ]] && {
find $PREREQ_BUILD_DIR -name _installed.marker $PRINT_VERBOSE_ARG -delete
}
echo "Deleting prerequisites install directory"
rm -Rf $PREREQ_DIR
}
[[ $CLEAN_PREREQS == "yes" ]] && {
echo "Deleting the prereq build and log directories"
rm -Rf $PREREQ_BUILD_DIR
rm -Rf $PREREQ_LOGS_DIR
}
[[ $CLEAN_EXTRACTED_SOURCES == "yes" ]] && {
echo "Deleting extracted source directories"
find $SRCS_DIR -mindepth 1 -maxdepth 1 -type d ! -name MARKERS ! -name mingw-w64 ! -name "gcc-*-branch" ! -name gcc-trunk $PRINT_VERBOSE_ARG -exec rm -rf {} \;
}
[[ $CLEAN_ALL_SOURCES == "yes" ]] && {
echo "Deleting entire src directory"
rm -Rf $SRCS_DIR
}
[[ $CLEAN_ARCHIVES == "yes" ]] && {
echo "Deleting archives folder"
rm -Rf $ARCHIVES_DIR
}
| true |
ef4e02a8abe7c89cc2f745521e49a595a13532ba
|
Shell
|
KatieMishra/VoiceClassification
|
/sphinxbase/autoconf-2.69/automake-1.14/t/location.sh
|
UTF-8
| 2,829 | 3.109375 | 3 |
[
"GPL-2.0-only",
"GPL-3.0-only",
"GPL-1.0-or-later",
"FSFAP",
"GPL-3.0-or-later",
"Autoconf-exception-3.0",
"LicenseRef-scancode-other-copyleft",
"GPL-2.0-or-later",
"MIT",
"BSD-2-Clause"
] |
permissive
|
#! /bin/sh
# Copyright (C) 2002-2013 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Test for locations in error messages.
. test-init.sh
cat >> configure.ac << 'END'
AM_CONDITIONAL([COND1], [true])
AM_CONDITIONAL([COND2], [true])
AC_PROG_CC
AM_PROG_AR
AC_PROG_RANLIB
END
cat > Makefile.am << 'END'
bin_PROGRAMS = libfoo.a
if COND2
lib_LIBRARIES = libfoo.a
endif
if COND1
bin_PROGRAMS += ctags
endif
VAR = 1 \
2 \
3
VAR = 1 \
2 \
3
END
: > ar-lib
# Smash the useless difference of lib file locations.
smash_useless_diffs ()
{
sed -e "s,^$am_amdir/\\([a-z]*\.am\\),\\1," \
-e "s,^automake-$APIVERSION:,automake:," ${1+"$@"};
}
$ACLOCAL
AUTOMAKE_fails -Wno-error
smash_useless_diffs stderr >observed
# Apparently useless use of sed here required to avoid spuriously
# triggering some maintainer-checks failures.
sed 's/^> //' > expected << 'END'
> Makefile.am:12: warning: VAR multiply defined in condition TRUE ...
> Makefile.am:8: ... 'VAR' previously defined here
> automake: error: libfoo_a_OBJECTS should not be defined
> Makefile.am:3: while processing library 'libfoo.a'
> automake: error: use 'libfoo_a_LDADD', not 'libfoo_a_LIBADD'
> Makefile.am:3: while processing library 'libfoo.a'
> library.am: warning: deprecated feature: target 'libfoo.a' overrides 'libfoo.a$(EXEEXT)'
> library.am: change your target to read 'libfoo.a$(EXEEXT)'
> Makefile.am:3: while processing library 'libfoo.a'
> program.am: target 'libfoo.a$(EXEEXT)' was defined here
> Makefile.am:1: while processing program 'libfoo.a'
> program.am: warning: redefinition of 'libfoo.a$(EXEEXT)' ...
> Makefile.am:1: while processing program 'libfoo.a'
> library.am: ... 'libfoo.a' previously defined here
> Makefile.am:3: while processing library 'libfoo.a'
> tags.am: warning: redefinition of 'ctags' ...
> program.am: ... 'ctags$(EXEEXT)' previously defined here
> Makefile.am:6: while processing program 'ctags'
END
cat expected
cat observed
diff expected observed || exit 1
AUTOMAKE_fails -Werror
smash_useless_diffs stderr >observed
(echo 'automake: warnings are treated as errors' && cat expected) > t
mv -f t expected
cat expected
cat observed
diff expected observed || exit 1
:
| true |
4a78cb2c41f890b755ec1160128263131b43c01c
|
Shell
|
vendi-advertising/a11y-report-admin
|
/run-phpunit.sh
|
UTF-8
| 4,500 | 3.671875 | 4 |
[] |
no_license
|
#!/bin/bash
##see: http://stackoverflow.com/questions/192249/how-do-i-parse-command-line-arguments-in-bash
# Use -gt 1 to consume two arguments per pass in the loop
# Use -gt 0 to consume one or more arguments per pass in the loop
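# Example invocation (hypothetical values):
#   ./run-phpunit.sh --create-database -d unit_test -u unit_user -p secret -g core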
DB_USER='unit_chris'
DB_PASS='unit_chris'
DB_NAME='unit_chris'
CREATE_DB=false
RUN_LINT=false
RUN_SEC=false
RUN_PHP_CS=false
RUN_PHP_UNIT=false
UPDATE_COMPOSER=false
while [[ $# -gt 0 ]]
do
key="$1"
case $key in
-g|--group)
GROUP="$2"
shift # past argument
;;
-d|--database)
DB_NAME="$2"
shift # past argument
;;
-u|--user)
DB_USER="$2"
shift # past argument
;;
-p|--password)
DB_PASS="$2"
shift # past argument
;;
    --create-database)
    CREATE_DB=true
    ;;
    --update-composer)
    # UPDATE_COMPOSER and RUN_SEC are checked below but had no flags; expose them
    UPDATE_COMPOSER=true
    ;;
    --security-check)
    RUN_SEC=true
    ;;
--test-only)
RUN_LINT=true
RUN_PHP_UNIT=true
;;
--no-test)
RUN_PHP_CS=true
RUN_LINT=true
;;
*)
# unknown option
;;
esac
shift # past argument or value
done
maybe_run_php_cs()
{
echo "Maybe running PHP CS Fixer...";
if [ "$RUN_PHP_CS" = true ]; then
{
echo "running...";
vendor/bin/php-cs-fixer fix
if [ $? -ne 0 ]; then
{
echo "Error with composer... exiting";
exit 1;
}
fi
}
else
echo "skipping";
fi
printf "\n";
}
maybe_update_composer()
{
echo "Maybe updating composer...";
if [ "$UPDATE_COMPOSER" = true ]; then
{
echo "running...";
composer update
if [ $? -ne 0 ]; then
{
echo "Error with composer... exiting";
exit 1;
}
fi
}
else
echo "skipping";
fi
printf "\n";
}
maybe_run_linter()
{
echo "Maybe running linter...";
if [ "$RUN_LINT" = true ]; then
{
echo "running...";
./vendor/bin/parallel-lint --exclude vendor/ .
if [ $? -ne 0 ]; then
{
echo "Error with PHP linter... exiting";
exit 1;
}
fi
}
else
echo "skipping";
fi
printf "\n";
}
maybe_run_security_check()
{
echo "Maybe running security check...";
if [ "$RUN_SEC" = true ]; then
{
echo "running...";
vendor/bin/security-checker security:check --end-point=http://security.sensiolabs.org/check_lock --timeout=30 ./composer.lock
if [ $? -ne 0 ]; then
{
echo "Error with security checker... exiting";
exit 1;
}
fi
}
else
echo "skipping";
fi
printf "\n";
}
setup_wordpress_config()
{
echo "Setting up WordPress...";
CONFIG_TEMPLATE_FILE='./vendor/WordPress/wordpress-develop/wp-tests-config-sample.php';
CONFIG_FILE='./vendor/WordPress/wordpress-develop/wp-tests-config.php';
if [ ! -f "$CONFIG_TEMPLATE_FILE" ]; then
{
echo "Cannot find WordPress config template... exiting";
exit 1;
}
fi
cp $CONFIG_TEMPLATE_FILE $CONFIG_FILE &&
sed -i "s|youremptytestdbnamehere|${DB_NAME}|g" $CONFIG_FILE &&
sed -i "s|yourusernamehere|${DB_USER}|g" $CONFIG_FILE &&
sed -i "s|yourpasswordhere|${DB_PASS}|g" $CONFIG_FILE
if [ $? -ne 0 ]; then
{
echo "Error with local test configuration... exiting";
exit 1;
}
fi
printf "\n";
}
maybe_create_database()
{
echo "Maybe creating database...";
if [ "$CREATE_DB" = true ]; then
{
echo "running...";
mysqladmin create $DB_NAME --user="$DB_USER" --password="$DB_PASS"
if [ $? -ne 0 ]; then
{
echo "Error creating database... exiting";
exit 1;
}
fi
}
else
echo "skipping";
fi
printf "\n";
}
run_php_unit()
{
echo "Running PHPUnit...";
if [ -z "$GROUP" ]; then
./vendor/bin/phpunit --coverage-html ./tests/logs/coverage/
else
./vendor/bin/phpunit --coverage-html ./tests/logs/coverage/ --group $GROUP
fi
}
if [ ! -d './vendor' ]; then
composer update
fi
maybe_update_composer;
maybe_run_linter;
maybe_run_php_cs;
maybe_run_security_check;
maybe_create_database;
setup_wordpress_config;
run_php_unit;
| true |
e646f7a9c3e8e91d740a30147ec7e96615efda1a
|
Shell
|
disc0nn3ct/lab_miem
|
/OS/lab2 Shell/test.sh
|
UTF-8
| 403 | 3.421875 | 3 |
[] |
no_license
|
#!/bin/sh
clear
bufer=0
chekByNULL()
{
echo ow
local spname=$1
echo $spname ------------
if [ ! -r "$spname" ] || [ -z "$spname" ]
then
echo "Enter a file here"
read spname
while [ ! -r "$spname" ] || [ -z "$spname" ]
do
echo "Enter a readable file (got: $spname)"
read spname
done
fi
bufer=$spname
}
name1="1.txt4"
# read name1
chekByNULL $name1
# echo oooooo $?
| true |
0939227ba26abffceddd9aee928e633de0fc33af
|
Shell
|
KrestenKjaer/bash-provision
|
/test/type-file.bats
|
UTF-8
| 4,743 | 3.421875 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bats
. test/helpers.sh
file () { . $BORK_SOURCE_DIR/types/file.sh $*; }
setup () {
readsum=$(eval $(md5cmd $platform Readme.md))
}
# -- without arguments -------------------------------
@test "file status: returns MISSING when file is missing" {
respond_to "[ -f missing ]" "return 1"
run file status missing Readme.md
[ "$status" -eq $STATUS_MISSING ]
}
@test "file status: returns FAILED_ARGUMENTS when source file is missing" {
run file status somefile missingfile
[ "$status" -eq $STATUS_FAILED_ARGUMENTS ]
}
@test "file status: returns CONFLICT_UPGRADE when sum doesn't match" {
respond_to "$(md5cmd $platform wrongfile)" "echo 123456"
run file status wrongfile Readme.md
[ "$status" -eq $STATUS_CONFLICT_UPGRADE ]
expected="expected sum: $readsum"
[ "${lines[0]}" = $expected ]
[ "${lines[1]}" = "received sum: 123456" ]
}
@test "file status: returns OK when all is well" {
respond_to "$(md5cmd $platform goodfile)" "echo $readsum"
run file status goodfile Readme.md
[ "$status" -eq $STATUS_OK ]
}
@test "file install: creates directory, copies file" {
run file install path/to/target path/from/source
[ "$status" -eq 0 ]
run baked_output
[ "${lines[0]}" = "mkdir -p path/to" ]
[ "${lines[1]}" = "cp path/from/source path/to/target" ]
}
@test "file install: ignores directory if not present" {
run file install target source
[ "$status" -eq 0 ]
run baked_output
[ "${lines[0]}" = "cp source target" ]
}
# -- with permission argument ------------------------
@test "file status: returns MISMATCH_UPGRADE when target file has incorrect permissions" {
respond_to "$(md5cmd $platform tfile)" "echo $readsum"
respond_to "$(permission_cmd $platform) tfile" "echo 755"
run file status tfile Readme.md --permissions=700
[ "$status" -eq $STATUS_MISMATCH_UPGRADE ]
[ "${lines[0]}" = "expected permissions: 700" ]
[ "${lines[1]}" = "received permissions: 755" ]
}
@test "file install: sets permissions for file after copying" {
run file install target path/from/source --permissions=700
[ "$status" -eq 0 ]
run baked_output
[ "${lines[1]}" = "chmod 700 target" ]
}
# -- with owner argument -----------------------------
@test "file status: returns FAILED_ARGUMENT_PRECONDITION when target user doesn't exist" {
respond_to "id -u kermit" "echo 'id: kermit: no such user'; return 1"
run file status target Readme.md --owner=kermit
[ "$status" -eq $STATUS_FAILED_ARGUMENT_PRECONDITION ]
[ "${lines[0]}" = "unknown owner: kermit" ]
}
@test "file status: returns MISMATCH_UPGRADE when target file has incorrect owner" {
respond_to "sudo $(md5cmd $platform target)" "echo $readsum"
respond_to "sudo ls -l target" "echo -rw-r--r-- 1 kermit staff 4604"
run file status target Readme.md --owner=bork
[ "$status" -eq $STATUS_MISMATCH_UPGRADE ]
[ "${lines[0]}" = "expected owner: bork" ]
[ "${lines[1]}" = "received owner: kermit" ]
}
@test "file status: returns OK with owner and all is well" {
respond_to "sudo $(md5cmd $platform target)" "echo $readsum"
respond_to "sudo ls -l target" "echo -rw-r--r-- 1 kermit staff 4604"
run file status target Readme.md --owner=kermit
[ "$status" -eq $STATUS_OK ]
}
@test "file install: copies file as correct user" {
run file install path/to/target path/from/source --owner=kermit
[ "$status" -eq 0 ]
run baked_output
[ "${lines[0]}" = "sudo mkdir -p path/to" ]
[ "${lines[1]}" = "sudo chown kermit path/to" ]
[ "${lines[2]}" = "sudo cp path/from/source path/to/target" ]
[ "${lines[3]}" = "sudo chown kermit path/to/target" ]
}
# --- compile ----------------------------------------
@test "file compile: echoes base64 representation to screen" {
run file compile path/to/target Readme.md
[ "$status" -eq 0 ]
expected="borkfiles__UmVhZG1lLm1kCg=\"$(base64 Readme.md)\""
accum="${lines[2]}"
line=2
while [ "$line" -lt ${#lines[*]} ]; do
(( line++ ))
accum=$(echo "$accum"; echo "${lines[line]}")
done
[[ "$accum" = $expected ]]
}
is_compiled () { [ -n "$is_compiled" ]; }
@test "file status: if compiled, uses stored variable" {
is_compiled=1
borkfiles__cGF0aC9mcm9tL3NvdXJjZQo="$(base64 Readme.md)"
respond_to "$(md5cmd $platform path/to/target)" "echo $readsum"
run file status path/to/target path/from/source
[ "$status" -eq $STATUS_OK ]
}
@test "file install: if compiled, uses stored variable" {
is_compiled=1
borkfiles__cGF0aC9mcm9tL3NvdXJjZQo="$(base64 Readme.md)"
run file install path/to/target path/from/source
[ "$status" -eq $STATUS_OK ]
run baked_output
expected="echo \"$borkfiles__cGF0aC9mcm9tL3NvdXJjZQo\" | base64 --decode > path/to/target"
expected=$(echo $expected)
[[ "${lines[1]}" = $expected ]]
}
| true |
2e00084fdac5d2233c45f7a16d48ad1be3fec80d
|
Shell
|
kaymmm/pulseconnectsecure-arch
|
/rpm/PulseClient.sh.new
|
UTF-8
| 23,730 | 2.828125 | 3 |
[] |
no_license
|
#!/bin/bash
HOMEDIR=$HOME
INSTALLDIR=/usr/local/pulse
PULSEDIR=$HOME/.pulse_secure/pulse
SVCNAME=pulsesvc
LOG=$PULSEDIR/PulseClient.log
args=""
ive_ip=""
NOARGS=$#
SCRARGS=$@
WEBKITGTK_1_SUPPORTED_VERSION=6
WEBKITGTK_3_SUPPORTED_VERSION=7
PACKAGE_TYPE_RPM=1
PACKAGE_TYPE_DEB=2
SCRNAME=`basename $0`
SUPPORTED_OSTYPES_LIST=( ARCH CENTOS_6 CENTOS_7 UBUNTU_14 UBUNTU_15 UBUNTU_16 FEDORA RHEL_7 UNSUPPORTED)
#Arch
ARCH_DEPENDENCIES=( glibc \
nss \
zlib \
glib-networking \
webkitgtk \
xulrunner \
libproxy \
libxmu \
)
#RPM Based
CENTOS_6_DEPENDENCIES=( glibc \
nss-softokn-freebl \
zlib \
glib-networking \
webkitgtk \
xulrunner\
libproxy \
libXmu \
libproxy-gnome \
libproxy-mozjs)
CENTOS_6_DEPENDENCIES_WITH_VERSION=( glibc.i686 \
nss.i686 \
zlib.i686 \
glib-networking.i686 \
webkitgtk.i686 \
xulrunner.i686 \
libproxy.i686 \
libXmu.i686 \
libproxy-gnome.i686 \
libproxy-mozjs.i686)
FEDORA_DEPENDENCIES=( glibc \
nss-softokn-freebl \
zlib \
glib-networking \
webkitgtk \
xulrunner \
libproxy \
mozjs17 \
libproxy-mozjs \
libproxy-gnome)
FEDORA_DEPENDENCIES_WITH_VERSION=( glibc.i686 \
nss.i686 \
zlib.i686 \
glib-networking.i686 \
webkitgtk.i686 \
xulrunner.i686 \
libproxy.i686 \
mozjs17.i686 \
libproxy-mozjs.i686 \
libproxy-gnome.i686)
CENTOS_7_DEPENDENCIES=( glibc \
nss-softokn-freebl \
zlib \
glib-networking \
webkitgtk3 \
libproxy-gnome \
libproxy-mozjs \
libproxy )
CENTOS_7_DEPENDENCIES_WITH_VERSION=( glibc.i686 \
nss.i686 \
zlib.i686 \
glib-networking.i686 \
webkitgtk3.i686 \
libproxy-gnome.i686 \
libproxy-mozjs.i686 \
libproxy.i686 )
RHEL_DEPENDENCIES=( glibc \
nss-softokn-freebl \
zlib \
glib-networking \
webkitgtk3 \
libproxy-gnome \
libproxy-mozjs \
libproxy )
RHEL_DEPENDENCIES_WITH_VERSION=( glibc.i686 \
nss.i686 \
zlib.i686 \
glib-networking.i686 \
webkitgtk3.i686 \
libproxy-gnome.i686 \
libproxy-mozjs.i686 \
libproxy.i686 )
#Debian Based
UBUNTU_14_DEPENDENCIES=( lib32z1 \
libc6-i386 \
webkitgtk \
libproxy1 \
libproxy1-plugin-gsettings \
libproxy1-plugin-webkit \
libdconf1 \
dconf-gsettings-backend)
UBUNTU_14_DEPENDENCIES_WITH_VERSION=( lib32z1 \
libc6-i386 \
libwebkitgtk-1.0-0:i386 \
libproxy1:i386 \
libproxy1-plugin-gsettings:i386 \
libproxy1-plugin-webkit:i386 \
libdconf1:i386 \
dconf-gsettings-backend:i386)
UBUNTU_15_DEPENDENCIES=( lib32z1 \
libc6-i386 \
webkitgtk \
libproxy1 \
libproxy1-plugin-gsettings \
libproxy1-plugin-webkit \
libdconf1 \
dconf-gsettings-backend)
UBUNTU_15_DEPENDENCIES_WITH_VERSION=( lib32z1 \
libc6-i386 \
libwebkitgtk-1.0-0:i386 \
libproxy1:i386 \
libproxy1-plugin-gsettings:i386 \
libproxy1-plugin-webkit:i386 \
libdconf1:i386 \
dconf-gsettings-backend:i386)
UBUNTU_16_DEPENDENCIES=( lib32z1 \
libc6-i386 \
webkitgtk \
libproxy1 \
libproxy1-plugin-gsettings \
libproxy1-plugin-webkit \
libdconf1 \
dconf-gsettings-backend)
UBUNTU_16_DEPENDENCIES_WITH_VERSION=( lib32z1 \
libc6-i386 \
libwebkitgtk-1.0-0:i386 \
libproxy1:i386 \
libproxy1-plugin-gsettings:i386 \
libproxy1-plugin-webkit:i386 \
libdconf1:i386 \
dconf-gsettings-backend:i386)
tam=${#SUPPORTED_OSTYPES_LIST[@]}
for ((i=0; i < $tam; i++)); do
name=${SUPPORTED_OSTYPES_LIST[i]}
declare -r ${name}=$i
done
install_arch() {
i=$1
sudo -v > /dev/null 2>/dev/null
if [ $? -eq 0 ]; then
echo "sudo password : "
sudo pacman -S $i
if [ $? -ne 0 ]; then
echo "Failed to install dependencies.Please execute following command manually."
echo " sudo pacman -S $i"
fi
else
echo "super user password : "
su -c " pacman -S $i"
if [ $? -ne 0 ]; then
echo "Failed to install dependencies.Please execute following command manually."
echo " pacman -S $i"
fi
fi
}
install_deb() {
i=$1
sudo -v > /dev/null 2>/dev/null
if [ $? -eq 0 ]; then
echo "sudo password : "
sudo apt-get install $i
if [ $? -ne 0 ]; then
echo "Failed to install dependencies.Please execute following command manually."
echo " apt-get install $i"
fi
else
echo "super user password : "
su -c "apt-get install $i"
if [ $? -ne 0 ]; then
echo "Failed to install dependencies.Please execute following command manually."
echo " apt-get install $i"
fi
fi
}
install_rpm() {
i=$1
sudo -v > /dev/null 2>/dev/null
if [ $? -eq 0 ]; then
echo "sudo password "
sudo yum -y install $i
if [ $? -ne 0 ]; then
echo "Failed to install dependencies.Please execute following command manually."
echo " yum install $i"
fi
else
echo "super user password "
su -c "yum -y install $i"
if [ $? -ne 0 ]; then
echo "Failed to install dependencies.Please execute following command manually."
echo " yum install $i"
fi
fi
}
install_from_repo() {
url=$1
sudo -v > /dev/null 2>/dev/null
if [ $? -eq 0 ]; then
echo "sudo password "
sudo rpm -Uvh $url
if [ $? -ne 0 ]; then
echo "Failed to install dependencies.Please execute following command manually."
echo "rpm -Uvh $url"
fi
else
echo "super user password "
su -c " rpm -Uvh $url"
if [ $? -ne 0 ]; then
echo "Failed to install dependencies.Please execute following command manually."
echo " rpm -Uvh $url"
fi
fi
}
#determine the OS TYPE
determine_os_type() {
if [ -f /etc/arch-release ]; then
OS_TYPE=${SUPPORTED_OSTYPES_LIST[$ARCH]}
OS_MAJOR_VERSION=$WEBKITGTK_3_SUPPORTED_VERSION
INSTALLDIR=/opt/pulse
elif [ -f /etc/centos-release ]; then
INSTALLDIR=/usr/local/pulse
OS_MAJOR_VERSION=$(cat /etc/centos-release | grep -o '.[0-9]'| head -1|sed -e 's/ //')
if [ $OS_MAJOR_VERSION = $WEBKITGTK_1_SUPPORTED_VERSION ]; then
OS_TYPE=${SUPPORTED_OSTYPES_LIST[$CENTOS_6]}
elif [ $OS_MAJOR_VERSION = $WEBKITGTK_3_SUPPORTED_VERSION ]; then
OS_TYPE=${SUPPORTED_OSTYPES_LIST[$CENTOS_7]}
else
OS_TYPE=${SUPPORTED_OSTYPES_LIST[$UNSUPPORTED]}
fi
elif [ -f /etc/fedora-release ]; then
INSTALLDIR=/usr/local/pulse
OS_MAJOR_VERSION=6 #Fedora uses webkitgtk-1.0
OS_TYPE=${SUPPORTED_OSTYPES_LIST[$FEDORA]}
elif [ -f /etc/redhat-release ]; then
INSTALLDIR=/usr/local/pulse
OS_MAJOR_VERSION=$(cat /etc/redhat-release | grep -o '.[0-9]'| head -1|sed -e 's/ //')
if [ $OS_MAJOR_VERSION = $WEBKITGTK_3_SUPPORTED_VERSION ]; then
OS_TYPE=${SUPPORTED_OSTYPES_LIST[$RHEL_7]}
else
OS_TYPE=${SUPPORTED_OSTYPES_LIST[$UNSUPPORTED]}
fi
else
INSTALLDIR=/usr/local/pulse
OS_MAJOR_VERSION=6 #Every other flavour uses webkitgtk-1.0
OSNAME=$(lsb_release -d |grep -o "Ubuntu")
if [ "X$OSNAME" != "X" ]; then
UBUNTU_VER=$(lsb_release -d | grep -o '.[0-9]*\.'| head -1|sed -e 's/\s*//'|sed -e 's/\.//')
if [ $UBUNTU_VER = 14 ]; then
OS_TYPE=${SUPPORTED_OSTYPES_LIST[$UBUNTU_14]}
elif [ $UBUNTU_VER = 15 ]; then
OS_TYPE=${SUPPORTED_OSTYPES_LIST[$UBUNTU_15]}
elif [ $UBUNTU_VER = 16 ]; then
OS_TYPE=${SUPPORTED_OSTYPES_LIST[$UBUNTU_16]}
else
OS_TYPE=${SUPPORTED_OSTYPES_LIST[$UNSUPPORTED]}
fi
fi
fi
echo "Please refer to $INSTALLDIR/README for instructions to launch the Pulse Client"
}
ubuntu14_install_webkit(){
sudo apt-get update
sudo apt-get download libenchant1c2a:i386
DEBFILE=libenchant1c2a_1.6.0-10ubuntu1_i386.deb
TMPDIR=`mktemp -d /tmp/deb.XXXXXXXXXX` || exit 1
OUTPUT=`basename "$DEBFILE" .deb`.modfied.deb
if [[ -e "$OUTPUT" ]]; then
echo "$OUTPUT exists."
rm -r "$TMPDIR"
return
fi
dpkg-deb -x "$DEBFILE" "$TMPDIR"
dpkg-deb --control "$DEBFILE" "$TMPDIR"/DEBIAN
if [[ ! -e "$TMPDIR"/DEBIAN/control ]]; then
echo DEBIAN/control not found.
rm -r "$TMPDIR"
return
fi
CONTROL="$TMPDIR"/DEBIAN/control
MOD=`stat -c "%y" "$CONTROL"`
sed -i '/^Depends: /d' "$CONTROL"
if [[ "$MOD" == `stat -c "%y" "$CONTROL"` ]]; then
echo Not modified.
else
echo Building new deb...
dpkg -b "$TMPDIR" "$OUTPUT"
fi
rm -r "$TMPDIR"
sudo dpkg -i $OUTPUT
sudo apt-get install libwebkitgtk-1.0-0:i386
sudo rm -f libenchant1c2a_1.6.0-10ubuntu1_i386*
}
check_and_install_missing_dependencies() {
echo "Checking for missing dependency packages ..."
if [ $OS_TYPE = ${SUPPORTED_OSTYPES_LIST[$UNSUPPORTED]} ]; then
return
fi
isArchBased=0
isRpmBased=0
isDebBased=0
dependencyListName=${OS_TYPE}_DEPENDENCIES
dependencyListNameWithVersion=${OS_TYPE}_DEPENDENCIES_WITH_VERSION
if [[ $OS_TYPE = ${SUPPORTED_OSTYPES_LIST[$ARCH]} ]]; then
isArchBased=1
elif [[ ($OS_TYPE = ${SUPPORTED_OSTYPES_LIST[$CENTOS_6]}) || \
($OS_TYPE = ${SUPPORTED_OSTYPES_LIST[$CENTOS_7]}) || \
($OS_TYPE = ${SUPPORTED_OSTYPES_LIST[$FEDORA]}) || \
($OS_TYPE = ${SUPPORTED_OSTYPES_LIST[$RHEL_7]})]]; then
isRpmBased=1
elif [[ ($OS_TYPE = ${SUPPORTED_OSTYPES_LIST[$UBUNTU_14]}) || \
($OS_TYPE = ${SUPPORTED_OSTYPES_LIST[$UBUNTU_15]}) || \
($OS_TYPE = ${SUPPORTED_OSTYPES_LIST[$UBUNTU_16]})]]; then
isDebBased=1
fi
if [ $isArchBased = 1 ]; then
eval "depListArr=(\${${dependencyListName}[@]})"
# eval "depListArrWithVersion=(\${${dependencyListNameWithVersion}[@]})"
tam=${#depListArr[@]}
PKGREQ=""
for ((i=0; i < $tam; i++)); do
depPkgName=${depListArr[i]}
curPkgVar=`pacman -Q | grep -i $depPkgName`
if [ "X$curPkgVar" = "X" ]; then
echo "$depPkgName is missing in the machine" > $LOG
PKGREQ="$PKGREQ ${depListArr[i]}"
fi
done
if [ "X" != "X$PKGREQ" ]; then
# Install respective packages based on the current installation
echo ""
echo "Please execute below commands to install missing dependent packages "
for i in `echo $PKGREQ`
do
echo "pacman -S $i"
done
fi
echo $readMeEchoMsg
elif [ $isRpmBased = 1 ]; then
eval "depListArr=(\${${dependencyListName}[@]})"
eval "depListArrWithVersion=(\${${dependencyListNameWithVersion}[@]})"
tam=${#depListArr[@]}
PKGREQ=""
for ((i=0; i < $tam; i++)); do
depPkgName=${depListArr[i]}
curPkgVar=`rpm -qa | grep -i $depPkgName | grep -i "i686\|i386"`
if [ "X$curPkgVar" = "X" ]; then
echo "$depPkgName is missing in the machine"
PKGREQ="$PKGREQ ${depListArrWithVersion[i]}"
fi
done
if [ "X" != "X$PKGREQ" ]; then
# Install respective packages based on the current installation
for i in `echo $PKGREQ`
do
if [ $OS_TYPE = ${SUPPORTED_OSTYPES_LIST[$CENTOS_6]} ]; then
if [ $i = 'libproxy-mozjs.i686' ]; then
url='http://centos.mirror.net.in/centos/6/os/i386/Packages/libproxy-mozjs-0.3.0-10.el6.i686.rpm'
install_from_repo $url
elif [ $i = 'glib-networking.i686' ]; then
url='http://centos.mirror.net.in/centos/6/os/i386/Packages/glib-networking-2.28.6.1-2.2.el6.i686.rpm'
install_from_repo $url
elif [ $i = 'libproxy-gnome.i686' ]; then
url='http://centos.mirror.net.in/centos/6/os/i386/Packages/libproxy-gnome-0.3.0-10.el6.i686.rpm'
install_from_repo $url
else
install_rpm $i
fi
elif [ $OS_TYPE = ${SUPPORTED_OSTYPES_LIST[$FEDORA]} ]; then
if [ $i = 'libproxy-mozjs.i686' ]; then
url='http://dl.fedoraproject.org/pub/fedora/linux/releases/23/Everything/i386/os/Packages/l/libproxy-mozjs-0.4.11-12.fc23.i686.rpm'
install_from_repo $url
elif [ $i = 'mozjs17.i686' ]; then
url='ftp://ftp.ntua.gr/pub/linux/fedora/linux/releases/23/Everything/i386/os/Packages/m/mozjs17-17.0.0-12.fc23.i686.rpm'
install_from_repo $url
elif [ $i = 'libproxy-gnome.i686' ]; then
url='ftp://fr2.rpmfind.net/linux/fedora/linux/releases/23/Everything/i386/os/Packages/l/libproxy-gnome-0.4.11-12.fc23.i686.rpm'
install_from_repo $url
else
echo "yum install $i"
fi
else
install_rpm $i
fi
done
fi
elif [ $isDebBased = 1 ]; then
eval "depListArr=(\${${dependencyListName}[@]})"
eval "depListArrWithVersion=(\${${dependencyListNameWithVersion}[@]})"
tam=${#depListArr[@]}
PKGREQ=""
for ((i=0; i < $tam; i++)); do
depPkgName=${depListArr[i]}
if [ $depPkgName = lib32z1 ]; then
curPkgVar=`dpkg-query -f '${binary:Package}\n' -W | grep -i $depPkgName`
else
curPkgVar=`dpkg-query -f '${binary:Package}\n' -W | grep -i $depPkgName | grep -i "i386\|i686"`
fi
if [ "X$curPkgVar" = "X" ]; then
PKGREQ="$PKGREQ ${depListArrWithVersion[i]}"
fi
done
if [ "X$PKGREQ" != "X" ]; then
for i in `echo $PKGREQ`
do
if [[ ($OS_TYPE = ${SUPPORTED_OSTYPES_LIST[$UBUNTU_14]}) && \
($i = 'libwebkitgtk-1.0-0:i386') ]]; then
ubuntu14_install_webkit
else
install_deb $i
fi
done
fi
echo ""
fi
}
######################################################################################################
# Function to verify if dependencies are installed
# Args : None
# Return : None
#function check_dep ()
#{
function command_line_client_checks()
{
echo "Installed in $INSTALLDIR"
ARCH_DIST=0
RPM_DIST=0
DPKG_DIST=0
# If script is shipped as part of RPM, then
# following will be present.
uname -a | grep -i "i386\|i486\|i586\|i686" >/dev/null
if [ $? -ne 0 ]; then
if [ -e $INSTALLDIR/ConfigurePulse.sh ]; then
if [ -f /etc/arch-release ]; then
ARCH_DIST=1
else
RPM_DIST=1
fi
else
DPKG_DIST=1
fi
if [ $ARCH_DIST -eq 1 ]; then
PKGREQ=""
glibc=`pacman -Q | grep -i glibc`
if [ "X$glibc" = "X" ]; then
echo "glibc is missing in the machine" > $LOG
PKGREQ="glibc"
fi
nss=`pacman -Q | grep -i nss`
if [ "X$nss" = "X" ]; then
echo "nss is missing in the machine" > $LOG
PKGREQ="$PKGREQ nss"
fi
zlib=`pacman -Q | grep -i zlib`
if [ "X$zlib" = "X" ]; then
echo "zlib is missing in the machine" > $LOG
PKGREQ="$PKGREQ zlib"
fi
if [ "X" != "X$PKGREQ" ]; then
sudo -v > /dev/null 2>/dev/null
if [ $? -eq 0 ]; then
echo "sudo password "
sudo pacman -S $PKGREQ
if [ $? -ne 0 ]; then
echo "Failed to install dependencies.Please execute following command manually."
echo " pacman -S $PKGREQ"
fi
else
echo "super user password "
su -c "pacman -S $PKGREQ"
if [ $? -ne 0 ]; then
echo "Failed to install dependencies.Please execute following command manually."
echo " pacman -S $PKGREQ"
fi
fi
fi
elif [ $RPM_DIST -eq 1 ]; then
PKGREQ=""
glibc=`rpm -qa | grep -i glibc | grep -i "i686\|i386"`
if [ "X$glibc" = "X" ]; then
echo "glibc is missing in the machine" > $LOG
PKGREQ="glibc.i686"
fi
nss=`rpm -qa | grep -i nss-softokn-freebl | grep -i "i386\|i686"`
if [ "X$nss" = "X" ]; then
echo "nss is missing in the machine" > $LOG
PKGREQ="$PKGREQ nss.i686"
fi
zlib=`rpm -qa | grep -i zlib | grep -i "i386\|i686"`
if [ "X$zlib" = "X" ]; then
echo "zlib is missing in the machine" > $LOG
PKGREQ="$PKGREQ zlib.i686"
fi
if [ "X" != "X$PKGREQ" ]; then
sudo -v > /dev/null 2>/dev/null
if [ $? -eq 0 ]; then
echo "sudo password "
sudo yum -y install $PKGREQ
if [ $? -ne 0 ]; then
echo "Failed to install dependencies.Please execute following command manually."
echo " yum install $PKGREQ"
fi
else
echo "super user password "
su -c "yum -y install $PKGREQ"
if [ $? -ne 0 ]; then
echo "Failed to install dependencies.Please execute following command manually."
echo " yum install $PKGREQ"
fi
fi
fi
elif [ $DPKG_DIST -eq 1 ]; then
PKGREQ=""
libc=`dpkg-query -f '${binary:Package}\n' -W | grep -i libc6-i386`
if [ "X$libc" = "X" ]; then
PKGREQ="libc6-i386"
fi
zlib=`dpkg-query -f '${binary:Package}\n' -W | grep -i lib32z1`
if [ "X$zlib" = "X" ]; then
PKGREQ="$PKGREQ lib32z1"
fi
if [ "X" != "X$PKGREQ" ]; then
sudo -v > /dev/null 2>/dev/null
if [ $? -eq 0 ]; then
echo "sudo password : "
sudo apt-get install $PKGREQ
if [ $? -ne 0 ]; then
echo "BOBFailed to install dependencies.Please execute following command manually."
echo " apt-get install $PKGREQ"
fi
else
echo "super user password : "
su -c "apt-get install $PKGREQ"
if [ $? -ne 0 ]; then
echo "Failed to install dependencies.Please execute following command manually."
echo " apt-get install $PKGREQ"
fi
fi
fi
fi
fi
#}
#End of function check_dep ()
######################################################################################################
######################################################################################################
# Function to create user specific installation
# Args : None
# Return : None
#function check_install ()
#{
if [ ! -e $INSTALLDIR ]; then
echo "Pulse is not installed. Please check if Pulse is installed properly"
exit 1
fi
# create $HOME/.pulse_secure/pulse/ directory
if [ ! -d $PULSEDIR ]; then
mkdir -p $PULSEDIR
if [ $? -ne 0 ]; then
echo "Setup is not able to create $PULSEDIR. Please check the permission"
exit 2
fi
fi
#}
# End of function check_install ()
######################################################################################################
if [ $NOARGS -eq 0 ]; then
$INSTALLDIR/$SVCNAME -C -H
exit 0
fi
echo "executing command : $INSTALLDIR/$SVCNAME $args"
# -C option added to indicate service is launched from command line - hidden option
#args="-C $args"
# pass the args to pulsesvc binary
$INSTALLDIR/$SVCNAME -C $SCRARGS
}
if [[ "$1" = 'install_dependency_packages' ]] ; then
determine_os_type
check_and_install_missing_dependencies
else
determine_os_type
command_line_client_checks
fi
| true |
2a0824c9a719d39b3f85eba177e2bfe41a773d34
|
Shell
|
tonyblundell/setup-crunchbang
|
/setup-crunchbang.sh
|
UTF-8
| 1,100 | 2.890625 | 3 |
[] |
no_license
|
# Install apps via apt
sudo apt-get install git tmux vim zsh
# Install oh-my-zsh
curl -L https://github.com/robbyrussell/oh-my-zsh/raw/master/tools/install.sh | sh
# Change default shell to ZSH
chsh -s /usr/bin/zsh
# Install ZSH config
cd ~
git clone https://github.com/tonyblundell/.zsh.git
ln -s ~/.zsh/tonyblundell.zsh-theme ~/.oh-my-zsh/themes
rm ~/.zshrc
ln -s ~/.zsh/zshrc ~/.zshrc
# Install terminator config
cd ~
git clone https://github.com/tonyblundell/.terminator.git
mv ~/.config/terminator/config ~/.config/terminator/config.old
ln -s ~/.terminator/config ~/.config/terminator/config
# Install tmux config
git clone https://github.com/tonyblundell/.tmux.git
ln -s .tmux/tmux.conf .tmux.conf
# Install vim config
cd ~
git clone --recursive https://github.com/tonyblundell/.vim.git
ln -s .vim/vimrc .vimrc
# Install Inconsolata font
mkdir ~/.fonts
cd ~/.fonts
wget http://www.levien.com/type/myfonts/Inconsolata.otf
# Friendly advice
echo "Inconsolata font installed, select it in terminal profile settings"
echo "Log out then log back in for default shell setting to take effect"
| true |
5bbcb01ff335f10ebc96b1bbd981aa2c426486ba
|
Shell
|
gaecom/swoole-proxy
|
/gameproxy.sh
|
UTF-8
| 305 | 3.0625 | 3 |
[] |
no_license
|
#!/bin/bash
if [ $1 == "start" ]
then
echo "do start...\n"
php ProxyServer.php &
elif [ $1 == "stop" ]
then
echo "do stop...\n"
ps -ef | grep "gameproxy" | grep -v "grep" | awk '{print $2 }' | xargs kill -9
else
echo "Please make sure the positon variable is start or stop.\n"
fi
| true |
8c9d8b87530fe26a004b87a1650d03f2efaf4296
|
Shell
|
4n6kween/PlistParser
|
/PlistParser.sh
|
UTF-8
| 3,006 | 3 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
echo
echo 'Plist Parser'
echo 'Generating Database...'
mkdir Plists;
find "$@" -name '*.plist' 2>/dev/null | cpio -pdm Plists;
find ./Plists -type f -name "*.plist" -exec plutil -convert xml1 "{}" \;
grep -rs "real>[2-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]" -B 1 Plists > realcc.txt;
gsed -i '
s/,/;/g
/<\/key>/{ N; s/<\/key>.*<real>/,/}
s/plist-.*<key>/plist,/g
s/<\/real>//g
/<\/string/{ N; s/<\/string>.*<real>/,/}
s/plist-.*<string>/plist,/g
s/<\/dict>/dict,/g
/--/d
/addaily.plist/d
s/plist:.*<real>/plist,array,/g
/<integer>/d
/<real>/d
/<array>/d
/<\/array>/d
/dict,/d
/<false\/>/d
/PixelFormatType/d
' realcc.txt ;
grep -rs "integer>1[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]<" -B 1 Plists > integer.txt ;
gsed -i '
s/,/;/g
/<\/key>/{ N; s/<\/key>.*<integer>/,/}
s/plist-.*<key>/plist,/g
/<\/string/{ N; s/<\/string>.*<integer>/,/}
/--/d
s/<\/integer>//g
s/plist-.*<string>/plist,/g
/integer>100/d
s/plist:.*<integer>/plist,array,/g
/<array>/d
/iTunesMetadata.plist/d
/RadioPlayBackHistoryStore/d
/<\/dict>/d
s/plist-.*<integer>/plist,array,/g
' integer.txt ;
grep -rs "real>[1][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]" -B 1 Plists > realeu.txt;
gsed -i '
s/,/;/g
/<\/key>/{ N; s/<\/key>.*<real>/,/}
s/plist-.*<key>/plist,/g
s/<\/real>//g
/<\/string/{ N; s/<\/string>.*<real>/,/}
s/plist-.*<string>/plist,/g
s/<\/dict>/dict,/g
/--/d
/addaily.plist/d
s/plist:.*<real>/plist,array,/g
/<integer>/d
/<real>/d
/<array>/d
/<\/array>/d
/dict,/d
/<false\/>/d
/PixelFormatType/d
' realeu.txt ;
grep -rs "<date>" -B 1 Plists > date.txt;
gsed -i '
s/,/;/g
/<\/key>/{ N; s/<\/key>.*<date>/,/}
/<\/string/{ N; s/<\/string>.*<date>/,/}
s/plist-.*<key>/plist,/g
s/Z<\/date>//g
/--/d
s/plist:.*<date>/plist,array,/g
/<array>/d
s/plist-.*<string>/plist,/g
' date.txt ;
{ echo "directory,activity,timestamp"; cat date.txt;} > dates.txt;
{ echo "directory,activity,timestamp"; cat integer.txt;} > integers.txt;
{ echo "directory,activity,timestamp"; cat realcc.txt;} > realc.txt;
{ echo "directory,activity,timestamp"; cat realeu.txt;} > realu.txt;
rm -r date.txt
rm -r integer.txt
rm -r realcc.txt
rm -r realeu.txt
sqlite3 <<'END_SQL'
.mode csv
.import dates.txt DATE
.import integers.txt INTEGER
.import realc.txt REALC
.import realu.txt REALU
UPDATE REALC
SET timestamp = datetime(timestamp + 978307200, 'unixepoch');
UPDATE REALU
SET timestamp = datetime(timestamp, 'unixepoch');
UPDATE INTEGER
SET timestamp = datetime(timestamp, 'unixepoch');
UPDATE DATE
SET timestamp = replace(timestamp, 'T', ' ');
CREATE TABLE Timestamps(directory Varchar, activity Varchar, timestamp datetime);
INSERT INTO Timestamps SELECT * FROM DATE;
INSERT INTO Timestamps SELECT * FROM INTEGER;
INSERT INTO Timestamps SELECT * FROM REALC;
INSERT INTO Timestamps SELECT * FROM REALU;
.save Timestamps.db
DROP TABLE DATE;
DROP TABLE INTEGER;
DROP TABLE REALC;
DROP TABLE REALU;
.save Timestamps.db
END_SQL
rm -r dates.txt
rm -r integers.txt
rm -r realc.txt
rm -r realu.txt
echo 'Done!'
| true |
74792d78c64b1b9fb8e24b5ab7ca74bced47b08f
|
Shell
|
DimaLife/Bash
|
/leapyear.sh
|
UTF-8
| 221 | 3.078125 | 3 |
[] |
no_license
|
#! /bin/bash
echo "Введите год"
read year
if [[ $year%4 -eq 0 ]] && [[ $year%100 -ne 0 ]] || [[ $year%400 -eq 0 ]]
then
echo "Это високосный год"
else
echo "Это обычный год"
fi
| true |
199397e242d197dda9d6ca89aa38f17a4575ab7c
|
Shell
|
GianGian/SDN-Slicing-in-ComNetsEmu
|
/examples/mininet_demystify/clean.sh
|
UTF-8
| 458 | 2.90625 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
if [ "$EUID" -ne 0 ]; then
echo "*** Please run with sudo."
exit 1
fi
hosts=("h1" "h2" "h3")
hsifaces=("s1-h1" "s1-h2" "s1-h3")
echo "Cleanup all network namespaces, cgroups, veth pairs, qdiscs and OVS bridges."
for i in "${hsifaces[@]}"; do
ip link delete "$i"
done
rmdir /sys/fs/cgroup/cpu,cpuacct/testgroup0
rmdir /sys/fs/cgroup/cpuset/testgroup0
for n in "${hosts[@]}"; do
ip netns delete "$n"
done
ovs-vsctl del-br s1
| true |
1e393e739adb3830c9020b9e14f0b34dbc705b5d
|
Shell
|
sn0wvall/scripts
|
/cmusNotify
|
UTF-8
| 651 | 2.9375 | 3 |
[] |
no_license
|
#!/bin/sh
if ps -C cmus > /dev/null; then
artist=$(cmus-remote -Q |
grep --text '^tag artist' |
sed '/^tag artistsort/d' |
awk '{gsub("tag artist ", "");print}')
title=$(cmus-remote -Q |
grep --text '^tag title' |
sed -e 's/tag title //' |
awk '{gsub("tag title ", "");print}')
duration=$(cmus-remote -Q |
grep --text '^duration' |
sed -e 's/duration //' |
awk '{gsub("tag title ", "");print}')
test -z "$title" && exit 0
test -z "$(cmus-remote -Q | grep status | grep playing)" && symbol="" || symbol=""
dunstify -r 1001 "$symbol $title" " <i>$artist</i> ($(date -d@$duration -u +%M:%S))"
fi
| true |
8a2a2c264b82b515bdf14db6583427c932f2c07a
|
Shell
|
AndersonFCanel/Shell-Bats-CmdsSO
|
/SHELL/FOR.SH
|
UTF-8
| 411 | 3.296875 | 3 |
[] |
no_license
|
#!/bin/bash
echo "Testing the for loop"
for i in {10..0};
do
echo "$i"
done
# Example of using the for loop with the seq command with a step.
echo "Testing the seq command"
for i in $(seq 1 5 100);
do
echo "$i"
done
# Example of using the for loop with the seq command without a step.
echo "Testing the seq command"
for i in $(seq 1 100);
do
echo "$i"
done
| true |
ba055efe9fb8f831053f998038de8827494eae8c
|
Shell
|
antonchen/zshrc
|
/zsh/plugins/sudo.zsh
|
UTF-8
| 419 | 3.015625 | 3 |
[] |
no_license
|
#!/bin/zsh
# File: sudo.zsh
# Author: Anton Chen
# Version: 0.1
# Email: [email protected]
# Last Modified: 2018-10-18 13:38:40
# Insert "sudo" at the start of the command line
sudo-command-line()
{
[[ -z $BUFFER ]] && zle up-history
if [[ $BUFFER == sudo\ * ]]; then
LBUFFER="${LBUFFER#sudo }"
else
LBUFFER="sudo $LBUFFER"
fi
}
zle -N sudo-command-line
# Bind the shortcut to: [Esc] [Esc]
bindkey "\e\e" sudo-command-line
| true |
21562f400d58bcb8c127b09000df0288f4934d74
|
Shell
|
parkerd/dotfiles
|
/git-scripts/git-co
|
UTF-8
| 403 | 3.640625 | 4 |
[] |
no_license
|
#!/bin/bash
set -e
if [ -z "$1" ]; then
echo "usage: $0 <branch>"
else
git stash save --quiet --include-untracked "$(date)"
git checkout $@
branch=$(git rev-parse --abbrev-ref HEAD)
list=$(git stash list | grep "On ${branch}:")
if [ $(git stash list | grep -c "On ${branch}:") -eq 1 ]; then
git stash pop --quiet $(echo $list | cut -d: -f1)
git st
else
echo -n $list
fi
fi
| true |
21970df497c544c2d045625cccbc3f8efacd0fbd
|
Shell
|
projectsku/projects1
|
/projects-vbug.sh
|
UTF-8
| 1,377 | 2.796875 | 3 |
[] |
no_license
|
#!/bin/sh
# data vbug
# code warna
blue='\033[34;1m'
green='\033[32;1m'
purple='\033[35;1m'
cyan='\033[36;1m'
red='\033[31;1m'
white='\033[37;1m'
yellow='\033[33;1m'
# penghapus
clear
# tampilan
figlet virus vbug | lolcat
echo "===============================================" | lolcat
echo "[+] Project By Mr Virus Spm [+]"
echo "===============================================" | lolcat
echo
echo $cyan"pesan: Hubungi Kontak Saya Jika Masih Bingung"
echo "===============================================" | lolcat
echo $green
echo "[1] Install Virus"
echo "[2] Lihat Script"
echo "[3] Tutorial"
echo "===============================================" | lolcat
echo
read -p "pilih: " vbug
if [ $vbug = 1 ]; then
figlet menginstall
git clone https://github.com/Gameye98/vbug
sleep 2
echo "loading" | lolcat
cd vbug
echo "loading" | lolcat
sleep 2
python2 vbug.py
fi
if [ $vbug = 2 ]; then
echo "Salam Mr Virus Spm" | lolcat
echo "===================" | lolcat
echo "pkg update && pkg upgrade"
echo "pkg install figlet"
echo "pkg install python"
echo "pkg install git"
echo "git clone https://github.com/Gameye98/vbug"
echo "cd vbug"
echo "python2 vbug.py"
echo
echo "Gunakan Dengan Bijak" | lolcat
echo
fi
if [ $vbug = 3 ]; then
xdg-open https://www.google.com/amp/s/asalkata.com/cara-membuat-virus-di-termux-100-work/amp/
sh projects-vbug.sh
fi
| true |
002490d83af364751976087066e4c332bff5acbe
|
Shell
|
chelauk/stem_cell_lab
|
/qsub_scripts/naive_overlap.job
|
UTF-8
| 2,249 | 3.328125 | 3 |
[] |
no_license
|
#!/bin/bash -l
# Batch script to run a serial job on Legion with the upgraded
# software stack under SGE.
# 1. Force bash as the executing shell.
#$ -S /bin/bash
# 2. Request 6 hours of wallclock time (format hours:minutes:seconds).
#$ -l h_rt=6:00:0
#$ -l mem=24G
# 4. Request 40 gigabyte of TMPDIR space (default is 10 GB)
#$ -l tmpfs=60G
# 5. Set the name of the job.
#$ -N naive_overlap
# 6 join output streams
#$ -j yes
# 7 export job environment
#$ -V
# ======================
# For narrowPeak files
# ======================
cd "$TMPDIR" || exit
Pooled_narrowPeak_gz=$1
Rep1_narrowPeak_gz=$2
Rep2_narrowPeak_gz=$3
PsRep1_narrowPeak_gz=$4
PsRep2_narrowPeak_gz=$5
prefix=$7
wd=$8
cp "$Pooled_narrowPeak_gz" .
cp "$Rep1_narrowPeak_gz" .
cp "$Rep2_narrowPeak_gz" .
# Find pooled peaks that overlap Rep1 and Rep2 where overlap is defined as the fractional overlap wrt any one of the overlapping peak pairs >= 0.5
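# e.g. a 100 bp pooled peak overlapping a 300 bp replicate peak by 60 bp passes (60/100 >= 0.5)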
intersectBed -wo -a "$Pooled_narrowPeak_gz" -b "$Rep1_narrowPeak_gz" |
awk 'BEGIN{FS="\t";OFS="\t"}{s1=$3-$2; s2=$13-$12; if (($21/s1 >= 0.5) || ($21/s2 >= 0.5)) {print $0}}' | cut -f 1-10 | sort | uniq | \
intersectBed -wo -a stdin -b "$Rep2_narrowPeak_gz" | \
awk 'BEGIN{FS="\t";OFS="\t"}{s1=$3-$2; s2=$13-$12; if (($21/s1 >= 0.5) || ($21/s2 >= 0.5)) {print $0}}' | cut -f 1-10 | sort | uniq > PooledInRep1AndRep2_narrowPeak_gz
# Find pooled peaks that overlap PooledPseudoRep1 and PooledPseudoRep2 where overlap is defined as the fractional overlap wrt any one of the overlapping peak pairs >= 0.5
intersectBed -wo -a "$Pooled_narrowPeak_gz" -b "$PsRep1_narrowPeak_gz" | \
awk 'BEGIN{FS="\t";OFS="\t"}{s1=$3-$2; s2=$13-$12; if (($21/s1 >= 0.5) || ($21/s2 >= 0.5)) {print $0}}' | cut -f 1-10 | sort | uniq | \
intersectBed -wo -a stdin -b "$PsRep2_narrowPeak_gz" | \
awk 'BEGIN{FS="\t";OFS="\t"}{s1=$3-$2; s2=$13-$12; if (($21/s1 >= 0.5) || ($21/s2 >= 0.5)) {print $0}}' | cut -f 1-10 | sort | uniq > PooledInPsRep1AndPsRep2_narrowPeak_gz
# Combine peak lists
# The two peak lists above were written uncompressed, so use cat (not zcat) here.
cat PooledInRep1AndRep2_narrowPeak_gz PooledInPsRep1AndPsRep2_narrowPeak_gz | sort | uniq | awk 'BEGIN{OFS="\t"} {if ($5>1000) $5=1000; print $0}' \
| grep -P 'chr[\dXY]+[ \t]' > finalPeakList.narrowPeak.gz
mv finalPeakList.narrowPeak.gz "$wd"/"$prefix".gz
| true |
5c116cd6969b3e34542483011a8689886837c017
|
Shell
|
manusys64/learn-openstack-scripts
|
/scripts/vmware.sh
|
UTF-8
| 698 | 3.734375 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash -eux
SSH_USERNAME=${SSH_USERNAME:-vagrant}
function install_open_vm_tools {
echo "==> Installing Open VM Tools"
# Install open-vm-tools so we can mount shared folders
apt-get install -y open-vm-tools
# Add /mnt/hgfs so the mount works automatically with Vagrant
mkdir /mnt/hgfs
}
if [[ $PACKER_BUILDER_TYPE =~ vmware ]]; then
KERNEL_VERSION=$(uname -r | cut -d. -f1-2)
echo "==> Kernel version ${KERNEL_VERSION}"
MAJOR_VERSION=$(echo ${KERNEL_VERSION} | cut -d '.' -f1)
MINOR_VERSION=$(echo ${KERNEL_VERSION} | cut -d '.' -f2)
# open-vm-tools supports shared folders on kernel 4.1 or greater
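# Sketch of the version gate the comment above implies (assumption: warn
# rather than abort on kernels older than 4.1).
if [ "$MAJOR_VERSION" -lt 4 ] || { [ "$MAJOR_VERSION" -eq 4 ] && [ "$MINOR_VERSION" -lt 1 ]; }; then
echo "==> Kernel < 4.1: open-vm-tools shared folders may not be available"
fi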
. /etc/lsb-release
install_open_vm_tools
fi
| true |
e53f0730b88b1c3d33f05bae946a31253a2704c5
|
Shell
|
nevesnunes/env
|
/common/code/snippets/sysadmin/ocr.sh
|
UTF-8
| 850 | 3.828125 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Copyright Alexander Jerneck 2014
# Licensed under the MIT license (http://opensource.org/licenses/MIT)
# Script to batch ocr pdfs, by first converting them to tifs.
echo "usage: ocr PATTERN where PATTERN is a glob matching pdf files to be converted."
echo "example: ./ocr file-20*.pdf"
for x in "$@"; do
echo "separating pages for $x"
pdfseparate "$x" .tmp-%d.pdf
for f in $(find . -maxdepth 1 -name '.tmp-*.pdf' | sort -n -t - -k 2); do
echo "converting $f to $f.tif ..."
convert -colorspace Gray -normalize -density 300 -depth 8 -resample 200x200 -background white -flatten +matte "$f" "$f.tif"
tesseract "$f.tif" "$f.txt"
cat "$f.txt.txt" >> "$x.txt"
rm "$f.tif"
rm "$f.txt.txt"
done
echo "cleaning up..."
rm .tmp-*.pdf
echo "text output saved to $x.txt"
done
| true |
6fd3aec7e78221a3e29041031a7c80e3bf771acc
|
Shell
|
aom/test-dockerfile-with-timing
|
/docker-timing
|
UTF-8
| 221 | 3.421875 | 3 |
[] |
no_license
|
#!/usr/bin/env sh
set -e
file=/docker-timing.log
if [ -f $file ]; then
echo "$(($(date +%s)-$(head -n 1 $file))) ${@:-stage}" >> $file
echo "Seconds since last stage: $(tail -n 1 $file)"
else
date +%s > $file
fi
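# Example Dockerfile usage (hypothetical):
#   COPY docker-timing /docker-timing
#   RUN /docker-timing                                # first call records the start time
#   RUN apt-get update && /docker-timing "apt stage"  # later calls report elapsed seconds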
| true |
0c69186223135c6b4d5aae063258b0b1da59bfcb
|
Shell
|
eunjung31/sox_script
|
/change_sr.sh
|
UTF-8
| 303 | 3.515625 | 4 |
[] |
no_license
|
#!/bin/bash
# This script converts the sampling rate of multiple files in a folder tree.
shopt -s globstar # needed so that **/*.wav actually recurses in bash
outputdir="../converted"
mkdir -p $outputdir
for file in **/*.wav
do
filename=$(basename $file)
outputfile="${outputdir}/${file}"
mkdir -p $(dirname $outputfile)
sox $file -r 8k $outputfile
done
| true |
65e6b527e52cb271b5840997b3adda1e3421b33c
|
Shell
|
znh1992/Harris_Kovacs_Londo
|
/General/free_energy_calculations_v2.sh
|
UTF-8
| 950 | 3.640625 | 4 |
[] |
no_license
|
#!/bin/bash
##############################################################
# Script takes a list of gene IDs and a FASTA file containing those gene IDs to calculate MFE.
# Secondary structure information is thrown away (except the images).
# Sometimes the script only outputs the gene name. If so, `${val[0]}` needs amending.
##############################################################
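# Usage sketch (assumed file names; fill in the two exports below before running):
#   LIST=gene_ids.txt RefSeqs=reference.fa ./free_energy_calculations_v2.sh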
export LIST=
export RefSeqs=
if [ ! -d Images ]; then
mkdir Images
fi
#Generate Seq Files
while read line; do
samtools faidx $RefSeqs "$line" > $line.fa
RNAfold --noLP < $line.fa > $line.calc
cut -f2,3,4 -d" " $line.calc | sed '/TR/d' | sed '/UA/d' | sed '/UU/d' | sed '/UG/d' | tr -d '[]' | tr -d '()' | tr -d '{' | cut -f1 -d"d"> $line.MFE
readarray val < $line.MFE
echo -e $line '\t' ${val[0]}> $line.fe
rm $line.fa
rm $line.calc
rm $line.MFE
mv *.ps Images/
done < $LIST
cat *.fe | sed 's/ //g' > FREE_ENERGIES.out
rm *.fe
| true |
ab72149638b72456d36e8832d46c580bad317f69
|
Shell
|
learningdynamics/exam
|
/src/run_part1.sh
|
UTF-8
| 118 | 2.5625 | 3 |
[] |
no_license
|
#! /bin/bash
NUM=5
N=2
for i in $(seq 1 $NUM) ; do
python3 part1.py -n $N --save "part1-${N}-${i}.pickle"
done
| true |
d5212fa9471b8d10463ee4de7d3438146b1e8332
|
Shell
|
fleger/fleger_pkgbuilds
|
/firefox-oxygen-kde/PKGBUILD
|
UTF-8
| 970 | 3.046875 | 3 |
[] |
no_license
|
# Maintainer: Florian Léger <florian6 dot leger at laposte dot net>
pkgname=firefox-oxygen-kde
pkgver=4.0_b1
pkgrel=1
pkgdesc="Complete add-on/theme bringing the Oxygen style for Firefox."
arch=(any)
depends=("firefox>=8.0")
url="http://oxygenkde.altervista.org/index.html"
license=('GPL')
source=("http://oxygenkde.altervista.org/download/OxygenKDE_${pkgver//./_}.xpi")
md5sums=('d8ae3d757ef345ddf8747c186f048f14')
package() {
local f
local emid
local dstdir
cd "${srcdir}"
for f in "oxykdeopt" "oxykdetheme"; do
mkdir -p "${f}"
bsdtar -xf "${f}.xpi" -C "${f}"
emid="$(sed -n -e '/<\?em:id>\?/!d; s/.*\([\"{].*[}\"]\).*/\1/; s/\"//g; p; q' "${f}/install.rdf")"
#sed -i 's#<em:maxVersion>.*</em:maxVersion>#<em:maxVersion>18.*</em:maxVersion>#' "${f}/install.rdf"
dstdir="${pkgdir}/usr/lib/firefox/extensions/${emid}"
install -d "${dstdir}"
pushd "${f}"
find . -type f -exec install -Dm644 '{}' "${dstdir}/{}" \;
popd
done
}
| true |
79c82df1e9a4a213b65d15a834d8768834367831
|
Shell
|
aeikum/FAudio
|
/cpp/scripts/cross_compile_64
|
UTF-8
| 398 | 3.1875 | 3 |
[
"Zlib",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# set this to the path where you installed the mingw-w64 (x86_64) build of SDL
SDL_CROSS_PATH=/usr
MINGW=x86_64-w64-mingw32
if [ ! -f $SDL_CROSS_PATH/$MINGW/bin/sdl2-config ]; then
echo "Please check your mingw32 SDL installation"
exit -1
fi
export PATH=$SDL_CROSS_PATH/$MINGW/bin:$PATH
export CC=${MINGW}-gcc
export CXX=${MINGW}-g++
export AR=${MINGW}-ar
export DLLTOOL=${MINGW}-dlltool
| true |