blob_id (stringlengths 40-40) | language (stringclasses, 1 value) | repo_name (stringlengths 4-115) | path (stringlengths 2-970) | src_encoding (stringclasses, 28 values) | length_bytes (int64, 31-5.38M) | score (float64, 2.52-5.28) | int_score (int64, 3-5) | detected_licenses (listlengths 0-161) | license_type (stringclasses, 2 values) | text (stringlengths 31-5.39M) | download_success (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---
1156ad31e9bb99b279a17a071151bafb9d8bde9e
|
Shell
|
riddik14/mattiols_hassio_repository
|
/bticino_X8000_v2/run.sh
|
UTF-8
| 1,534 | 3.28125 | 3 |
[] |
no_license
|
#!/usr/bin/env bashio
CLIENT_ID=$(bashio::config 'client_id')
CLIENT_SECRET=$(bashio::config 'client_secret')
SUBSCRIPTION_KEY=$(bashio::config 'subscription_key')
DOMAIN=$(bashio::config 'domain')
HAIP=$(bashio::config 'haip')
MQTT_BROKER=$(bashio::config 'mqtt_broker')
MQTT_PORT=$(bashio::config 'mqtt_port')
MQTT_USER=$(bashio::config 'mqtt_user')
MQTT_PASS=$(bashio::config 'mqtt_pass')
JSON_FILE="/config/.bticino_smarter/smarter.json"
API_PID=()
#Check smarter file
if [ -s "$JSON_FILE" ]
then
bashio::log.info "Smarter file already exist and contain some data."
else
bashio::log.info "Init Smarter file ..."
mkdir -p /config/.bticino_smarter/
cp config/smarter.json /config/.bticino_smarter/smarter.json
fi
bashio::log.info "Setup config file..."
# Setup config
cat << EOF > config/config.yml
api_config:
client_id: ${CLIENT_ID}
client_secret: <bticino>${CLIENT_SECRET}<bticino>
subscription_key: ${SUBSCRIPTION_KEY}
domain: ${DOMAIN}
haip: ${HAIP}
c2c_enable: true
EOF
cat << EOF > config/mqtt_config.yml
mqtt_config:
mqtt_broker: ${MQTT_BROKER}
mqtt_port: ${MQTT_PORT}
mqtt_user: ${MQTT_USER}
mqtt_pass: ${MQTT_PASS}
EOF
# Start API
python3 bticino.py & > /dev/null
API_PID+=($!)
# Start MQTT
sleep 3
python3 mqtt.py & > /dev/null
API_PID+=($!)
function stop_api() {
bashio::log.info "Kill Processes..."
kill -15 "${API_PID[@]}"
wait "${API_PID[@]}"
bashio::log.info "Done."
}
trap "stop_api" SIGTERM SIGHUP
# Wait until all is done
wait "${API_PID[@]}"
| true |
904700debff03dbdddb8159e830485e3e5114bbe
|
Shell
|
mosbth/env
|
/hosts/stekon/asus_router/jffs/notify_router.sh
|
UTF-8
| 181 | 2.625 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/sh
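# Look up the current public IP, report it to the DDNS update URL, and log the response to syslog.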
domain="stekon.mikaelroos.se"
ip=`wget -qO- ipecho.net/plain`
url="https://mikelroos.se/ddns/$domain/$ip"
#echo $url
cmd=`wget -qO- "$url"`
logger "ddns status: $cmd $ip"
| true |
dcabe67169800503c7d02251231ce31a5288f9e1
|
Shell
|
jyhsia5174/CIKM-2019-EXP
|
/preprocess/save-biased-imputation-model/grid.sh
|
UTF-8
| 347 | 2.71875 | 3 |
[] |
no_license
|
#!/bin/bash
k=32
t=100
r=0
w=0
wn=1
ns='--ns'
c=5
tr='tr.100.remap'
te='va.1.remap'
item='item'
logs_pth='logs'
mkdir -p $logs_pth
task(){
for l in 1 4 16
do
echo "./train -k $k -l $l -t ${t} -r $r -w $w -wn $wn $ns -c ${c} -p ${te} ${item} ${tr} > $logs_pth/${tr}.$l.$r.$w.$wn.$k"
done
}
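# The first call prints the generated training commands for reference; the second pipes them to xargs to run up to 5 in parallel in the background.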
task
task | xargs -d '\n' -P 5 -I {} sh -c {} &
| true |
d386ccfbc458c705652ccc369bc8dcb9bccb6f4d
|
Shell
|
epicfaace/beam
|
/sdks/go/test/run_integration_tests.sh
|
UTF-8
| 6,842 | 3.546875 | 4 |
[
"BSD-3-Clause",
"MIT",
"Python-2.0",
"LicenseRef-scancode-protobuf",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script will be run by Jenkins as a post commit test. In order to run
# locally use the following flags:
#
# --gcs_location -> Temporary location to use for service tests.
# --project -> Project name to use for docker images.
# --dataflow_project -> Project name to use for dataflow.
#
# Execute from the root of the repository. It assumes binaries are built.
set -e
set -v
RUNNER=dataflow
# Where to store integration test outputs.
GCS_LOCATION=gs://temp-storage-for-end-to-end-tests
# Project for the container and integration test
PROJECT=apache-beam-testing
DATAFLOW_PROJECT=apache-beam-testing
REGION=us-central1
# Number of tests to run in parallel
PARALLEL=10
while [[ $# -gt 0 ]]
do
key="$1"
case $key in
--runner)
RUNNER="$2"
shift # past argument
shift # past value
;;
--project)
PROJECT="$2"
shift # past argument
shift # past value
;;
--region)
REGION="$2"
shift # past argument
shift # past value
;;
--dataflow_project)
DATAFLOW_PROJECT="$2"
shift # past argument
shift # past value
;;
--gcs_location)
GCS_LOCATION="$2"
shift # past argument
shift # past value
;;
--dataflow_worker_jar)
DATAFLOW_WORKER_JAR="$2"
shift # past argument
shift # past value
;;
--flink_job_server_jar)
FLINK_JOB_SERVER_JAR="$2"
shift # past argument
shift # past value
;;
--spark_job_server_jar)
SPARK_JOB_SERVER_JAR="$2"
shift # past argument
shift # past value
;;
--endpoint)
ENDPOINT="$2"
shift # past argument
shift # past value
;;
--parallel)
PARALLEL="$2"
shift # past argument
shift # past value
;;
*) # unknown option
echo "Unknown option: $1"
exit 1
;;
esac
done
if [[ "$RUNNER" == "universal" ]]; then
PUSH_CONTAINER_TO_GCR=''
else
PUSH_CONTAINER_TO_GCR='yes'
fi
# Go to the root of the repository
cd $(git rev-parse --show-toplevel)
# Verify in the root of the repository
test -d sdks/go/test
# Verify docker and gcloud commands exist
command -v docker
docker -v
if [[ "$PUSH_CONTAINER_TO_GCR" == "yes" ]]; then
command -v gcloud
gcloud --version
# ensure gcloud is version 186 or above
TMPDIR=$(mktemp -d)
gcloud_ver=$(gcloud -v | head -1 | awk '{print $4}')
if [[ "$gcloud_ver" < "186" ]]
then
pushd $TMPDIR
curl https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-186.0.0-linux-x86_64.tar.gz --output gcloud.tar.gz
tar xf gcloud.tar.gz
./google-cloud-sdk/install.sh --quiet
. ./google-cloud-sdk/path.bash.inc
popd
gcloud components update --quiet || echo 'gcloud components update failed'
gcloud -v
fi
# Build the container
TAG=$(date +%Y%m%d-%H%M%S)
CONTAINER=us.gcr.io/$PROJECT/$USER/beam_go_sdk
echo "Using container $CONTAINER"
./gradlew :sdks:go:container:docker -Pdocker-repository-root=us.gcr.io/$PROJECT/$USER -Pdocker-tag=$TAG
# Verify it exists
docker images | grep $TAG
# Push the container
gcloud docker -- push $CONTAINER
else
TAG=dev
./gradlew :sdks:go:container:docker -Pdocker-tag=$TAG
CONTAINER=apache/beam_go_sdk
fi
if [[ "$RUNNER" == "dataflow" ]]; then
if [[ -z "$DATAFLOW_WORKER_JAR" ]]; then
DATAFLOW_WORKER_JAR=$(find ./runners/google-cloud-dataflow-java/worker/build/libs/beam-runners-google-cloud-dataflow-java-fn-api-worker-*.jar)
fi
echo "Using Dataflow worker jar: $DATAFLOW_WORKER_JAR"
elif [[ "$RUNNER" == "flink" || "$RUNNER" == "spark" || "$RUNNER" == "universal" ]]; then
if [[ -z "$ENDPOINT" ]]; then
# Hacky python script to find a free port. Note there is a small chance the chosen port could
# get taken before being claimed by the job server.
SOCKET_SCRIPT="
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('localhost', 0))
print(s.getsockname()[1])
s.close()
"
JOB_PORT=$(python -c "$SOCKET_SCRIPT")
ENDPOINT="localhost:$JOB_PORT"
echo "No endpoint specified; starting a new $RUNNER job server on $ENDPOINT"
if [[ "$RUNNER" == "flink" ]]; then
java \
-jar $FLINK_JOB_SERVER_JAR \
--flink-master [local] \
--job-port $JOB_PORT \
--artifact-port 0 &
elif [[ "$RUNNER" == "spark" ]]; then
java \
-jar $SPARK_JOB_SERVER_JAR \
--spark-master-url local \
--job-port $JOB_PORT \
--artifact-port 0 &
elif [[ "$RUNNER" == "universal" ]]; then
python \
-m apache_beam.runners.portability.local_job_service_main \
--port $JOB_PORT &
else
echo "Unknown runner: $RUNNER"
exit 1;
fi
fi
fi
echo ">>> RUNNING $RUNNER INTEGRATION TESTS"
./sdks/go/build/bin/integration \
--runner=$RUNNER \
--project=$DATAFLOW_PROJECT \
--region=$REGION \
--environment_type=DOCKER \
--environment_config=$CONTAINER:$TAG \
--staging_location=$GCS_LOCATION/staging-validatesrunner-test \
--temp_location=$GCS_LOCATION/temp-validatesrunner-test \
--worker_binary=./sdks/go/test/build/bin/linux-amd64/worker \
--dataflow_worker_jar=$DATAFLOW_WORKER_JAR \
--endpoint=$ENDPOINT \
--parallel=$PARALLEL \
|| TEST_EXIT_CODE=$? # don't fail fast here; clean up environment before exiting
if [[ ! -z "$JOB_PORT" ]]; then
# Shut down the job server
kill %1 || echo "Failed to shut down job server"
fi
if [[ "$PUSH_CONTAINER_TO_GCR" = 'yes' ]]; then
# Delete the container locally and remotely
docker rmi $CONTAINER:$TAG || echo "Failed to remove container"
gcloud --quiet container images delete $CONTAINER:$TAG || echo "Failed to delete container"
# Clean up tempdir
rm -rf $TMPDIR
fi
if [[ "$TEST_EXIT_CODE" -eq 0 ]]; then
echo ">>> SUCCESS"
else
echo ">>> FAILURE"
fi
exit $TEST_EXIT_CODE
| true |
c0f326a0fc742303e613c0c77e5e5ed89d3ea73d
|
Shell
|
cwt1/scripts-1
|
/projFocus/ceRNA/runs/runResultGslist.sh
|
UTF-8
| 4,925 | 2.578125 | 3 |
[] |
no_license
|
#!/bin/bash
#! -cwd
#By: J.He
#Desp.: all genes, and samples for Mar2014 run of ceRNA project
#TODO:
gslistDir=/ifs/data/c2b2/ac_lab/jh3283/projFocus/result/03102014/gslist
source /ifs/home/c2b2/ac_lab/jh3283/scripts/projFocus/ceRNA/geneUtilsRuns.sh
### after running all of step 1 and getting all matrices ready, run this script
tglist=$gslistDir/CG_combine_02172014.list
# cnvMat=/ifs/data/c2b2/ac_lab/jh3283/projFocus/result/03102014/cnv/brca_cnv_l3_tumor_Mar-23-2014.matrix.uniq.matrix
# tgcnv=$gslistDir/tgcnv.temp
# $PYTHON $srcDir/processData/extractTargetMatFromAllMat.py -i $tglist -d $cnvMat -o $tgcnv -t uniq
# expMat=/ifs/data/c2b2/ac_lab/jh3283/projFocus/result/03102014/exp/brca_exp_l3_tumor_Mar-21-2014.matrix_Mar-23-2014.voomNormed.matrix
# tgexp=$gslistDir/tgexp.temp
# $PYTHON $srcDir/processData/extractTargetMatFromAllMat.py -i $tglist -d $expMat -o $tgexp -t uniq
# somMatp=/ifs/data/c2b2/ac_lab/jh3283/projFocus/data/03102014/tcgal2som/genome.wustl.edu__Illumina_All.maf.matrix.promoter2k.Mar-20-2014.matrix
# somMat3=/ifs/data/c2b2/ac_lab/jh3283/projFocus/data/03102014/tcgal2som/genome.wustl.edu__Illumina_All.maf.matrix.utr3p.Mar-20-2014.matrix
# somMat5=/ifs/data/c2b2/ac_lab/jh3283/projFocus/data/03102014/tcgal2som/genome.wustl.edu__Illumina_All.maf.matrix.utr5p.Mar-20-2014.matrix
tgsomp=$gslistDir/tgsomp.temp
tgsom3=$gslistDir/tgsom3.temp
tgsom5=$gslistDir/tgsom5.temp
tgsom=$gslistDir/tgsom.temp
# $PYTHON $srcDir/processData/extractTargetMatFromAllMat.py -i $tglist -d $somMatp -o $tgsomp
# # $PYTHON $srcDir/processData/extractTargetMatFromAllMat.py -i $tglist -d $somMat3 -o $tgsom3
# $PYTHON $srcDir/processData/extractTargetMatFromAllMat.py -i $tglist -d $somMat5 -o $tgsom5
#### awk 'NR>1&&FNR!=1' $gslistDir/tgsom*.temp |~/bin/sortxh > $tgsom
# $PYTHON /ifs/home/c2b2/ac_lab/jh3283/scripts/projFocus/ceRNA/processData/combineSomMatrix.py -p $tgsomp -t $tgsom3 -f $tgsom5 -o ${tgsom}_${CDT}.uniq
methdiffMat=/ifs/data/c2b2/ac_lab/jh3283/projFocus/result/03102014/meth/brca_meth_l3_tumor_Mar-24-2014.matrix_diffMeth.matrix
tgmeth=$gslistDir/tgmeth.temp
# $PYTHON $srcDir/processData/extractTargetMatFromAllMat.py -i $tglist -d $methdiffMat -o $tgmeth -t uniq
barcodelist=$gslistDir/barcode_tumor_Mar-21-2014.list
genelist=$gslistDir/CG_target_Mar-23-2014.list
tgcnv=$gslistDir/tgcnv.temp
tgmeth=$gslistDir/tgmeth.temp
tgsom=$gslistDir/tgsom.temp_Mar-24-2014.uniq
out=$gslistDir/gslist_${CDT}
# $PYTHON /ifs/home/c2b2/ac_lab/jh3283/scripts/projFocus/ceRNA/processData/getGslistGint.py -b $barcodelist -g $genelist -c $tgcnv -m $tgmeth -s $tgsom -o $out
# awk -F"\t|;" 'NF>10{print $0 }' $gslistDir/gslist_Mar-24-2014_CnvMethSomFree > $gslistDir/gslist_Mar-24-2014_CnvMethSomFree.10smapMore
tumor=/ifs/data/c2b2/ac_lab/jh3283/projFocus/result/03102014/exp/brca_exp_l3_tumor_Mar-21-2014.matrix
normal=/ifs/data/c2b2/ac_lab/jh3283/projFocus/result/03102014/exp/brca_exp_l3_normal_Mar-21-2014.matrix
###-----------CNV meth free
gslist=$gslistDir/gslist_Mar-24-2014_CnvMethFree.10more
awk -F"\t|;" 'NF>10{print $0 }' $gslistDir/gslist_Mar-24-2014_CnvMethFree > $gslist
out=$gslist.${CDT}
$RSCRIPT /ifs/home/c2b2/ac_lab/jh3283/scripts/projFocus/ceRNA/processData/step1-2_getDEgeneSample.r --tumor $tumor --normal $normal --gslist $gslist --out $out
gslist=$gslistDir/gslist_Mar-24-2014_CnvMethFree.10smapMore.deg_20140325.txt.10more
awk -F"\t|;" 'NF>10{print $0 }' $gslistDir/gslist_Mar-24-2014_CnvMethFree.10more.deg_20140325.txt > $gslist
cernet=/ifs/data/c2b2/ac_lab/jh3283/projFocus/other/brca_ceRNA_network.txt
$PYTHON /ifs/home/c2b2/ac_lab/jh3283/scripts/projFocus/ceRNA/processData/genStat4Gslist.py -i $gslist -c $cernet -o ${gslist}_stat
## RUN /ifs/home/c2b2/ac_lab/jh3283/scripts/projFocus/ceRNA/plot/plotStatsGslist.r on MacOS(using latex)
## report dir output = "/Volumes//ifs/data/c2b2/ac_lab/jh3283/projFocus/report/Mar2014/fig/gslit_gint_withSom_Mar-25-2014_stats_"
###------------- Som Meth Free
gslist=$gslistDir/gslist_Mar-24-2014_CnvMethSomFree.10smapMore
out=${gslist}.${CDT}
$RSCRIPT /ifs/home/c2b2/ac_lab/jh3283/scripts/projFocus/ceRNA/processData/step1-2_getDEgeneSample.r --tumor $tumor --normal $normal --gslist $gslist --out $out
awk -F"\t|;" 'NF>10{print $0 }' $gslistDir/gslist_Mar-24-2014_CnvMethSomFree.10smapMore.deg_20140325.txt > $gslistDir/gslist_Mar-24-2014_CnvMethSomFree.10smapMore.deg_20140325.txt.10more
gslist=$gslistDir/gslist_Mar-24-2014_CnvMethSomFree.10smapMore.deg_20140325.txt.10more
cernet=/ifs/data/c2b2/ac_lab/jh3283/projFocus/other/brca_ceRNA_network.txt
$PYTHON /ifs/home/c2b2/ac_lab/jh3283/scripts/projFocus/ceRNA/processData/genStat4Gslist.py -i $gslist -c $cernet -o ${gslist}_stat
##---viz
## RUN /ifs/home/c2b2/ac_lab/jh3283/scripts/projFocus/ceRNA/plot/plotStatsGslist.r on MacOS(using latex)
## report dir output = "/Volumes//ifs/data/c2b2/ac_lab/jh3283/projFocus/report/Mar2014/fig/gslit_gint_Mar-25-2014_stats_"
| true |
ee808eba1374fbf27b5207a695d088cb1687500a
|
Shell
|
henrypbriffel/virtual-environments
|
/images/linux/scripts/installers/vercel.sh
|
UTF-8
| 856 | 4.03125 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
################################################################################
## File: vercel.sh
## Desc: Installs the Vercel CLI
################################################################################
# Source the helpers for use with the script
source $HELPER_SCRIPTS/document.sh
# Install the Vercel CLI
npm i -g vercel
# Validate the installation
echo "Validate the installation"
if ! command -v vercel; then
echo "Vercel CLI was not installed"
exit 1
fi
echo "Creating the symlink for [now] command to vercel CLI"
ln -s /usr/local/bin/vercel /usr/local/bin/now
echo "Validate the link"
if ! command -v now; then
echo "[Now] symlink to Vercel CLI was not created"
exit 1
fi
# Document the installed version
echo "Document the installed version"
DocumentInstalledItem "Vercel CLI ($(vercel --version))"
| true |
b2bcddf67ab1e06918b9db061f6bdc97a067b0a1
|
Shell
|
Earlz/narm
|
/tests/build.sh
|
UTF-8
| 349 | 3.328125 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
mkdir -p bin/temp
for f in *.s
do
echo "Assembling $f file..."
# take action on each file; $f stores the current file name
OBJECTFILE="bin/temp/$(basename "$f" .s).o"
FINALFILE="bin/$(basename "$f" .s)"
arm-none-eabi-as -march=armv6-m -o $OBJECTFILE $f
arm-none-eabi-ld -T link.ld -o $FINALFILE $OBJECTFILE
done
rm -rf bin/temp
| true |
17fb575dd92f83d2da20f30e29673df0f0d9642c
|
Shell
|
jandredias/IAED1314
|
/proj1/tests.sh
|
UTF-8
| 1,156 | 2.8125 | 3 |
[] |
no_license
|
#!/bin/bash
# Script teste
echo "***************************"
echo "***** BEGIN OF SCRIPT *****"
echo "***************************"
echo "Downloading..."
git clone [email protected]:pcdiasmega/IAED-proj1.git
echo "Extracting..."
mv IAED-proj1/* ./
echo "Deleting files..."
rm -rf IAED-proj1
echo "Compiling..."
gcc -ansi -Wall -pedantic -o proj1 proj.c
echo "Running Test 01..."
./proj1 <test01.in> test01.myout
diff test01.out test01.myout
echo "Running Test 02..."
./proj1 <test02.in> test02.myout
diff test02.out test02.myout
echo "Running Test 03..."
./proj1 <test03.in> test03.myout
diff test03.out test03.myout
echo "Running Test 04..."
./proj1 <test04.in> test04.myout
diff test04.out test04.myout
echo "Running Test 05..."
./proj1 <test05.in> test05.myout
diff test05.out test05.myout
echo "Running Test 06..."
./proj1 <test06.in> test06.myout
diff test06.out test06.myout
echo "Running Test 07..."
./proj1 <test07.in> test07.myout
diff test07.out test07.myout
echo "Running Test 08..."
./proj1 <test08.in> test08.myout
diff test08.out test08.myout
echo "*************************"
echo "***** END OF SCRIPT *****"
echo "*************************"
| true |
0e113c39a5d69eda537652f0eaf2fecb1e652a0b
|
Shell
|
heyuan7676/ts_eQTLs
|
/Extended_Methods/input_dataset/step2.1_aggreate_filterGenes_noMAF.sh
|
UTF-8
| 647 | 2.75 | 3 |
[
"CC-BY-4.0"
] |
permissive
|
#!/bin/bash
#SBATCH --time 2:00:00
#SBATCH --no-requeue
#SBATCH --nodes=1
#SBATCH --ntasks=12
#SBATCH -p debug
idsDir=/work-zfs/abattle4/heyuan/tissue_spec_eQTL_v8/datasets/cbset_datasets/caviar_output_GTEx_LD/aggregate
rm -f ${idsDir}/v8_cbset_95_allPairs.txt
rm -f ${idsDir}/temp
rm -f ${idsDir}/temp_sorted
for f in `ls ${idsDir}/*_95set_pairs.txt`
do
awk '{print $1,$2}' ${f} | sed 's/ / /g' >> ${idsDir}/temp
done
cat ${idsDir}/temp | sort > ${idsDir}/temp_sorted
echo "Gene SNP" > ${idsDir}/v8_cbset_95_allPairs.txt
cat ${idsDir}/temp_sorted | uniq >> ${idsDir}/v8_cbset_95_allPairs.txt
module load python/2.7
python filter_genes.py
| true |
328f5a355a6a27efdd106f42559016c2b29e3271
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/python2-translate-toolkit-lib/PKGBUILD
|
UTF-8
| 1,179 | 2.671875 | 3 |
[] |
no_license
|
# Maintainer: Rafael Fontenelle <[email protected]>
pkgname=python2-translate-toolkit-lib
_name=translate-toolkit
pkgver=2.2.4
pkgrel=1
pkgdesc="A toolkit to convert between various different translation formats, help process and validate localisations"
arch=('any')
url="http://toolkit.translatehouse.org/"
license=('GPL')
depends=('bash' 'python2-lxml' 'python2-six' 'python2-diff-match-patch')
makedepends=('python2-setuptools')
optdepends=('python2-iniparse: for ini2po'
'gaupol: for po2sub')
source=("https://github.com/translate/translate/releases/download/$pkgver/$_name-${pkgver}.tar.gz")
md5sums=('4ff21c47bf16e0855b913246ae397a58')
prepare() {
cd $_name-$pkgver
sed -i -e "s|#![ ]*/usr/bin/python$|#!/usr/bin/python2|" \
-e "s|#![ ]*/usr/bin/env python$|#!/usr/bin/env python2|" \
$(find . -name '*.py')
}
build() {
cd $_name-$pkgver
python2 -s setup.py build
}
package() {
cd $_name-$pkgver
python2 -s setup.py install --prefix=/usr --root="$pkgdir" --optimize=1 --skip-build
python2 -m compileall "$pkgdir/usr/lib/python2.7/site-packages/translate"
# avoid conflict with pkg translate-toolkit
rm -rf "$pkgdir/usr/bin"
}
| true |
dc1f55a1ad302e80ab1aeec35708c3c769d52837
|
Shell
|
shisa/shisa-netbsd
|
/src/etc/rc.d/poffd
|
UTF-8
| 269 | 2.53125 | 3 |
[] |
no_license
|
#!/bin/sh
#
# $NetBSD: poffd,v 1.2 2004/08/13 18:08:03 mycroft Exp $
#
# PROVIDE: poffd
# REQUIRE: DAEMON
$_rc_subr_loaded . /etc/rc.subr
name="poffd"
rcvar=$name
command="/usr/sbin/${name}"
start_precmd="test -c /dev/pow0"
load_rc_config $name
run_rc_command "$1"
| true |
eff13ce0a674283e39b963464e1a54048d0b3fd7
|
Shell
|
MQuaresma/EfficientIO-Analysis-via-Tracing
|
/dtrace_bench.sh
|
UTF-8
| 105 | 2.65625 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
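# Run the iozone benchmark (sequential write, sequential read and random I/O tests) 15 times, discarding its output.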
NUM=0
while [ $NUM -lt 15 ]; do
iozone -i0 -i1 -i2 >> /dev/null
let NUM=$NUM+1
done
| true |
4e06fbbc20b34978e7eda451a1ac1c4244ddc2e5
|
Shell
|
jiangyangbo/kaldi-gop
|
/egs/gop-compute/local/compute-gmm-gop.sh
|
UTF-8
| 2,664 | 3.4375 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright 2016 Author: Junbo Zhang
# Apache 2.0
# Computes GOP.
# Begin configuration section.
nj=1
cmd=run.pl
# Begin configuration.
# End configuration options.
echo "$0 $@" # Print the command line for logging
[ -f path.sh ] && . ./path.sh # source the path.
. parse_options.sh || exit 1;
if [ $# != 5 ]; then
echo "usage: local/compute-gmm-gop.sh <data-dir> <lang-dir> <src-dir> <ali-dir> <gop-dir>"
echo "e.g.: local/compute-gmm-gop.sh data/train data/lang exp/tri1 exp/tri1_ali exp/tri1_gop"
echo "main options (for others, see top of script file)"
echo " --config <config-file> # config containing options"
echo " --nj <nj> # number of parallel jobs"
echo " --cmd (utils/run.pl|utils/queue.pl <queue opts>) # how to run jobs."
exit 1;
fi
data=$1
lang=$2
srcdir=$3
alidir=$4
dir=$5
for f in $data/text $lang/oov.int $srcdir/tree $srcdir/final.mdl; do
[ ! -f $f ] && echo "$0: expected file $f to exist" && exit 1;
done
oov=`cat $lang/oov.int` || exit 1;
mkdir -p $dir/log
echo $nj > $dir/num_jobs
sdata=$data/split$nj
splice_opts=`cat $srcdir/splice_opts 2>/dev/null` # frame-splicing options.
cp $srcdir/splice_opts $dir 2>/dev/null # frame-splicing options.
cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null`
delta_opts=`cat $srcdir/delta_opts 2>/dev/null`
[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1;
if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi
echo "$0: feature type is $feat_type"
case $feat_type in
delta) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas $delta_opts ark:- ark:- |";;
lda) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |"
cp $srcdir/final.mat $srcdir/full.mat $dir
;;
*) echo "$0: invalid feature type $feat_type" && exit 1;
esac
# Convenience for debug
# $cmd JOB=1:$nj $dir/log/copyfeats.JOB.log copy-feats "$feats" "ark,t:$dir/feats.JOB" || exit 1;
# $cmd JOB=1:$nj $dir/log/gunzip.JOB.log gunzip $alidir/ali.JOB.gz || exit 1;
echo "$0: computing GOP in $data using model from $srcdir, putting results in $dir"
mdl=$srcdir/final.mdl
tra="ark:utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt $sdata/JOB/text|";
$cmd JOB=1:$nj $dir/log/gop.JOB.log \
compute-gmm-gop $mdl "ark,t:$dir/feats.JOB" "ark,t:$alidir/ali.JOB" $dir/gop.JOB || exit 1;
echo "$0: done computing GOP."
| true |
88c1b409944d5627c99acebe0b75132ff1c77119
|
Shell
|
qnib/plain-elasticsearch
|
/opt/entry/20-hostname.sh
|
UTF-8
| 597 | 3.390625 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
if [[ -z ${ES_NODE_NAME} ]];then
if [[ -f /etc/conf.d/hostname ]];then
# The Gentoo way
ES_NODE_NAME=$(cat /etc/conf.d/hostname |awk -F= '/hostname=/{print $2}' |tr -d '"')
elif [[ -f /etc/hostname ]];then
ES_NODE_NAME=$(cat /etc/hostname)
else
# should never be called, as /etc/hostname has to be present... (?)
ES_NODE_NAME=$(hostname)
fi
fi
echo ">> sed -i '' -e \"s/node.name: .*/node.name: ${ES_NODE_NAME}/\" /etc/elasticsearch/elasticsearch.yml"
sed -i'' -e "s/node.name:.*/node.name: ${ES_NODE_NAME}/" /etc/elasticsearch/elasticsearch.yml
| true |
013bdf0bd4e65b7f14f165cf2e84f798b92e5f7f
|
Shell
|
deepwellice/asteroid-belt
|
/MAC/Xcode/xcode-switch
|
UTF-8
| 1,632 | 3.453125 | 3 |
[] |
no_license
|
#!/bin/sh
activexcode=/Applications/Xcode.app
xcode5=~/tools/Xcode502.app
xcode6=~/tools/Xcode640.app
xcode7=~/tools/Xcode710.app
export current_xcode_version=`xcodebuild -version | grep "Xcode"`
if grep "Xcode 5" <<< "$current_xcode_version"
then
echo Current version is Xcode 5
export current_xcode_version=5
export backup_xcode_name=$xcode5
elif grep "Xcode 6" <<< "$current_xcode_version"
then
echo Current version is Xcode 6
export current_xcode_version=6
export backup_xcode_name=$xcode6
elif grep "Xcode 7" <<< "$current_xcode_version"
then
echo Current version is Xcode 7
export current_xcode_version=7
export backup_xcode_name=$xcode7
else
echo Unhandled Xcode version, $current_xcode_version.
exit 1
fi
read -p "Target Xcode version(5/6/7):" target_xcode_version
if [ "$target_xcode_version" = "" ]; then exit 0; fi
if [ $current_xcode_version = $target_xcode_version ]
then
echo Already using Xcode $target_xcode_version.
exit 0
fi
echo ====== switching from xcode $current_xcode_version to $target_xcode_version
echo $backup_xcode_name
echo "Autodesk1090" | sudo -S mv $activexcode $backup_xcode_name
if [ "$target_xcode_version" = "5" ]
then
sudo mv "$xcode5" "$activexcode"
elif [ "$target_xcode_version" = "6" ]
then
sudo mv "$xcode6" "$activexcode"
elif [ "$target_xcode_version" = "7" ]
then
sudo mv "$xcode7" "$activexcode"
else
echo Unhandled version.
fi
echo .
xcodebuild -version
sudo xcode-select -r
echo ====== switching successfully
open /Applications/Xcode.app
#defaults write com.apple.dock ResetLaunchPad -bool true
#killall Dock
| true |
e06c42d20ff5da548c76dc970e34baa51d67bb93
|
Shell
|
PawelAdamski/go-web
|
/templates/angular-signup-app/bootstrap.sh
|
UTF-8
| 274 | 3 | 3 |
[] |
no_license
|
#!/bin/bash
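# Bootstrap a project from this template: replace every occurrence of 'angular-signup-app' with the supplied name in all text/xml files (skipping git/ and lib/), then delete this script.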
if [ "x$1" == "x" ]; then
echo use: $0 name
exit 1
fi
for f in `find . -type f | grep -vE '(git/|lib/)'`; do
if [ "`file -I $f | grep -E '(text|xml)'`" != "" ]; then
sed -e s,angular-signup-app,$1,g < $f > $$.tmp
mv $$.tmp $f
fi
done
rm -f bootstrap.sh
| true |
66cc249f6e93f3078304603f42f616a91a80a4c6
|
Shell
|
gbsf/archlinux-packages
|
/ne/repos/extra-x86_64/PKGBUILD
|
UTF-8
| 499 | 2.578125 | 3 |
[] |
no_license
|
# $Id$
# Maintainer: damir <[email protected]>
# Contributor: Ben <[email protected]>
pkgname=ne
pkgver=1.41
pkgrel=1
pkgdesc="the nice editor"
arch=(i686 x86_64)
url="http://ne.dsi.unimi.it"
depends=('glibc' 'ncurses')
source=(http://ne.dsi.unimi.it/$pkgname-$pkgver.tar.gz)
md5sums=('023e68d23a6216e89737ff2b6996aa77')
build() {
cd $startdir/src/$pkgname-$pkgver/src
sed -i 's|<ncurses/term.h>|<term.h>|g' {ne.h,cm.c,term.c}
make || return 1
mkdir -p $startdir/pkg/usr/bin/
cp ne $startdir/pkg/usr/bin
}
| true |
a42f3b3a40b493f2c832a93d16f7bacb33d55fee
|
Shell
|
isutton/service-binding-operator
|
/hack/deploy-sbo-local.sh
|
UTF-8
| 870 | 3.453125 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
OPERATOR_NAMESPACE=${OPERATOR_NAMESPACE:-}
ZAP_FLAGS=${ZAP_FLAGS:-}
OUTPUT="${OUTPUT:-out/acceptance-tests}"
mkdir -p "$OUTPUT"
SBO_LOCAL_LOG="$OUTPUT/sbo-local.log"
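# Kill every process whose command line matches $1, falling back to parsing ps output when killall is unavailable.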
_killall(){
which killall &> /dev/null
if [ $? -eq 0 ]; then
killall $1
else
for i in "$(ps -l | grep $1)"; do if [ -n "$i" ]; then kill $(echo "$i" | sed -e 's,\s\+,#,g' | cut -d "#" -f4); fi; done
fi
}
_killall operator-sdk
_killall service-binding-operator-local
operator-sdk --verbose run --local --namespace="$OPERATOR_NAMESPACE" --operator-flags "$ZAP_FLAGS" > $SBO_LOCAL_LOG 2>&1 &
SBO_PID=$!
attempts=24
while [ -z "$(grep 'Starting workers' $SBO_LOCAL_LOG)" ]; do
if [[ $attempts -ge 0 ]]; then
sleep 5
attempts=$((attempts-1))
else
echo "FAILED"
kill $SBO_PID
exit 1
fi
done
echo $SBO_PID
| true |
1714d132e044aa5b253b3c1ccc870b0837e4842e
|
Shell
|
photron/brickd-lgtm
|
/src/build_data/linux/libusb_static/compile.sh
|
UTF-8
| 438 | 2.96875 | 3 |
[] |
no_license
|
#!/bin/bash -ex
version=1.0.23
rm -rf libusb-${version}
rm -rf libusb-${version}.tar.bz2
rm -rf libusb.h
rm -rf libusb-1.0.a
wget https://github.com/libusb/libusb/releases/download/v${version}/libusb-${version}.tar.bz2
tar -xf libusb-${version}.tar.bz2
pushd libusb-${version}
./configure --disable-shared --disable-udev
make
popd
cp libusb-${version}/libusb/libusb.h .
cp libusb-${version}/libusb/.libs/libusb-1.0.a .
echo done
| true |
0de22ebc63e84bb1fca8829ef802ef3f7eee2a98
|
Shell
|
j05eph2000/mwtest
|
/wagerr_install.sh
|
UTF-8
| 7,616 | 3.375 | 3 |
[] |
no_license
|
#!/bin/bash
TMP_FOLDER=$(mktemp -d)
CONFIG_FILE='wagerr.conf'
CONFIGFOLDER='/root/.wagerr'   # per-node data dir is ${CONFIGFOLDER}${i}
COIN_DAEMON='wagerrd'
COIN_CLI='wagerr-cli'
COIN_PATH='/usr/local/bin/'
COIN_REPO='https://github.com/wagerr/wagerr.git'
COIN_TGZ='https://github.com/wagerr/wagerr/releases/download/v3.0.1/wagerr-3.0.1-x86_64-linux-gnu.tar.gz'
COIN_FOLDER='/root/wagerr-3.0.1/bin'
COIN_ZIP=$(echo $COIN_TGZ | awk -F'/' '{print $NF}')
COIN_NAME='wagerr'
COIN_PORT=55002
RPCPORT=$(($COIN_PORT*10))
NODEIP=$(curl -s4 icanhazip.com)
HOSTNAME=$(hostname)
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'
fallocate -l 6G /swapfile
chmod 600 /swapfile
mkswap /swapfile
swapon /swapfile
swapon -s
echo "/swapfile none swap sw 0 0" >> /etc/fstab
echo "How many nodes do you create?."
#echo "Enter alias for new node."
# echo -e "${YELLOW}输入新节点别名.${NC}"
read -e MNCOUNT
#prepare system
echo -e "Prepare the system to install ${GREEN}$COIN_NAME${NC} master node."
apt-get update >/dev/null 2>&1
DEBIAN_FRONTEND=noninteractive apt-get update > /dev/null 2>&1
DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y -qq upgrade >/dev/null 2>&1
apt install -y software-properties-common >/dev/null 2>&1
echo -e "${GREEN}Adding bitcoin PPA repository"
apt-add-repository -y ppa:bitcoin/bitcoin >/dev/null 2>&1
echo -e "Installing required packages, it may take some time to finish.${NC}"
apt-get update >/dev/null 2>&1
apt-get install -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" make software-properties-common \
build-essential libtool autoconf libssl-dev libboost-dev libboost-chrono-dev libboost-filesystem-dev libboost-program-options-dev \
libboost-system-dev libboost-test-dev libboost-thread-dev sudo automake git wget pwgen curl libdb4.8-dev bsdmainutils libdb4.8++-dev \
libminiupnpc-dev libgmp3-dev ufw pkg-config libevent-dev libdb5.3++ unzip >/dev/null 2>&1
if [ "$?" -gt "0" ];
then
echo -e "${RED}Not all required packages were installed properly. Try to install them manually by running the following commands:${NC}\n"
echo "apt-get update"
echo "apt -y install software-properties-common"
echo "apt-add-repository -y ppa:bitcoin/bitcoin"
echo "apt-get update"
echo "apt install -y make build-essential libtool software-properties-common autoconf libssl-dev libboost-dev libboost-chrono-dev libboost-filesystem-dev \
libboost-program-options-dev libboost-system-dev libboost-test-dev libboost-thread-dev sudo automake git pwgen curl libdb4.8-dev \
bsdmainutils libdb4.8++-dev libminiupnpc-dev libgmp3-dev ufw fail2ban pkg-config libevent-dev unzip"
exit 1
fi
#download node
for i in `seq 1 1 $MNCOUNT`; do
# Create scripts
echo '#!/bin/bash' > ~/bin/wagerrd$i.sh
echo "wagerrd -daemon -conf=$CONFIGFOLDER$i/wagerr.conf -datadir=$CONFIGFOLDER$i "'$*' >> ~/bin/wagerrd$i.sh
echo '#!/bin/bash' > ~/bin/wagerr-cli$i.sh
echo "wagerr-cli -conf=$CONFIGFOLDER$i/wagerr.conf -datadir=$CONFIGFOLDER$i "'$*' >> ~/bin/wagerr-cli$i.sh
echo '#!/bin/bash' > ~/bin/wagerr-tx$i.sh
echo "wagerr-tx -conf=$CONFIGFOLDER$i/wagerr.conf -datadir=$CONFIGFOLDER$i "'$*' >> ~/bin/wagerr-tx$i.sh
chmod 755 ~/bin/wagerr*.sh
echo -e "${GREEN}Downloading and Installing VPS $COIN_NAME Daemon${NC}"
cd $TMP_FOLDER >/dev/null 2>&1
rm $COIN_ZIP >/dev/null 2>&1
wget -q $COIN_TGZ
compile_error
tar xvzf $COIN_ZIP >/dev/null 2>&1
cd $COIN_FOLDER
chmod +x $COIN_DAEMON $COIN_CLI
compile_error
cd - >/dev/null 2>&1
#get ip
declare -a NODE_IPS
for ips in $(netstat -i | awk '!/Kernel|Iface|lo/ {print $1," "}')
do
NODE_IPS+=($(curl --interface $ips --connect-timeout 2 -s4 icanhazip.com))
done
if [ ${#NODE_IPS[@]} -gt 1 ]
then
echo -e "${GREEN}More than one IP. Please type 0 to use the first IP, 1 for the second and so on...${NC}"
INDEX=0
for ip in "${NODE_IPS[@]}"
do
echo ${INDEX} $ip
let INDEX=${INDEX}+1
done
read -e choose_ip
NODEIP=${NODE_IPS[$choose_ip]}
else
NODEIP=${NODE_IPS[0]}
fi
#create config
mkdir $CONFIGFOLDER >/dev/null 2>&1
RPCUSER=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 10 | head -n 1)
RPCPASSWORD=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 20 | head -n 1)
cat << EOF > $CONFIGFOLDER$i/$CONFIG_FILE
rpcuser=$RPCUSER
rpcpassword=$RPCPASSWORD
rpcallowip=127.0.0.1
listen=1
server=1
daemon=1
port=$(($COIN_PORT+$i))
rpcport=$(($COIN_PORT*10))
EOF
#create key
if [[ -z "$COINKEY" ]]; then
$COIN_DAEMON -daemon -conf=$CONFIGFOLDER$i/$CONFIG_FILE -datadir=$CONFIGFOLDER$i
sleep 30
if [ -z "$(ps axo cmd:100 | grep $COIN_DAEMON)" ]; then
echo -e "${RED}$COIN_NAME server could not start. Check /var/log/syslog for errors.${NC}"
exit 1
fi
COINKEY=$(wagerr-cli -conf=$CONFIGFOLDER$i/wagerr.conf -datadir=$CONFIGFOLDER$i createmasternodekey)
if [ "$?" -gt "0" ];
then
echo -e "${RED}Wallet not fully loaded. Let us wait and try again to generate the Private Key${NC}"
sleep 30
COINKEY=$(wagerr-cli -conf=$CONFIGFOLDER$i/wagerr.conf -datadir=$CONFIGFOLDER$i createmasternodekey)
fi
wagerr-cli -conf=$CONFIGFOLDER$i/wagerr.conf -datadir=$CONFIGFOLDER$i stop
fi
#update config
cat << EOF >> $CONFIGFOLDER$i/$CONFIG_FILE
logintimestamps=1
maxconnections=256
masternode=1
bind=$NODEIP
#externalip=$NODEIP:$COIN_PORT
masternodeprivkey=$COINKEY
masternodeaddr=$NODEIP:$(($COIN_PORT+$i))
EOF
#configure systemd
cat << EOF > /etc/systemd/system/$COIN_NAME$i.service
[Unit]
Description=$COIN_NAME$i service
After=network.target
[Service]
User=root
Group=root
Type=forking
#PIDFile=$CONFIGFOLDER$i/$COIN_NAME.pid
ExecStart=$COIN_PATH$COIN_DAEMON -daemon -conf=$CONFIGFOLDER$i/$CONFIG_FILE -datadir=$CONFIGFOLDER$i
ExecStop=-$COIN_PATH$COIN_CLI -conf=$CONFIGFOLDER$i/$CONFIG_FILE -datadir=$CONFIGFOLDER$i stop
Restart=always
PrivateTmp=true
TimeoutStopSec=60s
TimeoutStartSec=10s
StartLimitInterval=120s
StartLimitBurst=5
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
sleep 3
systemctl start $COIN_NAME$i.service
systemctl enable $COIN_NAME$i.service >/dev/null 2>&1
if [[ -z "$(ps axo cmd:100 | egrep $COIN_DAEMON)" ]]; then
echo -e "${RED}$COIN_NAME$i is not running${NC}, please investigate. You should start by running the following commands as root:"
echo -e "${GREEN}systemctl start $COIN_NAME$i.service"
echo -e "systemctl status $COIN_NAME$i.service"
echo -e "less /var/log/syslog${NC}"
exit 1
fi
#important information
echo -e "================================================================================================================================"
echo -e "$COIN_NAME Masternode is up and running listening on port ${GREEN}$COIN_PORT${NC}."
echo -e "Configuration file is: ${RED}$CONFIGFOLDER/$CONFIG_FILE${NC}"
echo -e "Start: ${RED}systemctl start $COIN_NAME.service${NC}"
echo -e "Stop: ${RED}systemctl stop $COIN_NAME.service${NC}"
echo -e "VPS_IP:PORT ${RED}$NODEIP:$COIN_PORT${NC}"
echo -e "MASTERNODE PRIVATEKEY is: ${RED}$COINKEY${NC}"
echo -e "Please check ${GREEN}$COIN_NAME${NC} is running with the following command: ${GREEN}systemctl status $COIN_NAME.service${NC}"
echo -e "${GREEN}复制下列节点配置信息并黏贴到本地钱包节点配置文件${NC}"
echo -e "${GREEN}txhash 和 outputidx在本地钱包转25000WGR后到调试台输入 masternode outputs 得出${NC}"
echo -e "${YELLOW}$HOSTNAME $NODEIP:$COIN_PORT $COINKEY "txhash" "outputidx"${NC}"
echo -e "================================================================================================================================"
done
| true |
ccfc74f582fc6fbf640b437af208fa1c8d47310f
|
Shell
|
sheetal804/Parallel-Processing-Repo
|
/shell_program/perfect.sh
|
UTF-8
| 228 | 3.46875 | 3 |
[] |
no_license
|
# A perfect number equals the sum of its proper divisors (e.g. 6 = 1 + 2 + 3)
echo enter a number
read n
num=$n
s=0
i=1
while [ $i -lt $num ]
do
if [ `expr $num % $i` -eq 0 ]
then
s=`expr $s + $i`
fi
i=`expr $i + 1`
done
if [ $s -eq $num ]
then
echo perfect number
else
echo not a perfect number
fi
| true |
b9717bb1147f347d8a140bd8aaeb10d3d7d01b1f
|
Shell
|
hmidani-abdelilah/Ubuntu_Virtual_Machine_Setup
|
/test.sh
|
UTF-8
| 1,777 | 3.953125 | 4 |
[] |
no_license
|
#!/bin/bash
IFS="\n"
read -p "Enter the username for azure account access : " username
read -s -p "Enter password for azure account access : " password
echo ""
unset IFS
# Install applications to the virtual machine through apt-get
sudo apt-get update
sudo apt-get -y upgrade
sudo apt-get -y install fail2ban
sudo apt-get -y install apache2
sudo apt-get -y install php5 php-pear
# Installs the azure cli and its dependencies
sudo apt-get -y install nodejs-legacy
sudo apt-get -y install npm
sudo npm install -g azure-cli
# Login to the azure cli with the username and password given earlier
azure login -p "$password" "$username"
while [ $? -ne 0 ]; do
IFS="\n"
echo "Error: Invalid Username/Password Combination..."
echo ""
echo "Please re-enter Username/Password OR enter -1 to skip setting up your HTTP endpoint"
read -p "Enter the username for azure account access : " username
if [[ "$username" == "-1" ]]; then
echo ""
echo "Cancelled the enpoint set up through Microsoft Azure!"
echo "Your HTTP Endpoint was not set up."
echo ""
break
fi
read -s -p "Enter password for azure account access : " password
if [[ "$password" == "-1" ]]; then
echo ""
echo "Cancelled the enpoint set up through Microsoft Azure!"
echo "Your HTTP Endpoint was not set up."
echo ""
break
fi
echo ""
unset IFS
azure login -p "$password" "$username"
done
if [[ "$username" != "-1" && "$password" != "-1" ]]; then
# Set up the HTTP endpoint without having to go to the portal online
azure vm endpoint create -n HTTP $HOSTNAME 80 80 #$HOSTNAME is the name of your virtual machine
azure vm endpoint list $HOSTNAME
fi
echo ""
echo "----------------------------------------------------------------------"
echo "Everything on your virtual machine is now set up!"
| true |
2833645f350bf8a6d4fa2d3dcc881311bf3ec1ba
|
Shell
|
Bhaskers-Blu-Org1/cloud-enterprise-examples
|
/iac/15-iks-vpc-cncf-devops-tools/boot/scripts/acp-mgr.sh
|
UTF-8
| 5,545 | 3.59375 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# --------------------------------------------------------------------------------------------------------
# Name : Account Manager IAM Access Group Policies
#
# Description: Add policies to an access group that allow management of the resoruce groups,
# users, and access groups in an IBM Cloud account.
#
# --------------------------------------------------------------------------------------------------------
#
# input validation
if [ -z "$1" ]; then
echo "Usage: acp-mgr.sh <ACCESS_GROUP>"
echo "Add account management policies to an access group"
echo "<ACCESS_GROUP> - The name of the access group (ex: ACCOUNT-MGR)"
exit
fi
ACCESS_GROUP=$1
# input validation
if [ -z "${ACCESS_GROUP}" ]; then
echo "Usage: acp-mgr.sh <ACCESS_GROUP>"
echo "Please provide the name of the access group (ex: ACCOUNT-MGR)"
exit
fi
# Define the Polices for the Access Group to enable managing an account
# ACCOUNT MANAGEMENT POLICIES
# This doc explains the range of account management services and how to enable them:
# "Assigning access to account management services"
# https://cloud.ibm.com/docs/iam?topic=iam-account-services
# "Who can create resource groups?"
# https://cloud.ibm.com/docs/resources?topic=resources-resources-faq#create-resource
# "Assigning access to account management services > All account management services option"
# https://cloud.ibm.com/docs/iam?topic=iam-account-services#all-account-management
# All account management services - 38
# Administrator role grants that role for all account management services
# This policy alone essentially gives the users all the permissions of the account owner (except classic infrastructure and Cloud Foundry permissions)
# Includes permissions to create resource groups, manage users, and create access groups
#ibmcloud iam access-group-policy-create ${ACCESS_GROUP} --account-management --roles Administrator
# This command grants resource group capabilities only, not the rest of account management capabilities
# Provides ability to do this:
# "Managing resource groups"
# https://cloud.ibm.com/docs/resources?topic=resources-rgs
# All resource group - 38
# Editor role grants access to create and view resource groups; Administrators can also assign access
# Administrator role is needed so Account Managers can grant Environment Admins and Users access to their resource group
# Redundant with --account-management
ibmcloud iam access-group-policy-create ${ACCESS_GROUP} --resource-type "resource-group" --roles Administrator
# "Assigning access to account management services > User management"
# https://cloud.ibm.com/docs/iam?topic=iam-account-services#user-management-account-management
# Provides ability to do this:
# "Inviting users to an account"
# https://cloud.ibm.com/docs/iam?topic=iam-iamuserinv#invite-access
# User Management service - 50
# Editor role grants access to invite and remove users; Administrators can also assign access
# Administrator role is needed so Account Managers can grant other Account Managers access to this service
# Redundant with --account-management but independent of --resource-type "resource-group"
ibmcloud iam access-group-policy-create ${ACCESS_GROUP} --service-name user-management --roles Administrator
# "Assigning access to account management services > Access groups"
# https://cloud.ibm.com/docs/iam?topic=iam-account-services#access-groups-account-management
# Provides ability to do this:
# "Setting up access groups"
# https://cloud.ibm.com/docs/iam?topic=iam-groups
# IAM Access Groups Service service - 55
# Editor role grants access to create groups and add users; Administrators can also assign access
# Redundant with --account-management but independent of --resource-type "resource-group"
ibmcloud iam access-group-policy-create ${ACCESS_GROUP} --service-name iam-groups --roles Administrator
# IAM SERVICES POLICIES
# Similar to env admin policies, but for all resource groups
# Enables acct admin access to all resources in all regions and all resource groups
# "Managing user access with Identity and Access Management"
# https://cloud.ibm.com/docs/Registry?topic=registry-iam
# Container Registry service in All regions - 64
# Manager role grants access to create namespaces for the environment in the image registry
# Administrator role is needed to create clusters
ibmcloud iam access-group-policy-create ${ACCESS_GROUP} --service-name container-registry --roles Administrator,Manager
# "Prepare to create clusters at the account level"
# https://cloud.ibm.com/docs/containers?topic=containers-clusters#cluster_prepare
# "Understanding access to the infrastructure portfolio"
# https://cloud.ibm.com/docs/containers?topic=containers-users#understand_infra
# Kubernetes Service service in All regions - 45
# Administrator role grants access to create and delete clusters
# Manager role grants access to manage clusters
# To create clusters, the user will also need Administrator access to the image registry
ibmcloud iam access-group-policy-create ${ACCESS_GROUP} --service-name containers-kubernetes --roles Administrator,Manager
# "IAM access"
# https://cloud.ibm.com/docs/iam?topic=iam-userroles
# All resources in account (including future IAM enabled services) in All regions - 40
# Administrator role grants access to create and delete service instances (any IAM service)
# Manager role grants access to manage service instances
ibmcloud iam access-group-policy-create ${ACCESS_GROUP} --roles Administrator,Manager
echo "Completed creating polices!"
| true |
a7cefbe5adea7ded85a395f24f00308cd1533190
|
Shell
|
BestJex/kuboard-press
|
/.vuepress/public/install-script/v1.20.x/install_kubelet.sh
|
UTF-8
| 3,630 | 3.125 | 3 |
[] |
no_license
|
#!/bin/bash
# Run this on both master and worker nodes
# Install docker
# Reference documentation:
# https://docs.docker.com/install/linux/docker-ce/centos/
# https://docs.docker.com/install/linux/linux-postinstall/
# Remove old versions
apt-get remove -y docker \
docker-client \
docker-client-latest \
docker-ce-cli \
docker-common \
docker-latest \
docker-latest-logrotate \
docker-logrotate \
docker-selinux \
docker-engine-selinux \
docker-engine
# Set up the apt-get repository
apt-get update && apt-get install -y apt-transport-https lvm2
curl -fsSL https://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | sudo apt-key add -
sudo add-apt-repository "deb [arch=amd64] https://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"
apt-get -y update
# Install and start docker
apt-get install -y docker-ce=5:20.10.1~3-0~ubuntu-focal docker-ce-cli=5:20.10.1~3-0~ubuntu-focal containerd.io=
mkdir /etc/docker || true
cat > /etc/docker/daemon.json <<EOF
{
"registry-mirrors": ["${REGISTRY_MIRROR}"],
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
},
"storage-driver": "overlay2",
"storage-opts": [
"overlay2.override_kernel_check=true"
]
}
EOF
mkdir -p /etc/systemd/system/docker.service.d
# Restart Docker
systemctl daemon-reload
systemctl enable docker
systemctl restart docker
# Install NFS utilities
# NFS utilities must be installed before NFS network storage can be mounted
apt-get install -y nfs-utils
apt-get install -y wget
# Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
# Disable SELinux
setenforce 0
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
# Disable swap
swapoff -a
yes | cp /etc/fstab /etc/fstab_bak
cat /etc/fstab_bak |grep -v swap > /etc/fstab
# Modify /etc/sysctl.conf
# If a setting already exists, update it in place
sed -i "s#^net.ipv4.ip_forward.*#net.ipv4.ip_forward=1#g" /etc/sysctl.conf
sed -i "s#^net.bridge.bridge-nf-call-ip6tables.*#net.bridge.bridge-nf-call-ip6tables=1#g" /etc/sysctl.conf
sed -i "s#^net.bridge.bridge-nf-call-iptables.*#net.bridge.bridge-nf-call-iptables=1#g" /etc/sysctl.conf
sed -i "s#^net.ipv6.conf.all.disable_ipv6.*#net.ipv6.conf.all.disable_ipv6=1#g" /etc/sysctl.conf
sed -i "s#^net.ipv6.conf.default.disable_ipv6.*#net.ipv6.conf.default.disable_ipv6=1#g" /etc/sysctl.conf
sed -i "s#^net.ipv6.conf.lo.disable_ipv6.*#net.ipv6.conf.lo.disable_ipv6=1#g" /etc/sysctl.conf
sed -i "s#^net.ipv6.conf.all.forwarding.*#net.ipv6.conf.all.forwarding=1#g" /etc/sysctl.conf
# If not present, append it
echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.conf
echo "net.bridge.bridge-nf-call-ip6tables = 1" >> /etc/sysctl.conf
echo "net.bridge.bridge-nf-call-iptables = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.all.disable_ipv6 = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.default.disable_ipv6 = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.lo.disable_ipv6 = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.all.forwarding = 1" >> /etc/sysctl.conf
# Apply the settings
sysctl -p
# Configure the Kubernetes apt-get source
curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add -
cat <<EOF > /etc/apt/sources.list.d/kubernetes.list
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
EOF
# Remove old versions
apt-get remove -y kubelet kubeadm kubectl
# Install kubelet, kubeadm and kubectl
# Replace ${1} with the kubernetes version number, e.g. 1.19.0
apt-get install -y kubelet=${1}-00 kubeadm=${1}-00 kubectl=${1}-00
# Restart docker and start kubelet
systemctl daemon-reload
systemctl restart docker
systemctl enable kubelet && systemctl start kubelet
docker version
| true |
bcaa4120afa7cb0c3382292ffb7a8829a185efd5
|
Shell
|
daedroza/bash-scripts
|
/setup-nethunter.sh
|
UTF-8
| 1,005 | 3.140625 | 3 |
[] |
no_license
|
#!/bin/bash
# setup-nethunter, version 1.00
ROOT_UID=0 # $UID = 0 is usually root.
E_NOTROOT=87 # Non-root exit error.
# Run as root
if [ "$UID" -ne "$ROOT_UID" ]
then
echo "You're not running this script root previliges."
echo "Do sudo ./setup-nethunter.sh or su && ./setup-nethunter.sh"
exit $E_NOTROOT
fi
# Update the damn thing once
apt-get update
# Install basic utilities
apt-get -y install gcc libpcap-dev aircrack-ng sqlite3 libsqlite3-dev libssl-dev bully wifite make rfkill cowpatty
# Get my own bash scripts
cd ~
mkdir bash-scripts
cd bash-scripts
git init
git remote add origin https://github.com/daedroza/bash-scripts.git
git pull origin master
# Setup additional tools
cd ~
mkdir make
cd make
git clone https://github.com/t6x/reaver-wps-fork-t6x.git
git clone https://github.com/binkybear/wifite-mod-pixiewps.git
cd reaver-wps-fork-t6x/src/
./configure
make && make install
cp /root/make/wifite-mod-pixiewps/wifite-ng /usr/bin/wifite-ng
chmod +x /usr/bin/wifite-ng
cd ~
rm -rf make
| true |
e7a8cff1aa76a44223060843c355e559fdf13abf
|
Shell
|
therealromster/crux-ports-romster
|
/games/wesnoth-dev/pre-install
|
UTF-8
| 1,549 | 3.640625 | 4 |
[] |
no_license
|
#!/usr/bin/env sh
. /etc/pkgmk.conf
. Pkgfile
OLD_VERSION='1.5.1'
[ -z "$PKGMK_WORK_DIR" ] && PKGMK_WORK_DIR="$PWD/work"
[ -z "$PKGMK_SOURCE_DIR" ] && PKGMK_SOURCE_DIR="$PWD"
if [ -e "$PKGMK_SOURCE_DIR/wesnoth-$version.tar.bz2" ]; then
echo 'You have the latest source file.'
exit 0
fi
if [ -n "`pkginfo -i | egrep '^xdelta '`" ]; then
if [ -e "$PKGMK_SOURCE_DIR/wesnoth-$OLD_VERSION.tar.bz2" ]; then
if [ ! -e "$PKGMK_WORK_DIR" ]; then
mkdir -p "$PKGMK_WORK_DIR"
fi
cd "$PKGMK_WORK_DIR"
if [ ! -e "$PKGMK_SOURCE_DIR/wesnoth-$OLD_VERSION.tar-wesnoth-$version.tar.xdelta" ]; then
wget http://optusnet.dl.sourceforge.net/sourceforge/wesnoth/wesnoth-$OLD_VERSION.tar-wesnoth-$version.tar.xdelta --directory-prefix=$PKGMK_SOURCE_DIR
fi
cp $PKGMK_SOURCE_DIR/wesnoth-$OLD_VERSION.tar.bz2 .
cp $PKGMK_SOURCE_DIR/wesnoth-$OLD_VERSION.tar-wesnoth-$version.tar.xdelta .
echo -n '[ 33%] Decompressing source, '
bzip2 -d wesnoth-$OLD_VERSION.tar.bz2
echo 'done.'
echo -n '[ 66%] Patching source, '
xdelta patch wesnoth-$OLD_VERSION.tar-wesnoth-$version.tar.xdelta
rm wesnoth-$OLD_VERSION.tar
mv wesnoth-$OLD_VERSION.tar-wesnoth-$version.tar.xdelta $PKGMK_SOURCE_DIR/
echo 'done.'
echo -n '[100%] Compressing patched source, '
bzip2 -9 wesnoth-$version.tar
echo 'done.'
mv wesnoth-$version.tar.bz2 $PKGMK_SOURCE_DIR/
else
echo "'wesnoth-$OLD_VERSION.tar.bz2' not found proceed to run Pkgfile."
exit
fi
else
echo "Warning: 'xdelta' is not installed can not do a incremental update."
fi
# End of file
| true |
c4b1f35be434bec9b3af7d051084f060628d3684
|
Shell
|
nunogt/cassandra-io
|
/export.sh
|
UTF-8
| 4,304 | 4.0625 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# By default snapshots are stored under the path this script is being run from.
# If you'd rather use an absolute path, uncomment and edit the variable below.
#
#CASSANDRA_IO_HOME="$HOME/.cassandra-io"
CASSANDRA_CONF="$1"
check_exit_status() {
if [ "$1" != "0" ] ; then
echo "Process exited with error code: $1"
exit $1
fi
}
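# Resolve symlinks by hand to find the script's real directory (portable fallback for systems without GNU readlink -f).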
resolve_links_unix() {
PRG="$0"
while [ -h "$PRG" ]; do
ls=`ls -ld "$PRG"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
PRG="$link"
else
PRG=`dirname "$PRG"`/"$link"
fi
done
CASSANDRA_IO_HOME=`dirname "$PRG"`
}
resolve_links_linux() {
PRG="`readlink -f $0`"
CASSANDRA_IO_HOME=`dirname "$PRG"`
}
resolve_links(){
# GNU coreutils makes this easy, so let's test for that
case "`uname`" in
Linux*) resolve_links_linux;;
*) resolve_links_unix;;
esac
}
bootstrap(){
if [ -z "$CASSANDRA_IO_HOME"] ; then
resolve_links
fi
mkdir -p $CASSANDRA_IO_HOME/{log,snapshots}
check_exit_status $?
TIMESTAMP="`date +%s`"
NODETOOL_LOG="$CASSANDRA_IO_HOME/log/snapshot-$TIMESTAMP.log"
}
cassandra_parse_config(){
# we don't really parse the conf file; it's painful to do it in bash
# let's just extract what we need for now and deal with yaml in future versions
CASSANDRA_DATA="`cat $1 | grep data_file_directories: -A 1 | grep - | awk {'print $2'}`"
CASSANDRA_COMMITLOG="`cat $1 | grep commitlog_directory: | awk {'print $2'}`"
CASSANDRA_CACHES="`cat $1 | grep saved_caches_directory: | awk {'print $2'}`"
}
cassandra_info(){
pid="`ps -ef | grep [C]assandraDaemon | awk '{print $2}'`"
if [ ! -z "$pid" ] ; then
CASSANDRA_PID="$pid"
echo "Cassandra seems to be running with pid $CASSANDRA_PID (this is my best guess)"
if [ -z "$CASSANDRA_PID" ] ; then
echo "Couldn't reliably determine Cassandra pidfile. Is it running?"
exit 1
fi
else
echo "Cassandra is not running"
exit 1
fi
if [ -z "$CASSANDRA_CONF" ] ; then
# attempt to determine cassandra.yaml location (lots of guesswork required)
if [ ! -f "$CASSANDRA_HOME/conf/cassandra.yaml" ] ; then
proccwd=`lsof -p $CASSANDRA_PID | grep cwd | awk '{print $9}'`
if [ -f "$proccwd/../conf/cassandra.yaml" ] ; then
CASSANDRA_HOME="`dirname $proccwd`"
fi
fi
CASSANDRA_CONF="$CASSANDRA_HOME/conf/cassandra.yaml"
fi
if [ -f "$CASSANDRA_CONF" ] ; then
cassandra_parse_config $CASSANDRA_CONF
else
echo "Couldn't find cassandra.yaml configuration file."
echo "Please specify its location by running $0 /path/to/cassandra/conf/cassandra.yaml"
exit 1
fi
}
locate_nodetool() {
cassandra_info
# can we just use nodetool, ie, is it in $PATH ?
if [ -f "`which nodetool`" ] ; then
NODETOOL="`which nodetool`" # yes, let's just use that
else
NODETOOL="$CASSANDRA_HOME/bin/nodetool" # maybe we're lucky
fi
# at this stage NODETOOL must point to something; if it doesn't, i couldn't find it
if [ ! -f "$NODETOOL" ] ; then
echo "Cassandra nodetool couldn't be located. Please include it in your PATH or set CASSANDRA_HOME"
exit 3
fi
}
snapshot_nodetool(){
echo "Taking snapshot of cassandra current state"
$NODETOOL snapshot &> $NODETOOL_LOG
check_exit_status $?
}
snapshot_store(){
while read line ; do
if [[ "$line" == *directory* ]] ; then
SNAPSHOT_NUMBER="`echo $line | awk {'print $3'}`"
fi
done < $NODETOOL_LOG
SNAPSHOTS=$(find $CASSANDRA_DATA/ -type d -iname "$SNAPSHOT_NUMBER")
if [ ! -z "$SNAPSHOTS" ] ; then
for s in $SNAPSHOTS ; do
rsync -aR $CASSANDRA_DATA/./`echo ${s#$CASSANDRA_DATA/}` $CASSANDRA_IO_HOME/snapshots/$SNAPSHOT_NUMBER/
check_exit_status $?
done
else
echo "Couldn't determine snapshot number. I'm confused. Abort."
exit 2
fi
echo "Cassandra snapshot stored at $CASSANDRA_IO_HOME/snapshots/$SNAPSHOT_NUMBER/"
exit 0
}
bootstrap
locate_nodetool
snapshot_nodetool
snapshot_store
| true |
7c180d4b367a85396c728737f0792c22cfb0d9c7
|
Shell
|
amzn/differential-privacy-bayesian-optimization
|
/experiments/output_perturbation/scikit-learn/build_tools/azure/install.sh
|
UTF-8
| 2,391 | 3.40625 | 3 |
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -e
UNAMESTR=`uname`
if [[ "$UNAMESTR" == "Darwin" ]]; then
# install OpenMP not present by default on osx
HOMEBREW_NO_AUTO_UPDATE=1 brew install libomp
# enable OpenMP support for Apple-clang
export CC=/usr/bin/clang
export CXX=/usr/bin/clang++
export CPPFLAGS="$CPPFLAGS -Xpreprocessor -fopenmp"
export CFLAGS="$CFLAGS -I/usr/local/opt/libomp/include"
export CXXFLAGS="$CXXFLAGS -I/usr/local/opt/libomp/include"
export LDFLAGS="$LDFLAGS -L/usr/local/opt/libomp/lib -lomp"
export DYLD_LIBRARY_PATH=/usr/local/opt/libomp/lib
fi
make_conda() {
TO_INSTALL="$@"
conda create -n $VIRTUALENV --yes $TO_INSTALL
source activate $VIRTUALENV
}
if [[ "$DISTRIB" == "conda" ]]; then
TO_INSTALL="python=$PYTHON_VERSION pip pytest pytest-cov \
numpy=$NUMPY_VERSION scipy=$SCIPY_VERSION \
cython=$CYTHON_VERSION joblib=$JOBLIB_VERSION"
if [[ "$INSTALL_MKL" == "true" ]]; then
TO_INSTALL="$TO_INSTALL mkl"
else
TO_INSTALL="$TO_INSTALL nomkl"
fi
if [[ -n "$PANDAS_VERSION" ]]; then
TO_INSTALL="$TO_INSTALL pandas=$PANDAS_VERSION"
fi
if [[ -n "$PYAMG_VERSION" ]]; then
TO_INSTALL="$TO_INSTALL pyamg=$PYAMG_VERSION"
fi
if [[ -n "$PILLOW_VERSION" ]]; then
TO_INSTALL="$TO_INSTALL pillow=$PILLOW_VERSION"
fi
if [[ -n "$MATPLOTLIB_VERSION" ]]; then
TO_INSTALL="$TO_INSTALL matplotlib=$MATPLOTLIB_VERSION"
fi
make_conda $TO_INSTALL
elif [[ "$DISTRIB" == "ubuntu" ]]; then
sudo apt-get install python3-scipy python3-matplotlib libatlas3-base libatlas-base-dev libatlas-dev python3-virtualenv
python3 -m virtualenv --system-site-packages --python=python3 $VIRTUALENV
source $VIRTUALENV/bin/activate
python -m pip install pytest pytest-cov cython joblib==$JOBLIB_VERSION
fi
if [[ "$COVERAGE" == "true" ]]; then
python -m pip install coverage codecov
fi
if [[ "$TEST_DOCSTRINGS" == "true" ]]; then
python -m pip install sphinx numpydoc # numpydoc requires sphinx
fi
python --version
python -c "import numpy; print('numpy %s' % numpy.__version__)"
python -c "import scipy; print('scipy %s' % scipy.__version__)"
python -c "\
try:
import pandas
print('pandas %s' % pandas.__version__)
except ImportError:
print('pandas not installed')
"
pip list
python setup.py develop
| true |
a6ac80ebea99f23f1363a5192eae9000a7d63fa8
|
Shell
|
mc-b/lernkube
|
/extensions/makekeys.sh
|
UTF-8
| 1,139 | 3.484375 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Creates wg0.conf and the keys for the WireGuard server and clients
#
umask 077
### Defaults
NET=12
ENDPOINT=<gateway>
PORT=51820
### Server
wg genkey > server_private.key
wg pubkey > server_public.key < server_private.key
cat <<%EOF% >wg0.conf
[Interface]
Address = 192.168.${NET}.1
ListenPort = ${PORT}
#PostUp = sysctl -w net.ipv4.ip_forward=1
#PreDown = sysctl -w net.ipv4.ip_forward=0
PrivateKey = $(cat server_private.key)
%EOF%
### Clients
for client in {10..40}
do
wg genkey > client_${client}_private.key
wg pubkey > client_${client}_public.key < client_${client}_private.key
cat <<%EOF% >client_${client}_wg0.conf
[Interface]
Address = 192.168.${NET}.${client}/24
PrivateKey = $(cat client_${client}_private.key)
[Peer]
PublicKey = $(cat server_public.key)
Endpoint = ${ENDPOINT}:${PORT}
AllowedIPs = 192.168.${NET}.0/24
# This is for if you're behind a NAT and
# want the connection to be kept alive.
PersistentKeepalive = 25
%EOF%
cat <<%EOF% >>wg0.conf
### Client ${client}
[Peer]
PublicKey = $(cat client_${client}_public.key)
AllowedIPs = 192.168.${NET}.${client}
%EOF%
done
| true |
c6b8379562bde90284dd1e78075cba1bd834a569
|
Shell
|
vamshi4220/User_Registration-
|
/User_registration.sh
|
UTF-8
| 1,286 | 3.296875 | 3 |
[] |
no_license
|
#!/bin/bash -x
#UC1
read -p "enter a name " name
p1='^[A-Z]{1}[a-zA-Z]{3,15}$';
if [[ $name =~ $p1 ]]
then
echo "yes"
else
echo "no"
fi
#UC2
read -p "enter a surname " surname
p2='^[A-Z]{1}[a-zA-Z]{3,15}$';
if [[ $surname =~ $p2 ]]
then
echo "yes"
else
echo "no"
fi
#UC3
read -p "enter a emailid " emailid
p3="^[a-zA-Z._-]+@(gmail|bl|yahoo)+.(com|co|in)";
if [[ $emailid =~ $p3 ]]
then
echo "yes"
else
echo "no"
fi
#UC4
read -p "enter a mobile no " num
p4='^((\+)?91)[ ]?[7896]{1}[0-9]{9}$'
if [[ $num =~ $p4 ]]
then
echo "yes"
else
echo "no"
fi
#UC5
read -p "enter a pasword " pw1
p5='^[a-zA-Z]{1,8}$'
if [[ $pw1 =~ $p5 ]]
then
echo "yes"
else
echo "no"
fi
#UC6
read -p "enter a pasword " pw2
p6='^[A-Z]{1}[a-zA-Z]{8,}$';
if [[ $pw2 =~ $p6 ]]
then
echo "yes"
else
echo "no"
fi
#UC7
read -p "enter a pasword " pw3
p7='^[A-Z]{1}[+a-zA-Z0-9]{8,}$'
if [[ $pw3 =~ $p7 ]]
then
echo "yes"
else
echo "no"
fi
#UC8
read -p "enter a pasword " pw4
p8='^[A-Z]{1}[a-zA-Z0-9]*[+.@$#&!*%][+0-9]{8,}$'
if [[ $pw4 =~ $p8 ]]
then
echo "yes"
else
echo "no"
fi
#UC9
read -p "enter a email " pw5
p9="^[a-zA-Z0-9._+-]+@(gmail|outlook|abc|yahoo)+.(com|co|in)";
if [[ $pw5 =~ $p9 ]]
then
echo "yes"
else
echo "no"
fi
| true |
1872ff278eba4bafae9c0da4d190a52235f4b2f7
|
Shell
|
pskarin/calvincontrol
|
/start_scripts/start_one_runtime_proxy_storage.sh
|
UTF-8
| 341 | 2.640625 | 3 |
[] |
no_license
|
rm rt2*
ip=$(/sbin/ip -o -4 addr list eth0 | awk '{print $4}' | cut -d/ -f1)
CALVIN_GLOBAL_STORAGE_TYPE=\"proxy\" CALVIN_GLOBAL_STORAGE_PROXY=\"calvinip://$1:5000\" csruntime -n $ip -p 5002 -c 5003 --name 'rt2' --probe-buffer-size 500000 >> rt2_output.actortrace &
echo "Started two runtimes {rt1, rt2} on host:$ip, with storage on rt1."
| true |
359645878c9adf8c4516889d53b080bdbc148f82
|
Shell
|
mateuszligeza/bash
|
/lab6/zad01/zad01.sh
|
UTF-8
| 420 | 3.03125 | 3 |
[] |
no_license
|
#!/bin/bash
pierwszaLiczba=$1
drugaLiczba=$2
if ((pierwszaLiczba > drugaLiczba))
then
for ((i=pierwszaLiczba; i>=drugaLiczba; i--))
do
if ((pierwszaLiczba % 2 == i % 2))
then
echo -n "$i "
fi
done
else
for ((i=pierwszaLiczba; i<=drugaLiczba; i++))
do
if ((pierwszaLiczba % 2 == i % 2))
then
echo -n "$i "
fi
done
fi
echo
| true |
9f6157307b9c2ac91b255994802be6be2074a253
|
Shell
|
z8jiang/EnsembleVariantCallingPipeline
|
/run_evc_bam.sh
|
UTF-8
| 1,780 | 3.375 | 3 |
[
"BSD-2-Clause-Views",
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
path=${1}
out=${2}
sampleF=${3}
email=${4}
ref=${5}
pon=${6}
dbSNP=${7}
walltime=${8}
queue=${9}
interval_list=${10}
USAGE="\nMissing input arguments..\n
USAGE:\trun_evc_bam \\
path/to/bam/files \\
output/directory \\
path/to/sample.map \\
email.for@notification \\
reference_genome (fasta) \\
pon (INTERNAL_PON) \\
gnomad_dbSNP \\
max_walltime (hours only) \\
queue (hotel or home) \\
interval_list_file_for_mutect \n\n"
if [ -z "${9}" ]
then
printf "$USAGE"
exit 1
fi
mkdir -p ${out}/jobs/strelka
mkdir -p ${out}/jobs/varscan
mkdir -p ${out}/jobs/muse
mkdir -p ${out}/jobs/mutect
cd $out
cat $sampleF|tail -n+2|while read line;
do
if [ -z "$line" ]
then
exit 0
fi
sample=$(echo $line|cut -d ' ' -f1)
tumor=$(echo $line|cut -d ' ' -f2)
normal=$(echo $line|cut -d ' ' -f3)
type=$(echo $line|cut -d ' ' -f4)
mkdir -p ${out}/${sample}
if [ ! -f "${out}/${sample}/${sample}_tumor_final.bam" ]
then
ln -s ${path}/${tumor}.bam ${out}/${sample}/${sample}_tumor_final.bam
ln -s ${path}/${tumor}.bam.bai ${out}/${sample}/${sample}_tumor_final.bam.bai
fi
if [ ! -f "${out}/${sample}/${sample}_normal_final.bam" ]
then
ln -s ${path}/${normal}.bam ${out}/${sample}/${sample}_normal_final.bam
ln -s ${path}/${normal}.bam.bai ${out}/${sample}/${sample}_normal_final.bam.bai
fi
~/EnsembleVariantCallingPipeline/strelka_template.sh $email $sample $ref $out $type $walltime $queue bam
~/EnsembleVariantCallingPipeline/varscan_template.sh $email $sample $ref $out $walltime $queue
~/EnsembleVariantCallingPipeline/muse_template.sh $email $sample $ref $out $type $dbSNP $walltime $queue
~/EnsembleVariantCallingPipeline/mutect_template.sh $email $sample $ref $out $pon $type $dbSNP $walltime $queue ${interval_list} $refine
done
| true |
89b2223ece1f865329321dd1393eb6abb553c29e
|
Shell
|
hbackup/data_selection
|
/scripts/collect-freqs.sh
|
UTF-8
| 329 | 3.03125 | 3 |
[] |
no_license
|
#!/bin/bash
if [ ! -f ~/corpus/europarl/freq.fr.sorted.txt ]; then
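# The awk one-liner below builds an in-memory word -> count map over each cleaned
# corpus file; the counts are then sorted numerically in descending order into
# freq.<lang>.sorted.txt.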
for i in en fr; do
cat ~/corpus/europarl/clean.$i | awk '{for(i=1;i<=NF;i++) m[$i]++}END{for(i in m) print i,m[i]}' > ~/corpus/europarl/freq.$i.txt
cat ~/corpus/europarl/freq.$i.txt | sort -k2 -n -r > ~/corpus/europarl/freq.$i.sorted.txt
done
fi
| true |
98433030f5426537d9546ab14ff2ca525321e540
|
Shell
|
PentiumYG/linux-addition
|
/add.sh
|
UTF-8
| 83 | 2.5625 | 3 |
[] |
no_license
|
#!/bin/bash
echo enter the first number
read a
echo enter the second number
read b
c=$((a+b))
echo sum is $c
| true |
ac483a38367c3bae39072baaf62d38d7cfc31885
|
Shell
|
effective-energy/ale-core
|
/bin/stack2nix.sh
|
UTF-8
| 503 | 2.890625 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env nix-shell
#!nix-shell -p bash nix coreutils cabal-install cabal2nix nix-prefetch-git -i bash
nix_file="ale.nix"
nix_file_dir="build/nix"
root_rel="../.."
ale_root=$(dirname -- $(readlink -f -- "${BASH_SOURCE[0]}"))/../
mkdir -p "$ale_root/$nix_file_dir"
pushd "$ale_root/$nix_file_dir" > /dev/null
stack2nix=$(nix-build -Q --no-out-link -A stack2nix "$root_rel")/bin/stack2nix
$stack2nix --test "$@" "$root_rel" > "$nix_file".new
mv "$nix_file".new "$nix_file"
popd > /dev/null
| true |
5563de142a9039d8a607bc86eff460d3e608c2a3
|
Shell
|
runeio/platform
|
/runeio/generic/dir-packages/rune-lib/sqm/sqm-rebuild-conf.sh
|
UTF-8
| 804 | 3.6875 | 4 |
[] |
no_license
|
#!/bin/sh
check_epoch_file () {
if [ ! -e ${1} ]; then
echo "$0: epoch file: ${1} Not found"
exit 1
fi
}
sqmconf=/etc/config/sqm
rm -f ${sqmconf}
sqm0epoch=/tmp/sqm.config.epoch.eth0
sqm1epoch=/tmp/sqm.config.epoch.eth1
sqm0conf=/tmp/config.sqm.eth0
sqm1conf=/tmp/config.sqm.eth1
if [ -e ${sqm0conf} ]; then
check_epoch_file ${sqm0epoch}
echo -n "# " >> ${sqmconf}
cat ${sqm0epoch} >> ${sqmconf}
echo " " >> ${sqmconf}
cat ${sqm0conf} >> ${sqmconf}
echo " " >> ${sqmconf}
else
if [ ! -e ${sqm1conf} ]; then
echo "$0: Neither of SQM configs for eth0 or eth1 exist"
exit 2
fi
fi
if [ -e ${sqm1conf} ]; then
check_epoch_file ${sqm1epoch}
echo -n "# " >> ${sqmconf}
cat ${sqm1epoch} >> ${sqmconf}
echo " " >> ${sqmconf}
cat ${sqm1conf} >> ${sqmconf}
echo " " >> ${sqmconf}
fi
| true |
19f3da27352436521f892c7fb3a58960e15553df
|
Shell
|
computator/salt-deluge-state
|
/copy-torrents.sh
|
UTF-8
| 1,352 | 3.21875 | 3 |
[] |
no_license
|
{% set torrent_root = '/var/lib/deluged/torrents' -%}
{% set subscription_file = salt['pillar.get']('deluge:yarss:subscription_file') -%}
{% if subscription_file -%}
{% import_yaml salt['pillar.get']('deluge:yarss:subscription_file') as subscriptions -%}
{% endif -%}
{% set subscriptions = salt['pillar.get']('deluge:yarss:subscriptions', subscriptions|default({}), merge=true) -%}
{% set r = subscriptions.regex|default() -%}
{% set ext_pattern = '[^/]*\\.(?:' + subscriptions.file_ext_pattern|default('mkv|mp4|avi') + ')' -%}
#!/bin/bash
declare -A torrent_match
torrent_match=(
{%- for name, pattern in subscriptions.patterns|dictsort %}
{%- if pattern.regex|default() %}
['{{ name }}']='{{ pattern.regex + ext_pattern }}'
{%- else %}
['{{ name }}']='{{ r.script_prefix|default('') + (pattern.match if pattern.match|default() else (pattern if pattern else name))|replace(' ', '.') + r.suffix|default('.*S\\d{2}E\\d{2}.*(?:720|1080)p') + ext_pattern }}'
{%- endif -%}
{%- endfor %}
)
for torrent in "${!torrent_match[@]}"
do
src=$(find '{{ torrent_root }}/queue' -type f | grep -iP "${torrent_match[$torrent]}")
if [[ "$src" ]]
then
[ -d "{{ subscriptions.copy_target }}/$torrent/" ] || mkdir "{{ subscriptions.copy_target }}/$torrent/"
echo "$src" | xargs -d '\n' cp -uvt "{{ subscriptions.copy_target }}/$torrent/"
fi
done
| true |
be38eb6140bbaa13fb13900ea42eeafe1ba565d7
|
Shell
|
rem7/g
|
/libexec/g-resolve
|
UTF-8
| 253 | 2.859375 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Usage: g resolve
# Summary: resolves all the dependencies in a golangdeps file
# Help: g resolve
set -e
source $_G_ROOT/share/g/meta.sh
setGoPath
while read line; do
echo "Resolving $line"
g get $line
done < golangdeps
| true |
cb081f4d356820bf95b84a8dc3b0817e790a192b
|
Shell
|
lprashant-94/scripts
|
/bash_init.sh
|
UTF-8
| 1,374 | 3.296875 | 3 |
[] |
no_license
|
#!/bin/bash
db_set() {
echo "$1,$2" >> ~/database
}
db_get() {
grep "^$1," ~/database | sed -e "s/^$1,//" | tail -n 1
}
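# Usage sketch (hypothetical key/value), not part of the original functions:
#   db_set site_owner prashant
#   db_get site_owner   # -> prashant (last write wins, thanks to tail -n 1)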
_db_suggest() {
local cur
local stringreply
COMPREPLY=()
cur=${COMP_WORDS[COMP_CWORD]}
if (($COMP_CWORD == 1 ))
then
stringreply=$(cat /home/prashant/database | sed -e "s/,.*//" | grep "$cur" | tr '\n' ' ' )
COMPREPLY=($stringreply)
else
return 0
fi
}
function countdown(){
date1=$((`date +%s` + $1));
while [ "$date1" -ge `date +%s` ]; do
echo -ne "$(date -u --date @$(($date1 - `date +%s`)) +%H:%M:%S)\r";
sleep 0.1
done
}
function stopwatch(){
date1=`date +%s`;
while true; do
echo -ne "$(date -u --date @$((`date +%s` - $date1)) +%H:%M:%S)\r";
sleep 0.1
done
}
alias urldecode='python -c "import sys, urllib as ul; \
print ul.unquote_plus(sys.argv[1])"'
alias urlencode='python -c "import sys, urllib as ul; \
print ul.quote_plus(sys.argv[1])"'
alias beutify_json='python -mjson.tool'
complete -F _db_suggest db_get
function datonis_sign_in(){
echo "$1,$2"
curl -X POST -H 'Content-Type:application/json' -d "{\"email\":\"$1\",\"password\":\"$2\"}" 'https://api.datonis.io/api_sign_in'
}
alias ks=ls
alias sl=ls
alias s=ls
alias LS=ls
function epoch_con(){
date -d "@$1"
}
| true |
e8a83caddc235f16d029e472077e3fe68e66e88a
|
Shell
|
HongyuanWu/lncRNA_analysis
|
/scripts/htseq_count.sh
|
UTF-8
| 621 | 3.40625 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
if (($# == 0)); then
echo "Usage:"
echo "-s = sample ID"
echo "-m = Mapping directory"
echo "-g = Stringtie GTF"
echo "-o = path for output BAMs"
exit 2
fi
while getopts s:m:g:o: option
do
case "${option}"
in
s) SAMPLEID=${OPTARG};;
g) STRGTF=${OPTARG};;
m) MAPDIR=${OPTARG};;
o) OUTDIR=${OPTARG};;
esac
done
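# Example invocation (hypothetical paths and sample ID):
#   ./htseq_count.sh -s sample01 -m /data/star_out -g /data/stringtie.gtf -o /data/counts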
echo $OUTDIR
if [ ! -d $OUTDIR ]; then
mkdir -p $OUTDIR
fi
htseq-count -r pos -f bam -i gene_id -s reverse $MAPDIR/"$SAMPLEID"Aligned.sortedByCoord.out.bam $STRGTF > "$OUTDIR"/"$SAMPLEID".counts
# END
| true |
186905e84d5a74da012cc241d2d2e12f88207e0a
|
Shell
|
phper5/docker
|
/shadowsocks/shadowsocks
|
UTF-8
| 344 | 2.734375 | 3 |
[] |
no_license
|
#!/bin/sh
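# Usage (as implemented below):
#   ./shadowsocks server [config]   # defaults to /etc/shadowsocks/server.js
#   ./shadowsocks client [config]   # defaults to /etc/shadowsocks/007.js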
case $1 in
server)
if [ "$2"x = ""x ]; then
ssserver -c /etc/shadowsocks/server.js
else
ssserver -c $2
fi
;;
client)
if [ "$2"x = ""x ]; then
sslocal -c /etc/shadowsocks/007.js
else
sslocal -c $2
fi
;;
esac
| true |
734f140176295bf3b50c05088b2f50d76abc425c
|
Shell
|
MorrellLAB/Deleterious_GP
|
/Analysis_Scripts/Genetic_Analysis/Summarize_Pairwise_Diversity.sh
|
UTF-8
| 1,288 | 3.171875 | 3 |
[] |
no_license
|
#!/bin/bash
# Summarize pairwise diversity for various partitions of SNPs in the genomic
# prediction experiment. This will use the imputed SNPs.
# Set paths
BASE="/Volumes/LaCie/Genomic_Prediction/Summary_Stats/"
VCF="/Volumes/LaCie/Genomic_Prediction/Summary_Stats/AllChr_AlphaPeel_VCF.vcf"
PARTITIONS="/Users/tomkono/Dropbox/GitHub/Deleterious_GP/Results/SNP_Annotations"
# Set functional classes and cycles
CYCLES=("parents" "cycle1" "cycle2" "cycle3" "All")
CLASSES=("Noncoding" "Synonymous" "Nonsynonymous" "Deleterious" "All")
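# Each vcftools --site-pi run below writes per-site diversity to a file named
# GP_<cycle>_<class> plus the suffix vcftools appends for --site-pi output (typically .sites.pi).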
for CYC in ${CYCLES[@]}
do
for FUNC in ${CLASSES[@]}
do
if [[ ${CYC} == "All" ]]
then
if [[ ${FUNC} == "All" ]]
then
vcftools --vcf ${VCF} --site-pi --out GP_${CYC}_${FUNC}
else
vcftools --vcf ${VCF} --snps ${PARTITIONS}/GP_${FUNC}.txt --site-pi --out GP_${CYC}_${FUNC}
fi
else
if [[ ${FUNC} == "All" ]]
then
vcftools --vcf ${VCF} --keep ${BASE}/${CYC}.list --site-pi --out GP_${CYC}_${FUNC}
else
vcftools --vcf ${VCF} --snps ${PARTITIONS}/GP_${FUNC}.txt --keep ${BASE}/${CYC}.list --site-pi --out GP_${CYC}_${FUNC}
fi
fi
done
done
| true |
70b7985d8d389fbf9fcae62efa43dac300d47311
|
Shell
|
Learner-Bee/Shell
|
/shell脚本/4_file-exist.sh
|
UTF-8
| 147 | 2.75 | 3 |
[] |
no_license
|
#!/bin/bash
echo "please input a filename"
read a
if test -e /home/lilybo/shell-learn/$a
then
echo "$a exists"
else echo "$a does not exist"
fi
| true |
312b8559dac70f008f84424a1c19773220f95e2f
|
Shell
|
adamflanagan/dotfiles
|
/docker/aliases.zsh
|
UTF-8
| 431 | 2.53125 | 3 |
[
"MIT"
] |
permissive
|
alias dc='docker-compose'
alias dcrm='docker-compose rm'
alias dck='docker-compose kill'
alias dms='docker-machine start default'
alias dmr='docker-machine restart default'
alias dmip='docker-machine ip default'
alias dme='eval "$(docker-machine env default)"'
function dcu() {
if [ -f ./docker-compose.local.yml ]; then
docker-compose --file ./docker-compose.local.yml up
else
docker-compose up
fi
}
| true |
4928c2508075c5d5c2c5940816aa86fb2e98fcb1
|
Shell
|
xo/usql-logo
|
/gen.sh
|
UTF-8
| 953 | 3.296875 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
SRC="$(cd -P "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
pushd $SRC &> /dev/null
LOGO=usql
ICON=icon
OPTIMIZED=${LOGO}-optimized
MINIMIZED=${LOGO}-minimized
WIDTH=220
HEIGHT=80
# optimize
svgo \
--pretty \
--indent=2 \
--precision=4 \
--output=$OPTIMIZED.svg \
${LOGO}.svg
# remove width + height attributes and convert to viewBox
#perl -pi -e 's/ (width|height)="100%"//g' $OPTIMIZED.svg
#perl -pi -e 's/width="90" height="90"/viewBox="0 0 90 90"/' $OPTIMIZED.svg
# minimize
svgo \
--precision=4 \
--output=$MINIMIZED.svg \
$OPTIMIZED.svg
# generate png
inkscape \
--export-area-page \
--export-width=$((WIDTH*8)) \
--export-height=$((HEIGHT*8)) \
--export-type=png \
-o $LOGO.png \
$LOGO.svg
for i in 32 48 64 128 256 512; do
# generate png
inkscape \
--export-area-page \
--export-width=$i \
--export-height=$i \
--export-type=png \
-o $ICON-${i}x${i}.png \
$ICON.svg
done
popd &> /dev/null
| true |
a3b7518d86590e50577c0c6c5ca3654f83b09437
|
Shell
|
suntech123/BigData
|
/run-hive1-auto.sh
|
UTF-8
| 30,932 | 3.96875 | 4 |
[] |
no_license
|
#!/bin/bash
#Purpose: The script will read a file containing a list of tables, one table per row
# in the format of SCHEMA.TABLE_NAME.COLUMN. The COLUMN is an optional entry.
# If a COLUMN is specified sqoop will use that column to split the extract
# among the mappers using that column. It then calls a sqoop job to extract the
# data from the database and put the data in HDFS. Lastly, a script will be executed
# to import the data into Splice Machine.
#
# Parameter:
# 1. Sqoop Configuration File - File containing the sqoop parameters
# 2. Table List File - File containing list of table(s) in the format of SCHEMA.TABLE_NAME.COLUMN:MAPPERS
# where COLUMN and MAPPERS are optional
# 3. Splice Schema Name - The name of the schema in Splice to import to
# 4. Import File Path - The path to the directory containing the import sql statements.
# Each table being imported must have an associated file in this directory
# named in the format import-<schema>-<table>.sql. The file names are case
# sensitive.
#
#    5. Upsert File Path - The path to the directory containing the upsert sql statements,
#                              named upsert-<schema>-<table>.sql (used for incremental loads).
#    6. Query File Path - The path to the directory containing optional per-table query files,
#                              named query-<schema>-<table>.sql.
#    7. Log File Name - The log file scanned for sqoop and Splice import status.
#
# Usage: ./run-sqoop-full.sh <config file> <table list file> <splice schema name> <import file path> <upsert file path> <query file path> <log file name>
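# Example table list entry in the documented format (hypothetical values):
#   DBO.CUSTOMER.LAST_MODIFIED:8
# Note that the read loop below actually splits each line on '.' into additional
# control fields (schema_id, table_id, splice_tablename, frequency, ...); see the
# "while IFS=. read ..." statement.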
SQOOP_CONFIG_FILE=$1
FILE_LIST=$2
SPLICE_SCHEMA=$3
IMPORT_PATH=$4
UPSERT_PATH=$5
QUERY_PATH=$6
LOG_FILE=$7
SQLSERVER_DBO="dbo"
SQLSERVER_DBOC="DBO"
COLON=":"
DEFAULT_MAPPERS="8"
JOB_RUN_DATE=$(date +"%Y-%m-%d %T.0")
JOB_RUN_DATE1=$(date +"%Y-%m-%d-%T")
jobstartdate=$(date +"%Y-%m-%d %T.0")
# check for correct number of parameters
if [ $# -eq 7 ]; then
# check if the sqoop config file exists
if [ ! -f $SQOOP_CONFIG_FILE ]; then
echo $SQOOP_CONFIG_FILE not found
exit 1
fi
# check if the file list exists
if [ ! -f $FILE_LIST ]; then
echo $FILE_LIST not found
exit 1
else
cp $FILE_LIST $FILE_LIST-FAILED
# loop through th file list and parse each line
cat $FILE_LIST | while IFS=. read schema_id table_id splice_tablename frequency schema loadtype sourcetype hive_diff hive_cdc sqooprequired table column
do {
echo Exporting $schema.$table
loadtype1=$loadtype
if [ $column ]; then
echo Splitting on $column
fi
# check if the schema is dbo
if [ "$schema" == "$SQLSERVER_DBO" ] || [ "$schema" == "$SQLSERVER_DBOC" ]; then
echo Has dbo as schema
TABLE=$table
else
echo Does NOT have dbo as schema
TABLE=$schema.$table
fi
echo $TABLE
# Determine if there was a column specified then call sqoop accordingly optionally passing in the --split-by value
# Also checks if the number of mappers is specified
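# e.g. (hypothetical) a split-by value of "ORDER_ID:16" yields COLUMN=ORDER_ID and MAPPERS=16;
# a bare "ORDER_ID" falls back to DEFAULT_MAPPERS.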
if [ $column ]; then
COLON_INDEX=$(expr index $column $COLON)
echo Colon Index: $COLON_INDEX
if [ $COLON_INDEX -gt 0 ]; then
COLUMN_END_POS=$(($COLON_INDEX-1))
START_MAPPER_POS=$(($COLON_INDEX+1))
END_MAPPER_POS=$((${#column}-$COLON_INDEX))
echo Full column length: ${#column}
echo Start Mapper Pos: $START_MAPPER_POS
echo End Mapper Pos: $END_MAPPER_POS
COLUMN=$(expr substr $column 1 $COLUMN_END_POS)
MAPPERS=$(expr substr $column $START_MAPPER_POS $END_MAPPER_POS)
else
COLUMN=$column
MAPPERS=$DEFAULT_MAPPERS
fi
echo Column: $COLUMN
echo "upper"
echo Mappers: $MAPPERS
else
COLON_INDEX=$(expr index $table $COLON)
echo Colon Index: $COLON_INDEX
if [ $COLON_INDEX -gt 0 ]; then
COLUMN_END_POS=$(($COLON_INDEX-1))
START_MAPPER_POS=$(($COLON_INDEX+1))
END_MAPPER_POS=$((${#table}-$COLON_INDEX))
echo Full column length: ${#column}
echo Start Mapper Pos: $START_MAPPER_POS
echo End Mapper Pos: $END_MAPPER_POS
TABLE=$(expr substr $table 1 $COLUMN_END_POS)
MAPPERS=$(expr substr $table $START_MAPPER_POS $END_MAPPER_POS)
table=$TABLE
else
MAPPERS=$DEFAULT_MAPPERS
fi
echo $schema $SQLSERVER_DBO $SQLSERVER_DBOC
if [ "$schema" != "$SQLSERVER_DBO" ] || [ "$schema" != "$SQLSERVER_DBOC" ]; then
echo "inner"
TABLE="$schema.$table"
fi
echo Table: $TABLE
echo Mappers: $MAPPERS
echo "lower"
fi
if [ $hive_diff = 'N' ]; then
# check if there is a query file for this table
queryFile=$QUERY_PATH/query-$SPLICE_SCHEMA-$table.sql
echo The query file is: $queryFile
if [ -f "$queryFile" ] || [ $sqooprequired = 'N' ]; then
query=$(cat $queryFile)
######### New Changes start ####################
if [ $loadtype = 'I' ]; then #for incremental load
MAXFILE=/home/splice/cetera/sqoop/runtemp/$SPLICE_SCHEMA-$table-$JOB_RUN_DATE1-$loadtype1-max.sql
echo "select max(end_run_date) from spliceadmin.last_run where schema_id = $schema_id and table_id = $table_id and status = 'SUCCESS';" > $MAXFILE
LAST_RUN_DTM=`sqlshell.sh -f $MAXFILE | head -11 | tail -1 | awk '{print $1" "$2}'`
rm $MAXFILE
echo $LAST_RUN_DTM
INCRCOLFILE1=/home/splice/cetera/sqoop/runtemp/$SPLICE_SCHEMA-$table-$JOB_RUN_DATE1-$loadtype1-INC1.sql
INCRCOLFILE2=/home/splice/cetera/sqoop/runtemp/$SPLICE_SCHEMA-$table-$JOB_RUN_DATE1-$loadtype1-INC2.txt
echo "select '#'||column_name||'#' from spliceadmin.refresh_control_tbl where schemaname = '$SPLICE_SCHEMA' and tablename = '$table';" > $INCRCOLFILE1
sqlshell.sh -f $INCRCOLFILE1 | grep '#' | grep -v 'splice> select' | awk -F# '{print $2}' > $INCRCOLFILE2
mapfile -t COLUMN_INCREMENTAL < $INCRCOLFILE2
echo ${#COLUMN_INCREMENTAL[@]}
rm $INCRCOLFILE1 $INCRCOLFILE2
where_condition=""
if [[ "${LAST_RUN_DTM}" = "NULL " ]]; then
if [ $sqooprequired = 'N' ] ; then
importFile=$IMPORT_PATH/import-$SPLICE_SCHEMA-$table.sql
cp $importFile $IMPORT_PATH/import-$SPLICE_SCHEMA-$table-temp.sql
sed -i '/XXXX/d' $IMPORT_PATH/import-$SPLICE_SCHEMA-$table-temp.sql
loadtype1=$loadtype
loadtype=F
else
echo "NO LAST RUN - FULL LOAD"
loadtype1=$loadtype
loadtype=F
fi
else
if [ $sqooprequired = 'N' ]; then
importFile=$IMPORT_PATH/import-$SPLICE_SCHEMA-$table.sql
cp $importFile $IMPORT_PATH/import-$SPLICE_SCHEMA-$table-temp.sql
sed -i "s/'XXXX'/'$LAST_RUN_DTM'/g" $IMPORT_PATH/import-$SPLICE_SCHEMA-$table-temp.sql
else
count=0
for t in "${COLUMN_INCREMENTAL[@]}"
do
if [[ $count -eq 1 ]]; then
where_condition+=" and "
fi
if [ $sourcetype = 'O' ]; then
where_condition+="$t>= TO_TIMESTAMP('$LAST_RUN_DTM', 'YYYY-MM-DD HH24:MI:SS.FF') and $t<= TO_TIMESTAMP('$JOB_RUN_DATE', 'YYYY-MM-DD HH24:MI:SS.FF')" #oracle
else
where_condition+="$t>= '$LAST_RUN_DTM' and $t<= '$JOB_RUN_DATE'" #SQLSERVER
fi
count=1
done
echo $where_condition
query=${query/'$CONDITIONS'/"$where_condition and \$CONDITIONS"}
fi
fi
fi
######### New Changes End ####################
echo $query
else
query=""
echo Query file not found. $query
fi
# Make sure the directories exist in hdfs, clear the directory, and set permissions
freeout=`free`
freeout1=`cat /proc/meminfo`
freeout2=`ulimit -a`
echo "$freeout"
echo "$freeout1"
echo "$freeout2"
sudo -su hdfs hadoop fs -mkdir -p /data/sqoop/$SPLICE_SCHEMA/$table
sudo -su hdfs hadoop fs -chmod 777 /data/sqoop/$SPLICE_SCHEMA/$table
sudo -su hdfs hadoop fs -rm -skipTrash /data/sqoop/$SPLICE_SCHEMA/$table/*
sudo -su hdfs hadoop fs -mkdir -p /bad/$SPLICE_SCHEMA/$table
sudo -su hdfs hadoop fs -chmod 777 /bad/$SPLICE_SCHEMA/$table
# exit script if an error is encountered
set -e
echo The query param is: $query!
echo The column param is: $column!
if [ $sqooprequired = 'Y' ]; then
remark="Sqoop started"
jobstartdate=$(date +"%Y-%m-%d %T.0")
sqooprowcount=0
splicerowcount=0
sqlshell.sh << !
insert into spliceadmin.last_run values('$SPLICE_SCHEMA','$splice_tablename','$jobstartdate',$sqooprowcount,$splicerowcount,'$frequency',$schema_id,$table_id,'RUNNING','$jobstartdate','$remark');
!
if [ -z "$query" ]; then
if [ $column ]; then
echo Execute sqoop with split-by
sqoop import -Dhadoop.security.credential.provider.path=jceks://hdfs/data/sqoop/keystore/hadoop-credentials-store --options-file $SQOOP_CONFIG_FILE --append --table $TABLE --split-by $COLUMN --target-dir /data/sqoop/$SPLICE_SCHEMA/$table --m $MAPPERS
else
echo Execute sqoop without split-by
sqoop import -Dhadoop.security.credential.provider.path=jceks://hdfs/data/sqoop/keystore/hadoop-credentials-store --options-file $SQOOP_CONFIG_FILE --append --table $TABLE --target-dir /data/sqoop/$SPLICE_SCHEMA/$table --m $MAPPERS
fi
else
if [ $column ]; then
echo Execute sqoop with split-by
sqoop import -Dhadoop.security.credential.provider.path=jceks://hdfs/data/sqoop/keystore/hadoop-credentials-store --options-file $SQOOP_CONFIG_FILE --append --query "$query" --split-by $COLUMN --target-dir /data/sqoop/$SPLICE_SCHEMA/$table --m $MAPPERS
else
echo Sqoop extract for $TABLE failed because a query file is present but no column specified for the split-by.
remark="Sqoop failed as split by column not specified"
jobenddate=$(date +"%Y-%m-%d %T.0")
sqlshell.sh << !
update spliceadmin.last_run set status = 'FAILED', remark = '$remark', END_RUN_DATE = '$jobenddate' where schema_id = $schema_id and table_id = $table_id and START_RUN_DATE = '$jobstartdate';
!
exit 1
fi
fi
if [ $? -gt 0 ]; then
echo Sqoop extract failed
remark="Sqoop failed"
jobenddate=$(date +"%Y-%m-%d %T.0")
sqlshell.sh << !
update spliceadmin.last_run set status = 'FAILED', remark = '$remark', END_RUN_DATE = '$jobenddate' where schema_id = $schema_id and table_id = $table_id and START_RUN_DATE = '$jobstartdate';
!
exit 1
else
echo Export of $SPLICE_SCHEMA.$table successful
echo Importing $SPLICE_SCHEMA.$table to Splice Machine
remark="Sqoop Completed. Splice Import running."
if [ $sqooprequired = 'Y' ] ; then
sqooprowcount=$(grep -i "mapreduce.ImportJobBase: Retrieved" $LOG_FILE | tail -1 | cut -d" " -f6)
else
sqooprowcount=0
fi
jobenddate=$(date +"%Y-%m-%d %T.0")
sqlshell.sh << !
update spliceadmin.last_run set status = 'RUNNING', remark = '$remark', SQOOP_ROW_COUNT = $sqooprowcount, END_RUN_DATE = '$jobenddate' where schema_id = $schema_id and table_id = $table_id and START_RUN_DATE = '$jobstartdate';
!
fi
fi
# Import data from HDFS to Splice
if [ $sqooprequired = 'Y' ] ; then
if [ $loadtype = 'I' ]; then ##Upsert
IMPORT_FILE=$UPSERT_PATH/upsert-$SPLICE_SCHEMA-$table.sql
else
IMPORT_FILE=$IMPORT_PATH/import-$SPLICE_SCHEMA-$table.sql
fi
else
IMPORT_FILE=$IMPORT_PATH/import-$SPLICE_SCHEMA-$table-temp.sql
fi
if [ -f $IMPORT_FILE ]; then
sqlshell.sh -f $IMPORT_FILE
STATUS=$(grep "ERROR 42Y55\|ERROR XIE0M\|ERROR XJ001\|ERROR SE\|ERROR 08006\|ClosedChannelException\|ERROR 42Y03\|ERROR:\|ERROR 42Z23" $LOG_FILE | wc -l)
echo $STATUS
if [ $STATUS -gt 0 ]; then
remark="Splice import failed"
jobenddate=$(date +"%Y-%m-%d %T.0")
sqlshell.sh << !
update spliceadmin.last_run set status = 'FAILED', remark = '$remark', END_RUN_DATE = '$jobenddate' where schema_id = $schema_id and table_id = $table_id and START_RUN_DATE = '$jobstartdate';
!
echo Splice import failed
exit 1
else
sqlshell.sh << !
update $SPLICE_SCHEMA.$splice_tablename set INSERT_LOAD_TS = '$jobenddate' where INSERT_LOAD_TS is null;
!
echo Import of $SPLICE_SCHEMA.$table completed
echo $SPLICE_SCHEMA.$table
# sed -i "/$schema.$loadtype1.$sourcetype.$sqooprequired.$table/d" $FILE_LIST-FAILED
sed -i "/$schema_id.$table_id/d" $FILE_LIST-FAILED
# grep -v "$schema.$table" $FILE_LIST-FAILED > $FILE_LIST-FAILED1
# mv $FILE_LIST-FAILED1 $FILE_LIST-FAILED
echo $SPLICE_SCHEMA-$table-$JOB_RUN_DATE1-$loadtype1-RC.sql
SPLICETABLE=`grep -i 'SYSCS_UTIL' $IMPORT_FILE | cut -d ',' -f2 | cut -d\' -f2`
TABLEROWCOUNT=/home/splice/cetera/sqoop/runtemp/$SPLICE_SCHEMA-$table-$JOB_RUN_DATE1-$loadtype1-RC.sql
LASTRUNDETAIL=/home/splice/cetera/sqoop/runtemp/$SPLICE_SCHEMA-$table-$JOB_RUN_DATE1-$loadtype1-LR.sql
echo "select '##'||trim(cast(count(*) as char(100)))||'##' from $SPLICE_SCHEMA.$SPLICETABLE;" > $TABLEROWCOUNT
#row_count=`sqlshell.sh -f $TABLEROWCOUNT | grep '##' | grep -v '||trim'| awk -F'#' '{print $3}'`
if [ ${#row_count} -eq 0 ];then
echo "select '##'||trim(cast(count(*) as char(100)))||'##' from $SPLICE_SCHEMA.\"$SPLICETABLE\";" > $TABLEROWCOUNT
#row_count=`sqlshell.sh -f $TABLEROWCOUNT | grep '##' | grep -v '||trim'| awk -F'#' '{print $3}'`
fi
echo "splice-row-count:" $row_count
echo " update spliceadmin.last_run set status = 'SUCCESS', remark= '$remark', END_RUN_DATE = '$jobenddate' where schema_id = $schema_id and table_id = $table_id and START_RUN_DATE = '$jobstartdate';"
remark="Splice import successful"
jobenddate=$(date +"%Y-%m-%d %T.0")
sqlshell.sh << !
update spliceadmin.last_run set status = 'SUCCESS', remark= '$remark', END_RUN_DATE = '$jobenddate' where schema_id = $schema_id and table_id = $table_id and START_RUN_DATE = '$jobstartdate';
!
# echo "insert into spliceadmin.last_run values('$SPLICE_SCHEMA','$SPLICETABLE','$JOB_RUN_DATE',$row_count,0);" > $LASTRUNDETAIL
# sqlshell.sh -f $LASTRUNDETAIL
# rm $LASTRUNDETAIL
rm $TABLEROWCOUNT
fi
else
echo $IMPORT_FILE not found.... skipping import to Splice Machine
remark="Import file not found."
jobenddate=$(date +"%Y-%m-%d %T.0")
sqlshell.sh << !
update spliceadmin.last_run set status = 'FAILED', remark = '$remark', END_RUN_DATE = '$jobenddate' where schema_id = $schema_id and table_id = $table_id and START_RUN_DATE = '$jobstartdate';
!
exit 1
fi
# do not exit script if there are errors
set +e
else
queryFile=$QUERY_PATH/query-$SPLICE_SCHEMA-$table.sql
echo The query file is: $queryFile
if [ -f "$queryFile" ]; then
query=$(cat $queryFile)
else
query=""
echo Query file not found. $query
fi
# Make sure the directories exist in hdfs, clear the directory, and set permissions
sudo -su hdfs hadoop fs -mkdir -p /data/hive/$SPLICE_SCHEMA/$table
sudo -su hdfs hadoop fs -chmod 777 /data/hive/$SPLICE_SCHEMA/$table
sudo -su hdfs hadoop fs -rm -skipTrash /data/hive/$SPLICE_SCHEMA/$table/*
sudo -su hdfs hadoop fs -mkdir -p /bad/$SPLICE_SCHEMA/$table
sudo -su hdfs hadoop fs -chmod 777 /bad/$SPLICE_SCHEMA/$table
sudo -su hdfs hadoop fs -mkdir -p /data/diff/$SPLICE_SCHEMA/$table
sudo -su hdfs hadoop fs -chmod 777 /data/diff/$SPLICE_SCHEMA/$table
sudo -su hdfs hadoop fs -rm -skipTrash /data/diff/$SPLICE_SCHEMA/$table/*
remark="Sqoop started"
jobstartdate=$(date +"%Y-%m-%d %T.0")
sqooprowcount=0
splicerowcount=0
sqlshell.sh << !
insert into spliceadmin.last_run values('$SPLICE_SCHEMA','$splice_tablename','$jobstartdate',$sqooprowcount,$splicerowcount,'$frequency',$schema_id,$table_id,'RUNNING','$jobstartdate','$remark');
!
if [ -z "$query" ]; then
if [ $column ]; then
echo Execute sqoop with split-by
echo "first"
sqoop import -Dhadoop.security.credential.provider.path=jceks://hdfs/data/sqoop/keystore/hadoop-credentials-store --options-file $SQOOP_CONFIG_FILE --append --table $TABLE --split-by $COLUMN --target-dir /data/hive/$SPLICE_SCHEMA/$table --m $MAPPERS
else
echo Execute sqoop without split-by
sqoop import -Dhadoop.security.credential.provider.path=jceks://hdfs/data/sqoop/keystore/hadoop-credentials-store --options-file $SQOOP_CONFIG_FILE --append --table $TABLE --target-dir /data/hive/$SPLICE_SCHEMA/$table --m $MAPPERS
fi
else
if [ $column ]; then
echo Execute sqoop with split-by
echo "second"
sqoop import -Dhadoop.security.credential.provider.path=jceks://hdfs/data/sqoop/keystore/hadoop-credentials-store --options-file $SQOOP_CONFIG_FILE --append --query "$query" --split-by $COLUMN --target-dir /data/hive/$SPLICE_SCHEMA/$table --m $MAPPERS
else
echo Sqoop extract for $TABLE failed because a query file is present but no column specified for the split-by.
remark="Sqoop failed as split by column not specified"
jobenddate=$(date +"%Y-%m-%d %T.0")
sqlshell.sh << !
update spliceadmin.last_run set status = 'FAILED', remark = '$remark', END_RUN_DATE = '$jobenddate' where schema_id = $schema_id and table_id = $table_id and START_RUN_DATE = '$jobstartdate';
!
exit 1
fi
fi
if [ $? -gt 0 ]; then
echo Sqoop extract failed
remark="Sqoop failed"
jobenddate=$(date +"%Y-%m-%d %T.0")
sqlshell.sh << !
update spliceadmin.last_run set status = 'FAILED', remark = '$remark', END_RUN_DATE = '$jobenddate' where schema_id = $schema_id and table_id = $table_id and START_RUN_DATE = '$jobstartdate';
!
exit 1
else
echo Export of $SPLICE_SCHEMA.$table successful
echo Importing $SPLICE_SCHEMA.$table to Splice Machine
remark="Sqoop Completed."
if [ $sqooprequired = 'Y' ] ; then
sqooprowcount=$(grep -i "mapreduce.ImportJobBase: Retrieved" $LOG_FILE | tail -1 | cut -d" " -f6)
else
sqooprowcount=0
fi
jobenddate=$(date +"%Y-%m-%d %T.0")
sqlshell.sh << !
update spliceadmin.last_run set status = 'RUNNING', remark = '$remark', SQOOP_ROW_COUNT = $sqooprowcount, END_RUN_DATE = '$jobenddate' where schema_id = $schema_id and table_id = $table_id and START_RUN_DATE = '$jobstartdate';
!
fi
rm /diffkit/diffdata-${SPLICE_SCHEMA}-${table}.txt || true
if [ $hive_cdc = 'N' ];then
# java -jar /home/splice/cetera/sqoop/diffkit/diffkit-hive-app.jar -planfiles //home/splice/cetera/sqoop/diffkit/conf/$SPLICE_SCHEMA/diffkitt-${SPLICE_SCHEMA}-${table}-hive.plan.xml
remark="Diffkit started."
jobenddate=$(date +"%Y-%m-%d %T.0")
sqlshell.sh << !
update spliceadmin.last_run set status = 'FAILED', remark = '$remark', END_RUN_DATE = '$jobenddate' where schema_id = $schema_id and table_id = $table_id and START_RUN_DATE = '$jobstartdate';
!
ssh splice@papplsl040 "java -jar /data/diffkit-hive-app.jar -planfiles //data/conf/$SPLICE_SCHEMA/diffkitt-${SPLICE_SCHEMA}-${table}-hive.plan.xml"
if [ $? -gt 0 ]; then
echo diffkit failed
remark="Diffkit failed"
jobenddate=$(date +"%Y-%m-%d %T.0")
sqlshell.sh << !
update spliceadmin.last_run set status = 'FAILED', remark = '$remark', END_RUN_DATE = '$jobenddate' where schema_id = $schema_id and table_id = $table_id and START_RUN_DATE = '$jobstartdate';
!
exit 1
fi
else
sudo -su hdfs hive -f /home/splice/cetera/sqoop/$SPLICE_SCHEMA/hive-query/hive-query-$SPLICE_SCHEMA-$table.sql
if [ $? -gt 0 ]; then
echo hive failed
remark="hive failed."
jobenddate=$(date +"%Y-%m-%d %T.0")
sqlshell.sh << !
update spliceadmin.last_run set status = 'FAILED', remark = '$remark', END_RUN_DATE = '$jobenddate' where schema_id = $schema_id and table_id = $table_id and START_RUN_DATE = '$jobstartdate';
!
rm /diffkit/diffdata-${SPLICE_SCHEMA}-${table}.txt || true
exit 1
fi
fi
if [ $hive_cdc = 'N' ];then
#echo wait for few moments
# sleep 10
# sed -i '/^$/d' /diffkit/diffdata-${SPLICE_SCHEMA}-${table}.txt
diff_count=$(wc -l /diffkit/diffdata-${SPLICE_SCHEMA}-${table}.txt| cut -d ' ' -f1)
echo $diff_count
sudo -su hdfs hadoop fs -put /diffkit/diffdata-${SPLICE_SCHEMA}-${table}.txt /data/diff/$SPLICE_SCHEMA/$table/
fi
if [ $loadtype = 'I' ]; then ##Upsert
IMPORT_FILE=$UPSERT_PATH/upsert-$SPLICE_SCHEMA-$table.sql
else
IMPORT_FILE=$IMPORT_PATH/import-$SPLICE_SCHEMA-$table.sql
fi
sqlshell.sh -f $IMPORT_FILE
STATUS=$(grep "ERROR 42Y55\|ERROR XIE0M\|ERROR XJ001\|ERROR SE\|ERROR 08006\|ClosedChannelException\|ERROR 42Y03\|ERROR:\|ERROR 42Z23" $LOG_FILE | wc -l)
echo $STATUS
if [ $STATUS -gt 0 ]; then
echo Splice import failed
rm /diffkit/diffdata-${SPLICE_SCHEMA}-${table}.txt
remark="Splice import failed"
jobenddate=$(date +"%Y-%m-%d %T.0")
sqlshell.sh << !
update spliceadmin.last_run set status = 'FAILED', remark = '$remark', END_RUN_DATE = '$jobenddate' where schema_id = $schema_id and table_id = $table_id and START_RUN_DATE = '$jobstartdate';
!
exit 1
else
echo Import of $SPLICE_SCHEMA.$table completed
echo $SPLICE_SCHEMA.$table
# sed -i "/$schema.$loadtype1.$sourcetype.$sqooprequired.$table/d" $FILE_LIST-FAILED
sed -i "/$schema_id.$table_id/d" $FILE_LIST-FAILED
# grep -v "$schema.$table" $FILE_LIST-FAILED > $FILE_LIST-FAILED1
# mv $FILE_LIST-FAILED1 $FILE_LIST-FAILED
echo $SPLICE_SCHEMA-$table-$JOB_RUN_DATE1-$loadtype1-RC.sql
SPLICETABLE=`grep -i 'SYSCS_UTIL' $IMPORT_FILE | cut -d ',' -f2 | cut -d\' -f2`
TABLEROWCOUNT=/home/splice/cetera/sqoop/runtemp/$SPLICE_SCHEMA-$table-$JOB_RUN_DATE1-$loadtype1-RC.sql
LASTRUNDETAIL=/home/splice/cetera/sqoop/runtemp/$SPLICE_SCHEMA-$table-$JOB_RUN_DATE1-$loadtype1-LR.sql
echo "select '##'||trim(cast(count(*) as char(100)))||'##' from $SPLICE_SCHEMA.$SPLICETABLE;" > $TABLEROWCOUNT
#row_count=`sqlshell.sh -f $TABLEROWCOUNT | grep '##' | grep -v '||trim'| awk -F'#' '{print $3}'`
if [ ${#row_count} -eq 0 ];then
echo "select '##'||trim(cast(count(*) as char(100)))||'##' from $SPLICE_SCHEMA.\"$SPLICETABLE\";" > $TABLEROWCOUNT
# row_count=`sqlshell.sh -f $TABLEROWCOUNT | grep '##' | grep -v '||trim'| awk -F'#' '{print $3}'`
fi
echo "splice-row-count:" $row_count
echo " update spliceadmin.last_run set status = 'SUCCESS', remark= '$remark', END_RUN_DATE = '$jobenddate' where schema_id = $schema_id and table_id = $table_id and START_RUN_DATE = '$jobstartdate';"
remark="Splice import successful"
jobenddate=$(date +"%Y-%m-%d %T.0")
sqlshell.sh << !
update spliceadmin.last_run set status = 'SUCCESS', remark= '$remark', END_RUN_DATE = '$jobenddate' where schema_id = $schema_id and table_id = $table_id and START_RUN_DATE = '$jobstartdate';
!
# echo "insert into spliceadmin.last_run values('$SPLICE_SCHEMA','$SPLICETABLE','$JOB_RUN_DATE',$row_count,0);" > $LASTRUNDETAIL
# sqlshell.sh -f $LASTRUNDETAIL
# rm $LASTRUNDETAIL
rm $TABLEROWCOUNT
fi
# hive -e "Truncate table ${SPLICE_SCHEMA}.${table}"
sudo -su hdfs hive -e "Truncate table ${SPLICE_SCHEMA}.${table}; INSERT INTO TABLE ${SPLICE_SCHEMA}.${table} select * from ${SPLICE_SCHEMA}.${table}_ext;"
if [ $? -gt 0 ]; then
echo hive failed
remark="hive failed."
jobenddate=$(date +"%Y-%m-%d %T.0")
sqlshell.sh << !
update spliceadmin.last_run set status = 'FAILED', remark = '$remark', END_RUN_DATE = '$jobenddate' where schema_id = $schema_id and table_id = $table_id and START_RUN_DATE = '$jobstartdate';
!
rm /diffkit/diffdata-${SPLICE_SCHEMA}-${table}.txt || true
exit 1
fi
rm /diffkit/diffdata-${SPLICE_SCHEMA}-${table}.txt || true
# sudo -su hdfs hadoop fs -rm -skipTrash /data/hive/$SPLICE_SCHEMA/$table/*
fi
} < /dev/null; done
fi
else
echo Incorrect number of parameters specified to execute script
echo "Usage: ./run-sqoop-full.sh <config file> <table list file> <splice schema name> <import file path> <upsert file path> <query file path> <log file name>"
exit 1
fi
if [ $? -eq 0 ]; then
echo Sqoop and Splice import job completed.
rm $FILE_LIST-FAILED
exit 0
else
exit 1
fi
| true |
9d6ef22ff1c4d6b8f674f558966845b60a67e32c
|
Shell
|
tharindulak/tempory-ath
|
/docker/deployment-init/files/setup-is.sh
|
UTF-8
| 6,811 | 3.34375 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# ------------------------------------------------------------------------
#
# Copyright 2019 WSO2, Inc. (http://wso2.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
#
# ------------------------------------------------------------------------
source idp-variables.sh
echo "Waiting for IdP to start"
while [[ true ]];
do
PING_STATUS_CODE=$(curl -sL -k -w "%{http_code}" -I "${CELLERY_HUB_IDP_URL}/carbon/admin/login.jsp" -o /dev/null)
if [[ "${PING_STATUS_CODE}" == "000" ]]
then
echo "Ping IdP - No Response"
else
echo "Ping IdP - Status Code ${PING_STATUS_CODE}"
fi
if [[ "${PING_STATUS_CODE}" == "200" ]]
then
break
fi
sleep 5
done
export -n create_google_body=$(cat create-google-idp.xml)
export -n create_github_body=$(cat create-github-idp.xml)
export -n create_oauth2_app_cellery_hub=$(cat create-oauth2-app-cellery-hub.xml)
export -n create_oauth2_app_cli=$(cat create-oauth2-app-cli.xml)
export -n update_cellery_hub_application=$(cat update-cellery-hub-application.xml)
export -n update_cli_application=$(cat update-cli-application.xml)
set -e
unset IFS
args=() i=0
for var in $(compgen -e); do
if [[ $var == CELLERY_HUB_* ]] ;
then
export tempEnvVal=$(echo ${!var})
export create_google_body=$(echo $create_google_body | sed "s#{$var}#${tempEnvVal}#g")
export create_github_body=$(echo $create_github_body | sed "s#{$var}#${tempEnvVal}#g")
export create_oauth2_app_cellery_hub=$(echo $create_oauth2_app_cellery_hub | sed "s#{$var}#${tempEnvVal}#g")
export create_oauth2_app_cli=$(echo $create_oauth2_app_cli | sed "s#{$var}#${tempEnvVal}#g")
export update_cellery_hub_application=$(echo $update_cellery_hub_application | sed "s#{$var}#${tempEnvVal}#g")
export update_cli_application=$(echo $update_cli_application | sed "s#{$var}#${tempEnvVal}#g")
fi
done
split_results(){
export BODY=$(echo $HTTP_RESPONSE | sed -e 's/HTTPSTATUS\:.*//g')
export STATUS=$(echo $HTTP_RESPONSE | tr -d '\n' | sed -e 's/.*HTTPSTATUS://')
}
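# echo_results <success message> <failure message>
# Prints $1 when the last HTTP status captured by split_results is 200; otherwise
# prints $2 together with the status code and the response body.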
echo_results () {
split_results
if [ $STATUS -eq 200 ]; then
tput setaf 2;
echo $1
tput sgr0;
else
tput setaf 1;
echo "$2 , Status code : $STATUS"
tput sgr0;
echo "response from server: $BODY"
fi
}
HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" --header "Content-Type: application/soap+xml;charset=UTF-8" --header "SOAPAction:urn:addIdP" -u ${CELLERY_HUB_IDP_ADMIN_USERNAME}:${CELLERY_HUB_IDP_ADMIN_PASSWORD} --data "$create_google_body" ${CELLERY_HUB_IDP_URL}/services/IdentityProviderMgtService.IdentityProviderMgtServiceHttpsSoap12Endpoint -k)
echo_results "Google IDP added successfully" "Error while adding Google IDP"
HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" --header "Content-Type: application/soap+xml;charset=UTF-8" --header "SOAPAction:urn:addIdP" -u ${CELLERY_HUB_IDP_ADMIN_USERNAME}:${CELLERY_HUB_IDP_ADMIN_PASSWORD} --data "$create_github_body" ${CELLERY_HUB_IDP_URL}/services/IdentityProviderMgtService.IdentityProviderMgtServiceHttpsSoap12Endpoint -k)
echo_results "Github IDP added successfully" "Error while adding Github IDP"
HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" --header "Content-Type: application/soap+xml;charset=UTF-8" --header "SOAPAction:urn:registerOAuthApplicationData" -u ${CELLERY_HUB_IDP_ADMIN_USERNAME}:${CELLERY_HUB_IDP_ADMIN_PASSWORD} --data "$create_oauth2_app_cli" ${CELLERY_HUB_IDP_URL}/services/OAuthAdminService.OAuthAdminServiceHttpsSoap12Endpoint/ -k)
echo_results "CLI OAuth2 application added successfully" "Error while adding CLI OAuth2 application"
HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" --header "Content-Type: application/soap+xml;charset=UTF-8" --header "SOAPAction:urn:registerOAuthApplicationData" -u ${CELLERY_HUB_IDP_ADMIN_USERNAME}:${CELLERY_HUB_IDP_ADMIN_PASSWORD} --data "$create_oauth2_app_cellery_hub" ${CELLERY_HUB_IDP_URL}/services/OAuthAdminService.OAuthAdminServiceHttpsSoap12Endpoint/ -k)
echo_results "Cellery Hub application added successfully" "Error while adding Cellery hub OAuth2 application"
HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" --header "Content-Type: application/soap+xml;charset=UTF-8" --header "SOAPAction:urn:createApplication" -u ${CELLERY_HUB_IDP_ADMIN_USERNAME}:${CELLERY_HUB_IDP_ADMIN_PASSWORD} --data @create-cli-app.xml ${CELLERY_HUB_IDP_URL}/services/IdentityApplicationManagementService.IdentityApplicationManagementServiceHttpsSoap12Endpoint/ -k)
echo_results "CLI service provider created" "Error while creating CLI service provider"
HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" --header "Content-Type: application/soap+xml;charset=UTF-8" --header "SOAPAction:urn:createApplication" -u ${CELLERY_HUB_IDP_ADMIN_USERNAME}:${CELLERY_HUB_IDP_ADMIN_PASSWORD} --data @create-web-portal-app.xml ${CELLERY_HUB_IDP_URL}/services/IdentityApplicationManagementService.IdentityApplicationManagementServiceHttpsSoap12Endpoint/ -k)
echo_results "Cellery Hub Web Portal service provider created" "Error while creating Cellery Hub Web Portal service provider"
HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" --header "Content-Type: application/soap+xml;charset=UTF-8" --header "SOAPAction:urn:updateApplication" -u ${CELLERY_HUB_IDP_ADMIN_USERNAME}:${CELLERY_HUB_IDP_ADMIN_PASSWORD} --data "$update_cellery_hub_application" ${CELLERY_HUB_IDP_URL}/services/IdentityApplicationManagementService.IdentityApplicationManagementServiceHttpsSoap12Endpoint/ -k)
echo_results "Cellery Hub service provider updated with OAuth2 app" "Error while updating Cellery Hub service provider with OAuth2 app"
HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" --header "Content-Type: application/soap+xml;charset=UTF-8" --header "SOAPAction:urn:updateApplication" -u ${CELLERY_HUB_IDP_ADMIN_USERNAME}:${CELLERY_HUB_IDP_ADMIN_PASSWORD} --data "$update_cli_application" ${CELLERY_HUB_IDP_URL}/services/IdentityApplicationManagementService.IdentityApplicationManagementServiceHttpsSoap12Endpoint/ -k)
echo_results "CLI service provider updated with OAuth2 app" "Error while updating CLI service provider with OAuth2 app"
set +e
| true |
83ba3b315926ce0b4f05d4d19814391892834083
|
Shell
|
justariy/termux-root-packages
|
/disabled-packages/squashfuse/build.sh
|
UTF-8
| 456 | 2.65625 | 3 |
[
"Apache-2.0"
] |
permissive
|
TERMUX_PKG_HOMEPAGE=https://github.com/vasi/squashfuse
TERMUX_PKG_DESCRIPTION="FUSE filesystem to mount squashfs archives"
TERMUX_PKG_LICENSE="BSD 2-Clause"
TERMUX_PKG_VERSION=0.1.103
TERMUX_PKG_SHA256=bba530fe435d8f9195a32c295147677c58b060e2c63d2d4204ed8a6c9621d0dd
TERMUX_PKG_SRCURL=https://github.com/vasi/squashfuse/archive/${TERMUX_PKG_VERSION}.tar.gz
TERMUX_PKG_DEPENDS="libfuse"
termux_step_pre_configure () {
aclocal --install
autoreconf -vfi
}
| true |
e855cb48255cafd643bb9ba48718207e3de62bcf
|
Shell
|
64kramsystem/aws-utils-dev
|
/create_s3_bucket.sh
|
UTF-8
| 611 | 2.796875 | 3 |
[] |
no_license
|
list="
bucketname-1
bucketname-2
bucketname-3"
REGION=ap-southeast-2
for BUCKETNAME in $(echo $list)
do
aws s3api create-bucket --bucket $BUCKETNAME --region $REGION --create-bucket-configuration LocationConstraint=$REGION
aws s3api put-bucket-encryption --bucket $BUCKETNAME --server-side-encryption-configuration '{"Rules": [{"ApplyServerSideEncryptionByDefault": {"SSEAlgorithm": "AES256"}}]}'
aws s3api put-public-access-block --bucket $BUCKETNAME --public-access-block-configuration "BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true"
done
| true |
e2bb47bcae351725bae46831cab68c1f080dec81
|
Shell
|
vmware/alb-sdk
|
/create_release.sh
|
UTF-8
| 2,218 | 3.5 | 4 |
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
set -x
assets=""
echo "Usage ./create_release.sh <branch> <release_name>"
REL=$2
BRANCH=$1
if [ $BRANCH = "eng" ]; then
BRANCH="master"
fi
if [ -z $REL ]; then
echo "Pl. give the release name eg. latest"
exit 1
fi
export PYTHONPATH=`pwd`/python:$PYTHONPATH
REL_TAG=tag-$REL
git tag -d $REL_TAG
git tag -d $REL
git tag $REL_TAG
git push -f origin $REL_TAG
set -e
git checkout -B $BRANCH
AVI_VERSION=`python ./python/version.py`
cd python
rm -rf dist/
releases=`/usr/local/bin/hub release`
hub_op="create"
for r in $releases
do
if [ "$r" = "$REL_TAG" ]; then
hub_op="edit"
break
fi
done
./create_sdk_pip_packages.sh sdk
./create_sdk_pip_packages.sh migrationtools
./create_sdk_pypi.sh sdk
./create_sdk_pkgs.sh
mv dist/avisdk-$AVI_VERSION.tar.gz ../avisdk-$AVI_VERSION.tar.gz
if [ -e dist/python-avisdk_0_all.deb ]; then
mv dist/python-avisdk_0_all.deb ../avisdk-$AVI_VERSION.deb
else
echo "Avi API SDK Debian package not found. Aborting"
exit 1
fi
if [ -e dist/avisdk-$AVI_VERSION-1.noarch.rpm ]; then
mv dist/avisdk-$AVI_VERSION-1.noarch.rpm ../avisdk-$AVI_VERSION.rpm
else
echo "Avi API SDK RPM package not found. Aborting"
exit 1
fi
if [ -e dist/avimigrationtools-$AVI_VERSION.tar.gz ]; then
mv dist/avimigrationtools-$AVI_VERSION.tar.gz ../avimigrationtools-$AVI_VERSION.tar.gz
else
echo "Avi Migration tools package not found. Aborting"
exit 1
fi
rm -rf dist
rm -rf avisdk.egg-info
assets="$assets -a avisdk-$AVI_VERSION.tar.gz#pip-package-avisdk-$AVI_VERSION -a avimigrationtools-$AVI_VERSION.tar.gz#pip-package-avimigrationtools-$AVI_VERSION -a avisdk-$AVI_VERSION.deb#debian-package-avisdk-$AVI_VERSION -a avisdk-$AVI_VERSION.rpm#rpm-package-avisdk-$AVI_VERSION"
cd ../
# avinetworks/avitools release handling
rm -rf avitools
git clone https://github.com/avinetworks/avitools
cd avitools
git remote set-url origin [email protected]:avinetworks/avitools.git
git tag $REL
git push -f origin $REL
cd ..
rm -rf avitools
/usr/local/bin/hub release $hub_op $assets -F ReleaseNote $REL_TAG
rm -rf avisdk-$AVI_VERSION.tar.gz
rm -rf avimigrationtools-$AVI_VERSION.tar.gz
rm -rf avisdk-$AVI_VERSION.deb
rm -rf avisdk-$AVI_VERSION.rpm
| true |
11e4e4f2ef4376e865068032e4d3bed9ebdcf1c6
|
Shell
|
sohail-ahamed-qureshi/Shell-programming
|
/Repitations/assignments/usingWhileLoop/coinflip.sh
|
UTF-8
| 183 | 2.984375 | 3 |
[] |
no_license
|
#!/bin/bash
counter=0
while [[ counter -le 10 ]]
do
coin=$((RANDOM%2))
if [ $coin -eq 1 ]
then
echo "Heads"
counter=$(($counter + 1))
else
echo "Tails"
fi
done
| true |
c6b6fc735501818193c9522e8f3e8db0959ce4ca
|
Shell
|
michael-bowen-sc/dotfiles
|
/dot_oh-my-zsh/custom/aliases.zsh
|
UTF-8
| 758 | 2.734375 | 3 |
[
"MIT"
] |
permissive
|
if [ -x "$(command -v exa)" ]; then
alias ls="exa"
alias lsa="exa --long --all --group --icons"
fi
if [ -x "$(command -v code)" ]; then
alias zshc="code ~/.zshrc"
fi
# Use this version of pip install to install from BlackRock hosted libraries
if [ -x "$(command -v pip)" ]; then
alias blkpip_install="pip install --disable-pip-version-check -i http://artifactory.blackrock.com:8081/artifactory/api/pypi/python-staging/simple --trusted-host artifactory.blackrock.com"
fi
alias grep='grep --color=auto --exclude-dir={.bzr,CVS,.git,.hg,.svn}'
alias -s {yml,yaml}="code"
alias d='dirs -v | head -10'
alias 1='cd -'
alias 2='cd -2'
alias 3='cd -3'
alias 4='cd -4'
alias 5='cd -5'
alias 6='cd -6'
alias 7='cd -7'
alias 8='cd -8'
alias 9='cd -9'
| true |
6cd2ffb3f865509b8b118d324784d868f46a02cf
|
Shell
|
tabulon-ext/zeesh
|
/func/zeesh-plugin
|
UTF-8
| 815 | 3.171875 | 3 |
[
"MIT"
] |
permissive
|
# zeesh plugin controller
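# Usage (as implemented below):
#   zeesh-plugin enable <name>       # source ~/.zsh/plugins/<name>/plugin.zsh
#   zeesh-plugin desc <name>         # show a plugin's description
#   zeesh-plugin prefs <name> [...]  # pass preferences to zeesh-prefs
#   zeesh-plugin create|new <name>   # scaffold a new plugin
#   zeesh-plugin list|ls             # list available plugins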
zeesh-plugin() {
if [ $2 ] && [[ -d ~/.zsh/plugins/$2 ]]; then
case $1 in
desc|description)
zeesh-plugin-desc $2
;;
enable)
echo ":: zeesh plugin $2 enabled"
source ~/.zsh/plugins/$2/plugin.zsh
;;
prefs)
zeesh-prefs $2 $@[3,-1]
;;
esac
else
case $1 in
create|new)
if [ $2 ]; then
zeesh-plugin-new $2
else
echo "please specify a name for your new plugin"
fi
;;
list|ls)
echo ":: zeesh plugin :: available plugins"
/bin/ls ~/.zsh/plugins
;;
*)
echo "not a valid command"
;;
esac
fi
}
# vim: ft=zsh
| true |
b22b144f3e80e579f64910986e788f0887afadd5
|
Shell
|
ibiqlik/qliksense-k8s
|
/preflight.sh
|
UTF-8
| 2,414 | 4.1875 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# test deployment and service name we will use to test
appName="qnginx001"
continue_install=false;
namespace=""
unset preflight_status
print_help() {
echo "usage: ./preflight.sh [OPTIONS]
Options:
-h Prints help.
-n string namespace to use.
"
}
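# Example invocation (hypothetical namespace): ./preflight.sh -n qliksense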
# accept value for namespace in case we run this script from CLI
while getopts "n:h" opt; do
case $opt in
n)
namespace=$OPTARG
;;
h)
print_help
exit 1
;;
\?)
# in case we want to abort installation in the event of preflight check failure
# exit 1
;;
esac
done
if [[ $namespace == "" ]]; then
echo "namespace empty, resetting it to default namespace."
namespace="default"
fi
echo "namespace to use: $namespace"
# replace value of namespace in preflight_checks.yaml
sed -i -e "s/PREFLIGHT_NAMESPACE/$namespace/g" clustertests/preflight_check/preflight_checks.yaml
# create a test deployment and service and then run preflight in this setup
kubectl create deployment $appName --image=nginx -n $namespace && \
kubectl create service clusterip $appName --tcp=80:80 -n $namespace && \
kubectl -n $namespace wait --for=condition=ready pod -l app=$appName --timeout=120s && \
/usr/local/bin/preflight clustertests/preflight_check/preflight_checks.yaml --interactive=false | tee outfile.txt
# optional, will download support_bundle for further inspection.
# this is of little use now, as this will be downloaded into the container running this script.
# we intend to address this feature in future.
echo "Downloading support bundle: "
/usr/local/bin/support-bundle clustertests/preflight_check/preflight_checks.yaml
# infer status of the preflight checks to determine if we should continue with Qliksense installation or not.
preflight_status=$( tail -n 2 outfile.txt )
# cleaning up our setup after running checks.
(kubectl delete deployment $appName -n $namespace) || true
(kubectl delete service $appName -n $namespace) || true
rm outfile.txt
# depending on the status of the checks, we will print an appropriate message and continue with installation.
if [[ "$preflight_status" == *PASS* ]]; then
echo "Preflight checks have passed."
else
echo "Preflight checks have failed, please proceed at your own risk..."
# For now, we will not abort installation if preflight checks fail.
# exit 1
fi
echo "Qliksense installation can continue"
| true |
b2e4b7b6726cddc858a1ac44a2adb2f8c916672b
|
Shell
|
fredsson/cmakegen
|
/PKGBUILD
|
UTF-8
| 746 | 3.234375 | 3 |
[
"MIT"
] |
permissive
|
#Maintainer: Fredrik Andersson <[email protected]>
pkgname=cmakegen-git
pkgver=r40.115b37b
pkgrel=1
pkgdesc="Tool to automatically generate cmake files for c++ projects"
arch=("x86_64")
license=("MIT")
makedepends=("git" "cmake" "make")
depends=("cmake" "make")
source=("git+https://github.com/fredsson/cmakegen.git")
md5sums=("SKIP")
pkgver() {
cd "$srcdir/${pkgname%-git}"
( set -o pipefail
git describe --long 2>/dev/null | sed 's/\([^-]*-g\)/r\1/;s/-/./g' ||
printf "r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
)
}
build() {
cd "$srcdir/${pkgname%-git}"
mkdir _build && cd _build
cmake -DCMAKE_BUILD_TYPE=Release ..
make
}
package() {
cd "$srcdir/${pkgname%-git}"/_build
make DESTDIR="$pkgdir/" install
}
| true |
cd1e79b5a89b8b79e22b284858eeeb6dfac9b2b5
|
Shell
|
supreet-s8/PMRDC
|
/PMR_DC_3/PMR_DC/prepareIP.sh
|
UTF-8
| 1,729 | 3.296875 | 3 |
[] |
no_license
|
#!/bin/bash
function identifyClients {
mount -o remount,rw / 2>/dev/null
PREFIX=''; PREFIX=`/opt/tps/bin/pmx.py show hadoop | egrep "client" | awk '{print $NF}' | awk -F. '{print $1"."$2"."$3"."}' | sort -u`
CLIENTS=''; CLIENTS=`/opt/tps/bin/pmx.py show hadoop | egrep "client" | awk '{print $NF}' | awk -F. '{print $4}' | sort -ru`
col=''
for i in $CLIENTS; do
if [[ $col ]]; then
col="$i $col"
else
col="${i}"
fi
done
cnp=''; cnp=`echo "$col" | awk '{print $1 " " $2}'`
col1=''; col1=`echo "$col" | awk '{if ( $1 != "" || $2 != "" ) print $1" "$2; fi}'`
col2=''; col2=`echo "$col" | awk '{if ( $3 != "" || $4 != "" ) print $3" "$4; fi}'`
#>IP.sh
echo "prefix=\"$PREFIX\"" >> IP.sh
echo "col=\"$col\"" >> IP.sh
echo "cnp=\"$cnp\"" >> IP.sh
echo "col1=\"$col1\"" >> IP.sh
if [[ $col2 ]]; then
echo "col2=\"$col2\"" >> IP.sh
fi
}
function identifyComputes {
SLAVES=''
SLAVES=`/opt/tps/bin/pmx.py show hadoop | egrep "slave" | awk '{print $NF}' | awk -F. '{print $4}' | sort -ru`
map=''
for i in $SLAVES; do
if [[ $map ]]; then
map="$i $map"
else
map="${i}"
fi
done
echo "map=\"$map\"" >> IP.sh
echo "cmp=\"$map\"" >> IP.sh
}
function identifyVIPs {
MASTER=''
MASTER=`/opt/tms/bin/cli -t 'en' 'sho clus global' | grep -A2 master | grep address | awk {'print $4'} | sed -e 's/\,//' | awk -F. '{print $4}'`
if [[ ${MASTER} == '' ]]; then MASTER="3"; fi
if [[ $cnp ]]; then echo "cnp0=\"${MASTER}\"" >> IP.sh; fi
if [[ $cnp ]]; then echo "col0=\"${MASTER}\"" >> IP.sh; fi
if [[ $col1 ]]; then echo "col10=\"${MASTER}\"" >> IP.sh; fi
if [[ $col2 ]]; then echo "col20=\"4\"" >> IP.sh; fi
}
#-------------------------#
identifyClients
identifyComputes
identifyVIPs
#-------------------------#
| true |
3ec0ef7fdc788001aa20596927a84ef5446abae2
|
Shell
|
davejonesbkk/kubernetes-the-automated-way
|
/kubelet_client_certs.sh
|
UTF-8
| 1,246 | 2.6875 | 3 |
[] |
no_license
|
#!/bin/bash
WORKER0_HOST=`cat hosts | sed '3!d' | sed 's/[=].*$//'`
WORKER0_IP=`cat hosts | sed '3!d' | sed 's/.*[=]//'`
WORKER1_HOST=`cat hosts | sed '4!d' | sed 's/[=].*$//'`
WORKER1_IP=`cat hosts | sed '4!d' | sed 's/.*[=]//'`
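# Assumed layout of ./hosts (hypothetical values): one "host=ip" pair per line,
# with the two workers on lines 3 and 4, e.g.
#   controller0=10.0.1.10
#   controller1=10.0.1.11
#   worker0=10.0.1.20
#   worker1=10.0.1.21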
{
cat > ${WORKER0_HOST}-csr.json << EOF
{
"CN": "system:node:${WORKER0_HOST}",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "US",
"L": "Portland",
"O": "system:nodes",
"OU": "Kubernetes The Hard Way",
"ST": "Oregon"
}
]
}
EOF
cfssl gencert \
-ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-hostname=${WORKER0_IP},${WORKER0_HOST} \
-profile=kubernetes \
${WORKER0_HOST}-csr.json | cfssljson -bare ${WORKER0_HOST}
cat > ${WORKER1_HOST}-csr.json << EOF
{
"CN": "system:node:${WORKER1_HOST}",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "US",
"L": "Portland",
"O": "system:nodes",
"OU": "Kubernetes The Hard Way",
"ST": "Oregon"
}
]
}
EOF
cfssl gencert \
-ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-hostname=${WORKER1_IP},${WORKER1_HOST} \
-profile=kubernetes \
${WORKER1_HOST}-csr.json | cfssljson -bare ${WORKER1_HOST}
}
| true |
0eda9e952bdc29f0a3cef6c17a854bc451ef3ef2
|
Shell
|
Chr0nos/ft_linux
|
/xorg/ebuild/libarchive.sh
|
UTF-8
| 270 | 2.875 | 3 |
[] |
no_license
|
build() {
PKG="libarchive"
VERSION="3.3.2"
URL="http://www.libarchive.org/downloads/$PKG-$VERSION.tar.gz"
pull $PKG gz $VERSION $URL
unpack $PKG-$VERSION gz
./configure --prefix=/usr --disable-static && compile $PKG-$VERSION
make install
cleanup $PKG $VERSION
}
| true |
baf86ead33f4ee623d32d2a002962c92ff7978d1
|
Shell
|
Armenak1998/Algorithm
|
/gmain.bash
|
UTF-8
| 354 | 2.65625 | 3 |
[] |
no_license
|
if [ -d "gmain/" ] ; then
rm -rf gmain/;
fi;
mkdir gmain;
if [ -d "output/" ]; then
rm -rf output;
fi;
mkdir output;
cd gmain;
g++ ../algorithm/algorithm.cpp ../algorithm/main.cpp ../algorithm/private.cpp -o gmain.exe;
./gmain.exe > ../output/output.tcl;
cd ..;
touch rmfile.bash ;
echo "rm -rf gmain; rm -rf output; rm -rf rmfile.bash" > rmfile.bash;
| true |
dd28c1f9e274eb1d7dc8cb5e75d2f172fe96922e
|
Shell
|
tungsheng/dotfiles
|
/install/debian/init.sh
|
UTF-8
| 1,006 | 2.9375 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
log_file=$HOME/install_progress_log.txt
echo -ne "Initiating...\n"
sudo apt-get -y update
echo -ne "Installing utils...\n"
sudo apt-get -y install git tig
sudo apt-get -y install make locate
sudo apt-get -y install whois
sudo apt-get -y install curl wget
sudo apt-get -y install silversearcher-ag
sudo apt-get -y install python-pip
sudo apt-get -y install openssh-server
sudo apt-get -y install apt-transport-https ca-certificates software-properties-common
echo -ne "Installing bash-it...\n"
git clone --depth=1 https://github.com/Bash-it/bash-it.git ~/.bash_it
echo -ne "Installing neovim...\n"
sudo apt-get -y install neovim
echo -ne "Installing tmux...\n"
sudo apt-get -y install tmux
if hash tmux 2>/dev/null; then
echo "tmux Installed" >> $log_file
else
echo "tmux FAILED TO INSTALL!!!" >> $log_file
fi
# source $DOTFILES/install/debian/nvm.sh
# source $DOTFILES/install/debian/golang.sh
# source $DOTFILES/install/debian/docker.sh
source $DOTFILES/install/debian/link.sh
| true |
9299b410757f75270aa678feee58c3c67e6e9c30
|
Shell
|
dayroned/django-template
|
/run-devenv.sh
|
UTF-8
| 527 | 3.375 | 3 |
[] |
no_license
|
#!/bin/bash
set -o errexit
set -o pipefail
set -o nounset
export COMPOSE_FILE=local.yml
stop_stack()
{
echo "Stopping compose environment..."
docker compose down
}
if [ ! -f .env ]; then
echo "Creating .env file from example..."
cp .env.example .env
fi
echo "Building compose environment..."
docker compose build
echo "Starting compose environment..."
docker compose up -d
trap stop_stack INT
echo
echo "System running at http://localhost:8000"
echo
echo "Django logs..."
docker compose logs django -f
| true |
8aa5ff22237ea68e6941a1b5a783c0c3efb4b437
|
Shell
|
benhamilton/snippets
|
/bash-get-next-task-number.sh
|
UTF-8
| 132 | 2.703125 | 3 |
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/bin/bash
source ~/last_task_number.txt
((TaskNumber++))
echo $TaskNumber
echo "TaskNumber=$TaskNumber" > ~/last_task_number.txt
| true |
c94d3a42e80048f62d7792adbb9b43f01e3bf148
|
Shell
|
qrti/rotBright
|
/script/rotate.sh
|
UTF-8
| 3,689 | 3.671875 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# rotate.sh V0.7 191023 [email protected]
# linux bash script for rotation control (display + touchscreen + optional pen)
# make script executable
# chmod a+x rotate.sh
# open a console and enter
# $ xinput -list
# note the input device(s) you want to rotate
#if unused -> DEVICE1=
DEVICE0="pointer:ELAN22A6:00 04F3:22A6" # Asus T102HA touchscreen
DEVICE1="ELAN22A6:00 04F3:22A6 Pen (0)" # pen
DEVICE2="ASUS HID Device ASUS HID Device Touchpad" # touchpad
# display rotation might set brighness to maximum
# bright.sh must be executable in same directory (if set to true)
#
RESTBRIGHT=true # restore brightness
ORIA0=(normal left inverted right) # orientation array 0
ORIA1='(normal|left|inverted|right)' # 1
CUDIS=`xrandr --current | grep primary | sed -e 's/ .*//g'` # current display
CUORI=`xrandr --current --verbose | grep primary | egrep -o '\) '$ORIA1' \(' | egrep -o $ORIA1` # current orientation
path=`dirname $0` # arguments
file="$path/rotate.dat"
arg1="$1"
#-------------------------------------------------------------------------------
function do_rotate
{
xrandr --output $1 --rotate $2
TRANSFORM='Coordinate Transformation Matrix'
case "$2" in
normal)
[ ! -z "$DEVICE0" ] && xinput set-prop "$DEVICE0" "$TRANSFORM" 1 0 0 0 1 0 0 0 1
[ ! -z "$DEVICE1" ] && xinput set-prop "$DEVICE1" "$TRANSFORM" 1 0 0 0 1 0 0 0 1
[ ! -z "$DEVICE2" ] && xinput set-prop "$DEVICE2" "$TRANSFORM" 0 -1 1 1 0 0 0 0 1
;;
inverted)
[ ! -z "$DEVICE0" ] && xinput set-prop "$DEVICE0" "$TRANSFORM" -1 0 1 0 -1 1 0 0 1
[ ! -z "$DEVICE1" ] && xinput set-prop "$DEVICE1" "$TRANSFORM" -1 0 1 0 -1 1 0 0 1
[ ! -z "$DEVICE2" ] && xinput set-prop "$DEVICE2" "$TRANSFORM" 0 1 0 -1 0 1 0 0 1
;;
left)
[ ! -z "$DEVICE0" ] && xinput set-prop "$DEVICE0" "$TRANSFORM" 0 -1 1 1 0 0 0 0 1
[ ! -z "$DEVICE1" ] && xinput set-prop "$DEVICE1" "$TRANSFORM" 0 -1 1 1 0 0 0 0 1
[ ! -z "$DEVICE2" ] && xinput set-prop "$DEVICE2" "$TRANSFORM" -1 0 1 0 -1 1 0 0 1
;;
right)
[ ! -z "$DEVICE0" ] && xinput set-prop "$DEVICE0" "$TRANSFORM" 0 1 0 -1 0 1 0 0 1
[ ! -z "$DEVICE1" ] && xinput set-prop "$DEVICE1" "$TRANSFORM" 0 1 0 -1 0 1 0 0 1
[ ! -z "$DEVICE2" ] && xinput set-prop "$DEVICE2" "$TRANSFORM" 1 0 0 0 1 0 0 0 1
;;
esac
}
#-------------------------------------------------------------------------------
if [ -z "$arg1" ]; then # show current
echo "current rotation:" $CUORI
exit 0
fi
if [ "$arg1" == "restore" ]; then # restore
if [ -f "$file" ]; then # stored
ori=`cat $file`
else # current
ori=$CUORI
fi
elif [ "$arg1" == "+" ]; then # +
for i in ${!ORIA0[*]}; do
if [ "$CUORI" == ${ORIA0[$i]} ]; then
ori=${ORIA0[($i+1) % 4]}
break
fi
done
else # new
ori=$arg1
fi
# check
if [ "$ori" == "normal" ] || [ "$ori" == "left" ] ||
[ "$ori" == "inverted" ] || [ "$ori" == "right" ]; then
:
else # invalid
echo "invalid argument"
echo "usage: ./rotate.sh [+|normal|inverted|left|right|restore]"
exit 1
fi
echo $ori > $file # store
do_rotate $CUDIS $ori # rotate
if [ $RESTBRIGHT == true ]; then # restore brightness
sleep 1 # delay
$path/bright.sh restore # call script
fi
| true |
a63db12043144297a6e90b37ed108ef88e368b0d
|
Shell
|
binocarlos/powerstrip-mesosphere-demo
|
/mesosphere-install.sh
|
UTF-8
| 2,467 | 3.6875 | 4 |
[] |
no_license
|
#!/bin/bash -e
if [[ $# > 0 ]]; then
if [[ "$1" == "slave" ]]; then
export INSTALLER_TYPE=slave
else
export INSTALLER_TYPE=master
fi
else
export INSTALLER_TYPE=master
fi
echo "####################################################################"
echo "#################### Installing mesosphere $INSTALLER_TYPE #########"
echo "####################################################################"
export MASTER_IP=`cat /etc/flocker/master_address`
export SLAVE1_IP=`cat /etc/flocker/slave1_address`
export SLAVE2_IP=`cat /etc/flocker/slave2_address`
export MY_ADDRESS=`cat /etc/flocker/my_address`
update-hosts() {
## Update /etc/hosts to add master and node hostname mappings ##
echo "updating /etc/hosts to add master IP entry"
echo "127.0.0.1 localhost" > /etc/hosts
echo "$MASTER_IP master" | sudo tee -a /etc/hosts
echo "$SLAVE1_IP node1" | sudo tee -a /etc/hosts
echo "$SLAVE2_IP node2" | sudo tee -a /etc/hosts
cat /etc/hosts
}
setup-master() {
mkdir -p /etc/zookeeper/conf
mkdir -p /etc/mesos
mkdir -p /etc/mesos-master
mkdir -p /etc/marathon/conf
echo "1" > /etc/zookeeper/conf/myid
echo "server.1=$MASTER_IP:2888:3888" >> /etc/zookeeper/conf/zoo.cfg
echo "zk://$MASTER_IP:2181/mesos" > /etc/mesos/zk
cp /etc/mesos/zk /etc/marathon/conf/master
echo "zk://$MASTER_IP:2181/marathon" > /etc/marathon/conf/zk
#echo "1" > /etc/mesos-master/quorum
echo "$MY_ADDRESS" > /etc/mesos-master/hostname
echo "$MY_ADDRESS" > /etc/mesos-master/ip
echo "$MY_ADDRESS" > /etc/marathon/conf/hostname
rm /etc/init/zookeeper.override
rm /etc/init/mesos-master.override
rm /etc/init/marathon.override
sudo service zookeeper start
sudo service mesos-master start
sudo service marathon start
}
setup-slave() {
mkdir -p /etc/mesos
mkdir -p /etc/mesos-slave
mkdir -p /etc/marathon/conf
echo 'docker,mesos' > /etc/mesos-slave/containerizers
cp /etc/flocker/mesos-attributes /etc/mesos-slave/attributes
echo '5mins' > /etc/mesos-slave/executor_registration_timeout
echo "zk://$MASTER_IP:2181/mesos" > /etc/mesos/zk
echo "ports:[7000-9000]" > /etc/mesos-slave/resources
echo "$MY_ADDRESS" > /etc/mesos-slave/hostname
echo "$MY_ADDRESS" > /etc/mesos-slave/ip
echo "$MY_ADDRESS" > /etc/marathon/conf/hostname
rm /etc/init/mesos-slave.override
sleep 10
sudo service mesos-slave start
}
update-hosts
if [[ "$INSTALLER_TYPE" == "master" ]]; then
setup-master
else
setup-slave
fi
| true |
eabfce0b0ae0815f2b63bc8b874e6fda9eef77cd
|
Shell
|
MenkeTechnologies/zpwr
|
/autoload/common/zpwrKeySender
|
UTF-8
| 347 | 2.71875 | 3 |
[
"MIT"
] |
permissive
|
# -*- mode: sh -*-
# vim: set ft=sh:
function zpwrKeySender(){
if (( $ZPWR_SEND_KEYS_PANE != -1 )); then
local pane mywords
# tmux send-keys -t learn:0.0 $1
for pane in ${(Az)${(s@,@)ZPWR_SEND_KEYS_PANE}}; do
tmux send-keys -t $pane "C-u" "$BUFFER" #&>/dev/null
done
fi
}
zpwrKeySender "$@"
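# Usage sketch (not part of the original file): the pane list is read from
# ZPWR_SEND_KEYS_PANE as a comma-separated set of tmux targets, e.g.
#   export ZPWR_SEND_KEYS_PANE='learn:0.0,learn:0.1'
# Leaving it at -1 (the guard checked above) disables key mirroring entirely.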
| true |
10b0f0e604a122ab82224bcc8f172ba6bc89168d
|
Shell
|
sunfish-shogi/sunfish
|
/server.sh
|
UTF-8
| 1,776 | 3.578125 | 4 |
[] |
no_license
|
#!/bin/bash
# server.sh
# Server Program for Sunfish
# number of iterations
steps=5000
# host names, login names, passwords
. hosts
# total number of clients
cnum=${#CLIENT[*]}
# log file
log="server.log"
# clean
rm -rf ~/.sunfish_s
# make directory
mkdir ~/.sunfish_s
# host name
hostname=`hostname`
echo $hostname
# number of clients
echo "Clients : $cnum" >> $log
# cycle
pv=0
cycle=50
cnt=0
max_cnt=42
for (( i = 0 ; i < $steps ; i++ ))
do
echo "step : $i"
echo "step : $i" >> $log
date >> $log
if [ $i -eq $pv ]
then
num3=`printf %03d ${cnt}`
echo "backup to evdata_${num3}"
cp evdata evdata_${num3}
echo "update PV!!" >> $log
cmp="evdata.cmp0"
pv=`expr $pv + $cycle`
#cycle=`expr \( $cycle \* 9 + 10 - 1 \) / 10`
cnt=`expr $cnt + 1`
if [ $cnt -eq $max_cnt ]
then
break
fi
else
echo "not update PV." >> $log
cmp="evdata.cmp"
fi
# send evdata
echo "broadcast a evdata :" >> $log
date >> $log
gzip -c evdata > evdata.gz
for(( j = 0 ; j < $cnum ; j++ ))
do
scp evdata.gz ${USER[j]}@${CLIENT[j]}:~/.sunfish_c/
scp server.sh ${USER[j]}@${CLIENT[j]}:~/.sunfish_c/${cmp}
done
echo "broadcasting is completed :" >> $log
date >> $log
# receive gradients
fnum=`find ~/.sunfish_s -name '*.cmp' | wc -l`
timeout=0
while [ $fnum -lt $cnum -a $timeout -lt 30 ]
do
sleep 1
fnum=`find ~/.sunfish_s -name '*.cmp' | wc -l`
fnum2=`find ~/.sunfish_s -name '*.gr' | wc -l`
if [ $fnum2 -eq $cnum ]
then
timeout=`expr $timeout + 1`
fi
done
echo "gradients : $fnum" >> $log
ls ~/.sunfish_s/ >> $log
# adjust
echo "adjust :" >> $log
date >> $log
./retry.sh "./sunfish -la"
# remove gradients
rm -f ~/.sunfish_s/*.gr
rm -f ~/.sunfish_s/*.info
rm -f ~/.sunfish_s/*.cmp
done
| true |
d7be899c38c90609cd8106f053bdd9d694b33d0a
|
Shell
|
andreaguarise/one-initScripts
|
/c7-java8.sh
|
UTF-8
| 384 | 3.171875 | 3 |
[] |
no_license
|
#!/bin/sh
# java8.sh
#
#
# Created by Andrea Guarise on 7/1/15.
#
yum install -y java-1.8.0 &> /tmp/java8.log
java8=`alternatives --display java | grep -v slave | grep 1.8 | cut -d ' ' -f1`
alternatives --set java $java8
alternatives --display java | grep "points to" | grep 1.8.0
if [ $? = 0 ]; then
echo "Java 1.8.0 correctly installed and configured" &> /tmp/java8.log
fi
| true |
104a2337c2ee5c9101da94e469a882647dfd5e6f
|
Shell
|
galabe-cloud/kops-terraform-tutorial
|
/kops_3_create_s3_tfstate_store.sh
|
UTF-8
| 694 | 2.609375 | 3 |
[] |
no_license
|
#!/bin/bash
# kops_3_create_s3_tfstate_store.sh
# Ref: https://github.com/kubernetes/kops/blob/master/docs/getting_started/aws.md
export AWS_PROFILE=garyttt8
DOMAIN="learn-devops.online"
SUBDOMAIN="kops.${DOMAIN}"
REGION="ap-southeast-1"
BUCKET="${SUBDOMAIN}-tfstate-store"
aws s3api create-bucket \
--region $REGION \
--bucket $BUCKET \
--create-bucket-configuration LocationConstraint=$REGION
aws s3api put-bucket-versioning \
--bucket $BUCKET \
--versioning-configuration Status=Enabled
aws s3api put-bucket-encryption \
--bucket $BUCKET \
--server-side-encryption-configuration '{"Rules":[{"ApplyServerSideEncryptionByDefault":{"SSEAlgorithm":"AES256"}}]}'
| true |
02b130c2b173ed277d69464b0a896aa22867d29b
|
Shell
|
jjzamudio/HIGAN
|
/validation-metrics/run-jupyter_sb_py2.sbatch
|
UTF-8
| 1,793 | 2.78125 | 3 |
[] |
no_license
|
#!/bin/bash
#SBATCH --gres=gpu:1
#SBATCH --job-name=jupyterNB
#SBATCH --nodes=1
#SBATCH --time=24:00:00
#SBATCH --mem=64GB
##SBATCH --cpus-per-task=2
#SBATCH --job-name=viz-cap
module purge
module load python/intel/2.7.12
#source /beegfs/sb3923-share/pytorch-cpu/py3.6.3/bin/activate
module load fftw/intel/3.3.6-pl2
python -m pip install pyFFTW --user
python -c "import pyfftw; print(pyfftw.__file__); print(pyfftw.__version__)"
module load h5py/intel/2.7.0rc2
pip install https://download.pytorch.org/whl/cpu/torch-1.0.1.post2-cp27-cp27mu-linux_x86_64.whl
pip install torchvision
#python3 -m pip install numpy cython mpi4py --user
#python3 -m pip install nbodykit[extras] --user
port=$(shuf -i 6000-9999 -n 1)
/usr/bin/ssh -N -f -R $port:localhost:$port log-0
/usr/bin/ssh -N -f -R $port:localhost:$port log-1
cat<<EOF
Jupyter server is running on: $(hostname)
Job starts at: $(date)
Step 1:
If you are working on the NYU campus, please open an iTerm window and run:
    ssh -L $port:localhost:$port [email protected]
If you are working off campus, you should already have ssh tunneling set up through the HPC bastion host,
so that you can log in to prince directly with:
    ssh $USER@prince
In that case, open an iTerm window and run:
    ssh -L $port:localhost:$port $USER@prince
Step 2:
Keep the iTerm windows from the previous step open. Now open a browser and find the line with
The Jupyter Notebook is running at: $(hostname)
the URL is something like: http://localhost:${port}/?token=XXXXXXXX (see your token below)
You should be able to connect to the Jupyter notebook running remotely on the prince compute node with the above URL.
EOF
unset XDG_RUNTIME_DIR
if [ "$SLURM_JOBTMP" != "" ]; then
export XDG_RUNTIME_DIR=$SLURM_JOBTMP
fi
jupyter notebook --no-browser --port $port --notebook-dir=$(pwd)
| true |
8d76a4839bf1d28a61fa3861e19fe3d564c3b770
|
Shell
|
AWooldrige/puppet
|
/apply.sh
|
UTF-8
| 521 | 2.875 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
set -eu
sudo puppet apply -v --strict_variables --modulepath=/etc/puppet/code/modules/:/etc/securepuppet/modules/:./modules/ ./manifests/
echo "Complete. Don't forget that gdpup may eventually overwrite changes made by this puppet run"
if ! passwd -S woolie | grep '^woolie P ' > /dev/null; then
echo "FAIL";
echo "ERROR: User account 'woolie' has no password set."
echo " -> DO NOT REBOOT OR LOG OUT"
echo " -> SET PASSWORD IMMEDIATELY, USING: passwd woolie"
exit 1
fi
| true |
b85a2a4b12da45abe9df490898e2be7f2cf57985
|
Shell
|
source-academy/js-slang
|
/scripts/jsdoc.sh
|
UTF-8
| 7,983 | 3.359375 | 3 |
[
"Apache-2.0"
] |
permissive
|
#! /usr/bin/env bash
JSDOC="node_modules/.bin/jsdoc"
TMPL="docs/jsdoc/templates/template"
DST="docs/source"
MD="docs/md"
LIB="docs/lib"
SPECS="docs/specs"
main() {
if [ "$1" == "prepare" ]; then
prepare
elif [ "$1" == "clean" ]; then
clean
elif [[ "$(git rev-parse --show-toplevel 2> /dev/null)" -ef "$PWD" ]]; then
run
else
echo "Please run this command from the git root directory."
false # exit 1
fi
}
run() {
# Source landing page
${JSDOC} -r -t ${TMPL} \
-c docs/jsdoc/conf.json \
-R ${MD}/README_top.md \
-d ${DST}/ \
${LIB}/empty.js
# Source §1
${JSDOC} -r -t ${TMPL} \
-c docs/jsdoc/conf.json \
-R ${MD}/README_1.md \
-d ${DST}/"source_1"/ \
${LIB}/misc.js \
${LIB}/math.js
# Source §1 Lazy
${JSDOC} -r -t ${TMPL} \
-c docs/jsdoc/conf.json \
-R ${MD}/README_1_LAZY.md \
-d ${DST}/"source_1_lazy"/ \
${LIB}/misc.js \
${LIB}/math.js
# Source §1 Typed
${JSDOC} -r -t ${TMPL} \
-c docs/jsdoc/conf.json \
-R ${MD}/README_1_TYPED.md \
-d ${DST}/"source_1_typed"/ \
${LIB}/misc.js \
${LIB}/math.js
# Source §1 WebAssembly
${JSDOC} -r -t ${TMPL} \
-c docs/jsdoc/conf.json \
-R ${MD}/README_1_WASM.md \
-d ${DST}/"source_1_wasm"/ \
${LIB}/empty.js
# Source §2
${JSDOC} -r -t ${TMPL} \
-c docs/jsdoc/conf.json \
-R ${MD}/README_2.md \
-d ${DST}/"source_2"/ \
${LIB}/auxiliary.js \
${LIB}/misc.js \
${LIB}/math.js \
${LIB}/list.js
# Source §2 Lazy
${JSDOC} -r -t ${TMPL} \
-c docs/jsdoc/conf.json \
-R ${MD}/README_2_LAZY.md \
-d ${DST}/"source_2_lazy"/ \
${LIB}/auxiliary.js \
${LIB}/misc.js \
${LIB}/math.js \
${LIB}/list.js
# Source §2 Typed
${JSDOC} -r -t ${TMPL} \
-c docs/jsdoc/conf.json \
-R ${MD}/README_2_TYPED.md \
-d ${DST}/"source_2_typed"/ \
${LIB}/auxiliary.js \
${LIB}/misc.js \
${LIB}/math.js \
${LIB}/list.js
# Source §3
${JSDOC} -r -t ${TMPL} \
-c docs/jsdoc/conf.json \
-R ${MD}/README_3.md \
-d ${DST}/"source_3"/ \
${LIB}/auxiliary.js \
${LIB}/misc.js \
${LIB}/math.js \
${LIB}/list.js \
${LIB}/stream.js \
${LIB}/array.js \
${LIB}/pairmutator.js
# Source §3 Concurrent
${JSDOC} -r -t ${TMPL} \
-c docs/jsdoc/conf.json \
-R ${MD}/README_3_CONCURRENT.md \
-d ${DST}/"source_3_concurrent"/ \
${LIB}/auxiliary.js \
${LIB}/misc.js \
${LIB}/math.js \
${LIB}/list.js \
${LIB}/stream.js \
${LIB}/array.js \
${LIB}/pairmutator.js \
${LIB}/concurrency.js
# Source §3 Non-Det
${JSDOC} -r -t ${TMPL} \
-c docs/jsdoc/conf.json \
-R ${MD}/README_3_NON-DET.md \
-d ${DST}/"source_3_non-det"/ \
${LIB}/auxiliary.js \
${LIB}/misc.js \
${LIB}/math.js \
${LIB}/list.js \
${LIB}/stream.js \
${LIB}/array.js \
${LIB}/pairmutator.js \
${LIB}/non-det.js
# Source §3 Typed
${JSDOC} -r -t ${TMPL} \
-c docs/jsdoc/conf.json \
-R ${MD}/README_3_TYPED.md \
-d ${DST}/"source_3_typed"/ \
${LIB}/auxiliary.js \
${LIB}/misc.js \
${LIB}/math.js \
${LIB}/list.js \
${LIB}/stream.js \
${LIB}/array.js \
${LIB}/pairmutator.js
# Source §4
${JSDOC} -r -t ${TMPL} \
-c docs/jsdoc/conf.json \
-R ${MD}/README_4.md \
-d ${DST}/"source_4"/ \
${LIB}/auxiliary.js \
${LIB}/misc.js \
${LIB}/math.js \
${LIB}/list.js \
${LIB}/stream.js \
${LIB}/array.js \
${LIB}/pairmutator.js \
${LIB}/mce.js
# Source §4 GPU
${JSDOC} -r -t ${TMPL} \
-c docs/jsdoc/conf.json \
-R ${MD}/README_4_GPU.md \
-d ${DST}/"source_4_gpu"/ \
${LIB}/auxiliary.js \
${LIB}/misc.js \
${LIB}/math.js \
${LIB}/list.js \
${LIB}/stream.js \
${LIB}/array.js \
${LIB}/pairmutator.js
# Source §4 Typed
${JSDOC} -r -t ${TMPL} \
-c docs/jsdoc/conf.json \
-R ${MD}/README_4_TYPED.md \
-d ${DST}/"source_4_typed"/ \
${LIB}/auxiliary.js \
${LIB}/misc.js \
${LIB}/math.js \
${LIB}/list.js \
${LIB}/stream.js \
${LIB}/array.js \
${LIB}/pairmutator.js \
${LIB}/mce.js
# AUXILIARY
${JSDOC} -r -t ${TMPL} \
-c docs/jsdoc/conf.json \
-R ${MD}/README_AUXILIARY.md \
-d ${DST}/AUXILIARY/ \
${LIB}/auxiliary.js
# MISC
${JSDOC} -r -t ${TMPL} \
-c docs/jsdoc/conf.json \
-R ${MD}/README_MISC.md \
-d ${DST}/MISC/ \
${LIB}/misc.js
# MATH
${JSDOC} -r -t ${TMPL} \
-c docs/jsdoc/conf.json \
-R ${MD}/README_MATH.md \
-d ${DST}/MATH/ \
${LIB}/math.js
# LISTS
${JSDOC} -r -t ${TMPL} \
-c docs/jsdoc/conf.json \
-R ${MD}/README_LISTS.md \
-d ${DST}/LISTS/ \
${LIB}/list.js
# STREAMS
${JSDOC} -r -t ${TMPL} \
-c docs/jsdoc/conf.json \
-R ${MD}/README_STREAMS.md \
-d ${DST}/STREAMS/ \
${LIB}/stream.js
# ARRAYS
${JSDOC} -r -t ${TMPL} \
-c docs/jsdoc/conf.json \
-R ${MD}/README_ARRAYS.md \
-d ${DST}/ARRAYS/ \
${LIB}/array.js
# PAIRMUTATORS
${JSDOC} -r -t ${TMPL} \
-c docs/jsdoc/conf.json \
-R ${MD}/README_PAIRMUTATORS.md \
-d ${DST}/PAIRMUTATORS/ \
${LIB}/pairmutator.js
# CONCURRENCY
${JSDOC} -r -t ${TMPL} \
-c docs/jsdoc/conf.json \
-R ${MD}/README_CONCURRENCY.md \
-d ${DST}/CONCURRENCY/ \
${LIB}/concurrency.js
# NON-DET
${JSDOC} -r -t ${TMPL} \
-c docs/jsdoc/conf.json \
-R ${MD}/README_NON-DET.md \
-d ${DST}/NON-DET/ \
${LIB}/non-det.js
# MCE
${JSDOC} -r -t ${TMPL} \
-c docs/jsdoc/conf.json \
-R ${MD}/README_MCE.md \
-d ${DST}/MCE/ \
${LIB}/mce.js
# RUNES
${JSDOC} -r -t ${TMPL} \
-c docs/jsdoc/conf.json \
-d ${DST}/RUNES/ \
-R ${MD}/RUNES_README.md \
${LIB}/webGLrune.js
# CURVES
${JSDOC} -r -t ${TMPL} \
-c docs/jsdoc/conf.json \
-d ${DST}/CURVES/ \
-R ${MD}/CURVES_README.md \
${LIB}/webGLcurve.js \
${LIB}/webGLhi_graph.js
# SOUNDS
${JSDOC} -r -t ${TMPL} \
-c docs/jsdoc/conf.json \
-d ${DST}/SOUNDS/ \
-R ${MD}/SOUNDS_README.md \
${LIB}/sound
# BINARYTREES
${JSDOC} -r -t ${TMPL}/ \
-c docs/jsdoc/conf.json \
-R ${MD}/README_BINARYTREES.md \
-d ${DST}/BINARYTREES \
${LIB}/tree.js
# PIXNFLIX
${JSDOC} -r -t ${TMPL} \
-c docs/jsdoc/conf.json \
-d "${DST}/PIXNFLIX/" \
-R ${MD}/PIXNFLIX_README.md \
${LIB}/video_lib.js
# GAME
${JSDOC} -r -t ${TMPL} \
-c docs/jsdoc/conf.json \
-d "${DST}/GAME/" \
-R ${MD}/GAME_README.md \
${LIB}/game.js
# EV3
${JSDOC} -r -t ${TMPL} \
-c docs/jsdoc/conf.json \
-d "${DST}/EV3/" \
-R ${MD}/EV3_README.md \
${LIB}/ev3.js
# External
${JSDOC} -r -t ${TMPL}/ \
-c docs/jsdoc/conf.json \
-R ${MD}/README_EXTERNAL.md \
-d ${DST}/"External libraries"/ \
${LIB}/webGLrune.js \
${LIB}/webGLcurve.js \
${LIB}/webGLhi_graph.js \
${LIB}/video_lib.js \
${LIB}/game.js \
${LIB}/sound \
${LIB}/ev3.js
}
prepare() {
run
cp -r docs/images ${DST} ; \
cd ${SPECS}; make; cp *.pdf ../source; cd ../..
}
clean() {
rm -rf ${DST}/*
}
main $1
| true |
d3bca37309f5bbaf2f73b749a366a8fa5cf97f1d
|
Shell
|
nimratbrar/solarthing
|
/other/systemd/edit-service.sh
|
UTF-8
| 512 | 3.890625 | 4 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env sh
BASEDIR=$(dirname "$0")
cd "$BASEDIR" || exit 1
SERVICE=$1
if [ -z ${SERVICE+x} ]; then
echo "You need to specify a service!"
exit 1
fi
NAME="solarthing-$SERVICE.service"
FILE_PATH="/etc/systemd/system/$NAME"
FILE_HASH=$(md5sum "$FILE_PATH" 2>/dev/null)
if [ -z ${EDITOR+x} ]; then
vi "$FILE_PATH" || exit 1
else
$EDITOR "$FILE_PATH" || exit 1
fi
if [ ! "$FILE_HASH" = "$(md5sum "$FILE_PATH" 2>/dev/null)" ]; then
systemctl daemon-reload || exit 1
echo "Reloaded systemctl"
fi
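# Usage sketch (inferred from the $1 handling above, not part of the original file):
#   sudo ./edit-service.sh <name>
# opens /etc/systemd/system/solarthing-<name>.service in $EDITOR (or vi) and
# runs `systemctl daemon-reload` only if the file's md5sum changed.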
| true |
185bbcc39098caba5df75c6a94294c7ddc4c1446
|
Shell
|
dbar/dotfiles
|
/.lemonbar/popups/clock/moon
|
UTF-8
| 640 | 3.015625 | 3 |
[] |
no_license
|
#!/bin/bash
## EXECUTE
phase=$(( ( ( $(date --date='00:00' +%s) - 1386030360 ) % 2551443 ) / 86400 ))
case $phase in
    0|1|29)
        echo " The moon will look like ² today." ;;
    2|3|4|5)
        echo " The moon will look like ³ today." ;;
    6|7|8|9)
        echo " The moon will look like ´ today." ;;
    10|11|12|13)
        echo " The moon will look like µ today." ;;
    14|15|16)
        echo " The moon will look like ¶ today." ;;
    17|18|19|20)
        echo " The moon will look like · today." ;;
    21|22|23|24)
        echo " The moon will look like ¸ today." ;;
    25|26|27|28)
        echo " The moon will look like ¹ today." ;;
esac
| true |
1b700db057868f17f07a6a5151613e6e4ac2dd03
|
Shell
|
greenplum-db/gpbackup
|
/ci/scripts/gpbackup-tools-versions.bash
|
UTF-8
| 572 | 3.03125 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -ex
# get versions for gpbackup, s3_plugin and gpbackup_manager
pushd gpbackup-go-components-rhel8
tar -xzf go_components.tar.gz
GPBACKUP_VERSION=$(cat gpbackup_version)
cp *_version ../gpbackup-tools-versions/
popd
echo ${GPBACKUP_VERSION} > gpbackup-tools-versions/pkg_version
# get version for ddboost_plugin
pushd gpbackup_ddboost_plugin
DDBOOST_PLUGIN_VERSION=$(git describe --tags | perl -pe 's/(.*)-([0-9]*)-(g[0-9a-f]*)/\1+dev.\2.\3/')
popd
echo ${DDBOOST_PLUGIN_VERSION} > gpbackup-tools-versions/ddboost_plugin_version
| true |
f3a43ad0297a5c07126bd69136e95ec783b4f97e
|
Shell
|
awslabs/dynamic-training-with-apache-mxnet-on-aws
|
/scala-package/spark/bin/run-mnist-example.sh
|
UTF-8
| 2,521 | 3.21875 | 3 |
[
"BSD-3-Clause",
"BSD-2-Clause-Views",
"Zlib",
"Apache-2.0",
"BSD-2-Clause",
"Intel"
] |
permissive
|
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
set -x
CURR_DIR=$(cd `dirname $0`; pwd)
SPARK_MODULE_DIR=$(cd $CURR_DIR/../; pwd)
SCALA_PKG_DIR=$(cd $CURR_DIR/../../; pwd)
OS=""
if [ "$(uname)" == "Darwin" ]; then
# Do something under Mac OS X platform
OS='osx-x86_64-cpu'
elif [ "$(expr substr $(uname -s) 1 5)" == "Linux" ]; then
OS='linux-x86_64-cpu'
fi
LIB_DIR=${SPARK_MODULE_DIR}/target/classes/lib
SPARK_JAR=`find ${SPARK_MODULE_DIR}/target -name "*.jar" -type f -exec ls "{}" + | grep -v -E '(javadoc|sources)'`
SCALA_JAR=`find ${SCALA_PKG_DIR}/assembly/$OS/target -maxdepth 1 -name "*.jar" -type f -exec ls "{}" + | grep -v -E '(javadoc|sources)'`
SPARK_OPTS+=" --name mxnet-spark-mnist"
SPARK_OPTS+=" --driver-memory 2g"
SPARK_OPTS+=" --jars ${SCALA_JAR}"
# Download training and test set
if [ ! -f ./train.txt ]; then
wget https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/Spark/train.txt
fi
if [ ! -f ./val.txt ]; then
wget https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/Spark/val.txt
fi
# running opts
RUN_OPTS+=" --input train.txt"
RUN_OPTS+=" --input-val val.txt"
RUN_OPTS+=" --output ./"
# These jars are required by the KVStores at runtime.
# They will be uploaded and distributed to each node automatically.
RUN_OPTS+=" --jars $SCALA_JAR,$SPARK_JAR"
RUN_OPTS+=" --num-server 1"
RUN_OPTS+=" --num-worker 2"
RUN_OPTS+=" --java $JAVA_HOME/bin/java"
RUN_OPTS+=" --model mlp"
RUN_OPTS+=" --cpus 0,1"
RUN_OPTS+=" --num-epoch 5"
# check if SPARK_HOME is set
if [ -z "$SPARK_HOME" ]; then
echo "SPARK_HOME is unset";
exit 1
fi
HOST=`hostname`
$SPARK_HOME/bin/spark-submit --master local[*] \
--class org.apache.mxnet.spark.example.ClassificationExample \
${SPARK_OPTS} \
${SPARK_JAR} \
${RUN_OPTS}
| true |
9beed7b6f3ccfd259aa1199fe0914fab5daf623d
|
Shell
|
altair861/DBColumn
|
/buildProcessor.sh
|
UTF-8
| 1,499 | 3.90625 | 4 |
[] |
no_license
|
#!/bin/sh
set -e
SDK_ROOT=`pwd`
while getopts ':p:' opt; do
case $opt in
p)
SDK_ROOT=$OPTARG #set the SDK_ROOT
echo "get_opts SDK_ROOT:${SDK_ROOT}"
;;
?)
echo "How to use: $0 [-b path ]" >&1
exit 1
;;
:)
echo "Option -$OPTARG requires an argument." >&1
exit 1
;;
esac
done
function copy() {
from=$1
to=$2
if [[ -d ${from} || -f ${from} ]]; then
cp -rf ${from} ${to}
else
echo "error: ${from} not exist"
fi
}
function build_jar() {
cur_sec=`date '+%s'`
cd ${SDK_ROOT}
echo "###start###"
cd ${SDK_ROOT}
echo ${SDK_ROOT}
mkdir -p "tmp/"
./gradlew db-processor:jar
copy db-processor/build/libs/db-processor.jar tmp/
copy db-processor/libs/javapoet-1.11.1.jar tmp/
./gradlew db-annotation:jar
copy db-annotation/build/libs/db-annotation.jar tmp/
./gradlew db-library:build
copy db-library/build/intermediates/intermediate-jars/debug/classes.jar tmp/db-library.jar
copy build.xml tmp/
cd tmp
echo "###start merge jar###"
ant -buildfile build.xml
cd -
mkdir -p "db-library/build/outputs/libs/"
copy tmp/cydb-library.jar db-library/build/outputs/libs/
rm -rf tmp
temp_sec=$((`date '+%s'`-${cur_sec}))
echo ""
echo "BUILD SUCCESSFUL in ${temp_sec}s"
echo "output file:db-library/build/outputs/libs/cydb-library.jar"
}
build_jar
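# Usage sketch (inferred from the getopts block above, not part of the original file):
#   ./buildProcessor.sh                 # uses the current directory as SDK_ROOT
#   ./buildProcessor.sh -p /path/to/sdk # builds against the given SDK root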
| true |
a424e1a0fcff64f31dacad0f1d30f7847ab8926d
|
Shell
|
schlueter/bin
|
/note
|
UTF-8
| 622 | 4.15625 | 4 |
[] |
no_license
|
#!/usr/bin/env bash
log () {
printf '%s %s\n' "$(date -R)" "$*" >> ~/.logs/note.log
}
named_note () {
vim "$XDG_CONFIG_HOME/notes/$1"
(
set -e
exec > >(while read -r l; do log "$l"; done) 2>&1
if ! cd "$XDG_CONFIG_HOME"/notes
then
log "Issue moving to notes directory, cannot continue."
fi
if [ -L "$1" ]
then
git add "$(readlink $1)"
else
        git add "$1"
fi
git commit -m "Updated $1 note"
)
}
if [ "$#" -eq 0 ]
then
named_note 'default'
elif [ "$#" -eq 1 ]
then
named_note "$1"
fi
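# Usage sketch (inferred from named_note above, not part of the original file):
#   note          # edits $XDG_CONFIG_HOME/notes/default and commits it
#   note todo     # edits $XDG_CONFIG_HOME/notes/todo and commits it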
| true |
81392e9dfcfc4d5aa56644fa13b4e91ecb05f6ac
|
Shell
|
roorco/etcetera
|
/skel/.config/executables/updatedriver.sh
|
UTF-8
| 2,850 | 3.671875 | 4 |
[] |
no_license
|
## Simple bash script to detect and install the most appropriate
## graphics driver(s) via Manjaro Hardware Detection (mhwd).
##
## Written by Carl Duff
##
clear
echo "Checking root privilages and internet connection...."
echo
## Check 1: Ensure that the script is being run with root privileges
if [[ `whoami` != "root" ]];
then
echo "This script must be run with root privilages (i.e. the 'sudo' command)."
echo "press $(tput setaf 2)$(tput bold)<enter> $(tput sgr0)to close the terminal."
read pause
exit
fi
## Check 2: Ensure that there is an active internet connection
if ! [ "`ping -c 1 google.com`" ];
then
echo
echo "$(tput setaf 1)$(tput bold)Connection test failed$(tput sgr0). You must run this script with an active internet"
echo "connection. Press $(tput setaf 2)$(tput bold)<enter> $(tput sgr0)to close this terminal."
read pause
exit
fi
# Information about this script for the user
clear
echo
echo "$(tput sgr 0 1)$(tput setaf 2)$(tput bold)Manjaro Hardware Detection: Graphics Driver Detection and Installation"
echo
echo "$(tput sgr0)Manjaro can automatically detect and install the most appropriate"
echo "graphics driver(s) for your system. There are two choices:"
echo
echo "$(tput setaf 4)$(tput bold)1. Open Source (non-proprietary) Drivers"
echo "$(tput sgr0)Includes drivers for Virtual Machines and Intel Chipsets, as well as"
echo "drivers written by the Linux Community."
echo
echo "$(tput setaf 3)$(tput bold)2. Proprietary Drivers (Recommended)"
echo "$(tput sgr0)Comprises of drivers written by the hardware manufacturers such as"
echo "NVidia for their graphics cards. These provide the best performance."
echo
echo "You may run this program to switch between open source and proprietary"
echo "drivers at any time."
echo
echo "Press $(tput setaf 2)$(tput bold)<enter> $(tput sgr0)to first identify the graphics driver(s) currently installed."
read pause
clear
## Identify what has already been installed
mhwd -li
## And now the options
echo
echo
echo "$(tput setaf 4)$(tput bold)1. Detect and Install Open Source (non-proprietary) Drivers"
echo
echo "$(tput setaf 3)$(tput bold)2. Detect and Install Proprietary Drivers (Recommended)."
echo
echo "$(tput sgr0)Enter the number of your choice ($(tput setaf 4)$(tput bold)1$(tput sgr0) or $(tput setaf 3)$(tput bold)2$(tput sgr0)), or just press $(tput setaf 2)$(tput bold)<enter> $(tput sgr0)to cancel."
read option
case "$option" in
"1")
pacman -Syy
mhwd -a pci free 0300 -f
echo
echo "Process Complete. Press $(tput setaf 2)$(tput bold)<enter> $(tput sgr0)to continue. Now reboot your system."
read pause
echo
;;
"2")
pacman -Syy
mhwd -a pci nonfree 0300 -f
echo
echo "Process Complete. Press $(tput setaf 2)$(tput bold)<enter> $(tput sgr0)to continue. Now reboot your system."
read pause
echo
;;
esac
exit 0
| true |
92d2e71c6cb1c1691d187ccdd02fcc23aec97a61
|
Shell
|
nesi/ARCS-systems
|
/dataFabricScripts/iRODS/usageScripts/SingleZone/UsageStatsDownload.sh
|
UTF-8
| 1,853 | 4 | 4 |
[] |
no_license
|
#!/bin/sh
IRODS_HOME="/opt/iRODS-2.0v/iRODS"
logDir="$IRODS_HOME/server/log"
usage()
{
echo "Usage: `basename $0` -d Path-To-DataFabric-UsageStats"
exit 1
}
if [ $# -ne 2 ]
then
usage
fi
while getopts "d:" OPTION
do
case $OPTION in
\?) usage
;;
esac
done
#Complete the copy from yesterday's XML to today's if a collection operation can not be done successfully
cpXMLfile()
{
year=`date '+%Y'`
month=`date '+%m'`
day=`date '+%d'`
curDate=$year-$month-$day
day=`expr "$day" - 1`
case "$day" in
0)
month=`expr "$month" - 1`
case "$month" in
0)
month=12
year=`expr "$year" - 1`
;;
esac
day=`cal $month $year | grep . | fmt -1 | tail -1`
esac
dayLen=`expr length $day`
if [ $dayLen -lt 2 ]; then
day="0$day"
fi
monLen=`expr length $month`
if [ $monLen -lt 2 ]; then
month="0$month"
fi
lastDate=$year-$month-$day
curTime=`date '+%Y-%m-%d-%H-%M-%S'`
curXMLfile=`find $1/$2 -name $2-$curDate\*.xml`
lastXMLfile=`find $1/$2 -name $2-$lastDate\*.xml`
if [ -z "$curXMLfile" ]; then
cp $lastXMLfile $1/$2/dataFabricStats/$2-$curTime.xml
fi
}
#postfix or sendmail needs to be configured
#The file called as siteAdmins stores each site admin's email address, which is located in the path $IRODS_HOME/server/bin/usageScripts
# Each line in the file looks like arcs-df.hpcu.uq.edu.au: [email protected]
mailOut()
{
mailAddress=`cat $IRODS_HOME/server/bin/usageScripts/siteAdmins | grep $1 |awk -F: '{print $2}'`
echo "Unsuccessful in downloading usage stats from your site!" |mail -s "Usage Stats" $mailAddress
}
#Print time to log file of usage stats
date '+%Y-%m-%d-%H-%M-%S' >> "$logDir/useLog-DataFabric"
zone="ARCS"
cpXMLfile $2 $zone
python $IRODS_HOME/server/bin/usageScripts/DBScripts/StatsDB.py $2 >> "$logDir/useLog-DataFabric" 2>> "$logDir/useLog-DataFabric"
| true |
4e3f670e1ffa996329a9ca21e93a58f1ff79ffb2
|
Shell
|
madnh/bash-app
|
/modules/create_module/create_module.sh
|
UTF-8
| 513 | 3.640625 | 4 |
[] |
no_license
|
#!/usr/bin/env bash
function __module() {
local MODULE_DIR="${1}"
    local MODULE_NAME
    # declare and assign separately so that $? below reflects whiptail's exit status
    MODULE_NAME=$(whiptail --inputbox "Module name?" 8 78 --title "Create module" 3>&1 1>&2 2>&3)
    exitstatus=$?
if [ $exitstatus = 0 ]; then
local MODULE_FILE="${APP_MODULE_DIR}/${MODULE_NAME}/${MODULE_NAME}.sh"
mkdir "${APP_MODULE_DIR}/${MODULE_NAME}";
cp "${MODULE_DIR}/data/template.sh" "${MODULE_FILE}";
chmod +x "${MODULE_FILE}";
_message success "Create module success"
fi
}
| true |
ad0a8dd360cf52ce5bbc0c0d5acf5db65bbf3373
|
Shell
|
rdghosal/njmvc_checker.py
|
/entrypoint.sh
|
UTF-8
| 360 | 2.5625 | 3 |
[] |
no_license
|
#!/usr/bin/bash
BASE_URL=$PWD/app/
source ${BASE_URL}/local.env
/usr/bin/python3 ${BASE_URL}njmvc_checker.py -s "knowledge test" -c "Edison,South Plainfield,Rahway" >> ${BASE_URL}njmvc_checker.log
if grep -qe "Found [0-9]\+ appointments" ${BASE_URL}njmvc_checker.log; then
echo "Stopping cron..." >> ${BASE_URL}njmvc_checker.log
/usr/bin/crontab -r
fi
| true |
2a8407cf3a40fb2ee4b8a7afec316bfa97c085b9
|
Shell
|
rocksclusters/area51
|
/src/tripwire-config/etc/tw-email-to
|
UTF-8
| 1,022 | 2.890625 | 3 |
[] |
no_license
|
#!/bin/bash
# $Id: tw-email-to,v 1.6 2011/02/23 00:33:53 bruno Exp $
# $Log: tw-email-to,v $
# Revision 1.6 2011/02/23 00:33:53 bruno
# fix to send tripwire reports to multiple recipients.
#
# Revision 1.5 2009/07/01 22:03:59 bruno
# fixes
#
# Revision 1.4 2009/05/26 22:21:31 bruno
# changed 'tripwire' 'mail(to)' from an app_global to a global attribute
#
# Revision 1.3 2007/07/13 17:30:34 phil
# Use the rocks command line to store the emailto for tripwire
#
# Revision 1.2 2006/09/18 23:06:39 phil
# Don't fail if mysql isn't running ...
#
# Revision 1.1 2005/10/16 04:46:07 phil
# Generic method to set the email address for report recipients
#
#
# A small script to read email addresses that tripwire cron sends email to
#
# See if we can get the attribute without error
/opt/rocks/bin/rocks report host attr localhost attr=tripwire_mail > \
	/dev/null 2>&1
retcode=$?
if [ $retcode -eq 0 ]; then
/opt/rocks/bin/rocks report host attr localhost attr=tripwire_mail
else
echo "root@localhost"
fi
| true |
1ee11ef22fdbaf0d06066ee23228d54da090d80e
|
Shell
|
roytam1/mirbsd-cvs
|
/contrib/hosted/tg/m_1
|
UTF-8
| 1,629 | 3.296875 | 3 |
[] |
no_license
|
#!/bin/mksh
# $MirOS: contrib/hosted/tg/m_1,v 1.2 2009/09/08 16:43:01 tg Exp $
#-
# Copyright (c) 2008
# Thorsten Glaser <[email protected]>
#
# Provided that these terms and disclaimer and all copyright notices
# are retained or reproduced in an accompanying document, permission
# is granted to deal in this work without restriction, including un-
# limited rights to use, publicly perform, distribute, sell, modify,
# merge, give away, or sublicence.
#
# This work is provided "AS IS" and WITHOUT WARRANTY of any kind, to
# the utmost extent permitted by applicable law, neither express nor
# implied; without malicious intent or gross negligence. In no event
# may a licensor, author or contributor be held liable for indirect,
# direct, other damage, loss, or other issues arising in any way out
# of dealing in the work, even if advised of the possibility of such
# damage or existence of a defect, except proven that it results out
# of said person's immediate fault when using the work as intended.
cd /
uid=$(id -u)
typeset -l remh=$1
if [[ -z $remh ]]; then
print Host empty
exit 1
fi
if ! fgrep -i "Host $remh" $HOME/.etc/ssh/config >/dev/null 2>&1; then
print Host not configured: $remh
exit 1
fi
# kill old one first
set -A x $(ps x|fgrep ssh|fgrep -v grep|fgrep "ssh -fNM ${remh}")
while [[ -n ${x[0]} ]]; do
kill ${x[0]}
set -A x $(ps x|fgrep ssh|fgrep -v grep|fgrep "ssh -fNM ${remh}")
done
rm -f /var/run/ssh-agent/$uid/ctl.${remh}
# start new one
while [[ -z ${x[0]} ]]; do
ssh -fNM "$@" ${remh}
set -A x $(ps x|fgrep ssh|fgrep -v grep|fgrep "ssh -fNM ${remh}")
done
print ${x[0]} >/var/run/ssh-agent/$uid/pid.${remh}
| true |
84d977457afd7f2de55476e52d2a4d20153680e1
|
Shell
|
datallboy/php-dev-env
|
/gencerts.sh
|
UTF-8
| 464 | 2.9375 | 3 |
[] |
no_license
|
#!/bin/bash
if [[ $EUID -ne 0 ]]; then
echo "This script must be run as root"
exit 1
fi
mkdir -p certs
OPENSSL_SUBJ="/C=US/ST=/L="
OPENSSL_CA="${OPENSSL_SUBJ}/CN=php-dev-env-CA"
OPENSSL_SERVER="${OPENSSL_SUBJ}/CN=mariadb"
OPENSSL_CLIENT="${OPENSSL_SUBJ}/CN=client"
sh ./scripts/genroot.sh "${OPENSSL_CA}"
sh ./scripts/genserver.sh "${OPENSSL_SERVER}"
sh ./scripts/genclient.sh "${OPENSSL_CLIENT}"
chmod 644 ./certs/client-key.pem ./certs/server-key.pem
| true |
e22fe3db34145f49bd3f82b786806963e90b60fc
|
Shell
|
rhempel/bashtools
|
/src/fmt
|
UTF-8
| 1,747 | 4.125 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# fmt - colorized output for your bash scripts
#
# Copyright (C) 2014-2015 Ralph Hempel <[email protected]>
#
# Based on "Template to write better bash scripts" from:
# More info: http://kvz.io/blog/2013/02/26/introducing-bash3boilerplate/
# Version 0.0.1
#
# See LICENSE for https://github.com/rhempel/bashtools
# -----------------------------------------------------------------------------
# Configuration
# -----------------------------------------------------------------------------
# Environment variables
[ -z "${LOG_LEVEL}" ] && LOG_LEVEL="3" # 4 = debug -> 0 = fail
# -----------------------------------------------------------------------------
function _fmt () {
color_info="\x1b[32m"
color_warn="\x1b[33m"
color_error="\x1b[31m"
color=
[ "${1}" = "info" ] && color="${color_info}"
[ "${1}" = "warn" ] && color="${color_warn}"
[ "${1}" = "error" ] && color="${color_error}"
[ "${1}" = "fail" ] && color="${color_error}"
color_reset="\x1b[0m"
if [ "${TERM}" != "xterm" ] || [ -t 1 ]; then
# Don't use colors on pipes on non-recognized terminals
color=""
color_reset=""
fi
echo -e "$(date +"%H:%M:%S") [${color}$(printf "%5s" ${1})${color_reset}]";
}
function fail () { echo "$(_fmt fail) ${@}" || true; exit 1; }
function error () { [ "${LOG_LEVEL}" -ge 1 ] && echo "$(_fmt error) ${@}" || true; }
function warn () { [ "${LOG_LEVEL}" -ge 2 ] && echo "$(_fmt warn) ${@}" || true; }
function info () { [ "${LOG_LEVEL}" -ge 3 ] && echo "$(_fmt info) ${@}" || true; }
function debug () { [ "${LOG_LEVEL}" -ge 4 ] && echo "$(_fmt debug) ${@}" || true; }
# -----------------------------------------------------------------------------
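# Usage sketch (assumes this file is sourced by another script; not part of the original):
#   . ./fmt
#   info  "starting build"   # printed when LOG_LEVEL >= 3
#   debug "extra detail"     # printed only when LOG_LEVEL >= 4
#   fail  "cannot continue"  # always printed, then exits with status 1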
| true |
0d5e2e38ee858c5ca0f2ecca88979b7cdddcadfd
|
Shell
|
sshyran/meta-tn-imx-bsp-TechNexion
|
/recipes-containers/docker-disk/files/entry.sh
|
UTF-8
| 2,975 | 3.96875 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/sh
set -o errexit
set -o nounset
DOCKER_TIMEOUT=20 # Wait 20 seconds for docker to start
DATA_VOLUME=/docker-partition
BUILD=/build
SRC=/src
PARTITION_SIZE=${PARTITION_SIZE:-2048}
PARTITION_IMAGE=${PARTITION_IMAGE:-docker-data-partition.img}
CONTAINER_SUFFIX=${CONTAINER_SUFFIX:-.tar.gz}
IMAGE_SUFFIX=".tar"
finish() {
# Make all files owned by the build system
chown -R "$USER_ID:$USER_GID" "${BUILD}"
}
trap finish EXIT
# Create user
echo "[INFO] Creating and setting $USER_ID:$USER_GID."
groupadd -g "$USER_GID" docker-disk-group || true
useradd -u "$USER_ID" -g "$USER_GID" -p "" docker-disk-user || true
mkdir -p $DATA_VOLUME/docker
# Start docker
echo "Starting docker daemon with $STORAGE_DRIVER storage driver."
dockerd --data-root ${DATA_VOLUME}/docker -s "${STORAGE_DRIVER}" -b none --experimental &
echo "Waiting for docker to become ready.."
STARTTIME="$(date +%s)"
ENDTIME="$STARTTIME"
while [ ! -S /var/run/docker.sock ]
do
if [ $((ENDTIME - STARTTIME)) -le $DOCKER_TIMEOUT ]; then
sleep 1 && ENDTIME=$((ENDTIME + 1))
else
echo "Timeout while waiting for docker to come up."
exit 1
fi
done
echo "Docker started."
if [ -n "${PRIVATE_REGISTRY}" ] && [ -n "${PRIVATE_REGISTRY_USER}" ] && [ -n "${PRIVATE_REGISTRY_PASSWORD}" ]; then
echo "login ${PRIVATE_REGISTRY}..."
docker login -u "${PRIVATE_REGISTRY_USER}" -p "${PRIVATE_REGISTRY_PASSWORD}" "${PRIVATE_REGISTRY}"
fi
# Pull in the technexion image from dockerhub
if [ -n "${TARGET_REPOSITORY}" -a -n "${TARGET_TAG}" ]; then
echo "Pulling ${TARGET_REPOSITORY}:${TARGET_TAG}..."
docker pull "${TARGET_REPOSITORY}:${TARGET_TAG}"
fi
# Pull in arch specific hello-world image and tag it tn-healthcheck-image
if [ -n "${HEALTHCHECK_REPOSITORY}" ]; then
echo "Pulling ${HEALTHCHECK_REPOSITORY}:latest..."
docker pull --platform "${HEALTHCHECK_PLATFORM}" "${HEALTHCHECK_REPOSITORY}"
docker tag "${HEALTHCHECK_REPOSITORY}" ${HEALTHCHECK_EXPORT_IMAGE//${IMAGE_SUFFIX}}
docker rmi "${HEALTHCHECK_REPOSITORY}"
docker save ${HEALTHCHECK_EXPORT_IMAGE//${IMAGE_SUFFIX}} > ${BUILD}/${HEALTHCHECK_EXPORT_IMAGE}
fi
# Import the container image from local build
if [ -n "${CONTAINER_IMAGE}" -a -f "${SRC}/${CONTAINER_IMAGE}" ]; then
echo "Importing ${SRC}/${CONTAINER_IMAGE}..."
	docker import ${SRC}/${CONTAINER_IMAGE} technexion/${CONTAINER_IMAGE//${CONTAINER_SUFFIX}}:latest # CONTAINER_SUFFIX already includes the leading dot
fi
echo "Imported Docker Images..."
docker images
echo "Stopping docker..."
kill -TERM "$(cat /var/run/docker.pid)"
# don't let wait() error out and crash the build if the docker daemon has already been stopped
wait "$(cat /var/run/docker.pid)" || true
# Export the final data filesystem
dd if=/dev/zero of=${BUILD}/${PARTITION_IMAGE} bs=1M count=0 seek="${PARTITION_SIZE}"
mkfs.ext4 -E lazy_itable_init=0,lazy_journal_init=0 -i 8192 -d ${DATA_VOLUME}/docker -F ${BUILD}/${PARTITION_IMAGE}
tar zcf ${BUILD}/${PARTITION_IMAGE}${CONTAINER_SUFFIX} -C ${DATA_VOLUME}/docker .
| true |
616d12cf58de853fd8a67253d0456afd895c8ed7
|
Shell
|
heiliuer/shell-study
|
/007server-deploy/mbuild.sh
|
UTF-8
| 229 | 3.109375 | 3 |
[] |
no_license
|
project=$1
action=$2
version=$3
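# Usage sketch (inferred from the positional parameters above; $sourceroot and
# modules/build.sh are assumed to be provided by the surrounding project):
#   ./mbuild.sh <project> <action> [version]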
if [ $version ];then
modules/build.sh $sourceroot/$project/ $action $version
else
modules/build.sh $sourceroot/$project/ $action
fi
if [ $? -ne 0 ];then
echo "build $project failed"
exit 1
fi
| true |
48556e61c942efad70c769f33bbea2a224a45d3a
|
Shell
|
chelseatroy/diego-lite
|
/diego-lite-pipeline/02_test/run_whetstone
|
UTF-8
| 1,226 | 3.234375 | 3 |
[] |
no_license
|
#!/bin/bash
set -e
save_logs() {
echo "save logs"
pushd $WORKSPACE_DIR/diego-lite
vagrant ssh -c "sudo tar -czf /vagrant/vagrant_upstart_logs.tgz /var/log/upstart"
popd
mv diego-lite/vagrant_upstart_logs.tgz .
}
cleanup_vagrant() {
echo "cleaning up vagrant"
pushd $WORKSPACE_DIR/diego-lite
vagrant destroy --force
popd
}
cleanup(){
set +e
save_logs
cleanup_vagrant
}
export DIEGO_RELEASE_PATH=$1
export DIEGO_EDGE_TAR_PATH=$2
WORKSPACE_DIR=`pwd`
cp $DIEGO_EDGE_TAR_PATH diego-lite
export VAGRANT_DIEGO_EDGE_TAR_PATH=/vagrant/`basename $DIEGO_EDGE_TAR_PATH`
trap cleanup EXIT
pushd $WORKSPACE_DIR/diego-lite
vagrant up --provider=virtualbox
popd
export GOPATH=$DIEGO_RELEASE_PATH
rm -rf $GOPATH/pkg/*
mkdir -p $GOPATH/src/github.com/pivotal-cf-experimental
ln -sf $WORKSPACE_DIR/whetstone $GOPATH/src/github.com/pivotal-cf-experimental/
go get github.com/onsi/ginkgo/ginkgo
go get github.com/onsi/gomega
PATH=$PATH:$GOPATH/bin ginkgo -noColor $GOPATH/src/github.com/pivotal-cf-experimental/whetstone -- -domain="192.168.11.11.xip.io" -loggregatorAddress="loggregator.192.168.11.11.xip.io" -receptorAddress="receptor.192.168.11.11.xip.io" -timeout=300
| true |
9e0c519d7c81596dc2840137e3a47bd2d3841b45
|
Shell
|
diatum-org/portal
|
/tools/challenge.sh
|
UTF-8
| 1,875 | 3.453125 | 3 |
[
"Apache-2.0"
] |
permissive
|
ACTION="$1"
RECORD="$2"
NAME="$3"
VALUE=\\\"$4\\\"
IP=`mysql -u root -proot portal -sN -e "select address from device where id='$RECORD'"`
HOSTNAME=`mysql -u root -proot portal -sN -e "select dns from device where id='$RECORD'"`
DOMAIN=`echo "$HOSTNAME" | cut -c 8-`
DOMAIN_ID=`mysql -u root -proot portal -sN -e "select id from domain where name='$DOMAIN'"`
REGION=`mysql -u root -proot portal -sN -e "select region from domain where id='$DOMAIN_ID'"`
ZONE=`mysql -u root -proot portal -sN -e "select zone from domain where id='$DOMAIN_ID'"`
ACCESS_KEY=`mysql -u root -proot portal -sN -e "select key_value from domain where id='$DOMAIN_ID'"`
KEY_ID=`mysql -u root -proot portal -sN -e "select key_id from domain where id='$DOMAIN_ID'"`
export AWS_SECRET_ACCESS_KEY=$ACCESS_KEY
export AWS_ACCESS_KEY_ID=$KEY_ID
debuglog () {
mysql -u root -proot portal -sN -e "insert into log (level, message) values ('debug', '$1')";
}
ROUTE=`/usr/bin/aws route53 change-resource-record-sets --hosted-zone-id $ZONE --region $REGION --output json --change-batch "{\"Changes\": [{\"Action\": \"$ACTION\",\"ResourceRecordSet\": {\"Name\": \"$NAME.$HOSTNAME\",\"Type\": \"TXT\",\"TTL\": 300,\"ResourceRecords\": [{ \"Value\": \"$VALUE\"}]}}]}"` || {
logger "$DEVICE: change-resource-record-sets failed"
debuglog "$DEVICE: change-resource-record-sets failed"
exit 1;
}
CHANGE=`echo $ROUTE | jq -r .ChangeInfo.Id`
STATE=`echo $ROUTE | jq -r .ChangeInfo.Status`
# wait for DNS to sync
logger "$DEVICE: waiting for dns sync"
for (( d=0; d < 60; ++d))
do
ROUTE=`/usr/bin/aws route53 get-change --region $REGION --output json --id $CHANGE`
STATE=`echo $ROUTE | jq -r .ChangeInfo.Status`
if [ "$STATE" = "INSYNC" ]; then
break;
fi
sleep 5
done
if [ "$STATE" != "INSYNC" ]; then
logger "$DEVICE: failed to sync dns"
debuglog "$DEVICE: failed to sync dns"
exit 1;
fi
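# Invocation sketch (inferred from the positional parameters at the top; the
# record id and token shown here are placeholders, not from the original repo):
#   ./challenge.sh UPSERT <device-id> _acme-challenge <challenge-token>
# i.e. ACTION RECORD NAME VALUE, where NAME.$HOSTNAME becomes the TXT record.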
| true |
f72ea1a9e1932daad338c4ac8f3b2b84fa656cfe
|
Shell
|
p-himik/config
|
/zsh/line_numbers.zsh
|
UTF-8
| 1,324 | 3.921875 | 4 |
[] |
no_license
|
#!zsh
function fill-query-and-params {
local query; query=()
local params; params=()
for a in $@; do
if [[ "$a" =~ "^-" ]]; then
params+=($a)
else
query+=($a)
fi
done
export CMD_QUERY="$query"
export CMD_PARAMS="$params"
}
function print-with-aliases {
local query="$1"
local total=$#INPUT_FILES_LIST
local width=$((${#total} + 2))
local -R $width i
local c=1
for (( j=1; j<=$gs_max_changes; j++ )); do unset $git_env_char$j; done
for f in $INPUT_FILES_LIST; do
if [[ $c -lt $gs_max_changes ]]; then
export $git_env_char$c="$(readlink -f $f)"
i="[$((c++))]"
else
i="[]"
fi
echo "$fg_bold[yellow]${i}$reset_color\t$f" | grep --color=always -i "$query"
done
}
function find-with-alias {
fill-query-and-params $@
local INPUT_FILES_LIST
INPUT_FILES_LIST=("${(f)$(find . $CMD_PARAMS -iname "*$CMD_QUERY*")}")
print-with-aliases "$CMD_QUERY"
}
alias fn=find-with-alias
function locate-with-alias() {
fill-query-and-params $@
local INPUT_FILES_LIST
if [[ -n "$CMD_QUERY" ]]; then
INPUT_FILES_LIST=("${(f)$(locate "$CMD_QUERY" $CMD_PARAMS)}")
print-with-aliases "$CMD_QUERY"
fi
}
alias lt=locate-with-alias
| true |
82d798765249e6352edd7b7aa05c698b5e41eb6e
|
Shell
|
Vimux/dotfiles
|
/macos.sh
|
UTF-8
| 13,411 | 2.90625 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
# Customize macOS defaults settings
# Ask for the administrator password upfront
sudo -v
# Keep-alive: update existing `sudo` time stamp until `.macos` has finished
while true; do sudo -n true; sleep 60; kill -0 "$$" || exit; done 2>/dev/null &
# Boot macOS in verbose mode
sudo nvram boot-args="-v"
###############################################################################
# General #
###############################################################################
# Set highlight color to graphite
defaults write NSGlobalDomain AppleHighlightColor -string "0.847059 0.847059 0.862745"
# Set sidebar icon size to medium
defaults write NSGlobalDomain NSTableViewDefaultSizeMode -int 2
# Expand save panel by default
defaults write NSGlobalDomain NSNavPanelExpandedStateForSaveMode -bool true
defaults write NSGlobalDomain NSNavPanelExpandedStateForSaveMode2 -bool true
# Expand print panel by default
defaults write NSGlobalDomain PMPrintingExpandedStateForPrint -bool true
defaults write NSGlobalDomain PMPrintingExpandedStateForPrint2 -bool true
# Save to disk (not to iCloud) by default
defaults write NSGlobalDomain NSDocumentSaveNewDocumentsToCloud -bool false
# Disable Notification Center and remove the menu bar icon
launchctl unload -w /System/Library/LaunchAgents/com.apple.notificationcenterui.plist 2> /dev/null
###############################################################################
# Dock and Hot Corners #
###############################################################################
# Enable highlight hover effect for the grid view of a stack (Dock)
defaults write com.apple.dock mouse-over-hilite-stack -bool true
# Set the icon size of Dock items to 54 pixels
defaults write com.apple.dock tilesize -int 54
# Change minimize/maximize window effect
defaults write com.apple.dock mineffect -string "scale"
# Minimize windows into their application’s icon
defaults write com.apple.dock minimize-to-application -bool true
# Dock orientation: "left", "bottom", "right"
defaults write com.apple.dock "orientation" -string "left"
# Show indicator lights for open applications in the Dock
defaults write com.apple.dock show-process-indicators -bool true
# Don’t animate opening applications from the Dock
defaults write com.apple.dock launchanim -bool false
# Don’t show recent applications in Dock
defaults write com.apple.dock show-recents -bool false
# Disable Hot corners
# Possible values:
# 0: no-op
# 2: Mission Control
# 3: Show application windows
# 4: Desktop
# 5: Start screen saver
# 6: Disable screen saver
# 7: Dashboard
# 10: Put display to sleep
# 11: Launchpad
# 12: Notification Center
# Top left screen corner
defaults write com.apple.dock wvous-tl-corner -int 0
defaults write com.apple.dock wvous-tl-modifier -int 0
# Top right screen corner
defaults write com.apple.dock wvous-tr-corner -int 0
defaults write com.apple.dock wvous-tr-modifier -int 0
# Bottom left screen corner
defaults write com.apple.dock wvous-bl-corner -int 0
defaults write com.apple.dock wvous-bl-modifier -int 0
# Bottom right screen corner
defaults write com.apple.dock wvous-br-corner -int 0
defaults write com.apple.dock wvous-br-modifier -int 0
###############################################################################
# Screen #
###############################################################################
# Require password immediately after sleep or screen saver begins
defaults write com.apple.screensaver askForPassword -int 1
defaults write com.apple.screensaver askForPasswordDelay -int 0
# Save screenshots to the desktop
defaults write com.apple.screencapture location -string "${HOME}/Desktop"
# Save screenshots in PNG format (other options: BMP, GIF, JPG, PDF, TIFF)
defaults write com.apple.screencapture type -string "png"
# Disable shadow in screenshots
defaults write com.apple.screencapture disable-shadow -bool true
###############################################################################
# Keyboard #
###############################################################################
# Set fast keyboard repeat rate
defaults write NSGlobalDomain KeyRepeat -int 2
defaults write NSGlobalDomain InitialKeyRepeat -int 15
# Disable automatic capitalization as it’s annoying when typing code
defaults write NSGlobalDomain NSAutomaticCapitalizationEnabled -bool false
# Disable smart dashes as they’re annoying when typing code
defaults write NSGlobalDomain NSAutomaticDashSubstitutionEnabled -bool false
# Disable automatic period substitution as it’s annoying when typing code
defaults write NSGlobalDomain NSAutomaticPeriodSubstitutionEnabled -bool false
# Disable smart quotes as they’re annoying when typing code
defaults write NSGlobalDomain NSAutomaticQuoteSubstitutionEnabled -bool false
# Disable auto-correct
defaults write NSGlobalDomain NSAutomaticSpellingCorrectionEnabled -bool false
###############################################################################
# Mouse #
###############################################################################
# Disable “natural” scrolling
defaults write NSGlobalDomain com.apple.swipescrolldirection -bool false
###############################################################################
# Date & Time #
###############################################################################
# Show date and time in the menu bar
defaults write com.apple.menuextra.clock "DateFormat" "EEE MMM d H.mm"
# Set the timezone; see `sudo systemsetup -listtimezones` for other values
sudo systemsetup -settimezone "Europe/Moscow" > /dev/null
###############################################################################
# Finder #
###############################################################################
# Finder: allow quitting via ⌘ + Q; doing so will also hide desktop icons
defaults write com.apple.finder QuitMenuItem -bool true
# Finder: disable window animations and Get Info animations
defaults write com.apple.finder DisableAllAnimations -bool true
# Don't show icons for hard drives, servers, and removable media on the desktop
defaults write com.apple.finder ShowExternalHardDrivesOnDesktop -bool false
defaults write com.apple.finder ShowHardDrivesOnDesktop -bool false
defaults write com.apple.finder ShowMountedServersOnDesktop -bool false
defaults write com.apple.finder ShowRemovableMediaOnDesktop -bool false
# Finder: show hidden files by default
defaults write com.apple.finder AppleShowAllFiles -bool true
# Finder: show all filename extensions
defaults write NSGlobalDomain AppleShowAllExtensions -bool true
# Finder: show status bar
defaults write com.apple.finder ShowStatusBar -bool true
# Finder: show path bar
defaults write com.apple.finder ShowPathbar -bool true
# Keep folders on top when sorting by name
defaults write com.apple.finder _FXSortFoldersFirst -bool true
# Disable the warning when changing a file extension
defaults write com.apple.finder FXEnableExtensionChangeWarning -bool false
# Avoid creating .DS_Store files on network or USB volumes
defaults write com.apple.desktopservices DSDontWriteNetworkStores -bool true
defaults write com.apple.desktopservices DSDontWriteUSBStores -bool true
# Use list view in all Finder windows by default
# Four-letter codes for the other view modes: `icnv`, `clmv`, `glyv`
defaults write com.apple.finder FXPreferredViewStyle -string "Nlsv"
# Disable the warning before emptying the Trash
defaults write com.apple.finder WarnOnEmptyTrash -bool false
# Show the ~/Library folder
chflags nohidden ~/Library
###############################################################################
# Safari & WebKit #
###############################################################################
# Privacy: don’t send search queries to Apple
defaults write com.apple.Safari UniversalSearchEnabled -bool false
defaults write com.apple.Safari SuppressSearchSuggestions -bool true
# Press Tab to highlight each item on a web page
# defaults write com.apple.Safari WebKitTabToLinksPreferenceKey -bool true
# defaults write com.apple.Safari com.apple.Safari.ContentPageGroupIdentifier.WebKit2TabsToLinks -bool true
# Show the full URL in the address bar (note: this still hides the scheme)
defaults write com.apple.Safari ShowFullURLInSmartSearchField -bool true
# Set Safari’s home page to `about:blank` for faster loading
defaults write com.apple.Safari HomePage -string "about:blank"
# Prevent Safari from opening ‘safe’ files automatically after downloading
defaults write com.apple.Safari AutoOpenSafeDownloads -bool false
# Make Safari’s search banners default to Contains instead of Starts With
# defaults write com.apple.Safari FindOnPageMatchesWordStartsOnly -bool false
# Enable the Develop menu and the Web Inspector in Safari
defaults write com.apple.Safari IncludeDevelopMenu -bool true
defaults write com.apple.Safari WebKitDeveloperExtrasEnabledPreferenceKey -bool true
defaults write com.apple.Safari com.apple.Safari.ContentPageGroupIdentifier.WebKit2DeveloperExtrasEnabled -bool true
# Add a context menu item for showing the Web Inspector in web views
defaults write NSGlobalDomain WebKitDeveloperExtras -bool true
# Enable continuous spellchecking
defaults write com.apple.Safari WebContinuousSpellCheckingEnabled -bool true
# Disable auto-correct
defaults write com.apple.Safari WebAutomaticSpellingCorrectionEnabled -bool false
# Disable AutoFill
defaults write com.apple.Safari AutoFillFromAddressBook -bool false
defaults write com.apple.Safari AutoFillPasswords -bool false
defaults write com.apple.Safari AutoFillCreditCardData -bool false
defaults write com.apple.Safari AutoFillMiscellaneousForms -bool false
# Warn about fraudulent websites
defaults write com.apple.Safari WarnAboutFraudulentWebsites -bool true
# Disable plug-ins
defaults write com.apple.Safari WebKitPluginsEnabled -bool false
defaults write com.apple.Safari com.apple.Safari.ContentPageGroupIdentifier.WebKit2PluginsEnabled -bool false
# Disable Java
defaults write com.apple.Safari WebKitJavaEnabled -bool false
defaults write com.apple.Safari com.apple.Safari.ContentPageGroupIdentifier.WebKit2JavaEnabled -bool false
defaults write com.apple.Safari com.apple.Safari.ContentPageGroupIdentifier.WebKit2JavaEnabledForLocalFiles -bool false
# Block pop-up windows
defaults write com.apple.Safari WebKitJavaScriptCanOpenWindowsAutomatically -bool false
defaults write com.apple.Safari com.apple.Safari.ContentPageGroupIdentifier.WebKit2JavaScriptCanOpenWindowsAutomatically -bool false
# Enable “Do Not Track”
defaults write com.apple.Safari SendDoNotTrackHTTPHeader -bool true
# Update extensions automatically
defaults write com.apple.Safari InstallExtensionUpdatesAutomatically -bool true
###############################################################################
# Terminal #
###############################################################################
# Only use UTF-8 in Terminal.app
defaults write com.apple.terminal StringEncodings -array 4
###############################################################################
# iTerm2 #
###############################################################################
# Don’t display the annoying prompt when quitting iTerm
defaults write com.googlecode.iterm2 PromptOnQuit -bool false
###############################################################################
# Time Machine #
###############################################################################
# Prevent Time Machine from prompting to use new hard drives as backup volume
defaults write com.apple.TimeMachine DoNotOfferNewDisksForBackup -bool true
###############################################################################
# Activity Monitor #
###############################################################################
# Show all processes in Activity Monitor
defaults write com.apple.ActivityMonitor ShowCategory -int 0
# Sort Activity Monitor results by CPU usage
defaults write com.apple.ActivityMonitor SortColumn -string "CPUUsage"
defaults write com.apple.ActivityMonitor SortDirection -int 0
###############################################################################
# Kill affected applications #
###############################################################################
for app in "Activity Monitor" \
    "cfprefsd" \
    "Dock" \
    "Finder" \
    "Safari" \
    "SystemUIServer" \
    "Terminal"; do
    killall "${app}" &> /dev/null
done
echo "Done. Note that some of these changes may require a logout/restart to take effect."
| true |
067fd243162aef36b465e93e28aa99c41ac95d45
|
Shell
|
Som-Energia/somenergia-scripts
|
/accounting/municipal_certificate/doall.sh
|
UTF-8
| 256 | 2.84375 | 3 |
[] |
no_license
|
#!/bin/bash
# Usage: ./doall.sh key.pks12
# Processes every CSV file in the input/ folder.
# Generates an output_NAME folder with the signed PDFs for each CSV.
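# Illustrative example (file names are hypothetical): with input/members_2020.csv
# present, running "./doall.sh key.pks12" creates output_members_2020/ containing
# the signed certificate PDFs generated from that CSV.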
for a in input/*csv
do
    ./csv2yaml.py "$a" "output_$(basename "$a" .csv)"
done
./create_certificate.sh "$1" output_*/*yaml
| true |
bb770b703047490218693dd290f278b7b8faa437
|
Shell
|
dongtranthien/IotWebConf
|
/pio/clean-pioexamples.sh
|
UTF-8
| 453 | 3.453125 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# This script removes the generated PlatformIO-compatible folder structure.
# The files in it are mostly references to the original files, and the
# originals are not deleted, so it is generally safe to run this script.
# platformio.ini files are removed along with any additionally added files.
#
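# Typical invocation (assuming the script is run from its own pio/ directory,
# next to the examples-pio/ folder it cleans):
#   ./clean-pioexamples.sh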
cd ../examples-pio || exit 1
for example in IotWebConf*; do
    echo "Removing pio example ${example}"
    rm -rf "${example}" || exit $?
done
| true |
7e6733487f396bf25e8e24d61e1f345d19ba032e
|
Shell
|
zsboss/hyperf-reload
|
/reload.sh
|
UTF-8
| 480 | 3.171875 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
basepath=$(cd `dirname $0`; pwd)
cd $basepath
if [ -f "../runtime/hyperf.pid" ]; then
    # Collect the PID(s) of the running "php hyperf.php start" process and kill them.
    PID=`ps -A | grep "php hyperf.php start" | awk '{print $1}'`
    #cat ../runtime/hyperf.pid | awk '{print $1}' | xargs kill -9 && rm -rf ../runtime/hyperf.pid && rm -rf ../runtime/container
    #cat ../runtime/hyperf.pid | awk '{print $2}' | xargs kill -9 && rm -rf ../runtime/hyperf.pid && rm -rf ../runtime/container
    kill -9 $PID && rm -rf ../runtime/container
fi
php hyperf.php start
| true |
3422802f30b12d91ba3869bee4d713059176921b
|
Shell
|
alerts-pl/slipstream_node_production
|
/scripts/mumudvb_start.sh
|
UTF-8
| 707 | 2.59375 | 3 |
[] |
no_license
|
#!/bin/bash
# MuMuDVB Boot Script by Jamie Whittingham
(( UID != 0 )) && { echo "Error: you MUST be logged in as root."; exit 1; }
echo "Starting MuMuDVB"
mkdir /var/run/mumudvb
killall mumudvb
# Launch one MuMuDVB instance per configuration file (mumudvb_0.conf through mumudvb_15.conf).
for i in $(seq 0 15); do
    mumudvb -c /root/mumudvb_${i}.conf
done
| true |
b49de2be33e15c89bb71682c41d845b56b73700b
|
Shell
|
orwell-int/proxy-robots
|
/proxy-robots-module/src/main/scripts/generate.sh
|
UTF-8
| 292 | 2.75 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# Generate Java code for the protobuf definitions
DIR=$(cd "$(dirname "$0")" ; pwd)
cd "$DIR/../../../.."
MSG_PATH=$DIR/../../../../messages
echo "Generating .java classes from the .proto files in $MSG_PATH"
protoc --java_out=proxy-robots-module/src/main/java/ --proto_path=$MSG_PATH $MSG_PATH/*.proto
| true |
680b03b1b5a35f178c04ef77a9bfa78887e5a7ae
|
Shell
|
danielthoren/Config-files
|
/kitty/config.sh
|
UTF-8
| 832 | 4.09375 | 4 |
[] |
no_license
|
#!/bin/bash
workingDir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
dir=~/.config/kitty
source $workingDir/../functions.sh
source $workingDir/../commandParser.sh -scope kitty "$@"
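# Note: functions.sh and commandParser.sh (sourced above) are assumed to provide
# the command_exists, flags_exists, update and install helpers used below.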
if ! command_exists kitty ; then
    if flags_exists no-sudo ; then
        echo "kitty not installed, can't install without sudo, exiting..."
        # 'exit' requires a numeric status code; the original 'exit no-sudo' was invalid.
        exit 1
    fi
    echo "kitty not installed, installing..."
    update
    install kitty
fi
echo "Installing kitty configuration in folder $dir"
if [ ! -d $dir ]; then
    echo "Folder does not exist, creating folder"
    mkdir -p $dir
else
    echo "Folder exists, purging data"
    rm "${dir}/kitty.conf"
    rm -r "${dir}/kitty-themes"
fi
ln -s "$workingDir/kitty_files/kitty.conf" "${dir}"
ln -s "$workingDir/kitty_files/kitty-themes" "${dir}"
echo "Done configuring kitty"
| true |
7989a706e143a4436a74ca3436da2a4ef7973fd7
|
Shell
|
paologallinaharbur/newrelic-integration-e2e-action
|
/samples/nri-powerdns/deps/pdns-authoritative/start.sh
|
UTF-8
| 2,095 | 3.6875 | 4 |
[] |
no_license
|
#!/bin/bash
mkdir -p /etc/powerdns/pdns.d
PDNSVARS=`echo ${!PDNSCONF_*}`
touch /etc/powerdns/pdns.conf
PDNSCONF_GMYSQL_USER=pdns
PDNSCONF_GMYSQL_DBNAME=pdns
PDNSCONF_GMYSQL_PASSWORD=pdnspw
for var in $PDNSVARS; do
    varname=`echo ${var#"PDNSCONF_"} | awk '{print tolower($0)}' | sed 's/_/-/g'`
    value=`echo ${!var} | sed 's/^$\(.*\)/\1/'`
    echo "$varname=$value" >> /etc/powerdns/pdns.conf
done
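# Illustrative example: an exported variable PDNSCONF_WEBSERVER_PORT=8081 would be
# written by the loop above as "webserver-port=8081" in /etc/powerdns/pdns.conf.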
if [ ! -z "$PDNSCONF_API_KEY" ]; then
cat >/etc/powerdns/pdns.d/api.conf <<EOF
api=yes
webserver=yes
webserver-address=0.0.0.0
webserver-allow-from=0.0.0.0/0
EOF
fi
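# The key itself reaches pdns.conf as "api-key=..." via the PDNSCONF_* loop above;
# this block only enables the HTTP API and the built-in webserver.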
mysqlcheck() {
    # Wait for MySQL to be available...
    COUNTER=20
    until mysql -h mysql -u "$PDNSCONF_GMYSQL_USER" -p"$PDNSCONF_GMYSQL_PASSWORD" -e "show databases" 2>/dev/null; do
        echo "WARNING: MySQL still not up. Trying again..."
        sleep 10
        let COUNTER-=1
        if [ $COUNTER -lt 1 ]; then
            echo "ERROR: MySQL connection timed out. Aborting."
            exit 1
        fi
    done
    # Import the PowerDNS schema only if the configured database is still empty.
    count=`mysql -h mysql -u "$PDNSCONF_GMYSQL_USER" -p"$PDNSCONF_GMYSQL_PASSWORD" -e "select count(*) from information_schema.tables where table_type='BASE TABLE' and table_schema='$PDNSCONF_GMYSQL_DBNAME';" | tail -1`
    if [ "$count" == "0" ]; then
        echo "Database is empty. Importing PowerDNS schema..."
        mysql -h mysql -u "$PDNSCONF_GMYSQL_USER" -p"$PDNSCONF_GMYSQL_PASSWORD" "$PDNSCONF_GMYSQL_DBNAME" < /usr/share/doc/pdns-backend-mysql/schema.mysql.sql && echo "Import done."
    else
        echo "Database already exists..."
    fi
}
mysqlcheck
if [ "$SECALLZONES_CRONJOB" == "yes" ]; then
cat > /etc/crontab <<EOF
PDNSCONF_API_KEY=$PDNSCONF_API_KEY
SHELL=/bin/bash
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
# m h dom mon dow user command
0,30 * * * * root /usr/local/bin/secallzones.sh > /var/log/cron.log 2>&1
EOF
ln -sf /proc/1/fd/1 /var/log/cron.log
cron -f &
fi
# Start PowerDNS
# same as /etc/init.d/pdns monitor
echo "Starting PowerDNS..."
if [ "$#" -gt 0 ]; then
exec /usr/sbin/pdns_server "$@"
else
exec /usr/sbin/pdns_server --daemon=no --guardian=no --control-console --loglevel=9
fi
| true |
6c5beff7ecb3668b127d2428400e6b72de10e1f3
|
Shell
|
carymrobbins/dotfiles
|
/shared/bin/rebase-scalafmt
|
UTF-8
| 2,408 | 3.96875 | 4 |
[] |
no_license
|
#!/bin/bash
usage() {
cat <<HERE
Usage: $0 <rev> [options]
Example: rebase-scalafmt origin/master --fast
Options:
--fast Shorthand for --no-edit --no-interactive --no-compile
--no-edit Don't prompt to edit messages when amending commits.
--no-interactive Don't run in interactive rebase mode.
Useful to skip the initial rebase prompt.
--no-compile Don't compile sources after applying scalafmt to a commit.
--no-it Don't run scalafmt for 'it' sources (integration tests).
Useful if your project doesn't have 'it' sources.
HERE
}
red="$(tput setaf 1)"
green="$(tput setaf 2)"
magenta="$(tput setaf 5)"
text_reset="$(tput sgr0)"
echo_color() {
echo "$1$2$text_reset"
}
###################################################
base_rev=$1
if [ -z "$base_rev" ]; then
>&2 echo_color $red "Missing argument: rev"
>&2 usage
exit 1
fi
abort() {
>&2 echo_color $red "$1"
git rebase --abort 2>/dev/null
exit 1
}
trap 'abort "Errors occurred, aborting rebase."' ERR
trap 'abort "Killed by signal, aborting rebase."' SIGINT SIGTERM SIGKILL
orig_rev=$(git rev-parse HEAD)
shift
while [ $# -ne 0 ]; do
    case "$1" in
        --help|-h) usage; exit;;
        --fast) shift; no_edit=1; no_interactive=1; no_compile=1;;
        --no-edit) shift; no_edit=1;;
        --no-interactive) shift; no_interactive=1;;
        --no-compile) shift; no_compile=1;;
        --no-it) shift; no_it=1;;
        *)
            >&2 echo_color $red "Unexpected argument: $1"
            >&2 usage
            exit 1
            ;;
    esac
done
echo_color $magenta "Starting at rev $orig_rev"
exec_cmd="sbt scalafmt "
[ -n "$no_compile" ] || exec_cmd+="compile "
exec_cmd+="test:scalafmt "
[ -n "$no_compile" ] || exec_cmd+="test:compile "
if [ -z "$no_it" ]; then
    exec_cmd+="it:scalafmt "
    [ -n "$no_compile" ] || exec_cmd+="it:compile "
fi
exec_cmd+=" && git commit -a --amend "
[ -z "$no_edit" ] || exec_cmd+="--no-edit "
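# With no options given, the command run after each rebased commit is roughly:
#   sbt scalafmt compile test:scalafmt test:compile it:scalafmt it:compile && git commit -a --amend
# (the --fast / --no-* flags above simply drop the corresponding pieces)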
[ -n "$no_interactive" ] || interactive_flag=-i
git rebase $interactive_flag --exec "$exec_cmd" "$base_rev"
echo
echo_color $green "scalafmt successfully applied to commits!"
echo
echo "The original revision was $orig_rev"
echo "Use the following to see what changed after applying scalafmt -"
echo " git diff $orig_rev"
echo "If things look wrong, you can revert back with -"
echo " git reset --hard $orig_rev"
| true |