commit 7a9ff83aab80eb9f6ac9aca05abb54a963e7422e Author: Starbeamrainbowlabs Date: Fri Dec 11 10:29:22 2015 +0000 Initial commit. diff --git a/.bash_aliases b/.bash_aliases new file mode 100644 index 0000000..f696d35 --- /dev/null +++ b/.bash_aliases @@ -0,0 +1,33 @@ +############### +### Aliases ### +############### + +# some more ls aliases +alias ll='ls -hAtFl' +alias la='ls -hA' +alias l='ls -htFl' + +# make cp and mv prompt before overwriting +alias cp='cp -i' +alias mv='mv -i' + +# make mkdir tell us about each directory it has created +alias mkdir='mkdir -pv' +# make rm tell us about everything it's deleting +alias rm='rm -v' +# Make the permissions tweaking commands give us more information about what they are doing +alias chmod='chmod -v' +alias chown='chown -v' +alias chgrp='chgrp -v' + +# show colours in less +alias less='less -R' + +# Alias to clear the screen for real +alias cls='printf "\033c"' + +# allow us to create a directory and immediately move into it +mkdird () { + mkdir -pv $1 + cd $1 +} diff --git a/.bashrc b/.bashrc new file mode 100644 index 0000000..0c10ad4 --- /dev/null +++ b/.bashrc @@ -0,0 +1,111 @@ +# ~/.bashrc: executed by bash(1) for non-login shells. +# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc) +# for examples + +# If not running interactively, don't do anything +case $- in + *i*) ;; + *) return;; +esac + +# don't put duplicate lines or lines starting with space in the history. +# See bash(1) for more options +HISTCONTROL=ignoreboth + +# append to the history file, don't overwrite it +shopt -s histappend + +# for setting history length see HISTSIZE and HISTFILESIZE in bash(1) +HISTSIZE=1000 +HISTFILESIZE=2000 + +# check the window size after each command and, if necessary, +# update the values of LINES and COLUMNS. +shopt -s checkwinsize + +# If set, the pattern "**" used in a pathname expansion context will +# match all files and zero or more directories and subdirectories. +shopt -s globstar + +# make less more friendly for non-text input files, see lesspipe(1) +[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)" + +# set variable identifying the chroot you work in (used in the prompt below) +if [ -z "${debian_chroot:-}" ] && [ -r /etc/debian_chroot ]; then + debian_chroot=$(cat /etc/debian_chroot) +fi + +# set a fancy prompt (non-color, unless we know we "want" color) +case "$TERM" in + xterm-color) color_prompt=yes;; +esac + +# uncomment for a colored prompt, if the terminal has the capability; turned +# off by default to not distract the user: the focus in a terminal window +# should be on the output of commands, not on the prompt +force_color_prompt=yes + +if [ -n "$force_color_prompt" ]; then + if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then + # We have color support; assume it's compliant with Ecma-48 + # (ISO/IEC-6429). (Lack of such support is extremely rare, and such + # a case would tend to support setf rather than setaf.) 
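	# (Aside on .bash_aliases above: its mkdird helper expands $1 unquoted, so a
	# directory name containing spaces is split into several arguments. A quoted
	# sketch of the same helper, not the version committed above, would be:)
	mkdird () {
		mkdir -pv "$1"
		cd "$1" || return
	}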
+ color_prompt=yes + else + color_prompt= + fi +fi + +if [ "$color_prompt" = yes ]; then + PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ ' +else + PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ ' +fi +unset color_prompt force_color_prompt + +# If this is an xterm set the title to user@host:dir +case "$TERM" in +xterm*|rxvt*) + PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@\h: \w\a\]$PS1" + ;; +*) + ;; +esac + +# enable color support of ls and also add handy aliases +if [ -x /usr/bin/dircolors ]; then + test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)" + alias ls='ls --color=auto' + #alias dir='dir --color=auto' + #alias vdir='vdir --color=auto' + + alias grep='grep --color=auto' + alias fgrep='fgrep --color=auto' + alias egrep='egrep --color=auto' +fi + +# Add an "alert" alias for long running commands. Use like so: +# sleep 10; alert +alias alert='notify-send --urgency=low -i "$([ $? = 0 ] && echo terminal || echo error)" "$(history|tail -n1|sed -e '\''s/^\s*[0-9]\+\s*//;s/[;&|]\s*alert$//'\'')"' + +# Alias definitions. +# You may want to put all your additions into a separate file like +# ~/.bash_aliases, instead of adding them here directly. +# See /usr/share/doc/bash-doc/examples in the bash-doc package. + +if [ -f ~/.bash_aliases ]; then + . ~/.bash_aliases +fi + +# enable programmable completion features (you don't need to enable +# this, if it's already enabled in /etc/bash.bashrc and /etc/profile +# sources /etc/bash.bashrc). +if ! shopt -oq posix; then + if [ -f /usr/share/bash-completion/bash_completion ]; then + . /usr/share/bash-completion/bash_completion + elif [ -f /etc/bash_completion ]; then + . /etc/bash_completion + fi +fi + + diff --git a/ctx b/ctx new file mode 100755 index 0000000..f8c75d7 --- /dev/null +++ b/ctx @@ -0,0 +1,15 @@ +#!/bin/bash + +if [[ $# -eq 0 ]] +then + echo "Usage:" + echo "$0 " + exit 1 +fi + +if [[ -z $2 ]] +then + watch -d -n .2 $0 $1 nw +fi + +ps -Leo lastcpu:1,tid,comm | grep "^$1 " | awk '{printf $3": ";system("cut -d\" \" -f3 /proc/"$2"/task/"$2"/schedstat 2>/dev/null")}' | sort -k 1 | column -t diff --git a/decrypt b/decrypt new file mode 100755 index 0000000..76e9ab2 --- /dev/null +++ b/decrypt @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +########################## +# File decryption script # +########################## +# Written by Starbeamrainbowlabs +# Uses openssl to symmetrically decrypt a file. + +read -s -p "Password: " password +echo + +openssl enc -in $1 -out ${1%.*} -d -aes256 -k "$password" + +echo diff --git a/dropbox_uploader.sh b/dropbox_uploader.sh new file mode 100755 index 0000000..90f8afd --- /dev/null +++ b/dropbox_uploader.sh @@ -0,0 +1,1362 @@ +#!/usr/bin/env bash +# +# Dropbox Uploader +# +# Copyright (C) 2010-2014 Andrea Fabrizi +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
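# (Aside on the decrypt script above: it passes $1 to openssl unquoted and
# derives the output name with ${1%.*}, i.e. the input path minus its final
# extension, so "secret.pdf.aes" decrypts to "secret.pdf". A quoted sketch of
# the same call, using a hypothetical helper name, might look like this:)
decrypt_file () {
	local in=$1
	read -r -s -p "Password: " password; echo
	openssl enc -d -aes256 -k "$password" -in "$in" -out "${in%.*}"
}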
+# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +# + +#Default configuration file +CONFIG_FILE=~/.dropbox_uploader + +#Default chunk size in Mb for the upload process +#It is recommended to increase this value only if you have enough free space on your /tmp partition +#Lower values may increase the number of http requests +CHUNK_SIZE=4 + +#Curl location +#If not set, curl will be searched into the $PATH +#CURL_BIN="/usr/bin/curl" + +#Default values +TMP_DIR="/tmp" +DEBUG=0 +QUIET=0 +SHOW_PROGRESSBAR=0 +SKIP_EXISTING_FILES=0 +ERROR_STATUS=0 + +#Don't edit these... +API_REQUEST_TOKEN_URL="https://api.dropbox.com/1/oauth/request_token" +API_USER_AUTH_URL="https://www.dropbox.com/1/oauth/authorize" +API_ACCESS_TOKEN_URL="https://api.dropbox.com/1/oauth/access_token" +API_CHUNKED_UPLOAD_URL="https://api-content.dropbox.com/1/chunked_upload" +API_CHUNKED_UPLOAD_COMMIT_URL="https://api-content.dropbox.com/1/commit_chunked_upload" +API_UPLOAD_URL="https://api-content.dropbox.com/1/files_put" +API_DOWNLOAD_URL="https://api-content.dropbox.com/1/files" +API_DELETE_URL="https://api.dropbox.com/1/fileops/delete" +API_MOVE_URL="https://api.dropbox.com/1/fileops/move" +API_COPY_URL="https://api.dropbox.com/1/fileops/copy" +API_METADATA_URL="https://api.dropbox.com/1/metadata" +API_INFO_URL="https://api.dropbox.com/1/account/info" +API_MKDIR_URL="https://api.dropbox.com/1/fileops/create_folder" +API_SHARES_URL="https://api.dropbox.com/1/shares" +API_SAVEURL_URL="https://api.dropbox.com/1/save_url/auto" +API_SAVEURL_JOB_URL="https://api.dropbox.com/1/save_url_job" +APP_CREATE_URL="https://www.dropbox.com/developers/apps" +RESPONSE_FILE="$TMP_DIR/du_resp_$RANDOM" +CHUNK_FILE="$TMP_DIR/du_chunk_$RANDOM" +TEMP_FILE="$TMP_DIR/du_tmp_$RANDOM" +BIN_DEPS="sed basename date grep stat dd mkdir" +VERSION="0.16" + +umask 077 + +#Check the shell +if [ -z "$BASH_VERSION" ]; then + echo -e "Error: this script requires the BASH shell!" + exit 1 +fi + +shopt -s nullglob #Bash allows filename patterns which match no files to expand to a null string, rather than themselves +shopt -s dotglob #Bash includes filenames beginning with a "." in the results of filename expansion + +#Look for optional config file parameter +while getopts ":qpskdf:" opt; do + case $opt in + + f) + CONFIG_FILE=$OPTARG + ;; + + d) + DEBUG=1 + ;; + + q) + QUIET=1 + ;; + + p) + SHOW_PROGRESSBAR=1 + ;; + + k) + CURL_ACCEPT_CERTIFICATES="-k" + ;; + + s) + SKIP_EXISTING_FILES=1 + ;; + + \?) + echo "Invalid option: -$OPTARG" >&2 + exit 1 + ;; + + :) + echo "Option -$OPTARG requires an argument." >&2 + exit 1 + ;; + + esac +done + +if [[ $DEBUG != 0 ]]; then + echo $VERSION + uname -a 2> /dev/null + cat /etc/issue 2> /dev/null + set -x + RESPONSE_FILE="$TMP_DIR/du_resp_debug" +fi + +if [[ $CURL_BIN == "" ]]; then + BIN_DEPS="$BIN_DEPS curl" + CURL_BIN="curl" +fi + +#Dependencies check +which $BIN_DEPS > /dev/null +if [[ $? != 0 ]]; then + for i in $BIN_DEPS; do + which $i > /dev/null || + NOT_FOUND="$i $NOT_FOUND" + done + echo -e "Error: Required program could not be found: $NOT_FOUND" + exit 1 +fi + +#Check if readlink is installed and supports the -m option +#It's not necessary, so no problem if it's not installed +which readlink > /dev/null +if [[ $? 
== 0 && $(readlink -m "//test" 2> /dev/null) == "/test" ]]; then + HAVE_READLINK=1 +else + HAVE_READLINK=0 +fi + +#Forcing to use the builtin printf, if it's present, because it's better +#otherwise the external printf program will be used +#Note that the external printf command can cause character encoding issues! +builtin printf "" 2> /dev/null +if [[ $? == 0 ]]; then + PRINTF="builtin printf" + PRINTF_OPT="-v o" +else + PRINTF=$(which printf) + if [[ $? != 0 ]]; then + echo -e "Error: Required program could not be found: printf" + fi + PRINTF_OPT="" +fi + +#Print the message based on $QUIET variable +function print +{ + if [[ $QUIET == 0 ]]; then + echo -ne "$1"; + fi +} + +#Returns unix timestamp +function utime +{ + echo $(date +%s) +} + +#Remove temporary files +function remove_temp_files +{ + if [[ $DEBUG == 0 ]]; then + rm -fr "$RESPONSE_FILE" + rm -fr "$CHUNK_FILE" + rm -fr "$TEMP_FILE" + fi +} + +#Returns the file size in bytes +function file_size +{ + #Generic GNU + SIZE=$(stat --format="%s" "$1" 2> /dev/null) + if [ $? -eq 0 ]; then + echo $SIZE + return + fi + + #Some embedded linux devices + SIZE=$(stat -c "%s" "$1" 2> /dev/null) + if [ $? -eq 0 ]; then + echo $SIZE + return + fi + + #BSD, OSX and other OSs + SIZE=$(stat -f "%z" "$1" 2> /dev/null) + if [ $? -eq 0 ]; then + echo $SIZE + return + fi + + echo "0" +} + + +#Usage +function usage +{ + echo -e "Dropbox Uploader v$VERSION" + echo -e "Andrea Fabrizi - andrea.fabrizi@gmail.com\n" + echo -e "Usage: $0 COMMAND [PARAMETERS]..." + echo -e "\nCommands:" + + echo -e "\t upload " + echo -e "\t download [LOCAL_FILE/DIR]" + echo -e "\t delete " + echo -e "\t move " + echo -e "\t copy " + echo -e "\t mkdir " + echo -e "\t list [REMOTE_DIR]" + echo -e "\t share " + echo -e "\t saveurl " + echo -e "\t info" + echo -e "\t unlink" + + echo -e "\nOptional parameters:" + echo -e "\t-f Load the configuration file from a specific file" + echo -e "\t-s Skip already existing files when download/upload. Default: Overwrite" + echo -e "\t-d Enable DEBUG mode" + echo -e "\t-q Quiet mode. Don't show messages" + echo -e "\t-p Show cURL progress meter" + echo -e "\t-k Doesn't check for SSL certificates (insecure)" + + echo -en "\nFor more info and examples, please see the README file.\n\n" + remove_temp_files + exit 1 +} + +#Check the curl exit code +function check_http_response +{ + CODE=$? + + #Checking curl exit code + case $CODE in + + #OK + 0) + + ;; + + #Proxy error + 5) + print "\nError: Couldn't resolve proxy. 
The given proxy host could not be resolved.\n" + + remove_temp_files + exit 1 + ;; + + #Missing CA certificates + 60|58) + print "\nError: cURL is not able to performs peer SSL certificate verification.\n" + print "Please, install the default ca-certificates bundle.\n" + print "To do this in a Debian/Ubuntu based system, try:\n" + print " sudo apt-get install ca-certificates\n\n" + print "If the problem persists, try to use the -k option (insecure).\n" + + remove_temp_files + exit 1 + ;; + + 6) + print "\nError: Couldn't resolve host.\n" + + remove_temp_files + exit 1 + ;; + + 7) + print "\nError: Couldn't connect to host.\n" + + remove_temp_files + exit 1 + ;; + + esac + + #Checking response file for generic errors + if grep -q "HTTP/1.1 400" "$RESPONSE_FILE"; then + ERROR_MSG=$(sed -n -e 's/{"error": "\([^"]*\)"}/\1/p' "$RESPONSE_FILE") + + case $ERROR_MSG in + *access?attempt?failed?because?this?app?is?not?configured?to?have*) + echo -e "\nError: The Permission type/Access level configured doesn't match the DropBox App settings!\nPlease run \"$0 unlink\" and try again." + exit 1 + ;; + esac + + fi + +} + +#Urlencode +function urlencode +{ + #The printf is necessary to correctly decode unicode sequences + local string=$($PRINTF "${1}") + local strlen=${#string} + local encoded="" + + for (( pos=0 ; pos 1 ]]; then + new_path="$new_path/" + fi + + echo "$new_path" + else + echo "$path" + fi +} + +#Check if it's a file or directory +#Returns FILE/DIR/ERR +function db_stat +{ + local FILE=$(normalize_path "$1") + + #Checking if it's a file or a directory + $CURL_BIN $CURL_ACCEPT_CERTIFICATES -s --show-error --globoff -i -o "$RESPONSE_FILE" "$API_METADATA_URL/$ACCESS_LEVEL/$(urlencode "$FILE")?oauth_consumer_key=$APPKEY&oauth_token=$OAUTH_ACCESS_TOKEN&oauth_signature_method=PLAINTEXT&oauth_signature=$APPSECRET%26$OAUTH_ACCESS_TOKEN_SECRET&oauth_timestamp=$(utime)&oauth_nonce=$RANDOM" 2> /dev/null + check_http_response + + #Even if the file/dir has been deleted from DropBox we receive a 200 OK response + #So we must check if the file exists or if it has been deleted + if grep -q "\"is_deleted\":" "$RESPONSE_FILE"; then + local IS_DELETED=$(sed -n 's/.*"is_deleted":.\([^,]*\).*/\1/p' "$RESPONSE_FILE") + else + local IS_DELETED="false" + fi + + #Exits... + grep -q "^HTTP/1.1 200 OK" "$RESPONSE_FILE" + if [[ $? == 0 && $IS_DELETED != "true" ]]; then + + local IS_DIR=$(sed -n 's/^\(.*\)\"contents":.\[.*/\1/p' "$RESPONSE_FILE") + + #It's a directory + if [[ $IS_DIR != "" ]]; then + echo "DIR" + #It's a file + else + echo "FILE" + fi + + #Doesn't exists + else + echo "ERR" + fi +} + +#Generic upload wrapper around db_upload_file and db_upload_dir functions +#$1 = Local source file/dir +#$2 = Remote destination file/dir +function db_upload +{ + local SRC=$(normalize_path "$1") + local DST=$(normalize_path "$2") + + #Checking if the file/dir exists + if [[ ! -e $SRC && ! -d $SRC ]]; then + print " > No such file or directory: $SRC\n" + ERROR_STATUS=1 + return + fi + + #Checking if the file/dir has read permissions + if [[ ! 
-r $SRC ]]; then + print " > Error reading file $SRC: permission denied\n" + ERROR_STATUS=1 + return + fi + + TYPE=$(db_stat "$DST") + + #If DST it's a file, do nothing, it's the default behaviour + if [[ $TYPE == "FILE" ]]; then + DST="$DST" + + #if DST doesn't exists and doesn't ends with a /, it will be the destination file name + elif [[ $TYPE == "ERR" && "${DST: -1}" != "/" ]]; then + DST="$DST" + + #if DST doesn't exists and ends with a /, it will be the destination folder + elif [[ $TYPE == "ERR" && "${DST: -1}" == "/" ]]; then + local filename=$(basename "$SRC") + DST="$DST/$filename" + + #If DST it'a directory, it will be the destination folder + elif [[ $TYPE == "DIR" ]]; then + local filename=$(basename "$SRC") + DST="$DST/$filename" + fi + + #It's a directory + if [[ -d $SRC ]]; then + db_upload_dir "$SRC" "$DST" + + #It's a file + elif [[ -e $SRC ]]; then + db_upload_file "$SRC" "$DST" + + #Unsupported object... + else + print " > Skipping not regular file \"$SRC\"\n" + fi +} + +#Generic upload wrapper around db_chunked_upload_file and db_simple_upload_file +#The final upload function will be choosen based on the file size +#$1 = Local source file +#$2 = Remote destination file +function db_upload_file +{ + local FILE_SRC=$(normalize_path "$1") + local FILE_DST=$(normalize_path "$2") + + shopt -s nocasematch + + #Checking not allowed file names + basefile_dst=$(basename "$FILE_DST") + if [[ $basefile_dst == "thumbs.db" || \ + $basefile_dst == "desktop.ini" || \ + $basefile_dst == ".ds_store" || \ + $basefile_dst == "icon\r" || \ + $basefile_dst == ".dropbox" || \ + $basefile_dst == ".dropbox.attr" \ + ]]; then + print " > Skipping not allowed file name \"$FILE_DST\"\n" + return + fi + + shopt -u nocasematch + + #Checking file size + FILE_SIZE=$(file_size "$FILE_SRC") + + #Checking if the file already exists + TYPE=$(db_stat "$FILE_DST") + if [[ $TYPE != "ERR" && $SKIP_EXISTING_FILES == 1 ]]; then + print " > Skipping already existing file \"$FILE_DST\"\n" + return + fi + + if [[ $FILE_SIZE -gt 157286000 ]]; then + #If the file is greater than 150Mb, the chunked_upload API will be used + db_chunked_upload_file "$FILE_SRC" "$FILE_DST" + else + db_simple_upload_file "$FILE_SRC" "$FILE_DST" + fi + +} + +#Simple file upload +#$1 = Local source file +#$2 = Remote destination file +function db_simple_upload_file +{ + local FILE_SRC=$(normalize_path "$1") + local FILE_DST=$(normalize_path "$2") + + if [[ $SHOW_PROGRESSBAR == 1 && $QUIET == 0 ]]; then + CURL_PARAMETERS="--progress-bar" + LINE_CR="\n" + else + CURL_PARAMETERS="-s" + LINE_CR="" + fi + + print " > Uploading \"$FILE_SRC\" to \"$FILE_DST\"... 
$LINE_CR" + $CURL_BIN $CURL_ACCEPT_CERTIFICATES $CURL_PARAMETERS -i --globoff -o "$RESPONSE_FILE" --upload-file "$FILE_SRC" "$API_UPLOAD_URL/$ACCESS_LEVEL/$(urlencode "$FILE_DST")?oauth_consumer_key=$APPKEY&oauth_token=$OAUTH_ACCESS_TOKEN&oauth_signature_method=PLAINTEXT&oauth_signature=$APPSECRET%26$OAUTH_ACCESS_TOKEN_SECRET&oauth_timestamp=$(utime)&oauth_nonce=$RANDOM" + check_http_response + + #Check + if grep -q "^HTTP/1.1 200 OK" "$RESPONSE_FILE"; then + print "DONE\n" + else + print "FAILED\n" + print "An error occurred requesting /upload\n" + ERROR_STATUS=1 + fi +} + +#Chunked file upload +#$1 = Local source file +#$2 = Remote destination file +function db_chunked_upload_file +{ + local FILE_SRC=$(normalize_path "$1") + local FILE_DST=$(normalize_path "$2") + + print " > Uploading \"$FILE_SRC\" to \"$FILE_DST\"" + + local FILE_SIZE=$(file_size "$FILE_SRC") + local OFFSET=0 + local UPLOAD_ID="" + local UPLOAD_ERROR=0 + local CHUNK_PARAMS="" + + #Uploading chunks... + while ([[ $OFFSET != $FILE_SIZE ]]); do + + let OFFSET_MB=$OFFSET/1024/1024 + + #Create the chunk + dd if="$FILE_SRC" of="$CHUNK_FILE" bs=1048576 skip=$OFFSET_MB count=$CHUNK_SIZE 2> /dev/null + + #Only for the first request these parameters are not included + if [[ $OFFSET != 0 ]]; then + CHUNK_PARAMS="upload_id=$UPLOAD_ID&offset=$OFFSET" + fi + + #Uploading the chunk... + $CURL_BIN $CURL_ACCEPT_CERTIFICATES -s --show-error --globoff -i -o "$RESPONSE_FILE" --upload-file "$CHUNK_FILE" "$API_CHUNKED_UPLOAD_URL?$CHUNK_PARAMS&oauth_consumer_key=$APPKEY&oauth_token=$OAUTH_ACCESS_TOKEN&oauth_signature_method=PLAINTEXT&oauth_signature=$APPSECRET%26$OAUTH_ACCESS_TOKEN_SECRET&oauth_timestamp=$(utime)&oauth_nonce=$RANDOM" 2> /dev/null + check_http_response + + #Check + if grep -q "^HTTP/1.1 200 OK" "$RESPONSE_FILE"; then + print "." + UPLOAD_ERROR=0 + UPLOAD_ID=$(sed -n 's/.*"upload_id": *"*\([^"]*\)"*.*/\1/p' "$RESPONSE_FILE") + OFFSET=$(sed -n 's/.*"offset": *\([^}]*\).*/\1/p' "$RESPONSE_FILE") + else + print "*" + let UPLOAD_ERROR=$UPLOAD_ERROR+1 + + #On error, the upload is retried for max 3 times + if [[ $UPLOAD_ERROR -gt 2 ]]; then + print " FAILED\n" + print "An error occurred requesting /chunked_upload\n" + ERROR_STATUS=1 + return + fi + fi + + done + + UPLOAD_ERROR=0 + + #Commit the upload + while (true); do + + $CURL_BIN $CURL_ACCEPT_CERTIFICATES -s --show-error --globoff -i -o "$RESPONSE_FILE" --data "upload_id=$UPLOAD_ID&oauth_consumer_key=$APPKEY&oauth_token=$OAUTH_ACCESS_TOKEN&oauth_signature_method=PLAINTEXT&oauth_signature=$APPSECRET%26$OAUTH_ACCESS_TOKEN_SECRET&oauth_timestamp=$(utime)&oauth_nonce=$RANDOM" "$API_CHUNKED_UPLOAD_COMMIT_URL/$ACCESS_LEVEL/$(urlencode "$FILE_DST")" 2> /dev/null + check_http_response + + #Check + if grep -q "^HTTP/1.1 200 OK" "$RESPONSE_FILE"; then + print "." 
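			# Reaching this branch means /commit_chunked_upload answered 200 OK:
			# every chunk (cut from the source with dd in $CHUNK_SIZE-MB pieces
			# and PUT to /chunked_upload, passing upload_id and offset on every
			# request after the first) has been accepted, and the assembled file
			# now exists at $FILE_DST.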
+ UPLOAD_ERROR=0 + break + else + print "*" + let UPLOAD_ERROR=$UPLOAD_ERROR+1 + + #On error, the commit is retried for max 3 times + if [[ $UPLOAD_ERROR -gt 2 ]]; then + print " FAILED\n" + print "An error occurred requesting /commit_chunked_upload\n" + ERROR_STATUS=1 + return + fi + fi + + done + + print " DONE\n" +} + +#Directory upload +#$1 = Local source dir +#$2 = Remote destination dir +function db_upload_dir +{ + local DIR_SRC=$(normalize_path "$1") + local DIR_DST=$(normalize_path "$2") + + #Creatig remote directory + db_mkdir "$DIR_DST" + + for file in "$DIR_SRC/"*; do + db_upload "$file" "$DIR_DST" + done +} + +#Generic download wrapper +#$1 = Remote source file/dir +#$2 = Local destination file/dir +function db_download +{ + local SRC=$(normalize_path "$1") + local DST=$(normalize_path "$2") + + TYPE=$(db_stat "$SRC") + + #It's a directory + if [[ $TYPE == "DIR" ]]; then + + #If the DST folder is not specified, I assume that is the current directory + if [[ $DST == "" ]]; then + DST="." + fi + + #Checking if the destination directory exists + if [[ ! -d $DST ]]; then + local basedir="" + else + local basedir=$(basename "$SRC") + fi + + local DEST_DIR=$(normalize_path "$DST/$basedir") + print " > Downloading \"$SRC\" to \"$DEST_DIR\"... \n" + print " > Creating local directory \"$DEST_DIR\"... " + mkdir -p "$DEST_DIR" + + #Check + if [[ $? == 0 ]]; then + print "DONE\n" + else + print "FAILED\n" + ERROR_STATUS=1 + return + fi + + #Extracting directory content [...] + #and replacing "}, {" with "}\n{" + #I don't like this piece of code... but seems to be the only way to do this with SED, writing a portable code... + local DIR_CONTENT=$(sed -n 's/.*: \[{\(.*\)/\1/p' "$RESPONSE_FILE" | sed 's/}, *{/}\ +{/g') + + #Extracting files and subfolders + TMP_DIR_CONTENT_FILE="${RESPONSE_FILE}_$RANDOM" + echo "$DIR_CONTENT" | sed -n 's/.*"path": *"\([^"]*\)",.*"is_dir": *\([^"]*\),.*/\1:\2/p' > $TMP_DIR_CONTENT_FILE + + #For each entry... + while read -r line; do + + local FILE=${line%:*} + local TYPE=${line#*:} + + #Removing unneeded / + FILE=${FILE##*/} + + if [[ $TYPE == "false" ]]; then + db_download_file "$SRC/$FILE" "$DEST_DIR/$FILE" + else + db_download "$SRC/$FILE" "$DEST_DIR" + fi + + done < $TMP_DIR_CONTENT_FILE + + rm -fr $TMP_DIR_CONTENT_FILE + + #It's a file + elif [[ $TYPE == "FILE" ]]; then + + #Checking DST + if [[ $DST == "" ]]; then + DST=$(basename "$SRC") + fi + + #If the destination is a directory, the file will be download into + if [[ -d $DST ]]; then + DST="$DST/$SRC" + fi + + db_download_file "$SRC" "$DST" + + #Doesn't exists + else + print " > No such file or directory: $SRC\n" + ERROR_STATUS=1 + return + fi +} + +#Simple file download +#$1 = Remote source file +#$2 = Local destination file +function db_download_file +{ + local FILE_SRC=$(normalize_path "$1") + local FILE_DST=$(normalize_path "$2") + + if [[ $SHOW_PROGRESSBAR == 1 && $QUIET == 0 ]]; then + CURL_PARAMETERS="--progress-bar" + LINE_CR="\n" + else + CURL_PARAMETERS="-s" + LINE_CR="" + fi + + #Checking if the file already exists + if [[ -e $FILE_DST && $SKIP_EXISTING_FILES == 1 ]]; then + print " > Skipping already existing file \"$FILE_DST\"\n" + return + fi + + #Creating the empty file, that for two reasons: + #1) In this way I can check if the destination file is writable or not + #2) Curl doesn't automatically creates files with 0 bytes size + dd if=/dev/zero of="$FILE_DST" count=0 2> /dev/null + if [[ $? 
!= 0 ]]; then + print " > Error writing file $FILE_DST: permission denied\n" + ERROR_STATUS=1 + return + fi + + print " > Downloading \"$FILE_SRC\" to \"$FILE_DST\"... $LINE_CR" + $CURL_BIN $CURL_ACCEPT_CERTIFICATES $CURL_PARAMETERS --globoff -D "$RESPONSE_FILE" -o "$FILE_DST" "$API_DOWNLOAD_URL/$ACCESS_LEVEL/$(urlencode "$FILE_SRC")?oauth_consumer_key=$APPKEY&oauth_token=$OAUTH_ACCESS_TOKEN&oauth_signature_method=PLAINTEXT&oauth_signature=$APPSECRET%26$OAUTH_ACCESS_TOKEN_SECRET&oauth_timestamp=$(utime)&oauth_nonce=$RANDOM" + check_http_response + + #Check + if grep -q "^HTTP/1.1 200 OK" "$RESPONSE_FILE"; then + print "DONE\n" + else + print "FAILED\n" + rm -fr "$FILE_DST" + ERROR_STATUS=1 + return + fi +} + +#Saveurl +#$1 = URL +#$2 = Remote file destination +function db_saveurl +{ + local URL="$1" + local FILE_DST=$(normalize_path "$2") + local FILE_NAME=$(basename "$URL") + + print " > Downloading \"$URL\" to \"$FILE_DST\"..." + $CURL_BIN $CURL_ACCEPT_CERTIFICATES -s --show-error --globoff -i -o "$RESPONSE_FILE" --data "url=$(urlencode "$URL")&oauth_consumer_key=$APPKEY&oauth_token=$OAUTH_ACCESS_TOKEN&oauth_signature_method=PLAINTEXT&oauth_signature=$APPSECRET%26$OAUTH_ACCESS_TOKEN_SECRET&oauth_timestamp=$(utime)&oauth_nonce=$RANDOM" "$API_SAVEURL_URL/$FILE_DST/$FILE_NAME" 2> /dev/null + check_http_response + + JOB_ID=$(sed -n 's/.*"job": *"*\([^"]*\)"*.*/\1/p' "$RESPONSE_FILE") + if [[ $JOB_ID == "" ]]; then + print " > Error getting the job id\n" + return + fi + + #Checking the status + while (true); do + + $CURL_BIN $CURL_ACCEPT_CERTIFICATES -s --show-error --globoff -i -o "$RESPONSE_FILE" --data "oauth_consumer_key=$APPKEY&oauth_token=$OAUTH_ACCESS_TOKEN&oauth_signature_method=PLAINTEXT&oauth_signature=$APPSECRET%26$OAUTH_ACCESS_TOKEN_SECRET&oauth_timestamp=$(utime)&oauth_nonce=$RANDOM" "$API_SAVEURL_JOB_URL/$JOB_ID" 2> /dev/null + check_http_response + + STATUS=$(sed -n 's/.*"status": *"*\([^"]*\)"*.*/\1/p' "$RESPONSE_FILE") + case $STATUS in + + PENDING) + print "." + ;; + + DOWNLOADING) + print "+" + ;; + + COMPLETE) + print " DONE\n" + break + ;; + + FAILED) + print " ERROR\n" + MESSAGE=$(sed -n 's/.*"error": *"*\([^"]*\)"*.*/\1/p' "$RESPONSE_FILE") + print " > Error: $MESSAGE\n" + break + ;; + + esac + + sleep 2 + + done +} + +#Prints account info +function db_account_info +{ + print "Dropbox Uploader v$VERSION\n\n" + print " > Getting info... 
" + $CURL_BIN $CURL_ACCEPT_CERTIFICATES -s --show-error --globoff -i -o "$RESPONSE_FILE" --data "oauth_consumer_key=$APPKEY&oauth_token=$OAUTH_ACCESS_TOKEN&oauth_signature_method=PLAINTEXT&oauth_signature=$APPSECRET%26$OAUTH_ACCESS_TOKEN_SECRET&oauth_timestamp=$(utime)&oauth_nonce=$RANDOM" "$API_INFO_URL" 2> /dev/null + check_http_response + + #Check + if grep -q "^HTTP/1.1 200 OK" "$RESPONSE_FILE"; then + + name=$(sed -n 's/.*"display_name": "\([^"]*\).*/\1/p' "$RESPONSE_FILE") + echo -e "\n\nName:\t$name" + + uid=$(sed -n 's/.*"uid": \([0-9]*\).*/\1/p' "$RESPONSE_FILE") + echo -e "UID:\t$uid" + + email=$(sed -n 's/.*"email": "\([^"]*\).*/\1/p' "$RESPONSE_FILE") + echo -e "Email:\t$email" + + quota=$(sed -n 's/.*"quota": \([0-9]*\).*/\1/p' "$RESPONSE_FILE") + let quota_mb=$quota/1024/1024 + echo -e "Quota:\t$quota_mb Mb" + + used=$(sed -n 's/.*"normal": \([0-9]*\).*/\1/p' "$RESPONSE_FILE") + let used_mb=$used/1024/1024 + echo -e "Used:\t$used_mb Mb" + + let free_mb=($quota-$used)/1024/1024 + echo -e "Free:\t$free_mb Mb" + + echo "" + + else + print "FAILED\n" + ERROR_STATUS=1 + fi +} + +#Account unlink +function db_unlink +{ + echo -ne "Are you sure you want unlink this script from your Dropbox account? [y/n]" + read answer + if [[ $answer == "y" ]]; then + rm -fr "$CONFIG_FILE" + echo -ne "DONE\n" + fi +} + +#Delete a remote file +#$1 = Remote file to delete +function db_delete +{ + local FILE_DST=$(normalize_path "$1") + + print " > Deleting \"$FILE_DST\"... " + $CURL_BIN $CURL_ACCEPT_CERTIFICATES -s --show-error --globoff -i -o "$RESPONSE_FILE" --data "oauth_consumer_key=$APPKEY&oauth_token=$OAUTH_ACCESS_TOKEN&oauth_signature_method=PLAINTEXT&oauth_signature=$APPSECRET%26$OAUTH_ACCESS_TOKEN_SECRET&oauth_timestamp=$(utime)&oauth_nonce=$RANDOM&root=$ACCESS_LEVEL&path=$(urlencode "$FILE_DST")" "$API_DELETE_URL" 2> /dev/null + check_http_response + + #Check + if grep -q "^HTTP/1.1 200 OK" "$RESPONSE_FILE"; then + print "DONE\n" + else + print "FAILED\n" + ERROR_STATUS=1 + fi +} + +#Move/Rename a remote file +#$1 = Remote file to rename or move +#$2 = New file name or location +function db_move +{ + local FILE_SRC=$(normalize_path "$1") + local FILE_DST=$(normalize_path "$2") + + TYPE=$(db_stat "$FILE_DST") + + #If the destination it's a directory, the source will be moved into it + if [[ $TYPE == "DIR" ]]; then + local filename=$(basename "$FILE_SRC") + FILE_DST=$(normalize_path "$FILE_DST/$filename") + fi + + print " > Moving \"$FILE_SRC\" to \"$FILE_DST\" ... 
" + $CURL_BIN $CURL_ACCEPT_CERTIFICATES -s --show-error --globoff -i -o "$RESPONSE_FILE" --data "oauth_consumer_key=$APPKEY&oauth_token=$OAUTH_ACCESS_TOKEN&oauth_signature_method=PLAINTEXT&oauth_signature=$APPSECRET%26$OAUTH_ACCESS_TOKEN_SECRET&oauth_timestamp=$(utime)&oauth_nonce=$RANDOM&root=$ACCESS_LEVEL&from_path=$(urlencode "$FILE_SRC")&to_path=$(urlencode "$FILE_DST")" "$API_MOVE_URL" 2> /dev/null + check_http_response + + #Check + if grep -q "^HTTP/1.1 200 OK" "$RESPONSE_FILE"; then + print "DONE\n" + else + print "FAILED\n" + ERROR_STATUS=1 + fi +} + +#Copy a remote file to a remote location +#$1 = Remote file to rename or move +#$2 = New file name or location +function db_copy +{ + local FILE_SRC=$(normalize_path "$1") + local FILE_DST=$(normalize_path "$2") + + TYPE=$(db_stat "$FILE_DST") + + #If the destination it's a directory, the source will be copied into it + if [[ $TYPE == "DIR" ]]; then + local filename=$(basename "$FILE_SRC") + FILE_DST=$(normalize_path "$FILE_DST/$filename") + fi + + print " > Copying \"$FILE_SRC\" to \"$FILE_DST\" ... " + $CURL_BIN $CURL_ACCEPT_CERTIFICATES -s --show-error --globoff -i -o "$RESPONSE_FILE" --data "oauth_consumer_key=$APPKEY&oauth_token=$OAUTH_ACCESS_TOKEN&oauth_signature_method=PLAINTEXT&oauth_signature=$APPSECRET%26$OAUTH_ACCESS_TOKEN_SECRET&oauth_timestamp=$(utime)&oauth_nonce=$RANDOM&root=$ACCESS_LEVEL&from_path=$(urlencode "$FILE_SRC")&to_path=$(urlencode "$FILE_DST")" "$API_COPY_URL" 2> /dev/null + check_http_response + + #Check + if grep -q "^HTTP/1.1 200 OK" "$RESPONSE_FILE"; then + print "DONE\n" + else + print "FAILED\n" + ERROR_STATUS=1 + fi +} + +#Create a new directory +#$1 = Remote directory to create +function db_mkdir +{ + local DIR_DST=$(normalize_path "$1") + + print " > Creating Directory \"$DIR_DST\"... " + $CURL_BIN $CURL_ACCEPT_CERTIFICATES -s --show-error --globoff -i -o "$RESPONSE_FILE" --data "oauth_consumer_key=$APPKEY&oauth_token=$OAUTH_ACCESS_TOKEN&oauth_signature_method=PLAINTEXT&oauth_signature=$APPSECRET%26$OAUTH_ACCESS_TOKEN_SECRET&oauth_timestamp=$(utime)&oauth_nonce=$RANDOM&root=$ACCESS_LEVEL&path=$(urlencode "$DIR_DST")" "$API_MKDIR_URL" 2> /dev/null + check_http_response + + #Check + if grep -q "^HTTP/1.1 200 OK" "$RESPONSE_FILE"; then + print "DONE\n" + elif grep -q "^HTTP/1.1 403 Forbidden" "$RESPONSE_FILE"; then + print "ALREADY EXISTS\n" + else + print "FAILED\n" + ERROR_STATUS=1 + fi +} + +#List remote directory +#$1 = Remote directory +function db_list +{ + local DIR_DST=$(normalize_path "$1") + + print " > Listing \"$DIR_DST\"... " + $CURL_BIN $CURL_ACCEPT_CERTIFICATES -s --show-error --globoff -i -o "$RESPONSE_FILE" "$API_METADATA_URL/$ACCESS_LEVEL/$(urlencode "$DIR_DST")?oauth_consumer_key=$APPKEY&oauth_token=$OAUTH_ACCESS_TOKEN&oauth_signature_method=PLAINTEXT&oauth_signature=$APPSECRET%26$OAUTH_ACCESS_TOKEN_SECRET&oauth_timestamp=$(utime)&oauth_nonce=$RANDOM" 2> /dev/null + check_http_response + + #Check + if grep -q "^HTTP/1.1 200 OK" "$RESPONSE_FILE"; then + + local IS_DIR=$(sed -n 's/^\(.*\)\"contents":.\[.*/\1/p' "$RESPONSE_FILE") + + #It's a directory + if [[ $IS_DIR != "" ]]; then + + print "DONE\n" + + #Extracting directory content [...] + #and replacing "}, {" with "}\n{" + #I don't like this piece of code... but seems to be the only way to do this with SED, writing a portable code... 
+ local DIR_CONTENT=$(sed -n 's/.*: \[{\(.*\)/\1/p' "$RESPONSE_FILE" | sed 's/}, *{/}\ +{/g') + + #Converting escaped quotes to unicode format + echo "$DIR_CONTENT" | sed 's/\\"/\\u0022/' > "$TEMP_FILE" + + #Extracting files and subfolders + rm -fr "$RESPONSE_FILE" + while read -r line; do + + local FILE=$(echo "$line" | sed -n 's/.*"path": *"\([^"]*\)".*/\1/p') + local IS_DIR=$(echo "$line" | sed -n 's/.*"is_dir": *\([^,]*\).*/\1/p') + local SIZE=$(echo "$line" | sed -n 's/.*"bytes": *\([0-9]*\).*/\1/p') + + echo -e "$FILE:$IS_DIR;$SIZE" >> "$RESPONSE_FILE" + + done < "$TEMP_FILE" + + #Looking for the biggest file size + #to calculate the padding to use + local padding=0 + while read -r line; do + local FILE=${line%:*} + local META=${line##*:} + local SIZE=${META#*;} + + if [[ ${#SIZE} -gt $padding ]]; then + padding=${#SIZE} + fi + done < "$RESPONSE_FILE" + + #For each entry, printing directories... + while read -r line; do + + local FILE=${line%:*} + local META=${line##*:} + local TYPE=${META%;*} + local SIZE=${META#*;} + + #Removing unneeded / + FILE=${FILE##*/} + + if [[ $TYPE == "true" ]]; then + FILE=$(echo -e "$FILE") + $PRINTF " [D] %-${padding}s %s\n" "$SIZE" "$FILE" + fi + + done < "$RESPONSE_FILE" + + #For each entry, printing files... + while read -r line; do + + local FILE=${line%:*} + local META=${line##*:} + local TYPE=${META%;*} + local SIZE=${META#*;} + + #Removing unneeded / + FILE=${FILE##*/} + + if [[ $TYPE == "false" ]]; then + FILE=$(echo -e "$FILE") + $PRINTF " [F] %-${padding}s %s\n" "$SIZE" "$FILE" + fi + + done < "$RESPONSE_FILE" + + #It's a file + else + print "FAILED: $DIR_DST is not a directory!\n" + ERROR_STATUS=1 + fi + + else + print "FAILED\n" + ERROR_STATUS=1 + fi +} + +#Share remote file +#$1 = Remote file +function db_share +{ + local FILE_DST=$(normalize_path "$1") + + $CURL_BIN $CURL_ACCEPT_CERTIFICATES -s --show-error --globoff -i -o "$RESPONSE_FILE" "$API_SHARES_URL/$ACCESS_LEVEL/$(urlencode "$FILE_DST")?oauth_consumer_key=$APPKEY&oauth_token=$OAUTH_ACCESS_TOKEN&oauth_signature_method=PLAINTEXT&oauth_signature=$APPSECRET%26$OAUTH_ACCESS_TOKEN_SECRET&oauth_timestamp=$(utime)&oauth_nonce=$RANDOM&short_url=true" 2> /dev/null + check_http_response + + #Check + if grep -q "^HTTP/1.1 200 OK" "$RESPONSE_FILE"; then + print " > Share link: " + SHARE_LINK=$(sed -n 's/.*"url": "\([^"]*\).*/\1/p' "$RESPONSE_FILE") + echo "$SHARE_LINK" + else + print "FAILED\n" + ERROR_STATUS=1 + fi +} + +################ +#### SETUP #### +################ + +#CHECKING FOR AUTH FILE +if [[ -e $CONFIG_FILE ]]; then + + #Loading data... and change old format config if necesary. + source "$CONFIG_FILE" 2>/dev/null || { + sed -i'' 's/:/=/' "$CONFIG_FILE" && source "$CONFIG_FILE" 2>/dev/null + } + + #Checking the loaded data + if [[ $APPKEY == "" || $APPSECRET == "" || $OAUTH_ACCESS_TOKEN_SECRET == "" || $OAUTH_ACCESS_TOKEN == "" ]]; then + echo -ne "Error loading data from $CONFIG_FILE...\n" + echo -ne "It is recommended to run $0 unlink\n" + remove_temp_files + exit 1 + fi + + #Back compatibility with previous Dropbox Uploader versions + if [[ $ACCESS_LEVEL == "" ]]; then + ACCESS_LEVEL="dropbox" + fi + +#NEW SETUP... 
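# (For reference: after a successful first-run setup below, $CONFIG_FILE is a
#  plain KEY=value file that gets source'd on later runs, along the lines of:
#    APPKEY=...
#    APPSECRET=...
#    ACCESS_LEVEL=sandbox   # or "dropbox"
#    OAUTH_ACCESS_TOKEN=...
#    OAUTH_ACCESS_TOKEN_SECRET=...
#  The values themselves come from the OAuth handshake and are account-specific.)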
+else + + echo -ne "\n This is the first time you run this script.\n\n" + echo -ne " 1) Open the following URL in your Browser, and log in using your account: $APP_CREATE_URL\n" + echo -ne " 2) Click on \"Create App\", then select \"Dropbox API app\"\n" + echo -ne " 3) Now go on with the configuration, choosing the app permissions and access restrictions to your DropBox folder\n" + echo -ne " 4) Enter the \"App Name\" that you prefer (e.g. MyUploader$RANDOM$RANDOM$RANDOM)\n\n" + + echo -ne " Now, click on the \"Create App\" button.\n\n" + + echo -ne " When your new App is successfully created, please type the\n" + echo -ne " App Key, App Secret and the Permission type shown in the confirmation page:\n\n" + + #Getting the app key and secret from the user + while (true); do + + echo -ne " # App key: " + read APPKEY + + echo -ne " # App secret: " + read APPSECRET + + echo -ne "\nPermission type:\n App folder [a]: If you choose that the app only needs access to files it creates\n Full Dropbox [f]: If you choose that the app needs access to files already on Dropbox\n\n # Permission type [a/f]: " + read ACCESS_LEVEL + + if [[ $ACCESS_LEVEL == "a" ]]; then + ACCESS_LEVEL="sandbox" + ACCESS_MSG="App Folder" + else + ACCESS_LEVEL="dropbox" + ACCESS_MSG="Full Dropbox" + fi + + echo -ne "\n > App key is $APPKEY, App secret is $APPSECRET and Access level is $ACCESS_MSG. Looks ok? [y/n]: " + read answer + if [[ $answer == "y" ]]; then + break; + fi + + done + + #TOKEN REQUESTS + echo -ne "\n > Token request... " + $CURL_BIN $CURL_ACCEPT_CERTIFICATES -s --show-error --globoff -i -o "$RESPONSE_FILE" --data "oauth_consumer_key=$APPKEY&oauth_signature_method=PLAINTEXT&oauth_signature=$APPSECRET%26&oauth_timestamp=$(utime)&oauth_nonce=$RANDOM" "$API_REQUEST_TOKEN_URL" 2> /dev/null + check_http_response + OAUTH_TOKEN_SECRET=$(sed -n 's/oauth_token_secret=\([a-z A-Z 0-9]*\).*/\1/p' "$RESPONSE_FILE") + OAUTH_TOKEN=$(sed -n 's/.*oauth_token=\([a-z A-Z 0-9]*\)/\1/p' "$RESPONSE_FILE") + + if [[ $OAUTH_TOKEN != "" && $OAUTH_TOKEN_SECRET != "" ]]; then + echo -ne "OK\n" + else + echo -ne " FAILED\n\n Please, check your App key and secret...\n\n" + remove_temp_files + exit 1 + fi + + while (true); do + + #USER AUTH + echo -ne "\n Please open the following URL in your browser, and allow Dropbox Uploader\n" + echo -ne " to access your DropBox folder:\n\n --> ${API_USER_AUTH_URL}?oauth_token=$OAUTH_TOKEN\n" + echo -ne "\nPress enter when done...\n" + read + + #API_ACCESS_TOKEN_URL + echo -ne " > Access Token request... " + $CURL_BIN $CURL_ACCEPT_CERTIFICATES -s --show-error --globoff -i -o "$RESPONSE_FILE" --data "oauth_consumer_key=$APPKEY&oauth_token=$OAUTH_TOKEN&oauth_signature_method=PLAINTEXT&oauth_signature=$APPSECRET%26$OAUTH_TOKEN_SECRET&oauth_timestamp=$(utime)&oauth_nonce=$RANDOM" "$API_ACCESS_TOKEN_URL" 2> /dev/null + check_http_response + OAUTH_ACCESS_TOKEN_SECRET=$(sed -n 's/oauth_token_secret=\([a-z A-Z 0-9]*\)&.*/\1/p' "$RESPONSE_FILE") + OAUTH_ACCESS_TOKEN=$(sed -n 's/.*oauth_token=\([a-z A-Z 0-9]*\)&.*/\1/p' "$RESPONSE_FILE") + OAUTH_ACCESS_UID=$(sed -n 's/.*uid=\([0-9]*\)/\1/p' "$RESPONSE_FILE") + + if [[ $OAUTH_ACCESS_TOKEN != "" && $OAUTH_ACCESS_TOKEN_SECRET != "" && $OAUTH_ACCESS_UID != "" ]]; then + echo -ne "OK\n" + + #Saving data in new format, compatible with source command. 
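		# At this point the three-legged OAuth 1.0 handshake is complete:
		# /oauth/request_token issued a temporary token and secret, the user
		# authorised that token in the browser, and /oauth/access_token traded
		# it for the long-lived OAUTH_ACCESS_TOKEN / OAUTH_ACCESS_TOKEN_SECRET
		# written out below. Every call signs with
		# oauth_signature_method=PLAINTEXT, i.e. "app_secret&token_secret" sent
		# directly as the signature (the %26 in the request bodies).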
+ echo "APPKEY=$APPKEY" > "$CONFIG_FILE" + echo "APPSECRET=$APPSECRET" >> "$CONFIG_FILE" + echo "ACCESS_LEVEL=$ACCESS_LEVEL" >> "$CONFIG_FILE" + echo "OAUTH_ACCESS_TOKEN=$OAUTH_ACCESS_TOKEN" >> "$CONFIG_FILE" + echo "OAUTH_ACCESS_TOKEN_SECRET=$OAUTH_ACCESS_TOKEN_SECRET" >> "$CONFIG_FILE" + + echo -ne "\n Setup completed!\n" + break + else + print " FAILED\n" + ERROR_STATUS=1 + fi + + done; + + remove_temp_files + exit $ERROR_STATUS +fi + +################ +#### START #### +################ + +COMMAND=${@:$OPTIND:1} +ARG1=${@:$OPTIND+1:1} +ARG2=${@:$OPTIND+2:1} + +let argnum=$#-$OPTIND + +#CHECKING PARAMS VALUES +case $COMMAND in + + upload) + + if [[ $argnum -lt 2 ]]; then + usage + fi + + FILE_DST=${@:$#:1} + + for (( i=$OPTIND+1; i<$#; i++ )); do + FILE_SRC=${@:$i:1} + db_upload "$FILE_SRC" "/$FILE_DST" + done + + ;; + + download) + + if [[ $argnum -lt 1 ]]; then + usage + fi + + FILE_SRC=$ARG1 + FILE_DST=$ARG2 + + db_download "/$FILE_SRC" "$FILE_DST" + + ;; + + saveurl) + + if [[ $argnum -lt 1 ]]; then + usage + fi + + URL=$ARG1 + FILE_DST=$ARG2 + + db_saveurl "$URL" "/$FILE_DST" + + ;; + + share) + + if [[ $argnum -lt 1 ]]; then + usage + fi + + FILE_DST=$ARG1 + + db_share "/$FILE_DST" + + ;; + + info) + + db_account_info + + ;; + + delete|remove) + + if [[ $argnum -lt 1 ]]; then + usage + fi + + FILE_DST=$ARG1 + + db_delete "/$FILE_DST" + + ;; + + move|rename) + + if [[ $argnum -lt 2 ]]; then + usage + fi + + FILE_SRC=$ARG1 + FILE_DST=$ARG2 + + db_move "/$FILE_SRC" "/$FILE_DST" + + ;; + + copy) + + if [[ $argnum -lt 2 ]]; then + usage + fi + + FILE_SRC=$ARG1 + FILE_DST=$ARG2 + + db_copy "/$FILE_SRC" "/$FILE_DST" + + ;; + + mkdir) + + if [[ $argnum -lt 1 ]]; then + usage + fi + + DIR_DST=$ARG1 + + db_mkdir "/$DIR_DST" + + ;; + + list) + + DIR_DST=$ARG1 + + #Checking DIR_DST + if [[ $DIR_DST == "" ]]; then + DIR_DST="/" + fi + + db_list "/$DIR_DST" + + ;; + + unlink) + + db_unlink + + ;; + + *) + + if [[ $COMMAND != "" ]]; then + print "Error: Unknown command: $COMMAND\n\n" + ERROR_STATUS=1 + fi + usage + + ;; + +esac + +remove_temp_files +exit $ERROR_STATUS diff --git a/dropshell b/dropshell new file mode 100755 index 0000000..610768a --- /dev/null +++ b/dropshell @@ -0,0 +1,423 @@ +#!/usr/bin/env bash +# +# DropShell +# +# Copyright (C) 2013-2014 Andrea Fabrizi +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +# + +#Looking for dropbox uploader +if [ -f "./dropbox_uploader.sh" ]; then + DU="./dropbox_uploader.sh" +else + DU=$(which dropbox_uploader.sh) + if [ $? -ne 0 ]; then + echo "Dropbox Uploader not found!" 
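		# (dropbox_uploader.sh is the backend this interactive shell wraps.
		#  Going by its usage() text and command dispatch, direct invocations
		#  look like:
		#    ./dropbox_uploader.sh upload local.txt /backups/local.txt
		#    ./dropbox_uploader.sh download /backups/local.txt
		#    ./dropbox_uploader.sh list /
		#    ./dropbox_uploader.sh share /backups/local.txt
		#  where the example paths are placeholders.)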
+ exit 1 + fi +fi + +#For MacOSX, install coreutils (which includes greadlink) +# $brew install coreutils +if [ "${OSTYPE:0:6}" == "darwin" ]; then + READLINK="greadlink" +else + READLINK="readlink" +fi + +SHELL_HISTORY=~/.dropshell_history +DU_OPT="-q" +BIN_DEPS="id $READLINK ls basename ls pwd cut" +VERSION="0.2" + +umask 077 + +#Dependencies check +for i in $BIN_DEPS; do + which $i > /dev/null + if [ $? -ne 0 ]; then + echo -e "Error: Required program could not be found: $i" + exit 1 + fi +done + +#Check DropBox Uploader +if [ ! -f "$DU" ]; then + echo "Dropbox Uploader not found: $DU" + echo "Please change the 'DU' variable according to the Dropbox Uploader location." + exit 1 +else + DU=$($READLINK -m "$DU") +fi + +#Returns the current user +function get_current_user +{ + id -nu +} + +function normalize_path +{ + $READLINK -m "$1" +} + +################ +#### START #### +################ + +echo -e "DropShell v$VERSION" +echo -e "The Interactive Dropbox SHELL" +echo -e "Andrea Fabrizi - andrea.fabrizi@gmail.com\n" +echo -e "Type help for the list of the available commands.\n" + +history -r "$SHELL_HISTORY" +username=$(get_current_user) + +#Initial Working Directory +CWD="/" + +function sh_ls +{ + local arg1=$1 + + #Listing current dir + if [ -z "$arg1" ]; then + $DU $DU_OPT list "$CWD" + + #Listing $arg1 + else + + #Relative or absolute path? + if [ ${arg1:0:1} == "/" ]; then + $DU $DU_OPT list "$(normalize_path "$arg1")" + else + $DU $DU_OPT list "$(normalize_path "$CWD/$arg1")" + fi + + #Checking for errors + if [ $? -ne 0 ]; then + echo -e "ls: cannot access '$arg1': No such file or directory" + fi + fi +} + +function sh_cd +{ + local arg1=$1 + + OLD_CWD=$CWD + + if [ -z "$arg1" ]; then + CWD="/" + elif [ ${arg1:0:1} == "/" ]; then + CWD=$arg1 + else + CWD=$(normalize_path "$OLD_CWD/$arg1/") + fi + + $DU $DU_OPT list "$CWD" > /dev/null + + #Checking for errors + if [ $? -ne 0 ]; then + echo -e "cd: $arg1: No such file or directory" + CWD=$OLD_CWD + fi +} + +function sh_get +{ + local arg1=$1 + local arg2=$2 + + if [ ! -z "$arg1" ]; then + + #Relative or absolute path? + if [ ${arg1:0:1} == "/" ]; then + $DU $DU_OPT download "$(normalize_path "$arg1")" "$arg2" + else + $DU $DU_OPT download "$(normalize_path "$CWD/$arg1")" "$arg2" + fi + + #Checking for errors + if [ $? -ne 0 ]; then + echo -e "get: Download error" + fi + + #args error + else + echo -e "get: missing operand" + echo -e "syntax: get [LOCAL_FILE/DIR]" + fi +} + +function sh_put +{ + local arg1=$1 + local arg2=$2 + + if [ ! -z "$arg1" ]; then + + #Relative or absolute path? + if [ "${arg2:0:1}" == "/" ]; then + $DU $DU_OPT upload "$arg1" "$(normalize_path "$arg2")" + else + $DU $DU_OPT upload "$arg1" "$(normalize_path "$CWD/$arg2")" + fi + + #Checking for errors + if [ $? -ne 0 ]; then + echo -e "put: Upload error" + fi + + #args error + else + echo -e "put: missing operand" + echo -e "syntax: put " + fi +} + +function sh_rm +{ + local arg1=$1 + + if [ ! -z "$arg1" ]; then + + #Relative or absolute path? + if [ ${arg1:0:1} == "/" ]; then + $DU $DU_OPT remove "$(normalize_path "$arg1")" + else + $DU $DU_OPT remove "$(normalize_path "$CWD/$arg1")" + fi + + #Checking for errors + if [ $? -ne 0 ]; then + echo -e "rm: cannot remove '$arg1'" + fi + + #args error + else + echo -e "rm: missing operand" + echo -e "syntax: rm " + fi +} + +function sh_mkdir +{ + local arg1=$1 + + if [ ! -z "$arg1" ]; then + + #Relative or absolute path? 
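		# As in the other sh_* helpers: a leading "/" is treated as an absolute
		# Dropbox path, anything else is resolved against the current remote
		# directory $CWD via normalize_path (readlink -m), so with CWD=/photos,
		# "mkdir 2015" creates /photos/2015. Note that sh_put quotes this
		# substring test ("${arg2:0:1}"); quoting it here as well would be
		# slightly more robust.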
+ if [ ${arg1:0:1} == "/" ]; then + $DU $DU_OPT mkdir "$(normalize_path "$arg1")" + else + $DU $DU_OPT mkdir "$(normalize_path "$CWD/$arg1")" + fi + + #Checking for errors + if [ $? -ne 0 ]; then + echo -e "mkdir: cannot create directory '$arg1'" + fi + + #args error + else + echo -e "mkdir: missing operand" + echo -e "syntax: mkdir " + fi +} + +function sh_mv +{ + local arg1=$1 + local arg2=$2 + + if [ ! -z "$arg1" -a ! -z "$arg2" ]; then + + #SRC relative or absolute path? + if [ ${arg1:0:1} == "/" ]; then + SRC="$arg1" + else + SRC="$CWD/$arg1" + fi + + #DST relative or absolute path? + if [ ${arg2:0:1} == "/" ]; then + DST="$arg2" + else + DST="$CWD/$arg2" + fi + + $DU $DU_OPT move "$(normalize_path "$SRC")" "$(normalize_path "$DST")" + + #Checking for errors + if [ $? -ne 0 ]; then + echo -e "mv: cannot move '$arg1' to '$arg2'" + fi + + #args error + else + echo -e "mv: missing operand" + echo -e "syntax: mv " + fi +} + +function sh_cp +{ + local arg1=$1 + local arg2=$2 + + if [ ! -z "$arg1" -a ! -z "$arg2" ]; then + + #SRC relative or absolute path? + if [ ${arg1:0:1} == "/" ]; then + SRC="$arg1" + else + SRC="$CWD/$arg1" + fi + + #DST relative or absolute path? + if [ ${arg2:0:1} == "/" ]; then + DST="$arg2" + else + DST="$CWD/$arg2" + fi + + $DU $DU_OPT copy "$(normalize_path "$SRC")" "$(normalize_path "$DST")" + + #Checking for errors + if [ $? -ne 0 ]; then + echo -e "cp: cannot copy '$arg1' to '$arg2'" + fi + + #args error + else + echo -e "cp: missing operand" + echo -e "syntax: cp " + fi +} + +function sh_free +{ + $DU $DU_OPT info | grep "Free:" | cut -f 2 +} + +function sh_cat +{ + local arg1=$1 + + if [ ! -z "$arg1" ]; then + + tmp_cat="/tmp/sh_cat_$RANDOM" + sh_get "$arg1" "$tmp_cat" + cat "$tmp_cat" + rm -fr "$tmp_cat" + + #args error + else + echo -e "cat: missing operand" + echo -e "syntax: cat " + fi +} + +while (true); do + + #Reading command from shell + read -e -p "$username@Dropbox:$CWD$ " input + + #Tokenizing command + eval tokens=($input) + cmd=${tokens[0]} + arg1=${tokens[1]} + arg2=${tokens[2]} + + #Saving command in the history file + history -s "$input" + history -w "$SHELL_HISTORY" + + case $cmd in + + ls) + sh_ls "$arg1" + ;; + + cd) + sh_cd "$arg1" + ;; + + pwd) + echo $CWD + ;; + + get) + sh_get "$arg1" "$arg2" + ;; + + put) + sh_put "$arg1" "$arg2" + ;; + + rm) + sh_rm "$arg1" + ;; + + mkdir) + sh_mkdir "$arg1" + ;; + + mv) + sh_mv "$arg1" "$arg2" + ;; + + cp) + sh_cp "$arg1" "$arg2" + ;; + + cat) + sh_cat "$arg1" + ;; + + free) + sh_free + ;; + + lls) + ls -l + ;; + + lpwd) + pwd + ;; + + lcd) + cd "$arg1" + ;; + + help) + echo -e "Supported commands: ls, cd, pwd, get, put, cat, rm, mkdir, mv, cp, free, lls, lpwd, lcd, help, exit\n" + ;; + + quit|exit) + exit 0 + ;; + + *) + if [ ! -z "$cmd" ]; then + echo -ne "Unknown command: $cmd\n" + fi + ;; + esac +done + diff --git a/encrypt b/encrypt new file mode 100755 index 0000000..4cc018b --- /dev/null +++ b/encrypt @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +########################## +# File encryption script # +########################## +# Written by Starbeamrainbowlabs +# Uses openssl to symmetrically encrypt a file. + +read -s -p "Password: " password +echo +read -s -p "Retype Password: " retypepassword +echo + +if [ "$password" == "$retypepassword" ];then + openssl enc -in "$1" -out "$1.aes" -e -aes256 -k "$password" + echo Encrypted with aes +else + echo Passwords didn\'t match! 
>&2
+fi
diff --git a/extract b/extract
new file mode 100755
index 0000000..bcf69f9
--- /dev/null
+++ b/extract
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+if [ $# -lt 1 ];then
+	echo "Usage: `basename $0` FILES"
+	exit 1
+fi
+
+# I found the following function at http://unix.stackexchange.com/a/168/37944
+# and improved it a little. Many thanks to sydo for this idea.
+extract () {
+	for arg in "$@" ; do
+		if [ -f "$arg" ] ; then
+			case $arg in
+				*.tar.bz2) tar xjf "$arg" ;;
+				*.tar.gz) tar xzf "$arg" ;;
+				*.bz2) bunzip2 "$arg" ;;
+				*.gz) gunzip "$arg" ;;
+				*.tar) tar xf "$arg" ;;
+				*.tbz2) tar xjf "$arg" ;;
+				*.tgz) tar xzf "$arg" ;;
+				*.zip) unzip "$arg" ;;
+				*.Z) uncompress "$arg" ;;
+				*.rar) rar x "$arg" ;; # 'rar' must be installed
+				*.jar) jar -xvf "$arg" ;; # the JDK must be installed
+				*) echo "'$arg' cannot be extracted via extract()" ;;
+			esac
+		else
+			echo "'$arg' is not a valid file"
+		fi
+	done
+}
+
+extract "$@"
\ No newline at end of file
diff --git a/fdupes b/fdupes
new file mode 100755
index 0000000..96aee32
Binary files /dev/null and b/fdupes differ
diff --git a/hr b/hr
new file mode 100755
index 0000000..088c1f3
--- /dev/null
+++ b/hr
@@ -0,0 +1,51 @@
+#!/usr/bin/env bash
+
+# The MIT License (MIT)
+#
+# Copyright (c) 2014 Gil Gonçalves
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of
+# this software and associated documentation files (the "Software"), to deal in
+# the Software without restriction, including without limitation the rights to
+# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+# the Software, and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
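# (Reading the functions below: with no arguments, hr prints one terminal-width
#  line of "#"; each argument given produces one full-width line built by
#  repeating that argument and truncating it to $COLS, e.g.
#    hr         ->  a line of "#"
#    hr - =     ->  a line of "-" followed by a line of "="
#    hr '~*'    ->  ~*~*~*... across the terminal
#  It can also be sourced, in which case the hr/hrs functions are defined but
#  nothing is printed.)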
+ +COLS="$(tput cols)" +if (( COLS <= 0 )) ; then + COLS="${COLUMNS:-80}" +fi + +hr() { + local WORD="$1" + if [[ -n "$WORD" ]] ; then + local LINE='' + while (( ${#LINE} < COLS )) + do + LINE="$LINE$WORD" + done + + echo "${LINE:0:$COLS}" + fi +} + +hrs() { + local WORD + + for WORD in "${@:-#}" + do + hr "$WORD" + done +} + +[ "$0" == "$BASH_SOURCE" ] && hrs "$@" diff --git a/htdigest.sh b/htdigest.sh new file mode 100755 index 0000000..5ac2565 --- /dev/null +++ b/htdigest.sh @@ -0,0 +1,8 @@ +#!/bin/sh +user=$1 +realm=$2 +pass=$3 + +hash=`echo -n "$user:$realm:$pass" | md5sum | cut -b -32` + +echo "$user:$realm:$hash" diff --git a/mail-service-restart b/mail-service-restart new file mode 100755 index 0000000..2475276 --- /dev/null +++ b/mail-service-restart @@ -0,0 +1,6 @@ +#!/usr/bin/env bash +service postfix restart +service dovecot restart +service spamassassin restart +service spamass-milter restart +service opendkim restart diff --git a/mk-ca-bundle.pl b/mk-ca-bundle.pl new file mode 100755 index 0000000..c2080e9 --- /dev/null +++ b/mk-ca-bundle.pl @@ -0,0 +1,499 @@ +#!/usr/bin/perl -w +# *************************************************************************** +# * _ _ ____ _ +# * Project ___| | | | _ \| | +# * / __| | | | |_) | | +# * | (__| |_| | _ <| |___ +# * \___|\___/|_| \_\_____| +# * +# * Copyright (C) 1998 - 2014, Daniel Stenberg, , et al. +# * +# * This software is licensed as described in the file COPYING, which +# * you should have received as part of this distribution. The terms +# * are also available at http://curl.haxx.se/docs/copyright.html. +# * +# * You may opt to use, copy, modify, merge, publish, distribute and/or sell +# * copies of the Software, and permit persons to whom the Software is +# * furnished to do so, under the terms of the COPYING file. +# * +# * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY +# * KIND, either express or implied. +# * +# *************************************************************************** +# This Perl script creates a fresh ca-bundle.crt file for use with libcurl. +# It downloads certdata.txt from Mozilla's source tree (see URL below), +# then parses certdata.txt and extracts CA Root Certificates into PEM format. +# These are then processed with the OpenSSL commandline tool to produce the +# final ca-bundle.crt file. +# The script is based on the parse-certs script written by Roland Krikava. +# This Perl script works on almost any platform since its only external +# dependency is the OpenSSL commandline tool for optional text listing. +# Hacked by Guenter Knauf. 
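# (A typical run, going by HELP_MESSAGE() further down, would be something
#  like:
#    ./mk-ca-bundle.pl -b -u ca-bundle.crt
#  i.e. back up any existing bundle, remove the downloaded certdata.txt when
#  done, and write the result to ca-bundle.crt, which is also the default
#  output name when no file argument is given.)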
+# +use Getopt::Std; +use MIME::Base64; +use LWP::UserAgent; +use strict; +use vars qw($opt_b $opt_d $opt_f $opt_h $opt_i $opt_l $opt_n $opt_p $opt_q $opt_s $opt_t $opt_u $opt_v $opt_w); +use List::Util; +use Text::Wrap; +my $MOD_SHA = "Digest::SHA"; +eval "require $MOD_SHA"; +if ($@) { + $MOD_SHA = "Digest::SHA::PurePerl"; + eval "require $MOD_SHA"; +} + +my %urls = ( + 'nss' => + 'http://hg.mozilla.org/projects/nss/raw-file/tip/lib/ckfw/builtins/certdata.txt', + 'central' => + 'http://hg.mozilla.org/mozilla-central/raw-file/default/security/nss/lib/ckfw/builtins/certdata.txt', + 'aurora' => + 'http://hg.mozilla.org/releases/mozilla-aurora/raw-file/default/security/nss/lib/ckfw/builtins/certdata.txt', + 'beta' => + 'http://hg.mozilla.org/releases/mozilla-beta/raw-file/default/security/nss/lib/ckfw/builtins/certdata.txt', + 'release' => + 'http://hg.mozilla.org/releases/mozilla-release/raw-file/default/security/nss/lib/ckfw/builtins/certdata.txt', +); + +$opt_d = 'release'; + +# If the OpenSSL commandline is not in search path you can configure it here! +my $openssl = 'openssl'; + +my $version = '1.25'; + +$opt_w = 76; # default base64 encoded lines length + +# default cert types to include in the output (default is to include CAs which may issue SSL server certs) +my $default_mozilla_trust_purposes = "SERVER_AUTH"; +my $default_mozilla_trust_levels = "TRUSTED_DELEGATOR"; +$opt_p = $default_mozilla_trust_purposes . ":" . $default_mozilla_trust_levels; + +my @valid_mozilla_trust_purposes = ( + "DIGITAL_SIGNATURE", + "NON_REPUDIATION", + "KEY_ENCIPHERMENT", + "DATA_ENCIPHERMENT", + "KEY_AGREEMENT", + "KEY_CERT_SIGN", + "CRL_SIGN", + "SERVER_AUTH", + "CLIENT_AUTH", + "CODE_SIGNING", + "EMAIL_PROTECTION", + "IPSEC_END_SYSTEM", + "IPSEC_TUNNEL", + "IPSEC_USER", + "TIME_STAMPING", + "STEP_UP_APPROVED" +); + +my @valid_mozilla_trust_levels = ( + "TRUSTED_DELEGATOR", # CAs + "NOT_TRUSTED", # Don't trust these certs. + "MUST_VERIFY_TRUST", # This explicitly tells us that it ISN'T a CA but is otherwise ok. In other words, this should tell the app to ignore any other sources that claim this is a CA. + "TRUSTED" # This cert is trusted, but only for itself and not for delegates (i.e. it is not a CA). +); + +my $default_signature_algorithms = $opt_s = "MD5"; + +my @valid_signature_algorithms = ( + "MD5", + "SHA1", + "SHA256", + "SHA384", + "SHA512" +); + +$0 =~ s@.*(/|\\)@@; +$Getopt::Std::STANDARD_HELP_VERSION = 1; +getopts('bd:fhilnp:qs:tuvw:'); + +if(!defined($opt_d)) { + # to make plain "-d" use not cause warnings, and actually still work + $opt_d = 'release'; +} + +# Use predefined URL or else custom URL specified on command line. +my $url = ( defined( $urls{$opt_d} ) ) ? $urls{$opt_d} : $opt_d; + +my $curl = `curl -V`; + +if ($opt_i) { + print ("=" x 78 . "\n"); + print "Script Version : $version\n"; + print "Perl Version : $]\n"; + print "Operating System Name : $^O\n"; + print "Getopt::Std.pm Version : ${Getopt::Std::VERSION}\n"; + print "MIME::Base64.pm Version : ${MIME::Base64::VERSION}\n"; + print "LWP::UserAgent.pm Version : ${LWP::UserAgent::VERSION}\n"; + print "LWP.pm Version : ${LWP::VERSION}\n"; + print "Digest::SHA.pm Version : ${Digest::SHA::VERSION}\n" if ($Digest::SHA::VERSION); + print "Digest::SHA::PurePerl.pm Version : ${Digest::SHA::PurePerl::VERSION}\n" if ($Digest::SHA::PurePerl::VERSION); + print ("=" x 78 . 
"\n"); +} + +sub warning_message() { + if ( $opt_d =~ m/^risk$/i ) { # Long Form Warning and Exit + print "Warning: Use of this script may pose some risk:\n"; + print "\n"; + print " 1) Using http is subject to man in the middle attack of certdata content\n"; + print " 2) Default to 'release', but more recent updates may be found in other trees\n"; + print " 3) certdata.txt file format may change, lag time to update this script\n"; + print " 4) Generally unwise to blindly trust CAs without manual review & verification\n"; + print " 5) Mozilla apps use additional security checks aren't represented in certdata\n"; + print " 6) Use of this script will make a security engineer grind his teeth and\n"; + print " swear at you. ;)\n"; + exit; + } else { # Short Form Warning + print "Warning: Use of this script may pose some risk, -d risk for more details.\n"; + } +} + +sub HELP_MESSAGE() { + print "Usage:\t${0} [-b] [-d] [-f] [-i] [-l] [-n] [-p] [-q] [-s] [-t] [-u] [-v] [-w] []\n"; + print "\t-b\tbackup an existing version of ca-bundle.crt\n"; + print "\t-d\tspecify Mozilla tree to pull certdata.txt or custom URL\n"; + print "\t\t Valid names are:\n"; + print "\t\t ", join( ", ", map { ( $_ =~ m/$opt_d/ ) ? "$_ (default)" : "$_" } sort keys %urls ), "\n"; + print "\t-f\tforce rebuild even if certdata.txt is current\n"; + print "\t-i\tprint version info about used modules\n"; + print "\t-l\tprint license info about certdata.txt\n"; + print "\t-n\tno download of certdata.txt (to use existing)\n"; + print wrap("\t","\t\t", "-p\tlist of Mozilla trust purposes and levels for certificates to include in output. Takes the form of a comma separated list of purposes, a colon, and a comma separated list of levels. (default: $default_mozilla_trust_purposes:$default_mozilla_trust_levels)"), "\n"; + print "\t\t Valid purposes are:\n"; + print wrap("\t\t ","\t\t ", join( ", ", "ALL", @valid_mozilla_trust_purposes ) ), "\n"; + print "\t\t Valid levels are:\n"; + print wrap("\t\t ","\t\t ", join( ", ", "ALL", @valid_mozilla_trust_levels ) ), "\n"; + print "\t-q\tbe really quiet (no progress output at all)\n"; + print wrap("\t","\t\t", "-s\tcomma separated list of certificate signatures/hashes to output in plain text mode. (default: $default_signature_algorithms)\n"); + print "\t\t Valid signature algorithms are:\n"; + print wrap("\t\t ","\t\t ", join( ", ", "ALL", @valid_signature_algorithms ) ), "\n"; + print "\t-t\tinclude plain text listing of certificates\n"; + print "\t-u\tunlink (remove) certdata.txt after processing\n"; + print "\t-v\tbe verbose and print out processed CAs\n"; + print "\t-w \twrap base64 output lines after chars (default: ${opt_w})\n"; + exit; +} + +sub VERSION_MESSAGE() { + print "${0} version ${version} running Perl ${]} on ${^O}\n"; +} + +warning_message() unless ($opt_q || $url =~ m/^(ht|f)tps:/i ); +HELP_MESSAGE() if ($opt_h); + +sub report($@) { + my $output = shift; + + print STDERR $output . 
"\n" unless $opt_q; +} + +sub is_in_list($@) { + my $target = shift; + + return defined(List::Util::first { $target eq $_ } @_); +} + +# Parses $param_string as a case insensitive comma separated list with optional whitespace +# validates that only allowed parameters are supplied +sub parse_csv_param($$@) { + my $description = shift; + my $param_string = shift; + my @valid_values = @_; + + my @values = map { + s/^\s+//; # strip leading spaces + s/\s+$//; # strip trailing spaces + uc $_ # return the modified string as upper case + } split( ',', $param_string ); + + # Find all values which are not in the list of valid values or "ALL" + my @invalid = grep { !is_in_list($_,"ALL",@valid_values) } @values; + + if ( scalar(@invalid) > 0 ) { + # Tell the user which parameters were invalid and print the standard help message which will exit + print "Error: Invalid ", $description, scalar(@invalid) == 1 ? ": " : "s: ", join( ", ", map { "\"$_\"" } @invalid ), "\n"; + HELP_MESSAGE(); + } + + @values = @valid_values if ( is_in_list("ALL",@values) ); + + return @values; +} + +sub sha1 { + my $result; + if ($Digest::SHA::VERSION || $Digest::SHA::PurePerl::VERSION) { + open(FILE, $_[0]) or die "Can't open '$_[0]': $!"; + binmode(FILE); + $result = $MOD_SHA->new(1)->addfile(*FILE)->hexdigest; + close(FILE); + } else { + # Use OpenSSL command if Perl Digest::SHA modules not available + $result = (split(/ |\r|\n/,`$openssl dgst -sha1 $_[0]`))[1]; + } + return $result; +} + + +sub oldsha1 { + my $sha1 = ""; + open(C, "<$_[0]") || return 0; + while() { + chomp; + if($_ =~ /^\#\# SHA1: (.*)/) { + $sha1 = $1; + last; + } + } + close(C); + return $sha1; +} + +if ( $opt_p !~ m/:/ ) { + print "Error: Mozilla trust identifier list must include both purposes and levels\n"; + HELP_MESSAGE(); +} + +(my $included_mozilla_trust_purposes_string, my $included_mozilla_trust_levels_string) = split( ':', $opt_p ); +my @included_mozilla_trust_purposes = parse_csv_param( "trust purpose", $included_mozilla_trust_purposes_string, @valid_mozilla_trust_purposes ); +my @included_mozilla_trust_levels = parse_csv_param( "trust level", $included_mozilla_trust_levels_string, @valid_mozilla_trust_levels ); + +my @included_signature_algorithms = parse_csv_param( "signature algorithm", $opt_s, @valid_signature_algorithms ); + +sub should_output_cert(%) { + my %trust_purposes_by_level = @_; + + foreach my $level (@included_mozilla_trust_levels) { + # for each level we want to output, see if any of our desired purposes are included + return 1 if ( defined( List::Util::first { is_in_list( $_, @included_mozilla_trust_purposes ) } @{$trust_purposes_by_level{$level}} ) ); + } + + return 0; +} + +my $crt = $ARGV[0] || 'ca-bundle.crt'; +(my $txt = $url) =~ s@(.*/|\?.*)@@g; + +my $stdout = $crt eq '-'; +my $resp; +my $fetched; + +my $oldsha1 = oldsha1($crt); + +report "SHA1 of old file: $oldsha1"; + +report "Downloading '$txt' ..."; + +if($curl && !$opt_n) { + my $https = $url; + $https =~ s/^http:/https:/; + report "Get certdata over HTTPS with curl!"; + my $quiet = $opt_q ? 
"-s" : ""; + my @out = `curl -w %{response_code} $quiet -O $https`; + if(@out && $out[0] == 200) { + $fetched = 1; + } else { + report "Failed downloading HTTPS with curl, trying HTTP with LWP"; + } +} + +unless ($fetched || ($opt_n and -e $txt)) { + my $ua = new LWP::UserAgent(agent => "$0/$version"); + $ua->env_proxy(); + $resp = $ua->mirror($url, $txt); + if ($resp && $resp->code eq '304') { + report "Not modified"; + exit 0 if -e $crt && !$opt_f; + } else { + $fetched = 1; + } + if( !$resp || $resp->code !~ /^(?:200|304)$/ ) { + report "Unable to download latest data: " + . ($resp? $resp->code . ' - ' . $resp->message : "LWP failed"); + exit 1 if -e $crt || ! -r $txt; + } +} + +my $filedate = $resp ? $resp->last_modified : (stat($txt))[9]; +my $datesrc = "as of"; +if(!$filedate) { + # mxr.mozilla.org gave us a time, hg.mozilla.org does not! + $filedate = time(); + $datesrc="downloaded on"; +} + +# get the hash from the download file +my $newsha1= sha1($txt); + +if(!$opt_f && $oldsha1 eq $newsha1) { + report "Downloaded file identical to previous run\'s source file. Exiting"; + exit; +} + +report "SHA1 of new file: $newsha1"; + +my $currentdate = scalar gmtime($filedate); + +my $format = $opt_t ? "plain text and " : ""; +if( $stdout ) { + open(CRT, '> -') or die "Couldn't open STDOUT: $!\n"; +} else { + open(CRT,">$crt.~") or die "Couldn't open $crt.~: $!\n"; +} +print CRT <) { + if (/\*\*\*\*\* BEGIN LICENSE BLOCK \*\*\*\*\*/) { + print CRT; + print if ($opt_l); + while () { + print CRT; + print if ($opt_l); + last if (/\*\*\*\*\* END LICENSE BLOCK \*\*\*\*\*/); + } + } + next if /^#|^\s*$/; + chomp; + if (/^CVS_ID\s+\"(.*)\"/) { + print CRT "# $1\n"; + } + + # this is a match for the start of a certificate + if (/^CKA_CLASS CK_OBJECT_CLASS CKO_CERTIFICATE/) { + $start_of_cert = 1 + } + if ($start_of_cert && /^CKA_LABEL UTF8 \"(.*)\"/) { + $caname = $1; + } + my %trust_purposes_by_level; + if ($start_of_cert && /^CKA_VALUE MULTILINE_OCTAL/) { + my $data; + while () { + last if (/^END/); + chomp; + my @octets = split(/\\/); + shift @octets; + for (@octets) { + $data .= chr(oct); + } + } + # scan forwards until the trust part + while () { + last if (/^CKA_CLASS CK_OBJECT_CLASS CKO_NSS_TRUST/); + chomp; + } + # now scan the trust part to determine how we should trust this cert + while () { + last if (/^#/); + if (/^CKA_TRUST_([A-Z_]+)\s+CK_TRUST\s+CKT_NSS_([A-Z_]+)\s*$/) { + if ( !is_in_list($1,@valid_mozilla_trust_purposes) ) { + report "Warning: Unrecognized trust purpose for cert: $caname. Trust purpose: $1. Trust Level: $2"; + } elsif ( !is_in_list($2,@valid_mozilla_trust_levels) ) { + report "Warning: Unrecognized trust level for cert: $caname. Trust purpose: $1. Trust Level: $2"; + } else { + push @{$trust_purposes_by_level{$2}}, $1; + } + } + } + + if ( !should_output_cert(%trust_purposes_by_level) ) { + $skipnum ++; + } else { + my $encoded = MIME::Base64::encode_base64($data, ''); + $encoded =~ s/(.{1,${opt_w}})/$1\n/g; + my $pem = "-----BEGIN CERTIFICATE-----\n" + . $encoded + . "-----END CERTIFICATE-----\n"; + print CRT "\n$caname\n"; + + my $maxStringLength = length($caname); + if ($opt_t) { + foreach my $key (keys %trust_purposes_by_level) { + my $string = $key . ": " . join(", ", @{$trust_purposes_by_level{$key}}); + $maxStringLength = List::Util::max( length($string), $maxStringLength ); + print CRT $string . "\n"; + } + } + print CRT ("=" x $maxStringLength . 
"\n"); + if (!$opt_t) { + print CRT $pem; + } else { + my $pipe = ""; + foreach my $hash (@included_signature_algorithms) { + $pipe = "|$openssl x509 -" . $hash . " -fingerprint -noout -inform PEM"; + if (!$stdout) { + $pipe .= " >> $crt.~"; + close(CRT) or die "Couldn't close $crt.~: $!"; + } + open(TMP, $pipe) or die "Couldn't open openssl pipe: $!"; + print TMP $pem; + close(TMP) or die "Couldn't close openssl pipe: $!"; + if (!$stdout) { + open(CRT, ">>$crt.~") or die "Couldn't open $crt.~: $!"; + } + } + $pipe = "|$openssl x509 -text -inform PEM"; + if (!$stdout) { + $pipe .= " >> $crt.~"; + close(CRT) or die "Couldn't close $crt.~: $!"; + } + open(TMP, $pipe) or die "Couldn't open openssl pipe: $!"; + print TMP $pem; + close(TMP) or die "Couldn't close openssl pipe: $!"; + if (!$stdout) { + open(CRT, ">>$crt.~") or die "Couldn't open $crt.~: $!"; + } + } + report "Parsing: $caname" if ($opt_v); + $certnum ++; + $start_of_cert = 0; + } + } +} +close(TXT) or die "Couldn't close $txt: $!\n"; +close(CRT) or die "Couldn't close $crt.~: $!\n"; +unless( $stdout ) { + if ($opt_b && -e $crt) { + my $bk = 1; + while (-e "$crt.~${bk}~") { + $bk++; + } + rename $crt, "$crt.~${bk}~" or die "Failed to create backup $crt.~$bk}~: $!\n"; + } elsif( -e $crt ) { + unlink( $crt ) or die "Failed to remove $crt: $!\n"; + } + rename "$crt.~", $crt or die "Failed to rename $crt.~ to $crt: $!\n"; +} +unlink $txt if ($opt_u); +report "Done ($certnum CA certs processed, $skipnum skipped)."; diff --git a/node-update b/node-update new file mode 100755 index 0000000..9e65874 --- /dev/null +++ b/node-update @@ -0,0 +1,23 @@ +#!/bin/bash + +echo "> Getting latest version number" + +VERSION=v${1:-$(curl https://nodejs.org/dist/index.json | sed -e 's/^.*"version":"\([^"]*\)".*$/\1/' | head -n 2 | tail -n -1 | cut -c 2-)} +NODEJS=node-${VERSION}-linux-x64 +echo "> Downloading $VERSION of node.js" +curl -s https://nodejs.org/dist/${VERSION}/${NODEJS}.tar.xz | tar xvfJ - + +echo "> Setting ownership of /usr/local to $USER" +sudo chown -R ${USER}:${USER} /usr/local + +echo "> Moving extracted node.js binaries" +rm -rf /usr/local/lib/${NODEJS} && mv ${NODEJS} /usr/local/lib + +echo "> Symlinks" +sudo rm -f /usr/local/bin/{iojs,node,npm,node-gyp} +ln -s /usr/local/lib/${NODEJS}/bin/node /usr/local/bin/node +ln -s /usr/local/lib/${NODEJS}/bin/node /usr/local/bin/node +ln -s /usr/local/lib/${NODEJS}/bin/npm /usr/local/bin/npm + +echo "node.js version: $(node -v), npm version: $(npm -v)" + diff --git a/ports b/ports new file mode 100755 index 0000000..7de08e3 --- /dev/null +++ b/ports @@ -0,0 +1,2 @@ +#!/usr/bin/env bash +netstat -tulnap diff --git a/ps_mem b/ps_mem new file mode 100755 index 0000000..a54a571 --- /dev/null +++ b/ps_mem @@ -0,0 +1,487 @@ +#!/usr/bin/env python + +# Try to determine how much RAM is currently being used per program. +# Note per _program_, not per process. So for example this script +# will report RAM used by all httpd process together. In detail it reports: +# sum(private RAM for program processes) + sum(Shared RAM for program processes) +# The shared RAM is problematic to calculate, and this script automatically +# selects the most accurate method available for your kernel. + +# Licence: LGPLv2 +# Author: P@draigBrady.com +# Source: http://www.pixelbeat.org/scripts/ps_mem.py + +# V1.0 06 Jul 2005 Initial release +# V1.1 11 Aug 2006 root permission required for accuracy +# V1.2 08 Nov 2006 Add total to output +# Use KiB,MiB,... for units rather than K,M,... 
+# V1.3 22 Nov 2006 Ignore shared col from /proc/$pid/statm for +# 2.6 kernels up to and including 2.6.9. +# There it represented the total file backed extent +# V1.4 23 Nov 2006 Remove total from output as it's meaningless +# (the shared values overlap with other programs). +# Display the shared column. This extra info is +# useful, especially as it overlaps between programs. +# V1.5 26 Mar 2007 Remove redundant recursion from human() +# V1.6 05 Jun 2007 Also report number of processes with a given name. +# Patch from riccardo.murri@gmail.com +# V1.7 20 Sep 2007 Use PSS from /proc/$pid/smaps if available, which +# fixes some over-estimation and allows totalling. +# Enumerate the PIDs directly rather than using ps, +# which fixes the possible race between reading +# RSS with ps, and shared memory with this program. +# Also we can show non truncated command names. +# V1.8 28 Sep 2007 More accurate matching for stats in /proc/$pid/smaps +# as otherwise could match libraries causing a crash. +# Patch from patrice.bouchand.fedora@gmail.com +# V1.9 20 Feb 2008 Fix invalid values reported when PSS is available. +# Reported by Andrey Borzenkov +# V3.2 01 Mar 2014 +# http://github.com/pixelb/scripts/commits/master/scripts/ps_mem.py + +# Notes: +# +# All interpreted programs where the interpreter is started +# by the shell or with env, will be merged to the interpreter +# (as that's what's given to exec). For e.g. all python programs +# starting with "#!/usr/bin/env python" will be grouped under python. +# You can change this by using the full command line but that will +# have the undesirable affect of splitting up programs started with +# differing parameters (for e.g. mingetty tty[1-6]). +# +# For 2.6 kernels up to and including 2.6.13 and later 2.4 redhat kernels +# (rmap vm without smaps) it can not be accurately determined how many pages +# are shared between processes in general or within a program in our case: +# http://lkml.org/lkml/2005/7/6/250 +# A warning is printed if overestimation is possible. +# In addition for 2.6 kernels up to 2.6.9 inclusive, the shared +# value in /proc/$pid/statm is the total file-backed extent of a process. +# We ignore that, introducing more overestimation, again printing a warning. +# Since kernel 2.6.23-rc8-mm1 PSS is available in smaps, which allows +# us to calculate a more accurate value for the total RAM used by programs. +# +# Programs that use CLONE_VM without CLONE_THREAD are discounted by assuming +# they're the only programs that have the same /proc/$PID/smaps file for +# each instance. This will fail if there are multiple real instances of a +# program that then use CLONE_VM without CLONE_THREAD, or if a clone changes +# its memory map while we're checksumming each /proc/$PID/smaps. +# +# I don't take account of memory allocated for a program +# by other programs. For e.g. memory used in the X server for +# a program could be determined, but is not. +# +# FreeBSD is supported if linprocfs is mounted at /compat/linux/proc/ +# FreeBSD 8.0 supports up to a level of Linux 2.6.16 + +import getopt +import time +import errno +import os +import sys + +try: + # md5 module is deprecated on python 2.6 + # so try the newer hashlib first + import hashlib + md5_new = hashlib.md5 +except ImportError: + import md5 + md5_new = md5.new + + +# The following exits cleanly on Ctrl-C or EPIPE +# while treating other exceptions as before. 
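+# (Concretely: the std_exceptions hook installed just below silently ignores
+# KeyboardInterrupt and IOError/EPIPE, and hands any other exception back to
+# the original sys.__excepthook__.)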
+def std_exceptions(etype, value, tb): + sys.excepthook = sys.__excepthook__ + if issubclass(etype, KeyboardInterrupt): + pass + elif issubclass(etype, IOError) and value.errno == errno.EPIPE: + pass + else: + sys.__excepthook__(etype, value, tb) +sys.excepthook = std_exceptions + +# +# Define some global variables +# + +PAGESIZE = os.sysconf("SC_PAGE_SIZE") / 1024 #KiB +our_pid = os.getpid() + +have_pss = 0 + +class Proc: + def __init__(self): + uname = os.uname() + if uname[0] == "FreeBSD": + self.proc = '/compat/linux/proc' + else: + self.proc = '/proc' + + def path(self, *args): + return os.path.join(self.proc, *(str(a) for a in args)) + + def open(self, *args): + try: + return open(self.path(*args)) + except (IOError, OSError): + val = sys.exc_info()[1] + if (val.errno == errno.ENOENT or # kernel thread or process gone + val.errno == errno.EPERM): + raise LookupError + raise + +proc = Proc() + + +# +# Functions +# + +def parse_options(): + try: + long_options = ['split-args', 'help', 'total'] + opts, args = getopt.getopt(sys.argv[1:], "shtp:w:", long_options) + except getopt.GetoptError: + sys.stderr.write(help()) + sys.exit(3) + + # ps_mem.py options + split_args = False + pids_to_show = None + watch = None + only_total = False + + for o, a in opts: + if o in ('-s', '--split-args'): + split_args = True + if o in ('-t', '--total'): + only_total = True + if o in ('-h', '--help'): + sys.stdout.write(help()) + sys.exit(0) + if o in ('-p',): + try: + pids_to_show = [int(x) for x in a.split(',')] + except: + sys.stderr.write(help()) + sys.exit(3) + if o in ('-w',): + try: + watch = int(a) + except: + sys.stderr.write(help()) + sys.exit(3) + + return (split_args, pids_to_show, watch, only_total) + +def help(): + help_msg = 'ps_mem.py - Show process memory usage\n'\ + '\n'\ + '-h Show this help\n'\ + '-w Measure and show process memory every N seconds\n'\ + '-p [,pid2,...pidN] Only show memory usage PIDs in the specified list\n' \ + '-s, --split-args Show and separate by, all command line arguments\n' \ + '-t, --total Show only the total value\n' + + return help_msg + +#(major,minor,release) +def kernel_ver(): + kv = proc.open('sys/kernel/osrelease').readline().split(".")[:3] + last = len(kv) + if last == 2: + kv.append('0') + last -= 1 + for char in "-_": + kv[last] = kv[last].split(char)[0] + try: + int(kv[last]) + except: + kv[last] = 0 + return (int(kv[0]), int(kv[1]), int(kv[2])) + + +#return Private,Shared +#Note shared is always a subset of rss (trs is not always) +def getMemStats(pid): + global have_pss + mem_id = pid #unique + Private_lines = [] + Shared_lines = [] + Pss_lines = [] + Rss = (int(proc.open(pid, 'statm').readline().split()[1]) + * PAGESIZE) + if os.path.exists(proc.path(pid, 'smaps')): #stat + digester = md5_new() + for line in proc.open(pid, 'smaps').readlines(): #open + # Note we checksum smaps as maps is usually but + # not always different for separate processes. + digester.update(line.encode('latin1')) + if line.startswith("Shared"): + Shared_lines.append(line) + elif line.startswith("Private"): + Private_lines.append(line) + elif line.startswith("Pss"): + have_pss = 1 + Pss_lines.append(line) + mem_id = digester.hexdigest() + Shared = sum([int(line.split()[1]) for line in Shared_lines]) + Private = sum([int(line.split()[1]) for line in Private_lines]) + #Note Shared + Private = Rss above + #The Rss in smaps includes video card mem etc. 
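+    # When the kernel exposes Pss entries in smaps, shared memory is derived as
+    # the summed Pss (plus a 0.5 KiB truncation adjustment per entry) minus the
+    # private total; on 2.6.1-2.6.9 kernels it cannot be determined at all, and
+    # otherwise the 'shared' column of /proc/$pid/statm is used as a fallback.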
+ if have_pss: + pss_adjust = 0.5 # add 0.5KiB as this avg error due to trunctation + Pss = sum([float(line.split()[1])+pss_adjust for line in Pss_lines]) + Shared = Pss - Private + elif (2,6,1) <= kernel_ver() <= (2,6,9): + Shared = 0 #lots of overestimation, but what can we do? + Private = Rss + else: + Shared = int(proc.open(pid, 'statm').readline().split()[2]) + Shared *= PAGESIZE + Private = Rss - Shared + return (Private, Shared, mem_id) + + +def getCmdName(pid, split_args): + cmdline = proc.open(pid, 'cmdline').read().split("\0") + if cmdline[-1] == '' and len(cmdline) > 1: + cmdline = cmdline[:-1] + + path = proc.path(pid, 'exe') + try: + path = os.readlink(path) + # Some symlink targets were seen to contain NULs on RHEL 5 at least + # https://github.com/pixelb/scripts/pull/10, so take string up to NUL + path = path.split('\0')[0] + except OSError: + val = sys.exc_info()[1] + if (val.errno == errno.ENOENT or # either kernel thread or process gone + val.errno == errno.EPERM): + raise LookupError + raise + + if split_args: + return " ".join(cmdline) + if path.endswith(" (deleted)"): + path = path[:-10] + if os.path.exists(path): + path += " [updated]" + else: + #The path could be have prelink stuff so try cmdline + #which might have the full path present. This helped for: + #/usr/libexec/notification-area-applet.#prelink#.fX7LCT (deleted) + if os.path.exists(cmdline[0]): + path = cmdline[0] + " [updated]" + else: + path += " [deleted]" + exe = os.path.basename(path) + cmd = proc.open(pid, 'status').readline()[6:-1] + if exe.startswith(cmd): + cmd = exe #show non truncated version + #Note because we show the non truncated name + #one can have separated programs as follows: + #584.0 KiB + 1.0 MiB = 1.6 MiB mozilla-thunder (exe -> bash) + # 56.0 MiB + 22.2 MiB = 78.2 MiB mozilla-thunderbird-bin + return cmd + + +#The following matches "du -h" output +#see also human.py +def human(num, power="Ki"): + powers = ["Ki", "Mi", "Gi", "Ti"] + while num >= 1000: #4 digits + num /= 1024.0 + power = powers[powers.index(power)+1] + return "%.1f %s" % (num, power) + + +def cmd_with_count(cmd, count): + if count > 1: + return "%s (%u)" % (cmd, count) + else: + return cmd + +#Warn of possible inaccuracies +#2 = accurate & can total +#1 = accurate only considering each process in isolation +#0 = some shared mem not reported +#-1= all shared mem not reported +def shared_val_accuracy(): + """http://wiki.apache.org/spamassassin/TopSharedMemoryBug""" + kv = kernel_ver() + if kv[:2] == (2,4): + if proc.open('meminfo').read().find("Inact_") == -1: + return 1 + return 0 + elif kv[:2] == (2,6): + pid = os.getpid() + if os.path.exists(proc.path(pid, 'smaps')): + if proc.open(pid, 'smaps').read().find("Pss:")!=-1: + return 2 + else: + return 1 + if (2,6,1) <= kv <= (2,6,9): + return -1 + return 0 + elif kv[0] > 2: + return 2 + else: + return 1 + +def show_shared_val_accuracy( possible_inacc, only_total=False ): + level = ("Warning","Error")[only_total] + if possible_inacc == -1: + sys.stderr.write( + "%s: Shared memory is not reported by this system.\n" % level + ) + sys.stderr.write( + "Values reported will be too large, and totals are not reported\n" + ) + elif possible_inacc == 0: + sys.stderr.write( + "%s: Shared memory is not reported accurately by this system.\n" % level + ) + sys.stderr.write( + "Values reported could be too large, and totals are not reported\n" + ) + elif possible_inacc == 1: + sys.stderr.write( + "%s: Shared memory is slightly over-estimated by this system\n" + "for each program, so 
totals are not reported.\n" % level + ) + sys.stderr.close() + if only_total and possible_inacc != 2: + sys.exit(1) + +def get_memory_usage( pids_to_show, split_args, include_self=False, only_self=False ): + cmds = {} + shareds = {} + mem_ids = {} + count = {} + for pid in os.listdir(proc.path('')): + if not pid.isdigit(): + continue + pid = int(pid) + + # Some filters + if only_self and pid != our_pid: + continue + if pid == our_pid and not include_self: + continue + if pids_to_show is not None and pid not in pids_to_show: + continue + + try: + cmd = getCmdName(pid, split_args) + except LookupError: + #operation not permitted + #kernel threads don't have exe links or + #process gone + continue + + try: + private, shared, mem_id = getMemStats(pid) + except RuntimeError: + continue #process gone + if shareds.get(cmd): + if have_pss: #add shared portion of PSS together + shareds[cmd] += shared + elif shareds[cmd] < shared: #just take largest shared val + shareds[cmd] = shared + else: + shareds[cmd] = shared + cmds[cmd] = cmds.setdefault(cmd, 0) + private + if cmd in count: + count[cmd] += 1 + else: + count[cmd] = 1 + mem_ids.setdefault(cmd, {}).update({mem_id:None}) + + #Add shared mem for each program + total = 0 + for cmd in cmds: + cmd_count = count[cmd] + if len(mem_ids[cmd]) == 1 and cmd_count > 1: + # Assume this program is using CLONE_VM without CLONE_THREAD + # so only account for one of the processes + cmds[cmd] /= cmd_count + if have_pss: + shareds[cmd] /= cmd_count + cmds[cmd] = cmds[cmd] + shareds[cmd] + total += cmds[cmd] #valid if PSS available + + sorted_cmds = sorted(cmds.items(), key=lambda x:x[1]) + sorted_cmds = [x for x in sorted_cmds if x[1]] + + return sorted_cmds, shareds, count, total + +def print_header(): + sys.stdout.write(" Private + Shared = RAM used\tProgram\n\n") + +def print_memory_usage(sorted_cmds, shareds, count, total): + for cmd in sorted_cmds: + sys.stdout.write("%8sB + %8sB = %8sB\t%s\n" % + (human(cmd[1]-shareds[cmd[0]]), + human(shareds[cmd[0]]), human(cmd[1]), + cmd_with_count(cmd[0], count[cmd[0]]))) + if have_pss: + sys.stdout.write("%s\n%s%8sB\n%s\n" % + ("-" * 33, " " * 24, human(total), "=" * 33)) + +def verify_environment(): + if os.geteuid() != 0: + sys.stderr.write("Sorry, root permission required.\n") + if __name__ == '__main__': + sys.stderr.close() + sys.exit(1) + + try: + kv = kernel_ver() + except (IOError, OSError): + val = sys.exc_info()[1] + if val.errno == errno.ENOENT: + sys.stderr.write( + "Couldn't access " + proc.path('') + "\n" + "Only GNU/Linux and FreeBSD (with linprocfs) are supported\n") + sys.exit(2) + else: + raise + +if __name__ == '__main__': + verify_environment() + split_args, pids_to_show, watch, only_total = parse_options() + + if not only_total: + print_header() + + if watch is not None: + try: + sorted_cmds = True + while sorted_cmds: + sorted_cmds, shareds, count, total = get_memory_usage( pids_to_show, split_args ) + if only_total and have_pss: + sys.stdout.write(human(total).replace(' ','')+'B\n') + elif not only_total: + print_memory_usage(sorted_cmds, shareds, count, total) + time.sleep(watch) + else: + sys.stdout.write('Process does not exist anymore.\n') + except KeyboardInterrupt: + pass + else: + # This is the default behavior + sorted_cmds, shareds, count, total = get_memory_usage( pids_to_show, split_args ) + if only_total and have_pss: + sys.stdout.write(human(total).replace(' ','')+'B\n') + elif not only_total: + print_memory_usage(sorted_cmds, shareds, count, total) + + # We must close explicitly, so 
that any EPIPE exception
+    # is handled by our excepthook, rather than the default
+    # one which is reenabled after this script finishes.
+    sys.stdout.close()
+
+    vm_accuracy = shared_val_accuracy()
+    show_shared_val_accuracy( vm_accuracy, only_total )
diff --git a/recorduptime b/recorduptime
new file mode 100755
index 0000000..60af11a
--- /dev/null
+++ b/recorduptime
@@ -0,0 +1,4 @@
+#!/usr/bin/env bash
+uptime >> ~/uptime.log
+
+at -f /home/sbrl/recorduptime now +1 minute
diff --git a/splitdotmbox.sh b/splitdotmbox.sh
new file mode 100755
index 0000000..83745c8
--- /dev/null
+++ b/splitdotmbox.sh
@@ -0,0 +1 @@
+csplit -n 2 -k $1 '/^From /' '{*}'
diff --git a/stealtime b/stealtime
new file mode 100755
index 0000000..a10b950
--- /dev/null
+++ b/stealtime
@@ -0,0 +1,4 @@
+#!/usr/bin/env bash
+# Echoes the CPU steal time from /proc/stat.
+# CPU steal time is the time this VM was delayed because the host was running another guest; it is the 8th value (9th whitespace-separated field) on the aggregate "cpu" line.
+head -n 1 /proc/stat | awk '{ print $9 }'
diff --git a/swapusage b/swapusage
new file mode 100755
index 0000000..105a229
--- /dev/null
+++ b/swapusage
@@ -0,0 +1,2 @@
+#!/usr/bin/env bash
+for file in /proc/*/status ; do awk '/VmSwap|Name/{printf $2 " " $3}END{ print ""}' $file; done | sort -k 2 -n -r
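+# Output, for reference: one line per process in the form "<Name> <VmSwap> kB",
+# sorted with the largest swap users first; entries with no VmSwap field (e.g.
+# kernel threads) show just the name.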