aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--README.md77
-rw-r--r--autocompletes/_docker1421
-rw-r--r--autocompletes/_docker_containers5
-rw-r--r--history44
-rw-r--r--includes691
-rwxr-xr-xscripts/fzf_example3
-rwxr-xr-xstow/install5
-rw-r--r--stow/starship/.config/starship.toml54
-rw-r--r--stow/zsh/.zshrc233
9 files changed, 2533 insertions, 0 deletions
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..9c99d53
--- /dev/null
+++ b/README.md
@@ -0,0 +1,77 @@
+# Totally not done the night before ZSH presentation
+
+## Overwhelming list of plugin stuff
+
+https://github.com/unixorn/awesome-zsh-plugins
+
+There is so much to read but I would mostly just skim `plugins` for things you
+need.
+
+https://github.com/unixorn/awesome-zsh-plugins#plugins
+
+Fun example I found while browsing plugins
+
+https://github.com/mattberther/zsh-pyenv
+
+## Managing symlinks with gnustow
+
+Source: http://brandon.invergo.net/news/2012-05-26-using-gnu-stow-to-manage-your-dotfiles.html?round=two
+
+Stow mirrors the directory structure of the .dotfiles to understand where files
+should go. So instead of doing say:
+
+```
+ln -s /path/to/my/source/file.txt /path/to/symlink.txt
+```
+
+You create the directory structure with actual folders and stow can use that
+structure to determine where to place the files given a root directory. It will
+complain if files already exist so you won't overwrite them. To install the
+dotfiles you can simply:
+
+```
+cd stow
+./install
+```
+
+## Functions and Aliases
+
+I only split up my main config and another file that contains my functions and
+aliases. I've included a filtered version of my aliases to give ideas, but
+there is a lot of cruft in there.
+
+Essential Tools:
+- fzf (https://github.com/junegunn/fzf)
+- gh (github cli https://cli.github.com/)
+- exa (pretty ls https://github.com/ogham/exa)
+
+## Zplug
+
+zplug is a zsh plugin manager. Add the repos you want in your ~/.zshrc and then
+when you start a new shell you'll be prompted to install the new plugins.
+Supports oh-my-zsh plugins.
+
+## Autocompletes
+
+zsh uses a variable called `fpath` to determine where to find autocomplete
+files. Autocomplete files are prefixed with an underscore and then the command
+name. For example, to find the autocompletes for `docker`, zsh would search the
+fpath variable for a file called `_docker`. You can write your own
+autocompletes for things. Defining an alias in your zshrc will let the alias pick
+up the autocompletes for the original commands but if you write a function it
+will not:
+
+```
+# gs will pick up the tab completion for git stash
+alias gs="git stash"
+# this will not
+gs() {
+ git stash
+}
+```
+
+You can use the `compdef` function to assign the autocompletes of one command
+to another. Example in zshrc
+
+You can also write your own completions if you know a simple bash command to
+get the list of options you'd like to be presented with.
diff --git a/autocompletes/_docker b/autocompletes/_docker
new file mode 100644
index 0000000..1c2b031
--- /dev/null
+++ b/autocompletes/_docker
@@ -0,0 +1,1421 @@
+#compdef docker
+#
+# zsh completion for docker (http://docker.com)
+#
+# version: 0.3.0
+# github: https://github.com/felixr/docker-zsh-completion
+#
+# contributors:
+# - Felix Riedel
+# - Steve Durrheimer
+# - Vincent Bernat
+#
+# license:
+#
+# Copyright (c) 2013, Felix Riedel
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# * Neither the name of the <organization> nor the
+# names of its contributors may be used to endorse or promote products
+# derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+# Short-option stacking can be disabled with:
+# zstyle ':completion:*:*:docker:*' option-stacking no
+# zstyle ':completion:*:*:docker-*:*' option-stacking no
+__docker_arguments() {
+ if zstyle -T ":completion:${curcontext}:" option-stacking; then
+ print -- -s
+ fi
+}
+
+__docker_get_containers() {
+ [[ $PREFIX = -* ]] && return 1
+ integer ret=1
+ local kind type line s
+ declare -a running stopped lines args names
+
+ kind=$1; shift
+ type=$1; shift
+ [[ $kind = (stopped|all) ]] && args=($args -a)
+
+ lines=(${(f)"$(_call_program commands docker $docker_options ps --format 'table' --no-trunc $args)"})
+
+ # Parse header line to find columns
+ local i=1 j=1 k header=${lines[1]}
+ declare -A begin end
+ while (( j < ${#header} - 1 )); do
+ i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 ))
+ j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 ))
+ k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 ))
+ begin[${header[$i,$((j-1))]}]=$i
+ end[${header[$i,$((j-1))]}]=$k
+ done
+ end[${header[$i,$((j-1))]}]=-1 # Last column, should go to the end of the line
+ lines=(${lines[2,-1]})
+
+ # Container ID
+ if [[ $type = (ids|all) ]]; then
+ for line in $lines; do
+ s="${${line[${begin[CONTAINER ID]},${end[CONTAINER ID]}]%% ##}[0,12]}"
+ s="$s:${(l:15:: :::)${${line[${begin[CREATED]},${end[CREATED]}]/ ago/}%% ##}}"
+ s="$s, ${${${line[${begin[IMAGE]},${end[IMAGE]}]}/:/\\:}%% ##}"
+ if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = Exit* ]]; then
+ stopped=($stopped $s)
+ else
+ running=($running $s)
+ fi
+ done
+ fi
+
+ # Names: we only display the one without slash. All other names
+ # are generated and may clutter the completion. However, with
+ # Swarm, all names may be prefixed by the swarm node name.
+ if [[ $type = (names|all) ]]; then
+ for line in $lines; do
+ names=(${(ps:,:)${${line[${begin[NAMES]},${end[NAMES]}]}%% *}})
+ # First step: find a common prefix and strip it (swarm node case)
+ (( ${#${(u)names%%/*}} == 1 )) && names=${names#${names[1]%%/*}/}
+ # Second step: only keep the first name without a /
+ s=${${names:#*/*}[1]}
+ # If no name, well give up.
+ (( $#s != 0 )) || continue
+ s="$s:${(l:15:: :::)${${line[${begin[CREATED]},${end[CREATED]}]/ ago/}%% ##}}"
+ s="$s, ${${${line[${begin[IMAGE]},${end[IMAGE]}]}/:/\\:}%% ##}"
+ if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = Exit* ]]; then
+ stopped=($stopped $s)
+ else
+ running=($running $s)
+ fi
+ done
+ fi
+
+ [[ $kind = (running|all) ]] && _describe -t containers-running "running containers" running "$@" && ret=0
+ [[ $kind = (stopped|all) ]] && _describe -t containers-stopped "stopped containers" stopped "$@" && ret=0
+ return ret
+}
+
+__docker_stoppedcontainers() {
+ [[ $PREFIX = -* ]] && return 1
+ __docker_get_containers stopped all "$@"
+}
+
+__docker_runningcontainers() {
+ [[ $PREFIX = -* ]] && return 1
+ __docker_get_containers running all "$@"
+}
+
+__docker_containers() {
+ [[ $PREFIX = -* ]] && return 1
+ __docker_get_containers all all "$@"
+}
+
+__docker_containers_ids() {
+ [[ $PREFIX = -* ]] && return 1
+ __docker_get_containers all ids "$@"
+}
+
+__docker_containers_names() {
+ [[ $PREFIX = -* ]] && return 1
+ __docker_get_containers all names "$@"
+}
+
+__docker_plugins() {
+ [[ $PREFIX = -* ]] && return 1
+ integer ret=1
+ emulate -L zsh
+ setopt extendedglob
+ local -a plugins
+ plugins=(${(ps: :)${(M)${(f)${${"$(_call_program commands docker $docker_options info)"##*$'\n'Plugins:}%%$'\n'^ *}}:# $1: *}## $1: })
+ _describe -t plugins "$1 plugins" plugins && ret=0
+ return ret
+}
+
+__docker_images() {
+ [[ $PREFIX = -* ]] && return 1
+ integer ret=1
+ declare -a images
+ images=(${${${(f)"$(_call_program commands docker $docker_options images)"}[2,-1]}/(#b)([^ ]##) ##([^ ]##) ##([^ ]##)*/${match[3]}:${(r:15:: :::)match[2]} in ${match[1]}})
+ _describe -t docker-images "images" images && ret=0
+ __docker_repositories_with_tags && ret=0
+ return ret
+}
+
+__docker_repositories() {
+ [[ $PREFIX = -* ]] && return 1
+ declare -a repos
+ repos=(${${${(f)"$(_call_program commands docker $docker_options images)"}%% *}[2,-1]})
+ repos=(${repos#<none>})
+ _describe -t docker-repos "repositories" repos
+}
+
+__docker_repositories_with_tags() {
+ [[ $PREFIX = -* ]] && return 1
+ integer ret=1
+ declare -a repos onlyrepos matched
+ declare m
+ repos=(${${${${(f)"$(_call_program commands docker $docker_options images)"}[2,-1]}/ ##/:::}%% *})
+ repos=(${${repos%:::<none>}#<none>})
+ # Check if we have a prefix-match for the current prefix.
+ onlyrepos=(${repos%::*})
+ for m in $onlyrepos; do
+ [[ ${PREFIX##${~~m}} != ${PREFIX} ]] && {
+ # Yes, complete with tags
+ repos=(${${repos/:::/:}/:/\\:})
+ _describe -t docker-repos-with-tags "repositories with tags" repos && ret=0
+ return ret
+ }
+ done
+ # No, only complete repositories
+ onlyrepos=(${${repos%:::*}/:/\\:})
+ _describe -t docker-repos "repositories" onlyrepos -qS : && ret=0
+
+ return ret
+}
+
+__docker_search() {
+ [[ $PREFIX = -* ]] && return 1
+ local cache_policy
+ zstyle -s ":completion:${curcontext}:" cache-policy cache_policy
+ if [[ -z "$cache_policy" ]]; then
+ zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy
+ fi
+
+ local searchterm cachename
+ searchterm="${words[$CURRENT]%/}"
+ cachename=_docker-search-$searchterm
+
+ local expl
+ local -a result
+ if ( [[ ${(P)+cachename} -eq 0 ]] || _cache_invalid ${cachename#_} ) \
+ && ! _retrieve_cache ${cachename#_}; then
+ _message "Searching for ${searchterm}..."
+ result=(${${${(f)"$(_call_program commands docker $docker_options search $searchterm)"}%% *}[2,-1]})
+ _store_cache ${cachename#_} result
+ fi
+ _wanted dockersearch expl 'available images' compadd -a result
+}
+
+__docker_get_log_options() {
+ [[ $PREFIX = -* ]] && return 1
+
+ integer ret=1
+ local log_driver=${opt_args[--log-driver]:-"all"}
+ local -a awslogs_options fluentd_options gelf_options journald_options json_file_options syslog_options splunk_options
+
+ awslogs_options=("awslogs-region" "awslogs-group" "awslogs-stream")
+ fluentd_options=("env" "fluentd-address" "fluentd-async-connect" "fluentd-buffer-limit" "fluentd-retry-wait" "fluentd-max-retries" "labels" "tag")
+ gcplogs_options=("env" "gcp-log-cmd" "gcp-project" "labels")
+ gelf_options=("env" "gelf-address" "gelf-compression-level" "gelf-compression-type" "labels" "tag")
+ journald_options=("env" "labels" "tag")
+ json_file_options=("env" "labels" "max-file" "max-size")
+ syslog_options=("syslog-address" "syslog-format" "syslog-tls-ca-cert" "syslog-tls-cert" "syslog-tls-key" "syslog-tls-skip-verify" "syslog-facility" "tag")
+ splunk_options=("env" "labels" "splunk-caname" "splunk-capath" "splunk-index" "splunk-insecureskipverify" "splunk-source" "splunk-sourcetype" "splunk-token" "splunk-url" "tag")
+
+ [[ $log_driver = (awslogs|all) ]] && _describe -t awslogs-options "awslogs options" awslogs_options "$@" && ret=0
+ [[ $log_driver = (fluentd|all) ]] && _describe -t fluentd-options "fluentd options" fluentd_options "$@" && ret=0
+ [[ $log_driver = (gcplogs|all) ]] && _describe -t gcplogs-options "gcplogs options" gcplogs_options "$@" && ret=0
+ [[ $log_driver = (gelf|all) ]] && _describe -t gelf-options "gelf options" gelf_options "$@" && ret=0
+ [[ $log_driver = (journald|all) ]] && _describe -t journald-options "journald options" journald_options "$@" && ret=0
+ [[ $log_driver = (json-file|all) ]] && _describe -t json-file-options "json-file options" json_file_options "$@" && ret=0
+ [[ $log_driver = (syslog|all) ]] && _describe -t syslog-options "syslog options" syslog_options "$@" && ret=0
+ [[ $log_driver = (splunk|all) ]] && _describe -t splunk-options "splunk options" splunk_options "$@" && ret=0
+
+ return ret
+}
+
+__docker_log_options() {
+ [[ $PREFIX = -* ]] && return 1
+ integer ret=1
+
+ if compset -P '*='; then
+ case "${${words[-1]%=*}#*=}" in
+ (syslog-format)
+ syslog_format_opts=('rfc3164' 'rfc5424' 'rfc5424micro')
+ _describe -t syslog-format-opts "Syslog format Options" syslog_format_opts && ret=0
+ ;;
+ *)
+ _message 'value' && ret=0
+ ;;
+ esac
+ else
+ __docker_get_log_options -qS "=" && ret=0
+ fi
+
+ return ret
+}
+
+__docker_complete_detach_keys() {
+ [[ $PREFIX = -* ]] && return 1
+ integer ret=1
+
+ compset -P "*,"
+ keys=(${:-{a-z}})
+ ctrl_keys=(${:-ctrl-{{a-z},{@,'[','\\','^',']',_}}})
+ _describe -t detach_keys "[a-z]" keys -qS "," && ret=0
+ _describe -t detach_keys-ctrl "'ctrl-' + 'a-z @ [ \\\\ ] ^ _'" ctrl_keys -qS "," && ret=0
+}
+
+__docker_complete_pid() {
+ [[ $PREFIX = -* ]] && return 1
+ integer ret=1
+ local -a opts vopts
+
+ opts=('host')
+ vopts=('container')
+
+ if compset -P '*:'; then
+ case "${${words[-1]%:*}#*=}" in
+ (container)
+ __docker_runningcontainers && ret=0
+ ;;
+ *)
+ _message 'value' && ret=0
+ ;;
+ esac
+ else
+ _describe -t pid-value-opts "PID Options with value" vopts -qS ":" && ret=0
+ _describe -t pid-opts "PID Options" opts && ret=0
+ fi
+
+ return ret
+}
+
+__docker_complete_ps_filters() {
+ [[ $PREFIX = -* ]] && return 1
+ integer ret=1
+
+ if compset -P '*='; then
+ case "${${words[-1]%=*}#*=}" in
+ (ancestor)
+ __docker_images && ret=0
+ ;;
+ (before|since)
+ __docker_containers && ret=0
+ ;;
+ (id)
+ __docker_containers_ids && ret=0
+ ;;
+ (name)
+ __docker_containers_names && ret=0
+ ;;
+ (network)
+ __docker_networks && ret=0
+ ;;
+ (status)
+ status_opts=('created' 'dead' 'exited' 'paused' 'restarting' 'running')
+ _describe -t status-filter-opts "Status Filter Options" status_opts && ret=0
+ ;;
+ (volume)
+ __docker_volumes && ret=0
+ ;;
+ *)
+ _message 'value' && ret=0
+ ;;
+ esac
+ else
+ opts=('ancestor' 'before' 'exited' 'id' 'label' 'name' 'network' 'since' 'status' 'volume')
+ _describe -t filter-opts "Filter Options" opts -qS "=" && ret=0
+ fi
+
+ return ret
+}
+
+__docker_complete_search_filters() {
+ [[ $PREFIX = -* ]] && return 1
+ integer ret=1
+ declare -a boolean_opts opts
+
+ boolean_opts=('true' 'false')
+ opts=('is-automated' 'is-official' 'stars')
+
+ if compset -P '*='; then
+ case "${${words[-1]%=*}#*=}" in
+ (is-automated|is-official)
+ _describe -t boolean-filter-opts "filter options" boolean_opts && ret=0
+ ;;
+ *)
+ _message 'value' && ret=0
+ ;;
+ esac
+ else
+ _describe -t filter-opts "filter options" opts -qS "=" && ret=0
+ fi
+
+ return ret
+}
+
+__docker_complete_images_filters() {
+ [[ $PREFIX = -* ]] && return 1
+ integer ret=1
+ declare -a boolean_opts opts
+
+ boolean_opts=('true' 'false')
+ opts=('before' 'dangling' 'label' 'since')
+
+ if compset -P '*='; then
+ case "${${words[-1]%=*}#*=}" in
+ (before|since)
+ __docker_images && ret=0
+ ;;
+ (dangling)
+ _describe -t boolean-filter-opts "filter options" boolean_opts && ret=0
+ ;;
+ *)
+ _message 'value' && ret=0
+ ;;
+ esac
+ else
+ _describe -t filter-opts "Filter Options" opts -qS "=" && ret=0
+ fi
+
+ return ret
+}
+
+__docker_complete_events_filter() {
+ [[ $PREFIX = -* ]] && return 1
+ integer ret=1
+ declare -a opts
+
+ opts=('container' 'daemon' 'event' 'image' 'label' 'network' 'type' 'volume')
+
+ if compset -P '*='; then
+ case "${${words[-1]%=*}#*=}" in
+ (container)
+ __docker_containers && ret=0
+ ;;
+ (daemon)
+ emulate -L zsh
+ setopt extendedglob
+ local -a daemon_opts
+ daemon_opts=(
+ ${(f)${${"$(_call_program commands docker $docker_options info)"##*$'\n'Name: }%%$'\n'^ *}}
+ ${${(f)${${"$(_call_program commands docker $docker_options info)"##*$'\n'ID: }%%$'\n'^ *}}//:/\\:}
+ )
+ _describe -t daemon-filter-opts "daemon filter options" daemon_opts && ret=0
+ ;;
+ (event)
+ local -a event_opts
+ event_opts=('attach' 'commit' 'connect' 'copy' 'create' 'delete' 'destroy' 'detach' 'die' 'disconnect' 'exec_create' 'exec_detach'
+ 'exec_start' 'export' 'import' 'kill' 'mount' 'oom' 'pause' 'pull' 'push' 'reload' 'rename' 'resize' 'restart' 'start' 'stop' 'tag'
+ 'top' 'unmount' 'unpause' 'untag' 'update')
+ _describe -t event-filter-opts "event filter options" event_opts && ret=0
+ ;;
+ (image)
+ __docker_images && ret=0
+ ;;
+ (network)
+ __docker_networks && ret=0
+ ;;
+ (type)
+ local -a type_opts
+ type_opts=('container' 'daemon' 'image' 'network' 'volume')
+ _describe -t type-filter-opts "type filter options" type_opts && ret=0
+ ;;
+ (volume)
+ __docker_volumes && ret=0
+ ;;
+ *)
+ _message 'value' && ret=0
+ ;;
+ esac
+ else
+ _describe -t filter-opts "filter options" opts -qS "=" && ret=0
+ fi
+
+ return ret
+}
+
+__docker_network_complete_ls_filters() {
+ [[ $PREFIX = -* ]] && return 1
+ integer ret=1
+
+ if compset -P '*='; then
+ case "${${words[-1]%=*}#*=}" in
+ (driver)
+ __docker_plugins Network && ret=0
+ ;;
+ (id)
+ __docker_networks_ids && ret=0
+ ;;
+ (name)
+ __docker_networks_names && ret=0
+ ;;
+ (type)
+ type_opts=('builtin' 'custom')
+ _describe -t type-filter-opts "Type Filter Options" type_opts && ret=0
+ ;;
+ *)
+ _message 'value' && ret=0
+ ;;
+ esac
+ else
+ opts=('driver' 'id' 'label' 'name' 'type')
+ _describe -t filter-opts "Filter Options" opts -qS "=" && ret=0
+ fi
+
+ return ret
+}
+
+__docker_get_networks() {
+ [[ $PREFIX = -* ]] && return 1
+ integer ret=1
+ local line s
+ declare -a lines networks
+
+ type=$1; shift
+
+ lines=(${(f)"$(_call_program commands docker $docker_options network ls)"})
+
+ # Parse header line to find columns
+ local i=1 j=1 k header=${lines[1]}
+ declare -A begin end
+ while (( j < ${#header} - 1 )); do
+ i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 ))
+ j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 ))
+ k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 ))
+ begin[${header[$i,$((j-1))]}]=$i
+ end[${header[$i,$((j-1))]}]=$k
+ done
+ end[${header[$i,$((j-1))]}]=-1
+ lines=(${lines[2,-1]})
+
+ # Network ID
+ if [[ $type = (ids|all) ]]; then
+ for line in $lines; do
+ s="${line[${begin[NETWORK ID]},${end[NETWORK ID]}]%% ##}"
+ s="$s:${(l:7:: :::)${${line[${begin[DRIVER]},${end[DRIVER]}]}%% ##}}"
+ networks=($networks $s)
+ done
+ fi
+
+ # Names
+ if [[ $type = (names|all) ]]; then
+ for line in $lines; do
+ s="${line[${begin[NAME]},${end[NAME]}]%% ##}"
+ s="$s:${(l:7:: :::)${${line[${begin[DRIVER]},${end[DRIVER]}]}%% ##}}"
+ networks=($networks $s)
+ done
+ fi
+
+ _describe -t networks-list "networks" networks "$@" && ret=0
+ return ret
+}
+
+__docker_networks() {
+ [[ $PREFIX = -* ]] && return 1
+ __docker_get_networks all "$@"
+}
+
+__docker_networks_ids() {
+ [[ $PREFIX = -* ]] && return 1
+ __docker_get_networks ids "$@"
+}
+
+__docker_networks_names() {
+ [[ $PREFIX = -* ]] && return 1
+ __docker_get_networks names "$@"
+}
+
+__docker_network_commands() {
+ local -a _docker_network_subcommands
+ _docker_network_subcommands=(
+ "connect:Connects a container to a network"
+ "create:Creates a new network with a name specified by the user"
+ "disconnect:Disconnects a container from a network"
+ "inspect:Displays detailed information on a network"
+ "ls:Lists all the networks created by the user"
+ "rm:Deletes one or more networks"
+ )
+ _describe -t docker-network-commands "docker network command" _docker_network_subcommands
+}
+
+__docker_network_subcommand() {
+ local -a _command_args opts_help
+ local expl help="--help"
+ integer ret=1
+
+ opts_help=("(: -)--help[Print usage]")
+
+ case "$words[1]" in
+ (connect)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help)*--alias=[Add network-scoped alias for the container]:alias: " \
+ "($help)--ip=[Container IPv4 address]:IPv4: " \
+ "($help)--ip6=[Container IPv6 address]:IPv6: " \
+ "($help)*--link=[Add a link to another container]:link:->link" \
+ "($help -)1:network:__docker_networks" \
+ "($help -)2:containers:__docker_containers" && ret=0
+
+ case $state in
+ (link)
+ if compset -P "*:"; then
+ _wanted alias expl "Alias" compadd -E "" && ret=0
+ else
+ __docker_runningcontainers -qS ":" && ret=0
+ fi
+ ;;
+ esac
+ ;;
+ (create)
+ _arguments $(__docker_arguments) -A '-*' \
+ $opts_help \
+ "($help)*--aux-address[Auxiliary IPv4 or IPv6 addresses used by network driver]:key=IP: " \
+ "($help -d --driver)"{-d=,--driver=}"[Driver to manage the Network]:driver:(null host bridge overlay)" \
+ "($help)*--gateway=[IPv4 or IPv6 Gateway for the master subnet]:IP: " \
+ "($help)--internal[Restricts external access to the network]" \
+ "($help)*--ip-range=[Allocate container ip from a sub-range]:IP/mask: " \
+ "($help)--ipam-driver=[IP Address Management Driver]:driver:(default)" \
+ "($help)*--ipam-opt=[Custom IPAM plugin options]:opt=value: " \
+ "($help)--ipv6[Enable IPv6 networking]" \
+ "($help)*--label=[Set metadata on a network]:label=value: " \
+ "($help)*"{-o=,--opt=}"[Driver specific options]:opt=value: " \
+ "($help)*--subnet=[Subnet in CIDR format that represents a network segment]:IP/mask: " \
+ "($help -)1:Network Name: " && ret=0
+ ;;
+ (disconnect)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help -)1:network:__docker_networks" \
+ "($help -)2:containers:__docker_containers" && ret=0
+ ;;
+ (inspect)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \
+ "($help -)*:network:__docker_networks" && ret=0
+ ;;
+ (ls)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help)--no-trunc[Do not truncate the output]" \
+ "($help)*"{-f=,--filter=}"[Provide filter values]:filter:->filter-options" \
+ "($help -q --quiet)"{-q,--quiet}"[Only display numeric IDs]" && ret=0
+ case $state in
+ (filter-options)
+ __docker_network_complete_ls_filters && ret=0
+ ;;
+ esac
+ ;;
+ (rm)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help -)*:network:__docker_networks" && ret=0
+ ;;
+ (help)
+ _arguments $(__docker_arguments) ":subcommand:__docker_network_commands" && ret=0
+ ;;
+ esac
+
+ return ret
+}
+
+__docker_volume_complete_ls_filters() {
+ [[ $PREFIX = -* ]] && return 1
+ integer ret=1
+
+ if compset -P '*='; then
+ case "${${words[-1]%=*}#*=}" in
+ (dangling)
+ dangling_opts=('true' 'false')
+ _describe -t dangling-filter-opts "Dangling Filter Options" dangling_opts && ret=0
+ ;;
+ (driver)
+ __docker_plugins Volume && ret=0
+ ;;
+ (name)
+ __docker_volumes && ret=0
+ ;;
+ *)
+ _message 'value' && ret=0
+ ;;
+ esac
+ else
+ opts=('dangling' 'driver' 'name')
+ _describe -t filter-opts "Filter Options" opts -qS "=" && ret=0
+ fi
+
+ return ret
+}
+
+__docker_volumes() {
+ [[ $PREFIX = -* ]] && return 1
+ integer ret=1
+ declare -a lines volumes
+
+ lines=(${(f)"$(_call_program commands docker $docker_options volume ls)"})
+
+ # Parse header line to find columns
+ local i=1 j=1 k header=${lines[1]}
+ declare -A begin end
+ while (( j < ${#header} - 1 )); do
+ i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 ))
+ j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 ))
+ k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 ))
+ begin[${header[$i,$((j-1))]}]=$i
+ end[${header[$i,$((j-1))]}]=$k
+ done
+ end[${header[$i,$((j-1))]}]=-1
+ lines=(${lines[2,-1]})
+
+ # Names
+ local line s
+ for line in $lines; do
+ s="${line[${begin[VOLUME NAME]},${end[VOLUME NAME]}]%% ##}"
+ s="$s:${(l:7:: :::)${${line[${begin[DRIVER]},${end[DRIVER]}]}%% ##}}"
+ volumes=($volumes $s)
+ done
+
+ _describe -t volumes-list "volumes" volumes && ret=0
+ return ret
+}
+
+__docker_volume_commands() {
+ local -a _docker_volume_subcommands
+ _docker_volume_subcommands=(
+ "create:Create a volume"
+ "inspect:Return low-level information on a volume"
+ "ls:List volumes"
+ "rm:Remove a volume"
+ )
+ _describe -t docker-volume-commands "docker volume command" _docker_volume_subcommands
+}
+
+__docker_volume_subcommand() {
+ local -a _command_args opts_help
+ local expl help="--help"
+ integer ret=1
+
+ opts_help=("(: -)--help[Print usage]")
+
+ case "$words[1]" in
+ (create)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help -d --driver)"{-d=,--driver=}"[Volume driver name]:Driver name:(local)" \
+ "($help)*--label=[Set metadata for a volume]:label=value: " \
+ "($help)--name=[Volume name]" \
+ "($help)*"{-o=,--opt=}"[Driver specific options]:Driver option: " && ret=0
+ ;;
+ (inspect)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \
+ "($help -)1:volume:__docker_volumes" && ret=0
+ ;;
+ (ls)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help)*"{-f=,--filter=}"[Provide filter values]:filter:->filter-options" \
+ "($help -q --quiet)"{-q,--quiet}"[Only display volume names]" && ret=0
+ case $state in
+ (filter-options)
+ __docker_volume_complete_ls_filters && ret=0
+ ;;
+ esac
+ ;;
+ (rm)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help -):volume:__docker_volumes" && ret=0
+ ;;
+ (help)
+ _arguments $(__docker_arguments) ":subcommand:__docker_volume_commands" && ret=0
+ ;;
+ esac
+
+ return ret
+}
+
+__docker_caching_policy() {
+ oldp=( "$1"(Nmh+1) ) # 1 hour
+ (( $#oldp ))
+}
+
+__docker_commands() {
+ local cache_policy
+
+ zstyle -s ":completion:${curcontext}:" cache-policy cache_policy
+ if [[ -z "$cache_policy" ]]; then
+ zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy
+ fi
+
+ if ( [[ ${+_docker_subcommands} -eq 0 ]] || _cache_invalid docker_subcommands) \
+ && ! _retrieve_cache docker_subcommands;
+ then
+ local -a lines
+ lines=(${(f)"$(_call_program commands docker 2>&1)"})
+ _docker_subcommands=(${${${lines[$((${lines[(i)Commands:]} + 1)),${lines[(I) *]}]}## #}/ ##/:})
+ _docker_subcommands=($_docker_subcommands 'daemon:Enable daemon mode' 'help:Show help for a command')
+ (( $#_docker_subcommands > 2 )) && _store_cache docker_subcommands _docker_subcommands
+ fi
+ _describe -t docker-commands "docker command" _docker_subcommands
+}
+
+__docker_subcommand() {
+ local -a _command_args opts_help opts_build_create_run opts_build_create_run_update opts_create_run opts_create_run_update
+ local expl help="--help"
+ integer ret=1
+
+ opts_help=("(: -)--help[Print usage]")
+ opts_build_create_run=(
+ "($help)--cgroup-parent=[Parent cgroup for the container]:cgroup: "
+ "($help)--isolation=[Container isolation technology]:isolation:(default hyperv process)"
+ "($help)--disable-content-trust[Skip image verification]"
+ "($help)*--shm-size=[Size of '/dev/shm' (format is '<number><unit>')]:shm size: "
+ "($help)*--ulimit=[ulimit options]:ulimit: "
+ "($help)--userns=[Container user namespace]:user namespace:(host)"
+ )
+ opts_build_create_run_update=(
+ "($help)--cpu-shares=[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)"
+ "($help)--cpu-period=[Limit the CPU CFS (Completely Fair Scheduler) period]:CPU period: "
+ "($help)--cpu-quota=[Limit the CPU CFS (Completely Fair Scheduler) quota]:CPU quota: "
+ "($help)--cpuset-cpus=[CPUs in which to allow execution]:CPUs: "
+ "($help)--cpuset-mems=[MEMs in which to allow execution]:MEMs: "
+ "($help -m --memory)"{-m=,--memory=}"[Memory limit]:Memory limit: "
+ "($help)--memory-swap=[Total memory limit with swap]:Memory limit: "
+ )
+ opts_create_run=(
+ "($help -a --attach)"{-a=,--attach=}"[Attach to stdin, stdout or stderr]:device:(STDIN STDOUT STDERR)"
+ "($help)*--add-host=[Add a custom host-to-IP mapping]:host\:ip mapping: "
+ "($help)*--blkio-weight-device=[Block IO (relative device weight)]:device:Block IO weight: "
+ "($help)*--cap-add=[Add Linux capabilities]:capability: "
+ "($help)*--cap-drop=[Drop Linux capabilities]:capability: "
+ "($help)--cidfile=[Write the container ID to the file]:CID file:_files"
+ "($help)*--device=[Add a host device to the container]:device:_files"
+ "($help)*--device-read-bps=[Limit the read rate (bytes per second) from a device]:device:IO rate: "
+ "($help)*--device-read-iops=[Limit the read rate (IO per second) from a device]:device:IO rate: "
+ "($help)*--device-write-bps=[Limit the write rate (bytes per second) to a device]:device:IO rate: "
+ "($help)*--device-write-iops=[Limit the write rate (IO per second) to a device]:device:IO rate: "
+ "($help)*--dns=[Custom DNS servers]:DNS server: "
+ "($help)*--dns-opt=[Custom DNS options]:DNS option: "
+ "($help)*--dns-search=[Custom DNS search domains]:DNS domains: "
+ "($help)*"{-e=,--env=}"[Environment variables]:environment variable: "
+ "($help)--entrypoint=[Overwrite the default entrypoint of the image]:entry point: "
+ "($help)*--env-file=[Read environment variables from a file]:environment file:_files"
+ "($help)*--expose=[Expose a port from the container without publishing it]: "
+ "($help)*--group-add=[Add additional groups to run as]:group:_groups"
+ "($help -h --hostname)"{-h=,--hostname=}"[Container host name]:hostname:_hosts"
+ "($help -i --interactive)"{-i,--interactive}"[Keep stdin open even if not attached]"
+ "($help)--ip=[Container IPv4 address]:IPv4: "
+ "($help)--ip6=[Container IPv6 address]:IPv6: "
+ "($help)--ipc=[IPC namespace to use]:IPC namespace: "
+ "($help)*--link=[Add link to another container]:link:->link"
+ "($help)*"{-l=,--label=}"[Container metadata]:label: "
+ "($help)--log-driver=[Default driver for container logs]:Logging driver:(awslogs etwlogs fluentd gcplogs gelf journald json-file none splunk syslog)"
+ "($help)*--log-opt=[Log driver specific options]:log driver options:__docker_log_options"
+ "($help)--mac-address=[Container MAC address]:MAC address: "
+ "($help)--name=[Container name]:name: "
+ "($help)--net=[Connect a container to a network]:network mode:(bridge none container host)"
+ "($help)*--net-alias=[Add network-scoped alias for the container]:alias: "
+ "($help)--oom-kill-disable[Disable OOM Killer]"
+ "($help)--oom-score-adj[Tune the host's OOM preferences for containers (accepts -1000 to 1000)]"
+ "($help)--pids-limit[Tune container pids limit (set -1 for unlimited)]"
+ "($help -P --publish-all)"{-P,--publish-all}"[Publish all exposed ports]"
+ "($help)*"{-p=,--publish=}"[Expose a container's port to the host]:port:_ports"
+ "($help)--pid=[PID namespace to use]:PID namespace:__docker_complete_pid"
+ "($help)--privileged[Give extended privileges to this container]"
+ "($help)--read-only[Mount the container's root filesystem as read only]"
+ "($help)*--security-opt=[Security options]:security option: "
+ "($help)*--sysctl=-[sysctl options]:sysctl: "
+ "($help -t --tty)"{-t,--tty}"[Allocate a pseudo-tty]"
+ "($help -u --user)"{-u=,--user=}"[Username or UID]:user:_users"
+ "($help)--tmpfs[mount tmpfs]"
+ "($help)*-v[Bind mount a volume]:volume: "
+ "($help)--volume-driver=[Optional volume driver for the container]:volume driver:(local)"
+ "($help)*--volumes-from=[Mount volumes from the specified container]:volume: "
+ "($help -w --workdir)"{-w=,--workdir=}"[Working directory inside the container]:directory:_directories"
+ )
+ opts_create_run_update=(
+ "($help)--blkio-weight=[Block IO (relative weight), between 10 and 1000]:Block IO weight:(10 100 500 1000)"
+ "($help)--kernel-memory=[Kernel memory limit in bytes]:Memory limit: "
+ "($help)--memory-reservation=[Memory soft limit]:Memory limit: "
+ "($help)--restart=[Restart policy]:restart policy:(no on-failure always unless-stopped)"
+ )
+ opts_attach_exec_run_start=(
+ "($help)--detach-keys=[Escape key sequence used to detach a container]:sequence:__docker_complete_detach_keys"
+ )
+
+ case "$words[1]" in
+ (attach)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ $opts_attach_exec_run_start \
+ "($help)--no-stdin[Do not attach stdin]" \
+ "($help)--sig-proxy[Proxy all received signals to the process (non-TTY mode only)]" \
+ "($help -):containers:__docker_runningcontainers" && ret=0
+ ;;
+ (build)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ $opts_build_create_run \
+ $opts_build_create_run_update \
+ "($help)*--build-arg[Build-time variables]:<varname>=<value>: " \
+ "($help -f --file)"{-f=,--file=}"[Name of the Dockerfile]:Dockerfile:_files" \
+ "($help)--force-rm[Always remove intermediate containers]" \
+ "($help)*--label=[Set metadata for an image]:label=value: " \
+ "($help)--no-cache[Do not use cache when building the image]" \
+ "($help)--pull[Attempt to pull a newer version of the image]" \
+ "($help -q --quiet)"{-q,--quiet}"[Suppress verbose build output]" \
+ "($help)--rm[Remove intermediate containers after a successful build]" \
+ "($help -t --tag)*"{-t=,--tag=}"[Repository, name and tag for the image]: :__docker_repositories_with_tags" \
+ "($help -):path or URL:_directories" && ret=0
+ ;;
+ (commit)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help -a --author)"{-a=,--author=}"[Author]:author: " \
+ "($help)*"{-c=,--change=}"[Apply Dockerfile instruction to the created image]:Dockerfile:_files" \
+ "($help -m --message)"{-m=,--message=}"[Commit message]:message: " \
+ "($help -p --pause)"{-p,--pause}"[Pause container during commit]" \
+ "($help -):container:__docker_containers" \
+ "($help -): :__docker_repositories_with_tags" && ret=0
+ ;;
+ (cp)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help -L --follow-link)"{-L,--follow-link}"[Always follow symbol link]" \
+ "($help -)1:container:->container" \
+ "($help -)2:hostpath:_files" && ret=0
+ case $state in
+ (container)
+ if compset -P "*:"; then
+ _files && ret=0
+ else
+ __docker_containers -qS ":" && ret=0
+ fi
+ ;;
+ esac
+ ;;
+ (create)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ $opts_build_create_run \
+ $opts_build_create_run_update \
+ $opts_create_run \
+ $opts_create_run_update \
+ "($help -): :__docker_images" \
+ "($help -):command: _command_names -e" \
+ "($help -)*::arguments: _normal" && ret=0
+
+ case $state in
+ (link)
+ if compset -P "*:"; then
+ _wanted alias expl "Alias" compadd -E "" && ret=0
+ else
+ __docker_runningcontainers -qS ":" && ret=0
+ fi
+ ;;
+ esac
+
+ ;;
+ (daemon)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help)--api-cors-header=[CORS headers in the remote API]:CORS headers: " \
+ "($help)*--authorization-plugin=[Authorization plugins to load]" \
+ "($help -b --bridge)"{-b=,--bridge=}"[Attach containers to a network bridge]:bridge:_net_interfaces" \
+ "($help)--bip=[Network bridge IP]:IP address: " \
+ "($help)--cgroup-parent=[Parent cgroup for all containers]:cgroup: " \
+ "($help)--config-file=[Path to daemon configuration file]:Config File:_files" \
+ "($help)--containerd=[Path to containerd socket]:socket:_files -g \"*.sock\"" \
+ "($help -D --debug)"{-D,--debug}"[Enable debug mode]" \
+ "($help)--default-gateway[Container default gateway IPv4 address]:IPv4 address: " \
+ "($help)--default-gateway-v6[Container default gateway IPv6 address]:IPv6 address: " \
+ "($help)--cluster-store=[URL of the distributed storage backend]:Cluster Store:->cluster-store" \
+ "($help)--cluster-advertise=[Address of the daemon instance to advertise]:Instance to advertise (host\:port): " \
+ "($help)*--cluster-store-opt=[Cluster options]:Cluster options:->cluster-store-options" \
+ "($help)*--dns=[DNS server to use]:DNS: " \
+ "($help)*--dns-search=[DNS search domains to use]:DNS search: " \
+ "($help)*--dns-opt=[DNS options to use]:DNS option: " \
+ "($help)*--default-ulimit=[Default ulimit settings for containers]:ulimit: " \
+ "($help)--disable-legacy-registry[Do not contact legacy registries]" \
+ "($help)*--exec-opt=[Runtime execution options]:runtime execution options: " \
+ "($help)--exec-root=[Root directory for execution state files]:path:_directories" \
+ "($help)--fixed-cidr=[IPv4 subnet for fixed IPs]:IPv4 subnet: " \
+ "($help)--fixed-cidr-v6=[IPv6 subnet for fixed IPs]:IPv6 subnet: " \
+ "($help -G --group)"{-G=,--group=}"[Group for the unix socket]:group:_groups" \
+ "($help -g --graph)"{-g=,--graph=}"[Root of the Docker runtime]:path:_directories" \
+ "($help -H --host)"{-H=,--host=}"[tcp://host:port to bind/connect to]:host: " \
+ "($help)--icc[Enable inter-container communication]" \
+ "($help)*--insecure-registry=[Enable insecure registry communication]:registry: " \
+ "($help)--ip=[Default IP when binding container ports]" \
+ "($help)--ip-forward[Enable net.ipv4.ip_forward]" \
+ "($help)--ip-masq[Enable IP masquerading]" \
+ "($help)--iptables[Enable addition of iptables rules]" \
+ "($help)--ipv6[Enable IPv6 networking]" \
+ "($help -l --log-level)"{-l=,--log-level=}"[Logging level]:level:(debug info warn error fatal)" \
+ "($help)*--label=[Key=value labels]:label: " \
+ "($help)--log-driver=[Default driver for container logs]:Logging driver:(awslogs etwlogs fluentd gcplogs gelf journald json-file none splunk syslog)" \
+ "($help)*--log-opt=[Log driver specific options]:log driver options:__docker_log_options" \
+ "($help)--max-concurrent-downloads[Set the max concurrent downloads for each pull]" \
+ "($help)--max-concurrent-uploads[Set the max concurrent uploads for each push]" \
+ "($help)--mtu=[Network MTU]:mtu:(0 576 1420 1500 9000)" \
+ "($help -p --pidfile)"{-p=,--pidfile=}"[Path to use for daemon PID file]:PID file:_files" \
+ "($help)--raw-logs[Full timestamps without ANSI coloring]" \
+ "($help)*--registry-mirror=[Preferred Docker registry mirror]:registry mirror: " \
+ "($help -s --storage-driver)"{-s=,--storage-driver=}"[Storage driver to use]:driver:(aufs devicemapper btrfs zfs overlay)" \
+ "($help)--selinux-enabled[Enable selinux support]" \
+ "($help)*--storage-opt=[Storage driver options]:storage driver options: " \
+ "($help)--tls[Use TLS]" \
+ "($help)--tlscacert=[Trust certs signed only by this CA]:PEM file:_files -g \"*.(pem|crt)\"" \
+ "($help)--tlscert=[Path to TLS certificate file]:PEM file:_files -g \"*.(pem|crt)\"" \
+ "($help)--tlskey=[Path to TLS key file]:Key file:_files -g \"*.(pem|key)\"" \
+ "($help)--tlsverify[Use TLS and verify the remote]" \
+ "($help)--userns-remap=[User/Group setting for user namespaces]:user\:group:->users-groups" \
+ "($help)--userland-proxy[Use userland proxy for loopback traffic]" && ret=0
+
+ case $state in
+ (cluster-store)
+ if compset -P '*://'; then
+ _message 'host:port' && ret=0
+ else
+ store=('consul' 'etcd' 'zk')
+ _describe -t cluster-store "Cluster Store" store -qS "://" && ret=0
+ fi
+ ;;
+ (cluster-store-options)
+ if compset -P '*='; then
+ _files && ret=0
+ else
+ opts=('discovery.heartbeat' 'discovery.ttl' 'kv.cacertfile' 'kv.certfile' 'kv.keyfile' 'kv.path')
+ _describe -t cluster-store-opts "Cluster Store Options" opts -qS "=" && ret=0
+ fi
+ ;;
+ (users-groups)
+ if compset -P '*:'; then
+ _groups && ret=0
+ else
+ _describe -t userns-default "default Docker user management" '(default)' && ret=0
+ _users && ret=0
+ fi
+ ;;
+ esac
+ ;;
+ (diff)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help -)*:containers:__docker_containers" && ret=0
+ ;;
+ (events)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help)*"{-f=,--filter=}"[Filter values]:filter:__docker_complete_events_filter" \
+ "($help)--since=[Events created since this timestamp]:timestamp: " \
+ "($help)--until=[Events created until this timestamp]:timestamp: " && ret=0
+ ;;
+ (exec)
+ local state
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ $opts_attach_exec_run_start \
+ "($help -d --detach)"{-d,--detach}"[Detached mode: leave the container running in the background]" \
+ "($help -i --interactive)"{-i,--interactive}"[Keep stdin open even if not attached]" \
+ "($help)--privileged[Give extended Linux capabilities to the command]" \
+ "($help -t --tty)"{-t,--tty}"[Allocate a pseudo-tty]" \
+ "($help -u --user)"{-u=,--user=}"[Username or UID]:user:_users" \
+ "($help -):containers:__docker_runningcontainers" \
+ "($help -)*::command:->anycommand" && ret=0
+
+ case $state in
+ (anycommand)
+ shift 1 words
+ (( CURRENT-- ))
+ _normal && ret=0
+ ;;
+ esac
+ ;;
+ (export)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help -o --output)"{-o=,--output=}"[Write to a file, instead of stdout]:output file:_files" \
+ "($help -)*:containers:__docker_containers" && ret=0
+ ;;
+ (history)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help -H --human)"{-H,--human}"[Print sizes and dates in human readable format]" \
+ "($help)--no-trunc[Do not truncate output]" \
+ "($help -q --quiet)"{-q,--quiet}"[Only show numeric IDs]" \
+ "($help -)*: :__docker_images" && ret=0
+ ;;
+ (images)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help -a --all)"{-a,--all}"[Show all images]" \
+ "($help)--digests[Show digests]" \
+ "($help)*"{-f=,--filter=}"[Filter values]:filter:->filter-options" \
+ "($help)--format[Pretty-print containers using a Go template]:format: " \
+ "($help)--no-trunc[Do not truncate output]" \
+ "($help -q --quiet)"{-q,--quiet}"[Only show numeric IDs]" \
+ "($help -): :__docker_repositories" && ret=0
+
+ case $state in
+ (filter-options)
+ __docker_complete_images_filters && ret=0
+ ;;
+ esac
+ ;;
+ (import)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help)*"{-c=,--change=}"[Apply Dockerfile instruction to the created image]:Dockerfile:_files" \
+ "($help -m --message)"{-m=,--message=}"[Commit message for imported image]:message: " \
+ "($help -):URL:(- http:// file://)" \
+ "($help -): :__docker_repositories_with_tags" && ret=0
+ ;;
+ (info|version)
+ _arguments $(__docker_arguments) \
+ $opts_help && ret=0
+ ;;
+ (inspect)
+ local state
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \
+ "($help -s --size)"{-s,--size}"[Display total file sizes if the type is container]" \
+ "($help)--type=[Return JSON for specified type]:type:(image container)" \
+ "($help -)*: :->values" && ret=0
+
+ case $state in
+ (values)
+ if [[ ${words[(r)--type=container]} == --type=container ]]; then
+ __docker_containers && ret=0
+ elif [[ ${words[(r)--type=image]} == --type=image ]]; then
+ __docker_images && ret=0
+ else
+ __docker_images && __docker_containers && ret=0
+ fi
+ ;;
+ esac
+ ;;
+ (kill)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help -s --signal)"{-s=,--signal=}"[Signal to send]:signal:_signals" \
+ "($help -)*:containers:__docker_runningcontainers" && ret=0
+ ;;
+ (load)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help -i --input)"{-i=,--input=}"[Read from tar archive file]:archive file:_files -g \"*.((tar|TAR)(.gz|.GZ|.Z|.bz2|.lzma|.xz|)|(tbz|tgz|txz))(-.)\"" \
+ "($help -q --quiet)"{-q,--quiet}"[Suppress the load output]" && ret=0
+ ;;
+ (login)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help -p --password)"{-p=,--password=}"[Password]:password: " \
+ "($help -u --user)"{-u=,--user=}"[Username]:username: " \
+ "($help -)1:server: " && ret=0
+ ;;
+ (logout)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help -)1:server: " && ret=0
+ ;;
+ (logs)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help)--details[Show extra details provided to logs]" \
+ "($help -f --follow)"{-f,--follow}"[Follow log output]" \
+ "($help -s --since)"{-s=,--since=}"[Show logs since this timestamp]:timestamp: " \
+ "($help -t --timestamps)"{-t,--timestamps}"[Show timestamps]" \
+ "($help)--tail=[Output the last K lines]:lines:(1 10 20 50 all)" \
+ "($help -)*:containers:__docker_containers" && ret=0
+ ;;
+ (network)
+ local curcontext="$curcontext" state
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help -): :->command" \
+ "($help -)*:: :->option-or-argument" && ret=0
+
+ case $state in
+ (command)
+ __docker_network_commands && ret=0
+ ;;
+ (option-or-argument)
+ curcontext=${curcontext%:*:*}:docker-${words[-1]}:
+ __docker_network_subcommand && ret=0
+ ;;
+ esac
+ ;;
+ (pause|unpause)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help -)*:containers:__docker_runningcontainers" && ret=0
+ ;;
+ (port)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help -)1:containers:__docker_runningcontainers" \
+ "($help -)2:port:_ports" && ret=0
+ ;;
+ (ps)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help -a --all)"{-a,--all}"[Show all containers]" \
+ "($help)--before=[Show only container created before...]:containers:__docker_containers" \
+ "($help)*"{-f=,--filter=}"[Filter values]:filter:__docker_complete_ps_filters" \
+ "($help)--format[Pretty-print containers using a Go template]:format: " \
+ "($help -l --latest)"{-l,--latest}"[Show only the latest created container]" \
+ "($help)-n[Show n last created containers, include non-running one]:n:(1 5 10 25 50)" \
+ "($help)--no-trunc[Do not truncate output]" \
+ "($help -q --quiet)"{-q,--quiet}"[Only show numeric IDs]" \
+ "($help -s --size)"{-s,--size}"[Display total file sizes]" \
+ "($help)--since=[Show only containers created since...]:containers:__docker_containers" && ret=0
+ ;;
+ (pull)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help -a --all-tags)"{-a,--all-tags}"[Download all tagged images]" \
+ "($help)--disable-content-trust[Skip image verification]" \
+ "($help -):name:__docker_search" && ret=0
+ ;;
+ (push)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help)--disable-content-trust[Skip image signing]" \
+ "($help -): :__docker_images" && ret=0
+ ;;
+ (rename)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help -):old name:__docker_containers" \
+ "($help -):new name: " && ret=0
+ ;;
+ (restart|stop)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help -t --time)"{-t=,--time=}"[Number of seconds to try to stop for before killing the container]:seconds to before killing:(1 5 10 30 60)" \
+ "($help -)*:containers:__docker_runningcontainers" && ret=0
+ ;;
+ (rm)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help -f --force)"{-f,--force}"[Force removal]" \
+ "($help -l --link)"{-l,--link}"[Remove the specified link and not the underlying container]" \
+ "($help -v --volumes)"{-v,--volumes}"[Remove the volumes associated to the container]" \
+ "($help -)*:containers:->values" && ret=0
+ case $state in
+ (values)
+ if [[ ${words[(r)-f]} == -f || ${words[(r)--force]} == --force ]]; then
+ __docker_containers && ret=0
+ else
+ __docker_stoppedcontainers && ret=0
+ fi
+ ;;
+ esac
+ ;;
+ (rmi)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help -f --force)"{-f,--force}"[Force removal]" \
+ "($help)--no-prune[Do not delete untagged parents]" \
+ "($help -)*: :__docker_images" && ret=0
+ ;;
+ (run)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ $opts_build_create_run \
+ $opts_build_create_run_update \
+ $opts_create_run \
+ $opts_create_run_update \
+ $opts_attach_exec_run_start \
+ "($help -d --detach)"{-d,--detach}"[Detached mode: leave the container running in the background]" \
+ "($help)--health-cmd=[Command to run to check health]:command: " \
+ "($help)--health-interval=[Time between running the check]:time: " \
+ "($help)--health-retries=[Consecutive failures needed to report unhealthy]:retries:(1 2 3 4 5)" \
+ "($help)--health-timeout=[Maximum time to allow one check to run]:time: " \
+ "($help)--no-healthcheck[Disable any container-specified HEALTHCHECK]" \
+ "($help)--rm[Remove intermediate containers when it exits]" \
+ "($help)--sig-proxy[Proxy all received signals to the process (non-TTY mode only)]" \
+ "($help)--stop-signal=[Signal to kill a container]:signal:_signals" \
+ "($help -): :__docker_images" \
+ "($help -):command: _command_names -e" \
+ "($help -)*::arguments: _normal" && ret=0
+
+ case $state in
+ (link)
+ if compset -P "*:"; then
+ _wanted alias expl "Alias" compadd -E "" && ret=0
+ else
+ __docker_runningcontainers -qS ":" && ret=0
+ fi
+ ;;
+ esac
+
+ ;;
+ (save)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help -o --output)"{-o=,--output=}"[Write to file]:file:_files" \
+ "($help -)*: :__docker_images" && ret=0
+ ;;
+ (search)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help)*"{-f=,--filter=}"[Filter values]:filter:->filter-options" \
+ "($help)--limit=[Maximum returned search results]:limit:(1 5 10 25 50)" \
+ "($help)--no-trunc[Do not truncate output]" \
+ "($help -):term: " && ret=0
+
+ case $state in
+ (filter-options)
+ __docker_complete_search_filters && ret=0
+ ;;
+ esac
+ ;;
+ (start)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ $opts_attach_exec_run_start \
+ "($help -a --attach)"{-a,--attach}"[Attach container's stdout/stderr and forward all signals]" \
+ "($help -i --interactive)"{-i,--interactive}"[Attach container's stding]" \
+ "($help -)*:containers:__docker_stoppedcontainers" && ret=0
+ ;;
+ (stats)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help -a --all)"{-a,--all}"[Show all containers (default shows just running)]" \
+ "($help)--no-stream[Disable streaming stats and only pull the first result]" \
+ "($help -)*:containers:__docker_runningcontainers" && ret=0
+ ;;
+ (tag)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help -):source:__docker_images"\
+ "($help -):destination:__docker_repositories_with_tags" && ret=0
+ ;;
+ (top)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help -)1:containers:__docker_runningcontainers" \
+ "($help -)*:: :->ps-arguments" && ret=0
+ case $state in
+ (ps-arguments)
+ _ps && ret=0
+ ;;
+ esac
+
+ ;;
+ (update)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ $opts_create_run_update \
+ $opts_build_create_run_update \
+ "($help -)*: :->values" && ret=0
+
+ case $state in
+ (values)
+ if [[ ${words[(r)--kernel-memory*]} = (--kernel-memory*) ]]; then
+ __docker_stoppedcontainers && ret=0
+ else
+ __docker_containers && ret=0
+ fi
+ ;;
+ esac
+ ;;
+ (volume)
+ local curcontext="$curcontext" state
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help -): :->command" \
+ "($help -)*:: :->option-or-argument" && ret=0
+
+ case $state in
+ (command)
+ __docker_volume_commands && ret=0
+ ;;
+ (option-or-argument)
+ curcontext=${curcontext%:*:*}:docker-${words[-1]}:
+ __docker_volume_subcommand && ret=0
+ ;;
+ esac
+ ;;
+ (wait)
+ _arguments $(__docker_arguments) \
+ $opts_help \
+ "($help -)*:containers:__docker_runningcontainers" && ret=0
+ ;;
+ (help)
+ _arguments $(__docker_arguments) ":subcommand:__docker_commands" && ret=0
+ ;;
+ esac
+
+ return ret
+}
+
_docker() {
    # Support for subservices, which allows for `compdef _docker docker-shell=_docker_containers`.
    # Based on /usr/share/zsh/functions/Completion/Unix/_git without support for `ret`.
    if [[ $service != docker ]]; then
        _call_function - _$service
        return
    fi

    local curcontext="$curcontext" state line help="-h --help"
    integer ret=1
    typeset -A opt_args

    # Top-level docker flags. The last two specs hand control to the $state
    # machine below for subcommand completion.
    # FIX: the --tls* specs used raw inner double quotes, which terminated the
    # spec string early; escape them (matching the daemon subcommand's specs).
    _arguments $(__docker_arguments) -C \
        "(: -)"{-h,--help}"[Print usage]" \
        "($help)--config[Location of client config files]:path:_directories" \
        "($help -D --debug)"{-D,--debug}"[Enable debug mode]" \
        "($help -H --host)"{-H=,--host=}"[tcp://host:port to bind/connect to]:host: " \
        "($help -l --log-level)"{-l=,--log-level=}"[Logging level]:level:(debug info warn error fatal)" \
        "($help)--tls[Use TLS]" \
        "($help)--tlscacert=[Trust certs signed only by this CA]:PEM file:_files -g \"*.(pem|crt)\"" \
        "($help)--tlscert=[Path to TLS certificate file]:PEM file:_files -g \"*.(pem|crt)\"" \
        "($help)--tlskey=[Path to TLS key file]:Key file:_files -g \"*.(pem|key)\"" \
        "($help)--tlsverify[Use TLS and verify the remote]" \
        "($help)--userland-proxy[Use userland proxy for loopback traffic]" \
        "($help -v --version)"{-v,--version}"[Print version information and quit]" \
        "($help -): :->command" \
        "($help -)*:: :->option-or-argument" && ret=0

    local host=${opt_args[-H]}${opt_args[--host]}
    local config=${opt_args[--config]}
    local docker_options="${host:+--host $host} ${config:+--config $config}"

    case $state in
        (command)
            __docker_commands && ret=0
            ;;
        (option-or-argument)
            curcontext=${curcontext%:*:*}:docker-$words[1]:
            __docker_subcommand && ret=0
            ;;
    esac

    return ret
}
+
+_docker "$@"
+
+# Local Variables:
+# mode: Shell-Script
+# sh-indentation: 4
+# indent-tabs-mode: nil
+# sh-basic-offset: 4
+# End:
+# vim: ft=zsh sw=4 ts=4 et
diff --git a/autocompletes/_docker_containers b/autocompletes/_docker_containers
new file mode 100644
index 0000000..d335b69
--- /dev/null
+++ b/autocompletes/_docker_containers
@@ -0,0 +1,5 @@
#compdef docker_containers
# Complete names of docker containers.
# NOTE(review): `docker ps` without -a only lists running containers —
# confirm stopped containers should be excluded from completion.

local -a container_names
container_names=$(docker ps --format "{{.Names}}")
_alternative "args:docker containers:($container_names)"
diff --git a/history b/history
new file mode 100644
index 0000000..87a1146
--- /dev/null
+++ b/history
@@ -0,0 +1,44 @@
+: 1636559174:0;ls
+: 1636559206:0;stow
+: 1636559250:0;ls
+: 1636559252:0;z dot
+: 1636559252:0;ls
+: 1636559254:0;z stow
+: 1636559256:0;ls
+: 1636559260:0;./install
+: 1636559267:0;nvim install
+: 1636559290:0;gws
+: 1636560986:0;cat install
+: 1636574791:0;cd .dotfiles
+: 1636574795:0;ls
+: 1636574908:0;cd
+: 1636574910:0;cd .config
+: 1636574914:0;ls -lah
+: 1636574991:0;cd
+: 1636574992:0;cd .dotfiles
+: 1636574994:0;cd stow
+: 1636574996:0;./install
+: 1636575002:0;rm ~/.zshrc
+: 1636575005:0;touch ~/.zshrc
+: 1636575008:0;./install
+: 1636575014:0;rm ~/.zshrc
+: 1636575015:0;./install
+: 1636575106:0;cd ..
+: 1636575106:0;ls
+: 1636575109:0;cd scripts
+: 1636575112:0;cat fzf_example
+: 1636575122:0;./fzf_example
+: 1636575305:0;ls -lah
+: 1636575720:0;echo 1
+: 1636575722:0;echo 2
+: 1636575812:0;cd
+: 1636575822:0;z dot
+: 1636575829:0;z nvim
+: 1636575835:0;cd .config/nvim
+: 1636575840:0;cd ~/.config/nvim
+: 1636575841:0;cd
+: 1636575843:0;z nvim
+: 1636575852:0;cd
+: 1636576327:0;docker ps- a
+: 1636576393:0;echo /here
+: 1636578474:0;sudo su
diff --git a/includes b/includes
new file mode 100644
index 0000000..b080e51
--- /dev/null
+++ b/includes
@@ -0,0 +1,691 @@
+# vi:syntax=bash
+
+# Heavily inspired from https://github.com/sorin-ionescu/prezto/blob/master/modules/git/alias.zsh
+# Git aliases
+# Log
+zstyle -s ':prezto:module:git:log:medium' format '_git_log_medium_format' \
+ || _git_log_medium_format='%C(bold)Commit:%C(reset) %C(green)%H%C(red)%d%n%C(bold)Author:%C(reset) %C(cyan)%an <%ae>%n%C(bold)Date:%C(reset) %C(blue)%ai (%ar)%C(reset)%n%w(80,1,2)%+B'
+zstyle -s ':prezto:module:git:log:oneline' format '_git_log_oneline_format' \
+ || _git_log_oneline_format='%C(green)%h%C(reset) %><(55,trunc)%s%C(red)%d%C(reset) %C(blue)[%an]%C(reset) %C(yellow)%ad%C(reset)%n'
+zstyle -s ':prezto:module:git:log:brief' format '_git_log_brief_format' \
+ || _git_log_brief_format='%C(green)%h%C(reset) %s%n%C(blue)(%ar by %an)%C(red)%d%C(reset)%n'
+
+# Status
+zstyle -s ':prezto:module:git:status:ignore' submodules '_git_status_ignore_submodules' \
+ || _git_status_ignore_submodules='none'
+
+# Aliases
+
+# Branch (b)
+alias gb='git branch'
+alias gba='git rev-parse --abbrev-ref HEAD'
+alias gbc='git checkout -b'
+alias gbx='git branch -d'
+alias gbX='git branch -D'
# Show the differences of the current head since it diverged from a base
# branch. Defaults to master; pass a branch name as $1 to diff against
# another base (backward-compatible generalization).
gbd() {
    local base="${1:-master}"
    git diff "$(git merge-base "$base" HEAD)...HEAD"
}
# Same as gbd, but only print the diffstat summary.
gbds() {
    local base="${1:-master}"
    git diff --stat "$(git merge-base "$base" HEAD)...HEAD"
}
+alias gdiff='git diff --no-index'
+
+# Commit (c)
+alias gc='git commit --verbose'
+alias gcam='git commit --verbose --amend'
+alias ga='git add'
+alias gcu='git add -u; git commit --verbose'
+alias gca='git add -A; git commit --verbose'
+alias gco='git checkout'
gcoo() {
    # With arguments: plain `git checkout` passthrough.
    # Without arguments: pick modified files via fzf and discard their changes.
    if [[ "$#" -gt 0 ]]; then
        git checkout "$@"
        return
    fi
    local changed=(${(f)"$(git diff --name-only| fzf --multi --reverse)"})
    local f
    for f in ${changed[@]}; do
        git checkout "$f"
    done
}
gsq() {
    # Squash the last N commits into one, opening the editor with the
    # combined commit messages pre-filled.
    if [[ "$#" -ne 1 ]]; then
        echo "requires an int arg representing number of commits"
        # BUG FIX: previously fell through and ran the destructive reset anyway.
        return 1
    fi
    local MSG
    MSG=$(git log --format=%B HEAD~"${1}"..HEAD)
    git reset --soft HEAD~"${1}"
    git commit --verbose --edit -m"${MSG}"
}
gfo() {
    # Commit staged changes as a throwaway 'temp' commit, then squash it into
    # the previous commit while reusing that commit's message in the editor.
    git commit -m 'temp'
    MSG=$(git log --format=%B HEAD~2..HEAD~1)
    git reset --soft HEAD~2
    git commit --verbose --edit -m"${MSG}"
}
+alias gcp='git cherry-pick --ff'
+alias gcm='git commit --amend'
+
+# Fetch (f)
+alias gf='git fetch'
+alias gfc='git clone'
+
+# Log (l)
+alias gl='git log --topo-order --pretty=format:${_git_log_medium_format}'
+alias glp='git log --topo-order --pretty=format:${_git_log_medium_format} -p'
+alias gls='git log --topo-order --stat --pretty=format:${_git_log_medium_format}'
+alias gld='git log --topo-order --stat --patch --full-diff --pretty=format:${_git_log_medium_format}'
+alias glo='git log --topo-order --date=local --pretty=format:${_git_log_oneline_format}'
+# sed will remove multiple adjacent whitespaces
+alias glg='git log --topo-order --all --graph --date=local --pretty=format:${_git_log_oneline_format}'
+alias glb='git log --topo-order --pretty=format:${_git_log_brief_format}'
+alias glc='git shortlog --summary --numbered'
+
+# Rebase (r)
+alias gr='git rebase'
+alias gra='git rebase --abort'
+alias grc='git rebase --continue'
+alias gri='git rebase --interactive'
+alias grs='git rebase --skip'
+
+# Merge (m)
+alias gm='git merge'
+
+# Push (p)
+alias gp='git push'
+alias gpl='git fetch origin master && git rebase origin/master'
+
+# Stash (s)
+alias gs='git stash'
+alias gsa='git stash apply'
+alias gsx='git stash drop'
+alias gsX='git-stash-clear-interactive'
+alias gsl='git stash list'
+alias gss='git stash save --include-untracked'
+
+# Working Copy (w)
+alias gws='git status --ignore-submodules=${_git_status_ignore_submodules} --short'
+alias gwS='git status --ignore-submodules=${_git_status_ignore_submodules}'
+alias gwd='git diff --no-ext-diff'
+alias gwsd='git diff --cached'
+alias gwD='git diff --no-ext-diff --word-diff'
+alias gwr='git reset'
+alias gwR='git reset --hard'
+alias gwc='git clean -f'
+gwu() {
+ local FILES=(${(f)"$(git ls-files --others --exclude-standard| fzf --multi --reverse)"})
+ for FILE in ${FILES[@]}; do
+ rm "$FILE"
+ done
+}
+alias gcp='git cherry-pick --ff'
+
+
+# Personal Aliases
+alias s='ssh'
+alias n='nvim'
+alias ks='kitty +kitten ssh'
+alias sco='nvim ~/.ssh/config'
+alias nvimf='nvim $(fzf)'
+alias cdf='cd $(find . -type d | fzf)'
+alias ..='cd ../'
+alias ...='cd ../../'
+alias ....='cd ../../../'
+alias t='tmux'
+alias ta='tmux attach'
+alias tre='~/.tmux/window_renum.sh'
+alias ncdu='ncdu --color dark -x'
+alias rcp='rsync --verbose --progress --human-readable -zz --archive --hard-links --one-file-system'
+alias rmv='rsync --verbose --progress --human-readable -zz --archive --hard-links --one-file-system --remove-source-files'
+alias rmvu='rsync --verbose --progress --human-readable -zz --archive --hard-links --one-file-system --remove-source-files --update'
+alias rsynchronize='rsync --verbose --progress --human-readable --compress --archive --hard-links --one-file-system --remove-source-files --update --delete'
+
+
+###########
+# Functions
+###########
+
+# Color shortcuts
+G="\e[32m"
+R="\e[31m"
+C="\e[36m"
+NC="\e[39m"
+
# Docker functions
alias dl="docker logs"
alias dlf="docker logs --follow"
# FIX: the one-liners below omitted the `;` before `}`, which zsh tolerates
# but is a syntax error under bash (the file header claims bash syntax).
# The unquoted $(...) substitutions are intentional: container/image id lists
# must word-split into separate arguments.
dpa() { docker ps -a; }                                           # List all containers
di() { docker images; }                                           # Show images
drm() { docker rm $(docker ps -a -q) 2> /dev/null; }              # Remove dead containers
drv() { docker volume rm $(docker volume ls -qf dangling=true); } # Remove dangling volumes
dri() { docker rmi -f $(docker images -q); }                      # Remove unused images
dstop() { docker stop $(docker ps -a -q); }                       # Stop all containers
dip() { docker inspect --format '{{ .NetworkSettings.IPAddress }}' "$1"; } # Container IP
dfp() { # Get the host port a container port ($2, default 8000) is forwarded to
    # FIX: quote "$1" so container names are passed as a single argument.
    # NOTE(review): $2 is interpolated into the Go template — fine for port
    # numbers, but not safe for arbitrary input.
    if [ -z "$2" ]; then
        docker inspect --format='{{(index (index .NetworkSettings.Ports "8000/tcp") 0).HostPort}}' "$1"
    else
        docker inspect --format='{{(index (index .NetworkSettings.Ports "'$2'/tcp") 0).HostPort}}' "$1"
    fi
}
dfpo() { # Open Chrome at a port forwarded from a docker container
    if [ "$#" -eq 1 ]; then
        /usr/bin/open -a '/Applications/Google Chrome.app' 'http://localhost:'"$1"
    else
        echo 'Usage: dfpo $port'
    fi
}
dex() { # Exec into a container: bash by default, or the command given as $2
    # BUG FIX: was `[ -z "$2"]` — the missing space before `]` made `[` emit a
    # "missing `]`" error to stderr whenever a command was supplied.
    if [ -z "$2" ]; then
        docker exec -it "$1" /bin/bash
    else
        docker exec -it "$1" "$2"
    fi
}
+
+
+# Tmux functions
t4() {
    # Split the current tmux window three times, then tile into a 2x2 grid.
    local i
    for i in 1 2 3; do
        tmux split-window
    done
    tmux select-layout tiled
}
tat() { # Create a second grouped session ("$1"1) targeting session $1
    if [ -n "$1" ]; then
        tmux new -t "$1" -s "$1"1
    fi
}
+tla() {
+ if [[ -z "$1" ]]; then
+ return
+ fi
+ # Search in all folders under 'Work'
+ local PROJS=($(find "$HOME"/Work/* -mindepth 1 -maxdepth 1 -type d))
+ local PROJ_NAME=''
+ local PROJ_DIR=''
+ # Find if we have a match
+ for dir in "${PROJS[@]}"; do
+ if [[ $(basename "$dir") == "$1" ]]; then
+ PROJ_NAME=$(basename "$dir")
+ PROJ_DIR="$dir/code"
+ fi
+ done
+ if [[ -z "$PROJ_NAME" ]]; then
+ echo 'Project not found'
+ return
+ fi
+ PROJECT="$PROJ_NAME" DIR="$PROJ_DIR" tmuxp load ~/.tmux/templates/alternative.yaml
+}
+
+# Nixos
+alias nrs="sudo -i nixos-rebuild switch"
+alias nco="sudo nixos-container"
+alias ns="nix-shell -p"
+
+# Git
gup() { # Update each named branch from origin, then restore the original branch
    local branch
    orig_head="$(git name-rev --name-only HEAD)"
    for branch in "$@"; do
        git checkout "$branch"
        git pull origin "$branch"
        git merge origin/"$branch"
    done
    git checkout "$orig_head"
}
+
+gupr() {
+ git checkout "$1"
+ git fetch origin master
+ git rebase origin/master
+}
+
+gpf() {
+ current_branch="$(git name-rev --name-only HEAD)"
+ git push origin "$current_branch" --force
+}
+
+gpu() {
+ current_branch="$(git name-rev --name-only HEAD)"
+ git fetch origin "$current_branch"
+ git reset --hard origin/"$current_branch"
+}
+
+
+# GPG
gpgen() { # Compress $1 (file or dir) into a signed, encrypted, armored archive at $2
    # FIX: quote "$1"/"$2" so paths containing spaces work.
    tar -zc "$1" | gpg --encrypt --sign --armor --recipient cody@hiar.ca > "$2"
}
gpgde() { # Decrypt archive $1 and unpack the contained tarball into the cwd
    gpg -d "$1" | tar -zx
}
+
+
+# SSH
sl() { # Forward remote port $2 down to localhost (optionally bind to $3); retries every 10s
    if [ $# -eq 0 ]; then
        echo 'Usage: sl $host $port $bindingaddress(optional)'
    else
        while true; do
            # BUG FIX: was `[ -z "$3"]` — missing space before `]` made the
            # test error out (to stderr) whenever a binding address was given.
            if [ -z "$3" ]; then
                ssh -nNT -L "$2":localhost:"$2" "$1"
            else
                ssh -nNT -L "$2":"$3":"$2" "$1"
            fi
            sleep 10
        done &
    fi
}
sr() { # Push local port $2 up to the remote host (optionally bind to $3); retries every 10s
    if [ $# -eq 0 ]; then
        # BUG FIX: usage message previously named the wrong function ("sl").
        echo 'Usage: sr $host $port $bindingaddress(optional)'
    else
        while true; do
            # BUG FIX: was `[ -z "$3"]` — missing space before `]`.
            if [ -z "$3" ]; then
                ssh -nNT -R "$2":localhost:"$2" "$1"
            else
                ssh -nNT -R "$2":"$3":"$2" "$1"
            fi
            # BUG FIX: without this pause a failing ssh busy-looped,
            # respawning as fast as possible (sl() already slept).
            sleep 10
        done &
    fi
}
+
+
+pgdumpr() { # Dump remote postgres database.
+ ssh -t "$1" "sudo -u postgres bash -c \"pg_dump --no-acl --no-owner -d "$2" > /tmp/"$2"-$(date +%F).sql\""
+ scp "$1":/tmp/"$2"-$(date +%F).sql .
+}
+pgimport() { # SCP file remotely and import it.
+ scp "$2" "$1":/tmp
+ SHORTNAME=$(echo "$2" | cut -d'-' -f1)
+ ssh -t "$1" "sudo -u postgres bash -c \"psql -d "$SHORTNAME" < /tmp/"$2"\""
+}
+pgls() { # List commands on a server
+ HOST="$1"
+ ssh -tt "$HOST" 'sudo -u postgres bash -c "psql --list"'
+}
+mysqldumpr() { # Dump remote mysql database
+ ssh -t "$1" "mysqldump "$2" > /tmp/"$2"-$(date +%F).sql" && scp "$1":/tmp/"$2"-$(date +%F).sql .
+}
+fwkill() { # Kill all of the forwarded ports on the machine
+ ps aux | grep 'ssh -nNT -L' | grep -v 'grep' | awk '{ print $2 }' | xargs -n 1 kill
+}
+j() { # Jump to project code
+ PROJ=$(find "$HOME/Work" -mindepth 2 -maxdepth 2 -type d -name "$1")
+ if [[ -d "$PROJ/code" ]]; then
+ cd "$PROJ/code"
+ fi
+}
+ch() { # Force ownership on a projects files. Sometimes docker generates root owned files
+ PROJ=$(find "$HOME/Work" -mindepth 2 -maxdepth 2 -type d -name "$1")
+ sudo chown -R thorny:users "$PROJ/code"
+}
finalurl() { # check that redirects are properly happening
    # Print the post-redirect effective URL for each scheme/host variant
    # of domain $1, one per line.
    local url
    for url in "http://$1" "http://www.$1" "https://$1" "https://www.$1"; do
        curl "$url" -s -L -o /dev/null -w '%{url_effective}'
        echo ''
    done
}
newproj() { # Create a new cookiecuter project
    # Scaffold a project from the docker cookiecutter template, then
    # rearrange so the generated tree ends up at <project>/code.
    cookiecutter https://git.codyhiar.com/docker/cookiecutter-docker
    # Newest entry in cwd is assumed to be the freshly generated project.
    # NOTE(review): racy if anything else writes to this directory.
    PROJECT=$(ls -t1 --color=never | head -1)
    mv "$PROJECT" code
    mkdir "$PROJECT"
    mv code "$PROJECT"
}
p() { # List all projects
    # Print the sorted base names of every ~/Work/<group>/<project> dir.
    find "$HOME"/Work/* -mindepth 1 -maxdepth 1 -type d -exec basename {} \; | sort
}
ssl() {
    # Show issuer, validity dates, subject and fingerprint of the TLS
    # certificate served on $1:443 (SNI set via -servername). The piped
    # echo closes s_client's stdin so it exits after the handshake.
    echo | openssl s_client -servername "$1" -connect "$1":443 2>/dev/null | openssl x509 -noout -issuer -dates -subject -fingerprint
}
+
gfcc () {
    # Clone a repo into <repo>/code. Handles the common URL shapes:
    #
    #   git@github.com:user/repo.git
    #   https://github.com/user/repo[.git]
    #
    # The repo name is everything after the last '/', minus a trailing
    # .git. Unlike the old cut-based parsing, this keeps dots inside repo
    # names intact (e.g. "repo.js" no longer becomes "repo").
    local REPO="${1##*/}"
    REPO="${REPO%.git}"
    git clone "$1" "$REPO"/code
}
+
heartbeat() { # Keep a heartbeat on a website
    # Poll $1 every 2 seconds and report up/down. G/R/NC are presumably
    # color escape variables defined elsewhere in this file — TODO confirm.
    # Fetches the headers once per iteration (the old version ran curl
    # twice) and accepts either an HTTP/1.x "200 OK" or an HTTP/2
    # "HTTP/2 200" status line.
    local HEADERS
    while true; do
        HEADERS=$(nice curl -I "$1" 2> /dev/null)
        if echo "$HEADERS" | grep -qE '200 OK|HTTP/2 200'; then
            echo -e "$(date) ${G}$1 is up${NC}"
        else
            echo -e "$(date) ${R}$1 is down${NC}"
        fi
        sleep 2
    done
}
+mvw() { # i3 move workspace to monitor
+ i3-msg "workspace ${1}, move workspace to output ${2}"
+}
getip() { # Get ip for website, ignore cloudflare ips
    # Bail out early when the domain's nameservers are Cloudflare's —
    # the A record would only expose a Cloudflare edge address.
    if dig +short NS "$1" | grep -q 'cloudflare'; then
        echo 'Behind Cloudflare'
        return
    fi
    IP_ADDR=$(dig +short "$1")
    echo "$IP_ADDR"
    # Show which ssh-config Host entry (if any) points at this IP.
    grep -B 2 "$IP_ADDR" ~/.ssh/config | grep 'Host '
}
lorem() {
    # Print a 41-word nonsense sentence; each word is 5-9 random base64
    # characters taken from `openssl rand`.
    local WORD_LENGTH WORD SENTENCE i
    WORD_LENGTH=$(((RANDOM % 5) + 5))
    WORD=$(openssl rand -base64 12 | head -n 1 | cut -c1-"$WORD_LENGTH")
    SENTENCE="$WORD"
    for i in {1..40}; do
        WORD_LENGTH=$(((RANDOM % 5) + 5))
        WORD=$(openssl rand -base64 12 | head -n 1 | cut -c1-"$WORD_LENGTH")
        SENTENCE="$SENTENCE $WORD"
    done
    # Quoted so word-splitting/globbing can't mangle the output.
    echo "$SENTENCE"
}
+amis() {
+ aws ec2 describe-images --owners self | jq '.Images[] | {id: .ImageId, name: .Name, state: .State, snapshot: .BlockDeviceMappings[0].Ebs.SnapshotId}'
+}
rm_ami() {
    # Deregister the AMI whose Name contains $1 and delete its backing
    # EBS snapshot.
    local AMI_NAME="$1"
    local DATA AMI_ID SNAPSHOT_ID
    DATA=$(aws ec2 describe-images --owners self)
    # jq -r emits raw (unquoted) values, replacing the old sed dance that
    # stripped surrounding double quotes.
    # NOTE(review): if several AMI names match, multiple ids are returned
    # and the aws calls below will fail — refine $1 in that case.
    AMI_ID=$(echo "$DATA" | jq -r '.Images[] | select(.Name | contains("'"$AMI_NAME"'")) | .ImageId')
    SNAPSHOT_ID=$(echo "$DATA" | jq -r '.Images[] | select(.Name | contains("'"$AMI_NAME"'")) | .BlockDeviceMappings[0].Ebs.SnapshotId')
    # Guard: don't call aws with an empty --image-id when nothing matched.
    if [[ -z "$AMI_ID" ]]; then
        echo "No AMI matching '$AMI_NAME' found" >&2
        return 1
    fi
    aws ec2 deregister-image --image-id "$AMI_ID"
    aws ec2 delete-snapshot --snapshot-id "$SNAPSHOT_ID"
}
+settitle() {
+ xdotool set_window --name "$*" $(xdotool getactivewindow)
+}
+csv() {
+ clear; csvlook -d ',' --no-inference "$1" | less -s
+}
+y() {
+ yank | xp
+}
gbxm() { # Clear out branches
    # Delete every local branch except master and the currently checked-out
    # one (the old egrep pattern let the current branch through, so git
    # errored trying to delete it), then drop cached remote refs and
    # re-fetch master. xargs -r (GNU) skips the run when nothing matches.
    git branch | grep -Ev '^\*|master' | xargs -r -n 1 git branch -D
    rm -rf .git/refs/remotes/origin/*
    git fetch origin master
}
+d() { # reset monitors on desktop
+ i3-msg "workspace 1, move workspace to output DVI-D-1"
+ i3-msg "workspace 2, move workspace to output DVI-I-1"
+ i3-msg "workspace 3, move workspace to output HDMI-4"
+}
+mkv2mp4() { # Create an mp4 of mkv
+ ffmpeg -i "$1" -codec copy "${1%.*}.mp4"
+}
+rzf() {
+ local FILENAME=$(fzf)
+ local DIRNAME=$(dirname "${FILENAME}")
+ ranger "${DIRNAME}"
+}
+
+btc() { # Get current btc
+ curl -s https://bitpay.com/api/rates | python -c "import json, sys; print(json.load(sys.stdin)[6]['rate'])"
+}
+btcc() { # convert btc to cad
+ BTC_RATE=$(curl -s https://bitpay.com/api/rates | python -c 'import json, sys; print(json.load(sys.stdin)[6]["rate"])')
+ echo $(($1 * $BTC_RATE))
+}
+brightd() {
+ sudo python3 "$HOME/.dotfiles/repos/additional/scripts/brightness_daemon.py"
+}
+brightness_up() {
+ echo 'up' | nc -U /tmp/brightd.sock
+}
+brightness_down() {
+ echo 'down' | nc -U /tmp/brightd.sock
+}
+tlo () {
+ if [[ -z "$1" ]]; then
+ return
+ fi
+ local PROJS=($(find "$HOME"/Work/* -mindepth 1 -maxdepth 1 -type d))
+ local PROJ_NAME=''
+ local PROJ_DIR=''
+ for dir in "${PROJS[@]}"; do
+ if [[ $(basename "$dir") == "$1" ]]; then
+ PROJ_NAME=$(basename "$dir")
+ PROJ_DIR="$dir/code"
+ tmux new-session -c "${PROJ_DIR}" -s "${PROJ_NAME}"
+ return
+ fi
+ done
+ echo "Project '${1}' was not found"
+}
+
+gpp() {
+ MESSAGE=${1:-auto}
+ git commit -m "$MESSAGE" && git push origin master
+}
+gppa() {
+ MESSAGE=${1:-auto}
+ git add -A; git commit -m "$MESSAGE" && git push origin master
+}
cwh() {
    # Print the contents of whatever executable $1 resolves to on PATH.
    # The command substitution is quoted so paths with spaces work.
    cat "$(which "${1}")"
}
+vwh() {
+ nvim $(which "${1}")
+}
+xephyr() {
+ Xephyr -br -ac -noreset -screen 1080x1080 :1
+}
+pwgen() {
+ date +%s | sha256sum | base64 | head -c 32 | cut -c1-10
+}
+# rotate uses ImageMagick
+rotate() { convert "$1" -rotate 90 "$1" }
+rotate90() { convert "$1" -rotate 90 "$1" }
+rotate180() { convert "$1" -rotate 180 "$1" }
+rotate270() { convert "$1" -rotate 270 "$1" }
+# task warrior commands
+tkwa() {task add "$1" +work; task sync}
+tkpa() {task add "$1" +personal; task sync}
+tkw() {task +work}
+tkp() {task +personal}
+ts() {task sync}
+ttotal() {
+ task "$1" information | grep 'duration' | awk -F'duration: ' '{ print $2 }' | cut -d')' -f1 | iso8601_adder
+}
+twstart() {
+ TAG=$(cat ~/.timewarrior/my_tags | fzf)
+ timew start "$TAG"
+}
+twstop() {
+ TAG=$(cat ~/.timewarrior/my_tags | fzf)
+ timew stop "$TAG"
+}
+twremove() {
+ TAGS=(${(f)"$(cat ~/.timewarrior/my_tags | fzf --multi)"})
+ for TAG in ${TAGS[@]}; do
+ sed -i '/'"$TAG"'/d' ~/.timewarrior/my_tags
+ done
+}
+twsummary() {
+ timew summary
+}
+twsummaryt() {
+ TAG=$(cat ~/.timewarrior/my_tags | fzf)
+ timew summary "$TAG"
+}
+inc() {
+ nvim ~/.dotfiles/zsh/includes
+}
+dot2png() {
+ dot "${1}" -Tpng -o "${1%.*}.png"
+}
+dot2svg() {
+ dot "${1}" -Tsvg -o "${1%.*}.svg"
+}
+alias vbm="vboxmanage"
+xlsx2csv() {
+ in2csv "${1}" > "${1%.*}.csv"
+}
+xls2csv() {
+ in2csv "${1}" > "${1%.*}.csv"
+}
+klu() {
+ CLUSTER=$(kubectl config get-contexts | tail -n +2 | awk '{ print $2 }' | fzf)
+ kubectl config use-context "$CLUSTER"
+}
+ksl() {
+ kubectl get svc
+}
+kdl() {
+ clear
+ kubectl get deployment -o wide | less -S
+}
+kex() {
+ POD=$(kubectl get pods | tail -n +2 | awk '{ print $1 }' | fzf)
+ kubectl exec -it "$POD" -- bash
+}
+klf() {
+ POD=$(kubectl get pods | tail -n +2 | awk '{ print $1 }' | fzf)
+ kubectl logs --follow "$POD"
+}
+mrs() {
+ TOKEN=$(sops -d /not/real/path/to/keys.yaml | yq -r .gitlab_token)
+ curl --header "PRIVATE-TOKEN: ${TOKEN}" -X GET "https://gitlab.com/api/v4/projects/someproject/merge_requests?state=opened" 2> /dev/null | \
+ jq ".[] | {title: .title, branch: .source_branch, author: .author.name, web_url: .web_url, labels: .labels}"
+}
+mrsc() {
+ TOKEN=$(sops -d /not/real/path/to/keys.yaml | yq -r .gitlab_token)
+ curl --header "PRIVATE-TOKEN: ${TOKEN}" -X GET "https://gitlab.com/api/v4/projects/someproject/merge_requests?state=merged&order_by=updated_at" 2> /dev/null | \
+ jq "limit(4;.[]) | {title: .title, branch: .source_branch, author: .author.name, web_url: .web_url}"
+}
glab_ci() {
    # Open the glab CI view for whichever branch currently has a running
    # pipeline (first match returned by the GitLab API).
    local TOKEN RUNNING_BRANCH
    TOKEN=$(sops -d /not/real/path/to/keys.yaml | yq -r .gitlab_token)
    RUNNING_BRANCH=$(curl --header "PRIVATE-TOKEN: ${TOKEN}" -X GET "https://gitlab.com/api/v4/projects/someproject/pipelines?status=running" 2> /dev/null | jq -r ".[0].ref")
    # Quoted so a missing/odd ref can't word-split the glab invocation.
    glab ci view "$RUNNING_BRANCH"
}
+
+2faimport() {
+ if [[ "$#" -ne 2 ]]; then
+ echo 'error in num args'
+ else
+ # 1 is image 2 is account
+ zbarimg -q --raw "${1}" | pass otp append "${2}"
+ fi
+}
+2fadisplay() {
+ pass otp uri -q "${1}"
+}
+rst2md() {
+ pandoc -s -o "${1%.*}.md" "${1}"
+}
+rtf2txt() {
+ unoconv -f txt "${1}"
+}
+zshpure() {
+ zsh -d -f -i
+}
+cheat() {
+ curl https://cheat.sh/"${1}"
+}
fz() {
    # Fuzzy-jump: list zsh-z's frecent directories matching $1, pick one
    # with fzf, and cd there.
    local DIR
    DIR=$(_z 2>&1 -l "${1}" | rg -v 'common:' | awk '{ print $2 }' | fzf --tac)
    # Guard + quote: aborting fzf used to run a bare `cd` (jumping to
    # $HOME); now it is a no-op, and paths with spaces survive.
    [[ -n "$DIR" ]] && cd "$DIR"
}
+alias icat="kitty +kitten icat"
+rwh() {
+ readlink $(which "${1}")
+}
+cdrwh() {
+ cd $(dirname $(dirname $(rwh "${1}")))
+}
cdr() {
    # cd to the grandparent directory of the given path. All command
    # substitutions are quoted so paths with spaces work.
    cd "$(dirname "$(dirname "${1}")")"
}
nums() {
    # Print "<count> <name>" for each immediate subdirectory, where count
    # is the number of regular files anywhere beneath it.
    # set IFS to deal with dirs having spaces in the for-loop split
    local IFS=$'\n\t'
    local DIR MYDIR NUMFILES
    for DIR in $(find . -mindepth 1 -maxdepth 1 -type d | sort); do
        MYDIR=$(basename "${DIR}")
        # Quoted find arg (old version relied on IFS only); tr strips the
        # padding some wc implementations emit.
        NUMFILES=$(find "${MYDIR}" -type f | wc -l | tr -d ' ')
        printf "%6s %s\n" "$NUMFILES" "$MYDIR"
    done
}
l() {
    # Listing wrapper: prefer exa when installed, fall back to ls.
    # `command ls` bypasses any shell function named ls (one is defined in
    # this file), so the fallback can never recurse.
    if [ -x "$(command -v exa)" ]; then
        exa "$@"
    else
        command ls "$@"
    fi
}
# Shadow ls with exa when available. `command ls` invokes the real ls
# binary, fixing the infinite recursion the old version hit whenever exa
# was not installed.
ls() {
    if [ -x "$(command -v exa)" ]; then
        exa "$@"
    else
        command ls "$@"
    fi
}
ll() {
    # Long listing via the l wrapper (exa -l when exa is installed,
    # otherwise ls -l).
    l -l
}
l1() {
    # One-entry-per-line listing via the l wrapper.
    l -1
}
+
+clean_nix_generations() {
+ sudo nix-env -p /nix/var/nix/profiles/system --delete-generations +5
+ sudo nix-collect-garbage
+}
+ghprl() {
+ gh pr list --author @me --json number,title,headRefName,url | jq -r 'map({number,title,headRefName,url}) | (first | keys_unsorted) as $keys | map([to_entries[] | .value]) as $rows | $keys,$rows[] | @csv' | csvlook -d ',' --no-inference | less
+ gh pr list --search "is:open is:pr review-requested:@me" --json number,title,headRefName,url | jq -r 'map({number,title,headRefName,url}) | (first | keys_unsorted) as $keys | map([to_entries[] | .value]) as $rows | $keys,$rows[] | @csv' | csvlook -d ',' --no-inference
+}
+ghpra() {
+ gh pr list --search "is:open is:pr author:@me review:approved" --json number,title,headRefName,url | jq -r 'map({number,title,headRefName,url}) | (first | keys_unsorted) as $keys | map([to_entries[] | .value]) as $rows | $keys,$rows[] | @csv' | csvlook -d ',' --no-inference
+}
+ghpr() {
+ local PR=$(gh pr list --author @me --json number,title -q '.[] | "\(.number) \(.title)"' | fzf --prompt "Which PR do you want to check out?" | awk '{ print $1 }')
+ export GH_PR=$PR
+}
+ghprs() {
+ gh pr checks $1
+ echo ""
+ gh pr view $1
+}
+ghprd() {
+ gh pr diff "$1"
+}
+ghprm() {
+ echo "Merge PR: $1?"
+ read choice
+ case "$choice" in
+ y|Y ) gh pr merge --auto --rebase --delete-branch $1;;
+ n|N ) return;;
+ * ) echo "invalid";;
+ esac
+}
+ghprr() {
+ gh pr list -S 'review-requested:@me'
+}
+ghil() {
+ gh issue list -a @me
+}
ghprw() {
    # Watch a PR's CI checks: clear the screen and re-run `gh pr checks`
    # every 7 seconds. (Fixed `while true :` — the stray ':' was being
    # passed to `true` as a meaningless argument.)
    while true; do
        clear
        gh pr checks "$1"
        sleep 7
    done
}
diff --git a/scripts/fzf_example b/scripts/fzf_example
new file mode 100755
index 0000000..7b9c4d1
--- /dev/null
+++ b/scripts/fzf_example
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+CHOICE=$(echo -e "1\n2\n3\n" | fzf --prompt "Lucky number?")
+echo "You selected $CHOICE"
diff --git a/stow/install b/stow/install
new file mode 100755
index 0000000..c16ab4d
--- /dev/null
+++ b/stow/install
@@ -0,0 +1,5 @@
#!/usr/bin/env bash
# Symlink every stow package directory in this folder into $HOME.
# APPS is a newline-separated string (not an array — the old ${APPS[@]}
# was misleading), so iterate via plain word-splitting; package names
# here contain no spaces.
APPS=$(find . -mindepth 1 -maxdepth 1 -type d | xargs -n1 basename)
for APP in $APPS; do
    stow "$APP" -t "$HOME"
done
+
diff --git a/stow/starship/.config/starship.toml b/stow/starship/.config/starship.toml
new file mode 100644
index 0000000..27edf54
--- /dev/null
+++ b/stow/starship/.config/starship.toml
@@ -0,0 +1,54 @@
+format = """
+$shlvl\
+$kubernetes\
+$hostname\
+$directory\
+$vcsh\
+$git_branch\
+$git_commit\
+$git_state\
+$git_metrics\
+$git_status\
+$docker_context\
+$package\
+$cmake\
+$deno\
+$python\
+$red\
+$terraform\
+$nix_shell\
+$memory_usage\
+$aws\
+$gcloud\
+$openstack\
+$env_var\
+$custom\
+$line_break\
+$lua\
+$jobs\
+$battery\
+$time\
+$status\
+$shell\
+$character"""
+
+[git_branch]
+style = "bold white"
+format = "[$branch]($style) "
+
+[directory]
+truncate_to_repo = false
+
+[character]
+success_symbol = "[](bold cyan) "
+error_symbol = "[](bold red) "
+
+[python]
+style = "bold purple"
+format = '[${pyenv_prefix}(${version} )(\($virtualenv\) )]($style)'
+
+[nix_shell]
+format = '[$state( \($name\))]($style) '
+
+[hostname]
+ssh_only = true
diff --git a/stow/zsh/.zshrc b/stow/zsh/.zshrc
new file mode 100644
index 0000000..5b4ecb2
--- /dev/null
+++ b/stow/zsh/.zshrc
@@ -0,0 +1,233 @@
+eval "$(starship init zsh)"
+
+# Include custom aliases/functions
+source "$HOME/.dotfiles/includes"
+
+install_zplug() {
+ curl -sL --proto-redir -all,https https://raw.githubusercontent.com/zplug/installer/master/installer.zsh | zsh
+}
+rm_zplug() {
+ rm -rf ~/.zplug
+}
+
+# Zplug settings
+# https://github.com/zplug/zplug
+source ~/.zplug/init.zsh
+
+# https://www.github.com/zsh-users/zsh-syntax-highlighting
+# Fish shell-like syntax highlighting for Zsh.
+zplug "zsh-users/zsh-syntax-highlighting"
+
+# You can type the beginning of a command and then use arrow keys to filter
+# previous commands in your history that share the same beginning
+# https://www.github.com/zsh-users/zsh-history-substring-search
+zplug "zsh-users/zsh-history-substring-search"
+
+# https://www.github.com/zsh-users/zsh-autosuggestions
+# Fish-like fast/unobtrusive autosuggestions for zsh. When you type a command a
+# second time it shows up but is shaded, use the right arrow to fully complete
+# the command
+zplug "zsh-users/zsh-autosuggestions"
+
+# https://www.github.com/zsh-users/zsh-completions
+# Additional completions for common command line tools
+zplug "zsh-users/zsh-completions"
+
+# Quick changing directories, z is a command line tool that allows you to jump
+# quickly to directories that you have visited frequently in the past, or
+# recently
+# https://www.github.com/agkozak/zsh-z
+zplug "agkozak/zsh-z"
+
+# Install plugins if there are plugins that have not been installed
+if ! zplug check --verbose; then
+ printf "Install? [y/N]: "
+ if read -q; then
+ echo; zplug install
+ fi
+fi
+
+# Then, source plugins and add commands to $PATH
+# Remove --verbose if you find the startup message annoying
+zplug load --verbose
+
+# If you see custom autocomplete files that you want to install, say from
+# someones dotfiles, Well create our own autocomplete folder where we can just
+# download/curl the files into and then we'll save them.
+#
+# For instance, what if we just wanted to use the _docker file from this repo
+# https://raw.githubusercontent.com/felixr/docker-zsh-completion
+install_docker_autocomplete() {
+ curl -fsSL 'https://raw.githubusercontent.com/felixr/docker-zsh-completion/master/_docker' -o "$HOME/.dotfiles/autocompletes/_docker"
+}
+rm_docker_autocomplete() {
+ rm ~/.dotfiles/autocompletes/_docker
+}
+fpath=($HOME/.dotfiles/autocompletes $fpath)
+
+# Highlight the current autocomplete option
+zstyle ':completion:*' list-colors "${(s.:.)LS_COLORS}"
+
+# Auto-complete will group matches and describe.
+zstyle ':completion:*:*:*:*:*' menu select
+zstyle ':completion:*:matches' group 'yes'
+zstyle ':completion:*:options' description 'yes'
+zstyle ':completion:*:options' auto-description '%d'
+zstyle ':completion:*:corrections' format ' %F{green}-- %d (errors: %e) --%f'
+zstyle ':completion:*:descriptions' format ' %F{yellow}-- %d --%f'
+zstyle ':completion:*:messages' format ' %F{purple} -- %d --%f'
+zstyle ':completion:*:warnings' format ' %F{red}-- no matches found --%f'
+zstyle ':completion:*:default' list-prompt '%S%M matches%s'
+zstyle ':completion:*' format ' %F{yellow}-- %d --%f'
+zstyle ':completion:*' group-name ''
+zstyle ':completion:*' verbose yes
+
+# Simple ssh completion, don't ask how it works
+zstyle -e ':completion:*:hosts' hosts 'reply=(
+ ${=${=${=${${(f)"$(cat {/etc/ssh_,~/.ssh/known_}hosts(|2)(N) 2>/dev/null)"}%%[#| ]*}//\]:[0-9]*/ }//,/ }//\[/ }
+ ${=${(f)"$(cat /etc/hosts(|)(N) <<(ypcat hosts 2>/dev/null))"}%%\#*}
+ ${=${${${${(@M)${(f)"$(cat ~/.ssh/config 2>/dev/null)"}:#Host *}#Host }:#*\**}:#*\?*}}
+)'
+
+# Allow for autocomplete to be case insensitive
+zstyle ':completion:*' matcher-list '' 'm:{[:lower:][:upper:]}={[:upper:][:lower:]}' \
+ '+l:|?=** r:|?=**'
+
+# Initialize autocompletes for zsh
+# Needs to come after the fpath is set
+autoload -Uz compinit && compinit -i
+
+# Example of how to assign a custom function the autocompletes of an existing
+# function
+sls() {
+ ssh "$1" ls
+}
+compdef sls=ssh
+
+dex() { # Enter a container
+ docker exec -it "$1" /bin/bash
+}
+
+# Set `dex` to use the autocompletes for `docker_containers`.
+# `docker_containers` is not a real function, but zsh will still search for the
+# autocomplete file `_docker_containers` on the fpath. Sometimes I share the
+# same autocompletes between multiple custom functions, so I keep one "general"
+# autocompletes file and compdef each command to it.
+compdef dex=docker_containers
+
+# Extra Goodies
+
+
+# Source additional options if set
+if [[ -s "$HOME/.zshrc2" ]]; then
+ source "$HOME/.zshrc2"
+fi
+
+# Set Editor
+export EDITOR='nvim'
+export VISUAL='nvim'
+export PAGER='less -S'
+export XDG_CONFIG_HOME="$HOME/.config"
+export COOKIECUTTER_CONFIG="$HOME/.config/cookiecutter/config.yml"
+
+# Set the Language, important for NERD Fonts
+export LANG=en_US.UTF-8
+
+# Set the default Less options.
+# Mouse-wheel scrolling has been disabled by -X (disable screen clearing).
+# Remove -X and -F (exit if the content fits on one screen) to enable it.
+export LESS='-F -g -i -M -R -S -w -X -z-4'
+
+# Use `bindkey -v` to use vim line editing mode
+bindkey -e
+export KEYTIMEOUT=1
+
+# FZF find function, ignore pyc files
+export FZF_DEFAULT_COMMAND='find * -type f | grep -v ".pyc"'
+
+# Vi mode changes a bunch on the commands that normal bash
+# provides so I bind these keys back to their original functions
+bindkey '^?' backward-delete-char
+bindkey '^w' backward-kill-word
+bindkey '^h' backward-delete-char
+bindkey '^u' backward-kill-line
+bindkey '^r' history-incremental-search-backward
+bindkey '^S' history-incremental-search-forward
+bindkey '^[[Z' reverse-menu-complete
+bindkey -M viins '\C-A' beginning-of-line
+bindkey -M viins '\C-E' end-of-line
+bindkey -M viins '^k' vi-cmd-mode
+
+
+# History Files Options
+HISTFILE="${ZDOTDIR:-$HOME}/.dotfiles/history"
+HISTSIZE=10000
+SAVEHIST=10000
+setopt INC_APPEND_HISTORY # Write to the history file immediately, not when the shell exits.
+setopt HIST_IGNORE_SPACE # Do not record an event starting with a space.
+setopt BANG_HIST # Treat the '!' character specially during expansion.
+setopt EXTENDED_HISTORY # Write the history file in the ':start:elapsed;command' format.
+setopt HIST_IGNORE_DUPS # Do not record an event that was just recorded again.
+
+# Use vim to edit longer commands
+autoload -z edit-command-line
+zle -N edit-command-line
+bindkey "^X^E" edit-command-line
+
+# Alt-m to cycle through earlier words from history.
+autoload -Uz copy-earlier-word
+zle -N copy-earlier-word
+bindkey "^[m" copy-earlier-word
+
+# Disable XON/XOFF flow control which causes ^-s to freeze instead of
+# searching forward through history
+stty -ixon
+
+# Append custom bins
+path+=("$HOME/.dotfiles/bin")
+
+# Allow brace expansions
+setopt BRACE_CCL
+
+# Ctrl-w delete word, when it's a path only delete to the '/' and ':'
+# Remove them from wordchars: A list of non-alphanumeric characters considered
+# part of a word by the line editor.
+my-backward-delete-word() {
+ local WORDCHARS=${WORDCHARS/\//}
+ WORDCHARS=${WORDCHARS/:/}
+ zle backward-delete-word
+}
+zle -N my-backward-delete-word
+bindkey '^W' my-backward-delete-word
+
+
+# Make sure SSH Agent is present
+if ! pgrep -u "$USER" ssh-agent > /dev/null; then
+ ssh-agent > ~/.ssh-agent-cache
+fi
+if [[ "$SSH_AGENT_PID" == "" ]]; then
+ eval "$(<~/.ssh-agent-cache)"
+fi
+
+# For GPG signing commits
+export GPG_TTY=$(tty)
+
+# LS_COLORS config
+local ls_colors_file="$HOME/.zsh/ls_colors"
+if [[ -e "$ls_colors_file" ]]; then
+ eval $(dircolors -b "$ls_colors_file")
+fi
+
+# Use direnv
+if [ -x "$(command -v direnv)" ]; then
+ eval "$(direnv hook zsh)"
+fi
+
+# Autocompletes for kubectl
+if [ -x "$(command -v kubectl)" ]; then
+ source <(kubectl completion zsh)
+fi
+
+[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
+
+export GOPATH="$HOME/.go"