SYNTAX
comment
line numbering is independent of any sub-selection: sed parses the whole input and the line counter always refers to the global line set, so a numeric address inside a block still means the global line number
'/^while .*= "quit"/,/^done/ {/^\s*#/! {1, 70 s/^\(\s*\)/#\1/}}' /devs/install/alpine.install
<==>
'1,70 {/^while .*= "quit"/,/^done/ {/^\s*#/! s/^\(\s*\)/#\1/}}' /devs/install/alpine.install
use ';' or -e: two syntaxes for the same result
'1,70 {/^\s*#/! s/^\(\s*\)/#\1/}; s/^\(\s*\)/#\1/' /devs/install/alpine.install
<=>
-e '1,70 {/^\s*#/! s/^\(\s*\)/#\1/}' -e 's/^\(\s*\)/#\1/' /devs/install/alpine.install
BASIC EXAMPLES
# insert/add
"1i $line" # insert text at the beginning of file
"8i $line" # insert mytext in 8th line of file
"/$reg/ a \$line" # append line after the indicator
"/$reg/ i \$line" # insert line before the indicator
"/$reg/ c \$line" # replace line of the indicator
"s|^|$text|" # append text at the end of line
"s|$|$text|" # prepend text at the beginning of line
# substitute
"/$begin/,/$end/ {s/\($str\).*/\1$str/}" # modify between two indicators
"/$begin/,$ {s/\($str\).*/\1$str/}" # modify between indicator to the end
"/$begin/,10 {s/\($str\).*/\1$str/}" # modify between indicator and the 10th line
"/$begin/,+2 {s/\($str\).*/\1$str/}" # modify between indicator and two lines after indicator
"s/$sch/$str/3" # replace only the 3rd 'sch' found string by str for each line
"s/$sch/\l$str/" # replace found string 'sch' by 'str' with the first character in lower case
"s/$sch/\L$str/" # replace found string 'sch' by 'str' with all characters in lower case
# modify
-e 's/\(.*\)/\L\1/' # to lowercase
# print/extract
-n "s/$select/ s|^export \(.*\)$|\1|p" # extract text from line
-n '2p;5p' # print 2nd & 5th lines
-n '3,$p' # print lines from the 3rd to the end
-n '/while/,+10p' # print from each line matching 'while' to 10 lines after. **REPEATED FOR EACH MATCHED LINE**
-s -n '4,10 p' /*/$file # print a range of lines for each file
# delete
'7,9d' # delete lines 7 through 9
'/pattern_start/,/pattern_end/d' # delete lines between two patterns
# backup
-i.bak '4i $line' $file # insert line before the 4th line & back up the old file to $file.bak
COMPLEX EXAMPLES
percent-code
decode percent-code
echo "$text_encoded" | sed 's@+@ @g;s@%@\\x@g' | xargs -0 printf "%b"
print TODO lines
print lines lying between the patterns '<<TODO' & 'TODO' in /home/shared/dev
while read line; do
echo -e "\n$line"
sed -n '/^<<TODO/,/^TODO/ {p}' "$line"
done < <(grep '<<TODO' /home/shared/dev -rl)
select then modify
prepend '#' to lines not beginning with '#' (optionally after leading whitespace)
'/^\s*#/! s/^\(\s*\)/\1#/'
sub-treatment with a line counter
prepend '#' to lines not beginning with '#' (optionally after leading whitespace), restricted to lines between the matches of '^while .*= "quit"' and '^done'
'/^while .*= "quit"/,/^done/ {/^\s*#/! {1, 70 s/^\(\s*\)/#\1/}}' /devs/install/alpine.install
select lines between a matched line and the x following lines
prepend '#' to lines from the match of $str through the 10 following lines, when they do not begin with '#'
'/$str/,+10 {/^#/! s/\(.*\)/#\1/}'
select lines from a match to the end
prepend '#' to lines from the match of $str through the last line, when they do not begin with '#'
'/$str/,$ {/^#/! s/\(.*\)/#\1/}'
treats each file separately
treat each file separately, unlike a bare '| xargs sed' (which handles all files as one stream)
ls $path | xargs -L 1 sed "\$a $str"
ls $path | xargs sed -s "\$a $str"
sed -s "$action" /*/*.conf
grep selected
replace a string in a file only on the lines selected by grep
while read line; do
file="${line%%:*}"
line="$(echo "$line"|awk -F : '{ print $2 }')"
echo sed -i "$line s|\(\[ .*\) == \(.* \]\)|\1 = \2|g" "$file" # dry run: remove 'echo' to apply
done <<< "$(grep -rn "\[ .* == .* \]" /home/shared/dev/install/)"
f() { cmd; } # declare function whose body runs in the current shell: internal declarations are global
f() ( cmd; ) # declare function whose body runs in a subshell: internal declarations stay local
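a minimal sketch of the difference (names are illustrative):
x=1
glob() { x=2; }                     # brace body: runs in the current shell
loc()  ( x=3; echo "inside: $x" )   # parenthesis body: runs in a subshell
glob; echo "$x"                     # 2 : the assignment persisted
loc;  echo "$x"                     # prints 'inside: 3' then 2 : subshell change discarded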
DEBUG
https://linuxconfig.org/how-to-debug-bash-scripts
bash -x $script
Short | Long notation | description |
---|---|---|
set -f | set -o noglob | Disable file name generation using metacharacters (globbing) |
set -v | set -o verbose | Prints shell input lines as they are read |
set -x | set -o xtrace | Print command traces before executing command |
To see the current state of the shell option flags
echo $-
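for example, enabling verbose and xtrace adds v and x to the flag string:
set -vx; echo $-   # e.g. himvxBHs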
trap signals
ARG is a command to be read and executed when the shell receives the signal(s) SIGNAL_SPEC. If ARG is absent (and a single SIGNAL_SPEC is supplied) or '-', each specified signal is reset to its original value. If ARG is the null string, each SIGNAL_SPEC is ignored by the shell and by the commands it invokes
trap [-lp] [[action] signal_spec ...]
-l # print a list of signal names and their corresponding numbers
-p # display the trap commands associated with each SIGNAL_SPEC
trap $cmd $signal # run $cmd when signal $signal is received
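a typical cleanup pattern (file name is illustrative):
tmp=$(mktemp)
trap 'rm -f "$tmp"' EXIT                 # run cleanup whenever the script exits
trap 'echo interrupted; exit 1' INT TERM # handle Ctrl-C and kill
trap - INT                               # reset INT to its original disposition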
EXAMPLES
get stdin
function _read() { while IFS= read -r line; do echo "$line"; done; }
ls | _read
give answer to command
echo -e "ct\nct"|sudo passwd root
yes ct | sudo passwd root
show the different PIDs of subprocesses in a script
file=/tmp/test
paths=$(df -l |grep "^/dev.*120.*" |sed "s|^.* \([^ ]*\)$|\1|" | sed "/save/d")
#paths='/ /var'
echo '#!/bin/bash' > $file
for path in $paths
do
echo "# $path" >> $file
echo "(echo > free2zero; pid=\$!; echo \"start $path - \\\$! \$! - \\\$BASHPID \$BASHPID - \\\$PPID \$PPID - \\\$\\\$ \$\$\"; cd $path; sleep 60; rm free2zero; echo \"end $path - \\\$! \$! - \\\$BASHPID \$BASHPID - \\\$PPID \$PPID - \\\$\\\$ \$\$\"; exit) &" >> $file
done
echo "echo \"itself : \\\$! \$! - \\\$BASHPID \$BASHPID - \\\$PPID \$PPID - \\\$\\\$ \$\$\"; sleep 30" >> $file
chmod +x $file
cat $file
sh $file
wipe free space in devices
file=/tmp/test
paths=$(df -l |grep "^/dev.*" |sed "s|^.* \([^ ]*\)$|\1|" |xargs)
paths=${paths/\/save/}
echo '#!/bin/bash' > $file
for path in $paths
do
echo "# $path" >> $file
echo "(echo \"start $path \$(date +\"%T - %N\")\"; cd $path; dd if=/dev/zero of=free2zero; rm free2zero; echo \"end $path \$(date +\"%T - %N\")\"; exit) &" >> $file
done
chmod +x $file
sh $file
DEVICES
list
lshw -class disk -short # list shortly all available disks
lshw -class disk # detailed list of all available disks
blkid
parted $device print list # print partitions list
parted $device print free # print list of free space of partitions
FDISK
partition type of fdisk
8e : lvm
bf : solaris (zfs)
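with a recent util-linux, the type can also be set non-interactively via sfdisk (device & partition number are illustrative):
sfdisk --part-type /dev/sdb 1 8e # set partition 1 of /dev/sdb to type 8e (lvm)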
EXT
e2label $device $label # label
tune2fs -m 0 $device # reserved blocks to 0
mkfs.ext4 -m 0 -L $label $device # format
mkfs.ext4 -E lazy_itable_init $device # format quickly
FAT
fatlabel $device $label # label
https://linuxcontainers.org/
https://discuss.linuxcontainers.org
https://lxd.readthedocs.io/en/latest/
COMMANDS
Subcommand | Description |
---|---|
ALIAS | Manage command aliases |
CLUSTER | Manage cluster members |
CONFIG | Manage container and server configuration options |
CONSOLE | Attach to container consoles |
COPY | Copy containers within or in between LXD instances |
DELETE | Delete containers and snapshots |
EXEC | Execute commands in containers |
FILE | Manage files in containers |
HELP | Help about any command |
IMAGE | Manage images |
INFO | Show container or server information |
LAUNCH | Create and start containers from images |
LIST | List containers |
MOVE | Move containers within or in between LXD instances |
NETWORK | Manage and attach containers to networks |
OPERATION | List, show and delete background operations |
PROFILE | Manage profiles |
PUBLISH | Publish containers as images |
REMOTE | Manage the list of remote servers |
RENAME | Rename containers and snapshots |
RESTART | Restart containers |
RESTORE | Restore containers from snapshots |
SHELL | Execute commands in containers |
SNAPSHOT | Create container snapshots |
START | Start containers |
STOP | Stop containers |
STORAGE | Manage storage pools and volumes |
VERSION | Show local and remote versions |
Global flags
--debug # Show all debug messages
--force-local # Force using the local unix socket
-h,--help # Print help
-v,--verbose # Show all information messages
--version # Print version number
ALIAS
lxc alias
Manage command aliases
lxc alias [command]
add <alias> <target> [flags] # Add new aliases
list [flags] # List aliases
remove <alias> [flags] # Remove aliases
rename <alias> <new-alias> [flags] # Rename aliases
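example (alias name & columns are illustrative):
lxc alias add ll "list -c ns4t" # 'lxc ll' now expands to 'lxc list -c ns4t'
lxc alias list
lxc alias remove ll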
CLUSTER
lxc cluster
Manage cluster members
lxc cluster [command]
enable [<remote>:] <name> [flags] # Enable clustering on a single non-clustered LXD instance
list [<remote>:] [flags] # List all the cluster members
remove [<remote>:]<member> [flags] # Remove a member from the cluster
-f, --force # Force removing a member, even if degraded
rename [<remote>:]<member> <new-name> [flags] # Rename a cluster member
show [<remote>:]<member> [flags] # Show details of a cluster member
CONFIG
sub commands
device # Manage container devices
metadata # Manage container metadata files
template # Manage container file templates
trust [command] # Manage trusted clients
lxc config
Manage container and server configuration options
lxc config [command]
edit [<remote>:][<container>] [flags] # Edit container or server configurations as YAML
get [<remote>:][<container>] <key> [flags] # Get values for container or server configuration keys
set [<remote>:][<container>] <key> <value> [flags] # Set container or server configuration keys
show [<remote>:][<container>] [flags] # Show container or server configurations
--expanded # Show the expanded configuration
unset [<remote>:][<container>] <key> [flags] # Unset container or server configuration keys
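typical usage (container name & values are illustrative):
lxc config set c1 limits.memory 2GB # cap the container's memory
lxc config get c1 limits.memory
lxc config show c1 --expanded # include keys inherited from profiles
lxc config unset c1 limits.memory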
-
DEVICE
lxc config device
Manage container devices
lxc config device [command]
add [<remote>:]<container|profile> <device> <type> [key=value...] [flags] # Add devices to containers or profiles
get [<remote>:]<container|profile> <device> <key> [flags] # Get values for container device configuration keys
list [<remote>:]<container|profile> [flags] # List container devices
override [<remote>:]<container> <device> [key=value...] [flags] # Copy profile inherited devices and override configuration keys
remove [<remote>:]<container|profile> <name>... [flags] # Remove container devices
set [<remote>:]<container|profile> <device> <key> <value> [flags] # Set container device configuration keys
show [<remote>:]<container|profile> [flags] # Show full device configuration for containers or profiles
unset [<remote>:]<container|profile> <device> <key> [flags] # Unset container device configuration keys
-
METADATA
lxc config metadata
Manage container metadata files
lxc config metadata [command]
edit [<remote>:]<container> [flags] # Edit container metadata files
show [<remote>:]<container> [flags] # Show container metadata files
-
TEMPLATE
lxc config template
Manage container file templates
lxc config template [command]
create [<remote>:]<container> <template> [flags] # Create new container file templates
delete [<remote>:]<container> <template> [flags] # Delete container file templates
edit [<remote>:]<container> <template> [flags] # Edit container file templates
list [<remote>:]<container> [flags] # List container file templates
show [<remote>:]<container> <template> [flags] # Show content of container file templates
-
TRUST
lxc config trust
Manage trusted clients
lxc config trust [command]
add [<remote>:] <cert> [flags] # Add new trusted clients
list [<remote>:] [flags] # List trusted clients
remove [<remote>:] <hostname|fingerprint> [flags] # Remove trusted clients
Host properties
candid.api.key
candid.api.url
candid.domains
candid.expiry
cluster.offline_threshold
core.https_address
core.https_allowed_credentials
core.https_allowed_headers
core.https_allowed_methods
core.https_allowed_origin
core.proxy_http
core.proxy_https
core.proxy_ignore_hosts
core.trust_password
images.auto_update_cached
images.auto_update_interval
images.compression_algorithm
images.remote_cache_expiry
maas.api.key
maas.api.url
maas.machine
Container properties
boot.autostart
boot.autostart.delay
boot.autostart.priority
boot.host_shutdown_timeout
boot.stop.priority
environment.
limits.cpu
limits.cpu.allowance
limits.cpu.priority
limits.disk.priority
limits.memory
limits.memory.enforce
limits.memory.swap
limits.memory.swap.priority
limits.network.priority
limits.processes
linux.kernel_modules
migration.incremental.memory
migration.incremental.memory.goal
migration.incremental.memory.iterations
security.nesting
nvidia.runtime
raw.apparmor
raw.idmap
raw.lxc
raw.seccomp
security.devlxd
security.idmap.base
security.idmap.isolated
security.idmap.size
security.privileged
security.syscalls.blacklist
security.syscalls.blacklist_compat
security.syscalls.blacklist_default
user.meta-data
user.network-config
user.network_mode
user.user-data
user.vendor-data
volatile.apply_quota
volatile.apply_template
volatile.base_image
volatile.idmap.base
volatile.idmap.next
volatile.last_state.idmap
volatile.last_state.power
CONSOLE
lxc console
Attach to container consoles
lxc console [<remote>:]<container> [flags]
--show-log # Retrieve the container's console log
COPY
lxc copy
Copy containers within or in between LXD instances
lxc copy [<remote>:]<source>[/<snapshot>] [[<remote>:]<destination>] [flags]
-c, --config # Config key/value to apply to the new container
--container-only # Copy the container without its snapshots
-d, --device # New key/value to apply to a specific device
-e, --ephemeral # Ephemeral container
--mode # Transfer mode. One of pull (default), push or relay (default "pull")
--no-profiles # Create the container with no profiles applied
-p, --profile # Profile to apply to the new container
--stateless # Copy a stateful container stateless
--target # Cluster member name
DELETE
lxc delete
Delete containers and snapshots
lxc delete [<remote>:]<container>[/<snapshot>] [[<remote>:]<container>[/<snapshot>]...] [flags]
-f, --force # Force the removal of running containers
-i, --interactive # Require user confirmation
EXEC
lxc exec
Execute commands in containers. The command is executed directly using exec, so there is no shell and shell patterns (variables, file redirects, ...) won't be understood
lxc exec [<remote>:]<container> [flags] [--] <command line>
-n, --disable-stdin # Disable stdin (reads from /dev/null)
--env # Environment variable to set (e.g. HOME=/home/foo)
-t, --force-interactive # Force pseudo-terminal allocation
-T, --force-noninteractive # Disable pseudo-terminal allocation
--mode # Override the terminal mode (auto, interactive or non-interactive) (default "auto")
FILE
lxc file
Manage files in containers
delete [<remote>:]<container>/<path> [[<remote>:]<container>/<path>...] [flags] # Delete files in containers
edit [<remote>:]<container>/<path> [flags] # Edit files in containers
pull [<remote>:]<container>/<path> [[<remote>:]<container>/<path>...] <target path> [flags] # Pull files from containers
-p, --create-dirs # Create any directories necessary
-r, --recursive # Recursively transfer files
push <source path> [<remote>:]<container>/<path> [[<remote>:]<container>/<path>...] [flags]
--gid # Set the file's gid on push (default -1)
--mode # Set the file's perms on push
-r, --recursive # Recursively transfer files
--uid # Set the file's uid on push (default -1)
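example (paths & container name are illustrative):
lxc file pull c1/etc/hosts ./hosts # copy a file out of the container
lxc file push ./app.conf c1/etc/app.conf --uid 0 --gid 0
lxc file push -r ./site c1/var/www # recursively push a directory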
IMAGE
sub commands
alias # Manage image aliases
lxc image
Manage images
lxc image [command]
copy [<remote>:]<image> <remote>: [flags] # Copy images between servers
--alias # New aliases to add to the image
--auto-update # Keep the image up to date after initial copy
--copy-aliases # Copy aliases from source
--public # Make image public
delete [<remote>:]<image> [[<remote>:]<image>...] [flags] # Delete images
edit [<remote>:]<image> [flags] # Edit image properties
export [<remote>:]<image> [<target>] [flags] # Export and download images
import <tarball>|<directory>|<URL> [<rootfs tarball>] [<remote>:] [key=value...] [flags] # Import images into the image store
--alias # New aliases to add to the image
--public # Make image public
info [<remote>:]<image> [flags] # Show useful information about images
list [<remote>:] [<filter>...] [flags] # List images
--format # Format (csv|json|table|yaml) (default "table")
-c, --columns # Columns (default "lfpdasu")
# Column shorthand chars:
l - Shortest image alias (and optionally number of other aliases)
L - Newline-separated list of all image aliases
f - Fingerprint
p - Whether image is public
d - Description
a - Architecture
s - Size
u - Upload date
refresh [<remote>:]<image> [[<remote>:]<image>...] [flags] # Refresh images
show [<remote>:]<image> [flags] # Show image properties
-
ALIAS
lxc image alias
Manage image aliases
lxc image alias [command]
create [<remote>:]<alias> <fingerprint> [flags] # Create aliases for existing images
delete [<remote>:]<alias> [flags] # Delete image aliases
list [<remote>:] [<filters>...] [flags] # List image aliases
rename [<remote>:]<alias> <new-name> [flags] # Rename aliases
INFO
lxc info
Show container or server information
lxc info [<remote>:][<container>] [flags]
--resources # Show the resources available to the server
--show-log # Show the container's last 100 log lines
LAUNCH
lxc launch
Create and start containers from images
lxc launch [<remote>:]<image> [<remote>:][<name>] [flags]
-c, --config # Config key/value to apply to the new container
-e, --ephemeral # Ephemeral container
-n, --network # Network name
--no-profiles # Create the container with no profiles applied
-p, --profile # Profile to apply to the new container
-s, --storage # Storage pool name
--target # Cluster member name
-t, --type # Instance type
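example (image aliases & names are illustrative):
lxc launch ubuntu:20.04 web1 -p default -c limits.cpu=2 # create & start 'web1'
lxc launch images:alpine/3.12 tmp1 -e # ephemeral container, deleted on stop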
LIST
lxc list
List containers
lxc list [<remote>:] [<filter>...] [flags]
-c, --columns # Columns (default "ns46tSL")
--fast # Fast mode (same as --columns=nsacPt)
--format # Format (csv|json|table|yaml) (default "table")
filter name:
- A single keyword like "web" which will list any container with a name starting with "web"
- A regular expression on the container name, matching a configuration item or its value
- A key/value pair referring to a configuration item. For those, the namespace can be abbreviated to the smallest unambiguous identifier
Pre-defined column shorthand chars:
4 - IPv4 address
6 - IPv6 address
a - Architecture
b - Storage pool
c - Creation date
d - Description
l - Last used date
n - Name
N - Number of Processes
p - PID of the container's init process
P - Profiles
s - State
S - Number of snapshots
t - Type (persistent or ephemeral)
L - Location of the container (e.g. its cluster member)
Custom columns
Custom columns are defined with "key[:name][:maxWidth]":
KEY: The (extended) config key to display
NAME: Name to display in the column header
Defaults to the key if not specified or empty
MAXWIDTH: Max width of the column (longer results are truncated)
Defaults to -1 (unlimited). Use 0 to limit to the column header size
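example (filter & custom column are illustrative):
lxc list web -c ns46 --format csv # containers whose name starts with "web"
lxc list -c n,volatile.base_image:IMAGE:12 # custom column from a config key, max 12 chars wide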
MOVE
lxc move
Move containers within or in between LXD instances
lxc move [<remote>:]<container>[/<snapshot>] [<remote>:][<container>[/<snapshot>]] [flags]
-c, --config # Config key/value to apply to the target container
--container-only # Move the container without its snapshots
-d, --device # New key/value to apply to a specific device
--mode # Transfer mode. One of pull (default), push or relay. (default "pull")
--no-profiles # Unset all profiles on the target container
-p, --profile # Profile to apply to the target container
--stateless # Copy a stateful container stateless
--target # Cluster member name
NETWORK
lxc network
Manage and attach containers to networks
lxc network [command]
attach [<remote>:]<network> <container> [<device name>] [<interface name>] [flags] # Attach network interfaces to containers
attach-profile [<remote>:]<network> <profile> [<device name>] [<interface name>] [flags] # Attach network interfaces to profiles
create [<remote>:]<network> [key=value...] [flags] # Create new networks
--target # Cluster member name
delete [<remote>:]<network> [flags] # Delete networks
detach [<remote>:]<network> <container> [<device name>] [flags] # Detach network interfaces from containers
detach-profile [<remote>:]<network> <profile> [<device name>] [flags] # Detach network interfaces from profiles
edit [<remote>:]<network> [flags] # Edit network configurations as YAML
get [<remote>:]<network> <key> [flags] # Get values for network configuration keys
--target # Cluster member name
list [<remote>:] [flags] # List available networks
--format # Format (csv|json|table|yaml) (default "table")
list-leases [<remote>:]<network> [flags] # List DHCP leases
rename [<remote>:]<network> <new-name> [flags] # Rename networks
set [<remote>:]<network> <key> <value> [flags] # Set network configuration keys
--target # Cluster member name
show [<remote>:]<network> [flags] # Show network configurations
--target # Cluster member name
unset [<remote>:]<network> <key> [flags] # Unset network configuration keys
--target # Cluster member name
properties
bridge.driver
bridge.external_interfaces
bridge.mode
bridge.mtu
dns.domain
dns.mode
fan.overlay_subnet
fan.type
fan.underlay_subnet
ipv4.address
ipv4.dhcp
ipv4.dhcp.expiry
ipv4.dhcp.gateway
ipv4.dhcp.ranges
ipv4.firewall
ipv4.nat
ipv4.routes
ipv4.routing
ipv6.address
ipv6.dhcp
ipv6.dhcp.expiry
ipv6.dhcp.ranges
ipv6.dhcp.stateful
ipv6.firewall
ipv6.nat
ipv6.routes
ipv6.routing
raw.dnsmasq
PROFILE
sub commands
device # Manage container devices
lxc profile
Manage profiles
lxc profile [command]
add [<remote>:]<container> <profile> [flags] # Add profiles to containers
assign [<remote>:]<container> <profiles> [flags] # Assign sets of profiles to containers
copy [<remote>:]<profile> [<remote>:]<profile> [flags] # Copy profiles
create [<remote>:]<profile> [flags] # Create profiles
delete [<remote>:]<profile> [flags] # Delete profiles
edit [<remote>:]<profile> [flags] # Edit profile configurations as YAML
get [<remote>:]<profile> <key> [flags] # Get values for profile configuration keys
list [<remote>:] [flags] # List profiles
remove [<remote>:]<container> <profile> [flags] # Remove profiles from containers
rename [<remote>:]<profile> <new-name> [flags] # Rename profiles
set [<remote>:]<profile> <key> <value> [flags] # Set profile configuration keys
show [<remote>:]<profile> [flags] # Show profile configurations
unset [<remote>:]<profile> <key> [flags] # Unset profile configuration keys
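typical flow (profile & container names are illustrative):
lxc profile copy default web # start from the default profile
lxc profile set web limits.memory 1GB
lxc profile add c1 web # append the profile to a container
lxc profile assign c1 default,web # or replace the whole profile list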
-
DEVICE
lxc profile device
Manage container devices
lxc profile device [command]
add [<remote>:]<container|profile> <device> <type> [key=value...] [flags] # Add devices to containers or profiles
get [<remote>:]<container|profile> <device> <key> [flags] # Get values for container device configuration keys
list [<remote>:]<container|profile> [flags] # List container devices
remove [<remote>:]<container|profile> <name>... [flags] # Remove container devices
set [<remote>:]<container|profile> <device> <key> <value> [flags] # Set container device configuration keys
show [<remote>:]<container|profile> [flags] # Show full device configuration for containers or profiles
unset [<remote>:]<container|profile> <device> <key> [flags] # Unset container device configuration keys
properties
boot.autostart
boot.autostart.delay
boot.autostart.priority
boot.host_shutdown_timeout
boot.stop.priority
environment.
limits.cpu
limits.cpu.allowance
limits.cpu.priority
limits.disk.priority
limits.memory
limits.memory.enforce
limits.memory.swap
limits.memory.swap.priority
limits.network.priority
limits.processes
linux.kernel_modules
migration.incremental.memory
migration.incremental.memory.goal
migration.incremental.memory.iterations
security.nesting
nvidia.runtime
raw.apparmor
raw.idmap
raw.lxc
raw.seccomp
security.devlxd
security.idmap.base
security.idmap.isolated
security.idmap.size
security.privileged
security.syscalls.blacklist
security.syscalls.blacklist_compat
security.syscalls.blacklist_default
user.meta-data
user.network-config
user.network_mode
user.user-data
user.vendor-data
volatile.apply_quota
volatile.apply_template
volatile.base_image
volatile.idmap.base
volatile.idmap.next
volatile.last_state.idmap
volatile.last_state.power
PUBLISH
lxc publish
Publish containers as images
lxc publish [<remote>:]<container>[/<snapshot>] [<remote>:] [flags] [key=value...]
--alias # New alias to define at target
--compression # Define a compression algorithm: for image or none
-f, --force # Stop the container if currently running
--public # Make the image public
REMOTE
lxc remote
Manage the list of remote servers
lxc remote [command]
add [<remote>] <IP|FQDN|URL> [flags] # Add new remote servers
--accept-certificate # Accept certificate
--auth-type # Server authentication type (tls or candid)
--domain # Candid domain to use
--password # Remote admin password
--protocol # Server protocol (lxd or simplestreams)
--public # Public image server
get-default [flags] # Show the default remote
list [flags] # List the available remotes
remove <remote> [flags] # Remove remotes
rename <remote> <new-name> [flags] # Rename remotes
set-default <remote> [flags] # Set the default remote
set-url <remote> <URL> [flags] # Set the URL for the remote
RENAME
lxc rename
Rename containers and snapshots
lxc rename [<remote>:]<container>[/<snapshot>] <container>[/<snapshot>] [flags]
RESTART
lxc restart
Restart containers
lxc restart [<remote>:]<container> [[<remote>:]<container>...] [flags]
--all # Run command against all containers
-f, --force # Force the container to shutdown
--timeout # Time to wait for the container before killing it (default -1)
RESTORE
lxc restore
Restore containers from snapshots
lxc restore [<remote>:]<container> <snapshot> [flags]
--stateful # Whether or not to restore the container's running state from snapshot (if available)
SHELL
Similar to: lxc exec <container> -- sh -c "command"
See lxc exec !
lxc shell
Enter the container and use its shell
lxc shell [<remote>:]<container>
SNAPSHOT
lxc snapshot
Create container snapshots
When --stateful is used, LXD attempts to checkpoint the container's running state, including process memory state, TCP connections, ...
lxc snapshot [<remote>:]<container> [<snapshot name>] [flags]
--stateful # LXD attempts to checkpoint the container's running state, including process memory state, TCP connections, ...
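snapshot & roll back (names are illustrative):
lxc snapshot c1 before-upgrade # create snapshot c1/before-upgrade
lxc restore c1 before-upgrade # roll the container back
lxc delete c1/before-upgrade # drop the snapshot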
START
lxc start
Start containers
lxc start [<remote>:]<container> [[<remote>:]<container>...] [flags]
--all # Run command against all containers
--stateless # Ignore the container state
STOP
lxc stop
Stop containers
lxc stop [<remote>:]<container> [[<remote>:]<container>...] [flags]
--all # Run command against all containers
-f, --force # Force the container to shutdown
--stateful # Store the container state
--timeout # Time to wait for the container before killing it (default -1)
STORAGE
sub command
volume # Manage storage volumes
lxc storage
Manage storage pools and volumes
lxc storage [command]
create [<remote>:]<pool> <driver> [key=value...] [flags] # Create storage pools
--target # Cluster member name
delete [<remote>:]<pool> [flags] # Delete storage pools
edit [<remote>:]<pool> [flags] # Edit storage pool configurations as YAML
get [<remote>:]<pool> <key> [flags] # Get values for storage pool configuration keys
info [<remote>:]<pool> [flags] # Show useful information about storage pools
--bytes # Show the used and free space in bytes
list [<remote>:] [flags] # List available storage pools
set [<remote>:]<pool> <key> <value> [flags] # Set storage pool configuration keys
--target # Cluster member name
show [<remote>:]<pool> [flags] # Show storage pool configurations and resources
--resources # Show the resources available to the storage pool
--target # Cluster member name
unset [<remote>:]<pool> <key> [flags] # Unset storage pool configuration keys
--target # Cluster member name
-
VOLUME
lxc storage volume
Manage storage volumes
Unless specified through a prefix, all volume operations affect "custom" (user created) volumes
lxc storage volume [command]
attach [<remote>:]<pool> <volume> <container> [<device name>] <path> [flags] # Attach new storage volumes to containers
attach-profile [<remote:>]<pool> <volume> <profile> [<device name>] <path> [flags] # Attach new storage volumes to profiles
copy <pool>/<volume> <pool>/<volume> [flags] # Copy storage volumes
--mode # Transfer mode. One of pull (default), push or relay. (default "pull")
--target # Cluster member name
create [<remote>:]<pool> <volume> [key=value...] [flags] # Create new custom storage volumes
--target # Cluster member name
delete [<remote>:]<pool> <volume> [flags] # Delete storage volumes
--target # Cluster member name
detach [<remote>:]<pool> <volume> <container> [<device name>] [flags] # Detach storage volumes from containers
detach-profile [<remote:>]<pool> <volume> <profile> [<device name>] [flags] # Detach storage volumes from profiles
edit [<remote>:]<pool> <volume> [flags] # Edit storage volume configurations as YAML
--target # Cluster member name
get [<remote>:]<pool> <volume> <key> [flags] # Get values for storage volume configuration keys
--target # Cluster member name
list [<remote>:]<pool> [flags] # List storage volumes
move [<pool>/]<volume> [<pool>/]<volume> [flags] # Move storage volumes between pools
--mode # Transfer mode. One of pull (default), push or relay. (default "pull")
--target # Cluster member name
rename [<remote>:]<pool> <old name> <new name> [flags] # Rename storage volumes
--target # Cluster member name
set [<remote>:]<pool> <volume> <key> <value> [flags] # Set storage volume configuration keys
--target # Cluster member name
show [<remote>:]<pool> <volume> [flags] # Show storage volume configurations
--target # Cluster member name
unset [<remote>:]<pool> <volume> <key> [flags] # Unset storage volume configuration keys
--target # Cluster member name
RPM
rpm -qa # list of installed packages
rpm -ivh item.rpm # install rpm package
YUM
yum update
yum list installed
yum search item
yum repolist all
yum whatprovides "*/bin/whois"
yum --nogpgcheck install item.rpm # install rpm package
yum -y install yum-utils # install yum tools
yum install yum-downloadonly # download packages
yum update httpd -y --downloadonly --downloaddir=/tmp # download packages
versionlock
yum -y install yum-versionlock
yum versionlock list
yum versionlock $PACKAGE # yum versionlock add $PACKAGE
yum versionlock delete $PACKAGE
clean
yum clean headers && yum clean packages && yum clean metadata
yum clean all
YUMDOWNLOADER
yumdownloader httpd # only download package
TRICK
chkconfig
list all auto-start daemons
chkconfig --list
reinitialize network config
rm /etc/udev/rules.d/70-persistent-net.rules # delete cache
echo "UUID=$(uuidgen eth0)" >> /etc/sysconfig/network-scripts/ifcfg-eth0 # regenerate the interface UUID
nano /etc/sysconfig/network-scripts/ifcfg-eth0
DEBCONF
debconf-show # show configuration parameters for a package
debconf-get-selections # return all configuration parameters
debconf-set-selections # load configuration from file
duplicate software & configuration between two systems
OS 1
su -
dir="conf-$(date +"%Y%m%d")" && mkdir -p $dir
dpkg --get-selections > $dir/dpkg.dat
debconf-get-selections | grep "^[^#]" | grep -v "error\s*$" > $dir/debconf.dat # drop comments & error lines
rsync -av --delete $dir/ user2@host2:/$dir/
OS 2
dir="conf-$(date +"%Y%m%d")" && mkdir -p $dir
# if you want remove old non matching configuration
# dpkg --clear-selections
debconf-set-selections $dir/debconf.dat
dpkg --set-selections < $dir/dpkg.dat
apt-mark showmanual > pkgs_manual.lst
sudo apt-mark auto $(cat pkgs_auto.lst)
sudo apt-mark manual $(cat pkgs_manual.lst)
PHYSICAL
pvchange # Change attributes of physical volume(s)
pvck # Check the consistency of physical volume(s)
pvcreate # Initialize physical volume(s) for use by LVM
pvdisplay # Display various attributes of physical volume(s)
pvmove # Move extents from one physical volume to another
pvremove # Remove LVM label(s) from physical volume(s)
pvresize # Resize physical volume(s)
pvs # Display information about physical volumes
pvscan # List all physical volumes
GROUP
vgdisplay # display volume groups
vgcfgbackup # back up the VGDA
vgcfgrestore # restore the VGDA
vgchange # change the attributes of a VG
vgck # check the VGDA
vgcreate # create a VG
vgdisplay # show information
vgexport # deactivate a VG so its PVs can be removed
vgimport # activate and register a VG on the system
vgextend # add one or more PVs to a VG
vgmerge # merge two VGs
vgmknodes # recreate /dev/volume_name and the group special file
vgreduce # remove one or more PVs from a VG
vgremove # delete a VG
vgrename # rename a VG
LOGICAL
lvdisplay # display logical volumes
lvcreate # create an LV
lvchange # change the attributes of an LV
lvdisplay # show information about an LV
lvextend # increase the size of an LV
lvreduce # reduce the size of an LV
lvremove # delete an LV
lvrename # rename an LV
lvscan # scan for all existing LVs
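a minimal end-to-end sketch (device names & sizes are assumptions):
pvcreate /dev/sdb1 # initialize the physical volume
vgcreate vg0 /dev/sdb1 # create a volume group on it
lvcreate -L 10G -n data vg0 # carve out a 10G logical volume
mkfs.ext4 /dev/vg0/data
lvextend -L +5G /dev/vg0/data && resize2fs /dev/vg0/data # grow the LV, then the filesystem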
TOC
chapter |
---|
OPTIONS |
FORMAT |
BASIC FILTERS |
TYPES & VALUES |
BUILTIN OPERATORS AND FUNCTIONS |
FUNCTIONS |
DATE |
MATH |
LOOP |
ASSIGNMENT |
CONDITIONALS-COMPARISONS |
REGEXP |
jq can transform JSON in various ways, by selecting, iterating, reducing and otherwise mangling JSON documents. For instance, running the command jq 'map(.price) | add' will take an array of JSON objects as input and return the sum of their "price" fields
jq can accept text input as well, but by default, jq reads a stream of JSON entities (including numbers and other literals) from stdin. Whitespace is only needed to separate entities such as 1 and 2, and true and false. One or more files may be specified, in which case jq will read input from those instead
OPTIONS
jq [options...] filter [files...]
--version # Output the jq version and exit with zero
--seq # Use the application/json-seq MIME type scheme for separating JSON texts in jq's input and output. This means that an ASCII RS (record separator) character is printed before each value on output and an ASCII LF (line feed) is printed after every output. Input JSON texts that fail to parse are ignored (but warned about), discarding all subsequent input until the next RS. This mode also parses the output of jq without the --seq option
--stream # Parse the input in streaming fashion, outputting arrays of path and leaf values (scalars and empty arrays or empty objects). For example, "a" becomes [[],"a"], and [[],"a",["b"]] becomes [[0],[]], [[1],"a"], and [[1,0],"b"]
-s, --slurp # Instead of running the filter for each JSON object in the input, read the entire input stream into a large array and run the filter just once
-R, --raw-input # Don't parse the input as JSON. Instead, each line of text is passed to the filter as a string. If combined with --slurp, then the entire input is passed to the filter as a single long string
-n, --null-input # Don't read any input at all! Instead, the filter is run once using null as the input. This is useful when using jq as a simple calculator or to construct JSON data from scratch
-c, --compact-output # By default, jq pretty-prints JSON output. Using this option will result in more compact output by instead putting each JSON object on a single line
--tab # Use a tab for each indentation level instead of two spaces
--indent n # Use the given number of spaces (no more than 8) for indentation
-M, --monochrome-output # disable color. By default, jq outputs colored JSON if writing to a terminal
-C, --color-output # force color even if writing to a pipe
-a, --ascii-output # jq usually outputs non-ASCII Unicode codepoints as UTF-8, even if the input specified them as escape sequences (like "\u03bc"). Using this option, you can force jq to produce pure ASCII output with every non-ASCII character replaced with the equivalent escape sequence
--unbuffered # Flush the output after each JSON object is printed (useful if you're piping a slow data source into jq and piping jq's output elsewhere)
-S, --sort-keys # Output the fields of each object with the keys in sorted order
-r, --raw-output # With this option, if the filter's result is a string then it will be written directly to standard output rather than being formatted as a JSON string with quotes. This can be useful for making jq filters talk to non-JSON-based systems
-j, --join-output # Like -r but jq won't print a newline after each output
-f filename, --from-file filename # Read filter from the file rather than from a command line, like awk's -f option. You can also use '#' to make comments
-L directory # Prepend directory to the search list for modules. If this option is used then no builtin search list is used. See the section on modules below
-e, --exit-status # Sets the exit status of jq to 0 if the last output values was neither false nor null, 1 if the last output value was either false or null, or 4 if no valid result was ever produced. Normally jq exits with 2 if there was any usage problem or system error, 3 if there was a jq program compile error, or 0 if the jq program ran
--arg name value # This option passes a value to the jq program as a predefined variable. If you run jq with --arg foo bar, then $foo is available in the program and has the value "bar". Note that value will be treated as a string, so --arg foo 123 will bind $foo to "123"
--argjson name JSON-text # This option passes a JSON-encoded value to the jq program as a predefined variable. If you run jq with --argjson foo 123, then $foo is available in the program and has the value 123
--slurpfile variable-name filename # This option reads all the JSON texts in the named file and binds an array of the parsed JSON values to the given global variable. If you run jq with --slurpfile foo bar, then $foo is available in the program and has an array whose elements correspond to the texts in the file named bar
--run-tests [filename] # Runs the tests in the given file or standard input. This must be the last option given and does not honor all preceding options. The input consists of comment lines, empty lines, and program lines followed by one input line, as many lines of output as are expected (one per output), and a terminating empty line. Compilation failure tests start with a line containing only "%%FAIL", then a line containing the program to compile, then a line containing an error message to compare to the actual
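combining some of these options (values are illustrative):
jq -n --arg user "$USER" '{name: $user}' # build JSON from scratch, no input
jq -r '.name' <<< '{"name":"stedolan"}' # raw output: stedolan, without quotes
jq -c '.[]' <<< '[{"a":1},{"a":2}]' # one compact object per line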
FORMAT
The @foo syntax is used to format and escape strings, which is useful for building URLs, documents in a language like HTML or XML, and so forth. @foo can be used as a filter on its own, the possible escapings are:
@text: # Calls tostring, see that function for details
@json: # Serializes the input as JSON
@html: # Applies HTML/XML escaping, by mapping the characters <, >, &, ', " to their entity equivalents &lt;, &gt;, &amp;, &#39;, &#34;
@uri: # Applies percent-encoding by mapping all reserved URI characters to a %XX sequence
@csv: # The input must be an array, and it is rendered as CSV with double quotes for strings, and quotes escaped by repetition
@tsv: # The input must be an array, and it is rendered as TSV (tab-separated values)
@sh: # The input is escaped suitable for use in a command-line for a POSIX shell
@base64: # The input is converted to base64 as specified by RFC 4648
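examples (inputs are illustrative):
jq -r '.[] | @csv' <<< '[[1,"a"],[2,"b"]]' # 1,"a" then 2,"b"
jq -r '@base64' <<< '"hello"' # aGVsbG8=
jq -r '@uri' <<< '"a b&c"' # a%20b%26c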
BASIC FILTERS
.
The absolute simplest filter is . . This is a filter that takes its input and produces it unchanged as output. That is, this is the identity operator
jq '.'
"Hello, world!"
=> "Hello, world!"
.foo, .foo.bar
Object Identifier-Index. The simplest useful filter is .foo. When given a JSON object (aka dictionary or hash) as input, it produces the value at the key "foo", or null if there's none present. A filter of the form .foo.bar is equivalent to .foo|.bar
jq '.foo'
{"foo": 42, "bar": "less interesting data"}
=> 42
.foo? # Optional Object Identifier-Index. Just like .foo, but does not output even an error when . is not an array or an object
jq '.foo?'
{"foo": 42, "bar": "less interesting data"}
=> 42
jq '.foo?'
{"notfoo": true, "alsonotfoo": false}
=> null
.[<string>], .[2] # Generic Object Index / Array Index. Look up fields of an object with syntax like .["foo"] (.foo above is a shorthand version of this, but only for identifier-like strings); with a number, .[2] returns the element at that index, and negative numbers count back from the end
jq '.[0]'
[{"name":"JSON", "good":true}, {"name":"XML", "good":false}]
=> {"name":"JSON", "good":true}
jq '.[-2]'
[1,2,3]
=> 2
.[10:15] # Array/String Slice. The .[10:15] syntax can be used to return a subarray of an array or substring of a string. The array returned by .[10:15] will be of length 5, containing the elements from index 10 (inclusive) to index 15 (exclusive). Either index may be negative (in which case it counts backwards from the end of the array), or omitted (in which case it refers to the start or end of the array)
jq '.[2:4]'
["a","b","c","d","e"]
=> ["c", "d"]
jq '.[2:4]'
"abcdefghi"
=> "cd"
jq '.[:3]'
["a","b","c","d","e"]
=> ["a", "b", "c"]
jq '.[-2:]'
["a","b","c","d","e"]
=> ["d", "e"]
.[] # Array/Object Value Iterator. If you use the .[index] syntax, but omit the index entirely, it will return all of the elements of an array. Running .[] with the input [1,2,3] will produce the numbers as three separate results, rather than as a single array.
jq '.[]'
[{"name":"JSON", "good":true}, {"name":"XML", "good":false}]
=> {"name":"JSON", "good":true}, {"name":"XML", "good":false}
jq '.[]'
[]
=>
jq '.[]'
{"a": 1, "b": 1}
=> 1, 1
.[]? # Like .[], but no errors will be output if . is not an array or object
, # Comma. If two filters are separated by a comma, then the same input will be fed into both and the two filters' output value streams will be concatenated in order: first, all of the outputs produced by the left expression, and then all of the outputs produced by the right. For instance, filter .foo, .bar, produces both the "foo" fields and "bar" fields as separate outputs
jq '.foo, .bar'
{"foo": 42, "bar": "something else", "baz": true}
=> 42, "something else"
jq '.user, .projects[]'
{"user":"stedolan", "projects": ["jq", "wikiflow"]}
=> "stedolan", "jq", "wikiflow"
jq '.[4,2]'
["a","b","c","d","e"]
=> "e", "c"
| # Pipe. The | operator combines two filters by feeding the output(s) of the one on the left into the input of the one on the right. It's pretty much the same as the Unix shell's pipe, if you're used to that
jq '.[] | .name'
[{"name":"JSON", "good":true}, {"name":"XML", "good":false}]
=> "JSON", "XML"
() # Parentheses work as a grouping operator just as in any typical programming language
jq '(. + 2) * 5'
1
=> 15
TYPES AND VALUES
[] # Array construction. As in JSON, [] is used to construct arrays, as in [1,2,3]. The elements of the arrays can be any jq expression, including a pipeline. All of the results produced by all of the expressions are collected into one big array. You can use it to construct an array out of a known quantity of values (as in [.foo, .bar, .baz]) or to "collect" all the results of a filter into an array (as in [.items[].name])
jq '[.user, .projects[]]'
{"user":"stedolan", "projects": ["jq", "wikiflow"]}
=> ["stedolan", "jq", "wikiflow"]
jq '[ .[] | . * 2]'
[1, 2, 3]
=> [2, 4, 6]
{} # Object Construction. Like JSON, {} is for constructing objects (aka dictionaries or hashes), as in: {"a": 42, "b": 17}. If the keys are "identifier-like", then the quotes can be left off, as in {a:42, b:17}. Keys generated by expressions need to be parenthesized, e.g., {("a"+"b"):59}.
jq '{user, title: .titles[]}'
{"user":"stedolan","titles":["JQ Primer", "More JQ"]}
=> {"user":"stedolan", "title": "JQ Primer"}, {"user":"stedolan", "title": "More JQ"}
jq '{(.user): .titles}'
{"user":"stedolan","titles":["JQ Primer", "More JQ"]}
=> {"stedolan": ["JQ Primer", "More JQ"]}
.. # Recursive Descent. Recursively descends ., producing every value. This is the same as the zero-argument recurse builtin (see below). This is intended to resemble the XPath // operator. Note that ..a does not work; use ..|.a instead. In the example below we use ..|.a? to find all the values of object keys "a" in any object found "below" ..
jq '..|.a?'
[[{"a":1}]]
=> 1
BUILTIN OPERATORS AND FUNCTIONS
+ # Addition
- Numbers are added by normal arithmetic.
- Arrays are added by being concatenated into a larger array.
- Strings are added by being joined into a larger string.
- Objects are added by merging, that is, inserting all the key-value pairs from both objects into a single combined object. If both objects contain a value for the same key, the object on the right of the + wins
jq '.a + 1'
{"a": 7}
=> 8
jq '.a + .b'
{"a": [1,2], "b": [3,4]}
=> [1,2,3,4]
jq '.a + null'
{"a": 1}
=> 1
jq '.a + 1'
{}
=> 1
jq '{a: 1} + {b: 2} + {c: 3} + {a: 42}'
null
=> {"a": 42, "b": 2, "c": 3}
- # Subtraction. As well as normal arithmetic subtraction on numbers, the - operator can be used on arrays to remove all occurrences of the second array's elements from the first array.
jq '4 - .a'
{"a":3}
=> 1
jq '. - ["xml", "yaml"]'
["xml", "yaml", "json"]
=> ["json"]
Multiplication, division, modulo: *, /, and %
These infix operators behave as expected when given two numbers. Division by zero raises an error. x % y computes x modulo y.
Multiplying a string by a number produces the concatenation of that string that many times. "x" * 0 produces null.
Dividing a string by another splits the first using the second as separators.
Multiplying two objects will merge them recursively: this works like addition but if both objects contain a value for the same key, and the values are objects, the two are merged with the same strategy.
jq '10 / . * 3'
5
=> 6
jq '. / ", "'
"a, b,c,d, e"
=> ["a","b,c,d","e"]
jq '{"k": {"a": 1, "b": 2}} * {"k": {"a": 0,"c": 3}}'
null
=> {"k": {"a": 0, "b": 2, "c": 3}}
jq '.[] | (1 / .)?'
[1,0,-1]
=> 1, -1
length
The builtin function length gets the length of various different types of value:
- The length of a string is the number of Unicode codepoints it contains (which will be the same as its JSON-encoded length in bytes if it's pure ASCII).
- The length of an array is the number of elements.
- The length of an object is the number of key-value pairs.
- The length of null is zero.
jq '.[] | length'
[[1,2], "string", {"a":2}, null]
=> 2, 6, 1, 0
utf8bytelength
The builtin function utf8bytelength outputs the number of bytes used to encode a string in UTF-8.
jq 'utf8bytelength'
"\u03bc"
=> 2
keys, keys_unsorted
The builtin function keys, when given an object, returns its keys in an array.
The keys are sorted "alphabetically", by unicode codepoint order. This is not an order that makes particular sense in any particular language, but you can count on it being the same for any two objects with the same set of keys, regardless of locale settings.
When keys is given an array, it returns the valid indices for that array: the integers from 0 to length-1.
The keys_unsorted function is just like keys, but if the input is an object then the keys will not be sorted, instead the keys will roughly be in insertion order.
jq 'keys'
{"abc": 1, "abcd": 2, "Foo": 3}
=> ["Foo", "abc", "abcd"]
jq 'keys'
[42,3,35]
=> [0,1,2]
has(key)
The builtin function has returns whether the input object has the given key, or the input array has an element at the given index.
has($key) has the same effect as checking whether $key is a member of the array returned by keys, although has will be faster.
jq 'map(has("foo"))'
[{"foo": 42}, {}]
=> [true, false]
jq 'map(has(2))'
[[0,1], ["a","b","c"]]
=> [false, true]
in
The builtin function in returns whether or not the input key is in the given object, or the input index corresponds to an element in the given array. It is, essentially, an inversed version of has.
jq '.[] | in({"foo": 42})'
["foo", "bar"]
=> true, false
jq 'map(in([0,1]))'
[2, 0]
=> [false, true]
map(x), map_values(x)
For any filter x, map(x) will run that filter for each element of the input array, and return the outputs in a new array. map(.+1) will increment each element of an array of numbers.
Similarly, map_values(x) will run that filter for each element, but it will return an object when an object is passed.
map(x) is equivalent to [.[] | x]. In fact, this is how it's defined. Similarly, map_values(x) is defined as .[] |= x.
jq 'map(.+1)'
[1,2,3]
=> [2,3,4]
jq 'map_values(.+1)'
{"a": 1, "b": 2, "c": 3}
=> {"a": 2, "b": 3, "c": 4}
path(path_expression)
Outputs array representations of the given path expression in .. The outputs are arrays of strings (object keys) and/or numbers (array indices).
Path expressions are jq expressions like .a, but also .[]. There are two types of path expressions: ones that can match exactly, and ones that cannot. For example, .a.b.c is an exact match path expression, while .a[].b is not.
path(exact_path_expression) will produce the array representation of the path expression even if it does not exist in ., if . is null or an array or an object.
path(pattern) will produce array representations of the paths matching pattern if the paths exist in ..
Note that the path expressions are not different from normal expressions. The expression path(..|select(type=="boolean")) outputs all the paths to boolean values in ., and only those paths.
jq 'path(.a[0].b)'
null
=> ["a",0,"b"]
jq '[path(..)]'
{"a":[{"b":1}]}
=> [[],["a"],["a",0],["a",0,"b"]]
del(path_expression)
The builtin function del removes a key and its corresponding value from an object.
jq 'del(.foo)'
{"foo": 42, "bar": 9001, "baz": 42}
=> {"bar": 9001, "baz": 42}
jq 'del(.[1, 2])'
["foo", "bar", "baz"]
=> ["foo"]
getpath(PATHS)
The builtin function getpath outputs the values in . found at each path in PATHS.
jq 'getpath(["a","b"])'
null
=> null
jq '[getpath(["a","b"], ["a","c"])]'
{"a":{"b":0, "c":1}}
=> [0, 1]
setpath(PATHS; VALUE)
The builtin function setpath sets the PATHS in . to VALUE.
jq 'setpath(["a","b"]; 1)'
null
=> {"a": {"b": 1}}
jq 'setpath(["a","b"]; 1)'
{"a":{"b":0}}
=> {"a": {"b": 1}}
jq 'setpath([0,"a"]; 1)'
null
=> [{"a":1}]
delpaths(PATHS)
The builtin function delpaths deletes the PATHS in .. PATHS must be an array of paths, where each path is an array of strings and numbers.
jq 'delpaths([["a","b"]])'
{"a":{"b":1},"x":{"y":2}}
=> {"a":{},"x":{"y":2}}
to_entries, from_entries, with_entries
These functions convert between an object and an array of key-value pairs. If to_entries is passed an object, then for each k: v entry in the input, the output array includes {"key": k, "value": v}.
from_entries does the opposite conversion, and with_entries(foo) is a shorthand for to_entries | map(foo) | from_entries, useful for doing some operation to all keys and values of an object. from_entries accepts key, Key, name, Name, value and Value as keys.
jq 'to_entries'
{"a": 1, "b": 2}
=> [{"key":"a", "value":1}, {"key":"b", "value":2}]
jq 'from_entries'
[{"key":"a", "value":1}, {"key":"b", "value":2}]
=> {"a": 1, "b": 2}
jq 'with_entries(.key |= "KEY_" + .)'
{"a": 1, "b": 2}
=> {"KEY_a": 1, "KEY_b": 2}
select(boolean_expression)
The function select(foo) produces its input unchanged if foo returns true for that input, and produces no output otherwise.
It's useful for filtering lists: [1,2,3] | map(select(. >= 2)) will give you [2,3].
jq 'map(select(. >= 2))'
[1,5,3,0,7]
=> [5,3,7]
jq '.[] | select(.id == "second")'
[{"id": "first", "val": 1}, {"id": "second", "val": 2}]
=> {"id": "second", "val": 2}
arrays, objects, iterables, booleans, numbers, normals, finites, strings, nulls, values, scalars
These built-ins select only inputs that are arrays, objects, iterables (arrays or objects), booleans, numbers, normal numbers, finite numbers, strings, null, non-null values, and non-iterables, respectively.
jq '.[]|numbers'
[[],{},1,"foo",null,true,false]
=> 1
empty
empty returns no results. None at all. Not even null.
It's useful on occasion. You'll know if you need it :)
jq '1, empty, 2'
null
=> 1, 2
jq '[1,2,empty,3]'
null
=> [1,2,3]
error(message)
Produces an error, just like .a applied to values other than null and objects would, but with the given message as the error's value. Errors can be caught with try/catch; see below.
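for example:
jq 'try error("oops") catch .' <<< 'null' # => "oops"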
halt
Stops the jq program with no further outputs. jq will exit with exit status 0.
halt_error, halt_error(exit_code)
Stops the jq program with no further outputs. The input will be printed on stderr as raw output (i.e., strings will not have double quotes) with no decoration, not even a newline.
The given exit_code (defaulting to 5) will be jq's exit status.
For example, "Error: something went wrong\n"|halt_error(1).
$__loc__
Produces an object with a "file" key and a "line" key, with the filename and line number where $__loc__ occurs, as values.
jq 'try error("\($__loc__)") catch .'
null
=> "{\"file\":\"<top-level>\",\"line\":1}"
paths, paths(node_filter), leaf_paths
paths outputs the paths to all the elements in its input (except it does not output the empty list, representing . itself).
paths(f) outputs the paths to any values for which f is true. That is, paths(numbers) outputs the paths to all numeric values.
leaf_paths is an alias of paths(scalars); leaf_paths is deprecated and will be removed in the next major release.
jq '[paths]'
[1,[[],{"a":2}]]
=> [[0],[1],[1,0],[1,1],[1,1,"a"]]
jq '[paths(scalars)]'
[1,[[],{"a":2}]]
=> [[0],[1,1,"a"]]
add
The filter add takes as input an array, and produces as output the elements of the array added together. This might mean summed, concatenated or merged depending on the types of the elements of the input array - the rules are the same as those for the + operator (described above).
If the input is an empty array, add returns null.
jq 'add'
["a","b","c"]
=> "abc"
jq 'add'
[1, 2, 3]
=> 6
jq 'add'
[]
=> null
any, any(condition), any(generator; condition)
The filter any takes as input an array of boolean values, and produces true as output if any of the elements of the array are true.
If the input is an empty array, any returns false.
The any(condition) form applies the given condition to the elements of the input array.
The any(generator; condition) form applies the given condition to all the outputs of the given generator.
jq 'any'
[true, false]
=> true
jq 'any'
[false, false]
=> false
jq 'any'
[]
=> false
all, all(condition), all(generator; condition)
The filter all takes as input an array of boolean values, and produces true as output if all of the elements of the array are true.
The all(condition) form applies the given condition to the elements of the input array.
The all(generator; condition) form applies the given condition to all the outputs of the given generator.
If the input is an empty array, all returns true.
jq 'all'
[true, false]
=> false
jq 'all'
[true, true]
=> true
jq 'all'
[]
=> true
flatten, flatten(depth)
The filter flatten takes as input an array of nested arrays, and produces a flat array in which all arrays inside the original array have been recursively replaced by their values. You can pass an argument to it to specify how many levels of nesting to flatten.
flatten(2) is like flatten, but going only up to two levels deep.
jq 'flatten'
[1, [2], [[3]]]
=> [1, 2, 3]
jq 'flatten(1)'
[1, [2], [[3]]]
=> [1, 2, [3]]
jq 'flatten'
[[]]
=> []
jq 'flatten'
[{"foo": "bar"}, [{"foo": "baz"}]]
=> [{"foo": "bar"}, {"foo": "baz"}]
range(upto), range(from;upto), range(from;upto;by)
The range function produces a range of numbers. range(4;10) produces 6 numbers, from 4 (inclusive) to 10 (exclusive). The numbers are produced as separate outputs. Use [range(4;10)] to get a range as an array.
The one argument form generates numbers from 0 to the given number, with an increment of 1.
The two argument form generates numbers from from to upto with an increment of 1.
The three argument form generates numbers from from to upto with an increment of by.
jq 'range(2;4)'
null
=> 2, 3
jq '[range(2;4)]'
null
=> [2,3]
jq '[range(4)]'
null
=> [0,1,2,3]
jq '[range(0;10;3)]'
null
=> [0,3,6,9]
jq '[range(0;10;-1)]'
null
=> []
jq '[range(0;-5;-1)]'
null
=> [0,-1,-2,-3,-4]
floor
The floor function returns the floor of its numeric input.
jq 'floor'
3.14159
=> 3
sqrt
The sqrt function returns the square root of its numeric input.
jq 'sqrt'
9
=> 3
tonumber
The tonumber function parses its input as a number. It will convert correctly-formatted strings to their numeric equivalent, leave numbers alone, and give an error on all other input.
jq '.[] | tonumber'
[1, "1"]
=> 1, 1
tostring
The tostring function prints its input as a string. Strings are left unchanged, and all other values are JSON-encoded.
jq '.[] | tostring'
[1, "1", [1]]
=> "1", "1", "[1]"
type
The type function returns the type of its argument as a string, which is one of null, boolean, number, string, array or object.
jq 'map(type)'
[0, false, [], {}, null, "hello"]
=> ["number", "boolean", "array", "object", "null", "string"]
infinite, nan, isinfinite, isnan, isfinite, isnormal
Some arithmetic operations can yield infinities and "not a number" (NaN) values. The isinfinite builtin returns true if its input is infinite. The isnan builtin returns true if its input is a NaN. The infinite builtin returns a positive infinite value. The nan builtin returns a NaN. The isnormal builtin returns true if its input is a normal number.
Note that division by zero raises an error.
Currently most arithmetic operations operating on infinities, NaNs, and sub-normals do not raise errors.
jq '.[] | (infinite * .) < 0'
[-1, 1]
=> true, false
jq 'infinite, nan | type'
null
=> "number", "number"
sort, sort_by(path_expression)
The sort function sorts its input, which must be an array. Values are sorted in the following order:
- null
- false
- true
- numbers
- strings, in alphabetical order (by unicode codepoint value)
- arrays, in lexical order
- objects
The ordering for objects is a little complex: first they're compared by comparing their sets of keys (as arrays in
sorted order), and if their keys are equal then the values are compared key by key.
sort_by may be used to sort by a particular field of an object, or by applying any jq filter.
sort_by(foo) compares two elements by comparing the result of foo on each element.
jq 'sort'
[8,3,null,6]
=> [null,3,6,8]
jq 'sort_by(.foo)'
[{"foo":4, "bar":10}, {"foo":3, "bar":100}, {"foo":2, "bar":1}]
=> [{"foo":2, "bar":1}, {"foo":3, "bar":100}, {"foo":4, "bar":10}]
group_by(path_expression)
group_by(.foo) takes as input an array, groups the elements having the same .foo field into separate arrays, and produces all of these arrays as elements of a larger array, sorted by the value of the .foo field.
Any jq expression, not just a field access, may be used in place of .foo. The sorting order is the same as described
in the sort function above.
jq 'group_by(.foo)'
[{"foo":1, "bar":10}, {"foo":3, "bar":100}, {"foo":1, "bar":1}]
=> [[{"foo":1, "bar":10}, {"foo":1, "bar":1}], [{"foo":3, "bar":100}]]
min, max, min_by(path_exp), max_by(path_exp)
Find the minimum or maximum element of the input array.
The min_by(path_exp) and max_by(path_exp) functions allow you to specify a particular field or property to examine,
e.g. min_by(.foo) finds the object with the smallest foo field.
jq 'min'
[5,4,2,7]
=> 2
jq 'max_by(.foo)'
[{"foo":1, "bar":14}, {"foo":2, "bar":3}]
=> {"foo":2, "bar":3}
unique, unique_by(path_exp)
The unique function takes as input an array and produces an array of the same elements, in sorted order, with duplicates removed.
The unique_by(path_exp) function will keep only one element for each value obtained by applying the argument. Think of it as making an array by taking one element out of every group produced by group_by.
jq 'unique'
[1,2,5,3,5,3,1,3]
=> [1,2,3,5]
jq 'unique_by(.foo)'
[{"foo": 1, "bar": 2}, {"foo": 1, "bar": 3}, {"foo": 4, "bar": 5}]
=> [{"foo": 1, "bar": 2}, {"foo": 4, "bar": 5}]
jq 'unique_by(length)'
["chunky", "bacon", "kitten", "cicada", "asparagus"]
=> ["bacon", "chunky", "asparagus"]
reverse
This function reverses an array.
jq 'reverse'
[1,2,3,4]
=> [4,3,2,1]
contains(element)
The filter contains(b) will produce true if b is completely contained within the input. A string B is contained in a string A if B is a substring of A. An array B is contained in an array A if all elements in B are contained in any element in A. An object B is contained in object A if all of the values in B are contained in the value in A with the same key. All other types are assumed to be contained in each other if they are equal.
jq 'contains("bar")'
"foobar"
=> true
jq 'contains(["baz", "bar"])'
["foobar", "foobaz", "blarp"]
=> true
jq 'contains(["bazzzzz", "bar"])'
["foobar", "foobaz", "blarp"]
=> false
jq 'contains({foo: 12, bar: [{barp: 12}]})'
{"foo": 12, "bar":[1,2,{"barp":12, "blip":13}]}
=> true
jq 'contains({foo: 12, bar: [{barp: 15}]})'
{"foo": 12, "bar":[1,2,{"barp":12, "blip":13}]}
=> false
indices(s)
Outputs an array containing the indices in . where s occurs. The input may be an array, in which case if s is an array
then the indices output will be those where all elements in . match those of s.
jq 'indices(", ")'
"a,b, cd, efg, hijk"
=> [3,7,12]
jq 'indices(1)'
[0,1,2,1,3,1,4]
=> [1,3,5]
jq 'indices([1,2])'
[0,1,2,3,1,4,2,5,1,2,6,7]
=> [1,8]
index(s), rindex(s)
Outputs the index of the first (index) or last (rindex) occurrence of s in the input.
jq 'index(", ")'
"a,b, cd, efg, hijk"
=> 3
jq 'rindex(", ")'
"a,b, cd, efg, hijk"
=> 12
inside
The filter inside(b) will produce true if the input is completely contained within b. It is, essentially, an inverted version of contains.
jq 'inside("foobar")'
"bar"
=> true
jq 'inside(["foobar", "foobaz", "blarp"])'
["baz", "bar"]
=> true
jq 'inside(["foobar", "foobaz", "blarp"])'
["bazzzzz", "bar"]
=> false
jq 'inside({"foo": 12, "bar":[1,2,{"barp":12, "blip":13}]})'
{"foo": 12, "bar": [{"barp": 12}]}
=> true
jq 'inside({"foo": 12, "bar":[1,2,{"barp":12, "blip":13}]})'
{"foo": 12, "bar": [{"barp": 15}]}
=> false
startswith(str)
Outputs true if . starts with the given string argument.
jq '[.[]|startswith("foo")]'
["fo", "foo", "barfoo", "foobar", "barfoob"]
=> [false, true, false, true, false]
endswith(str)
Outputs true if . ends with the given string argument.
jq '[.[]|endswith("foo")]'
["foobar", "barfoo"]
=> [false, true]
combinations, combinations(n)
Outputs all combinations of the elements of the arrays in the input array. If given an argument n, it outputs all combinations of n repetitions of the input array.
jq 'combinations'
[[1,2], [3, 4]]
=> [1, 3], [1, 4], [2, 3], [2, 4]
jq 'combinations(2)'
[0, 1]
=> [0, 0], [0, 1], [1, 0], [1, 1]
ltrimstr(str)
Outputs its input with the given prefix string removed, if it starts with it.
jq '[.[]|ltrimstr("foo")]'
["fo", "foo", "barfoo", "foobar", "afoo"]
=> ["fo","","barfoo","bar","afoo"]
rtrimstr(str)
Outputs its input with the given suffix string removed, if it ends with it.
jq '[.[]|rtrimstr("foo")]'
["fo", "foo", "barfoo", "foobar", "foob"]
=> ["fo","","bar","foobar","foob"]
explode
Converts an input string into an array of the string's codepoint numbers.
jq 'explode'
"foobar"
=> [102,111,111,98,97,114]
implode
The inverse of explode.
jq 'implode'
[65, 66, 67]
=> "ABC"
split(str)
Splits an input string on the separator argument.
jq 'split(", ")'
"a, b,c,d, e, "
=> ["a","b,c,d","e",""]
join(str)
Joins the array of elements given as input, using the argument as separator. It is the inverse of split: that is, running split("foo") | join("foo") over any input string returns said input string.
Numbers and booleans in the input are converted to strings. Null values are treated as empty strings. Arrays and objects in the input are not supported.
jq 'join(", ")'
["a","b,c,d","e"]
=> "a, b,c,d, e"
jq 'join(" ")'
["a",1,2.3,true,null,false]
=> "a 1 2.3 true false"
ascii_downcase, ascii_upcase
Emit a copy of the input string with its alphabetic characters (a-z and A-Z) converted to the specified case.
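A minimal example in the same format as the other entries:
jq 'ascii_upcase'
"useful but not for cryptography"
=> "USEFUL BUT NOT FOR CRYPTOGRAPHY"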
while(cond; update)
The while(cond; update) function allows you to repeatedly apply an update to . until cond is false.
Note that while(cond; update) is internally defined as a recursive jq function. Recursive calls within while will not
consume additional memory if update produces at most one output for each input. See advanced topics below.
jq '[while(.<100; .*2)]'
1
=> [1,2,4,8,16,32,64]
until(cond; next)
The until(cond; next) function allows you to repeatedly apply the expression next, initially to . then to its own output, until cond is true. For example, this can be used to implement a factorial function (see below).
Note that until(cond; next) is internally defined as a recursive jq function. Recursive calls within until() will not
consume additional memory if next produces at most one output for each input. See advanced topics below.
jq '[.,1]|until(.[0] < 1; [.[0] - 1, .[1] * .[0]])|.[1]'
4
=> 24
recurse(f), recurse, recurse(f; condition), recurse_down
The recurse(f) function allows you to search through a recursive structure, and extract interesting data from all levels. Suppose your input represents a filesystem:
{"name": "/", "children": [
{"name": "/bin", "children": [
{"name": "/bin/ls", "children": []},
{"name": "/bin/sh", "children": []}]},
{"name": "/home", "children": [
{"name": "/home/stephen", "children": [
{"name": "/home/stephen/jq", "children": []}]}]}]}
Now suppose you want to extract all of the filenames present. You need to retrieve .name, .children[].name, .children[].children[].name, and so on. You can do this with:
recurse(.children[]) | .name
When called without an argument, recurse is equivalent to recurse(.[]?).
recurse(f) is identical to recurse(f; . != null) and can be used without concerns about recursion depth.
recurse(f; condition) is a generator which begins by emitting . and then emits in turn .|f, .|f|f, .|f|f|f, ... so
long as the computed value satisfies the condition. For example, to generate all the integers, at least in principle,
one could write recurse(.+1; true).
For legacy reasons, recurse_down exists as an alias to calling recurse without arguments. This alias is considered
deprecated and will be removed in the next major release.
The recursive calls in recurse will not consume additional memory whenever f produces at most a single output for each
input.
jq 'recurse(.foo[])'
{"foo":[{"foo": []}, {"foo":[{"foo":[]}]}]}
=> {"foo":[{"foo":[]},{"foo":[{"foo":[]}]}]}, {"foo":[]}, {"foo":[{"foo":[]}]}, {"foo":[]}
jq 'recurse'
{"a":0,"b":[1]}
=> {"a":0,"b":[1]}, 0, [1], 1
jq 'recurse(. * .; . < 20)'
2
=> 2, 4, 16
walk(f)
The walk(f) function applies f recursively to every component of the input entity. When an array is encountered, f is first applied to its elements and then to the array itself; when an object is encountered, f is first applied to all the values and then to the object. In practice, f will usually test the type of its input, as illustrated in the following examples. The first example highlights the usefulness of processing the elements of an array of arrays before processing the array itself. The second example shows how all the keys of all the objects within the input can be considered for alteration.
jq 'walk(if type == "array" then sort else . end)'
[[4, 1, 7], [8, 5, 2], [3, 6, 9]]
=> [[1,4,7],[2,5,8],[3,6,9]]
jq 'walk( if type == "object" then with_entries( .key |= sub( "^_+"; "") ) else . end )'
[ { "_a": { "__b": 2 } } ]
=> [{"a":{"b":2}}]
$ENV, env
$ENV is an object representing the environment variables as set when the jq program started.
env outputs an object representing jq's current environment.
At the moment there is no builtin for setting environment variables.
jq '$ENV.PAGER'
null
=> "less"
jq 'env.PAGER'
null
=> "less"
transpose
Transpose a possibly jagged matrix (an array of arrays). Rows are padded with nulls so the result is always rectangular.
jq 'transpose'
[[1], [2,3]]
=> [[1,2],[null,3]]
bsearch(x)
bsearch(x) conducts a binary search for x in the input array. If the input is sorted and contains x, then bsearch(x) will return its index in the array; otherwise, if the array is sorted, it will return (-1 - ix) where ix is an insertion point such that the array would still be sorted after the insertion of x at ix. If the array is not sorted, bsearch(x) will return an integer that is probably of no interest.
jq 'bsearch(0)'
[0,1]
=> 0
jq 'bsearch(0)'
[1,2,3]
=> -1
jq 'bsearch(4) as $ix | if $ix < 0 then .[-(1+$ix)] = 4 else . end'
[1,2,3]
=> [1,2,3,4]
String interpolation - \(foo)
Inside a string, you can put an expression inside parens after a backslash. Whatever the expression returns will be
interpolated into the string.
jq '"The input was \(.), which is one less than \(.+1)"'
42
=> "The input was 42, which is one less than 43"
Convert to/from JSON
The tojson and fromjson builtins dump values as JSON texts or parse JSON texts into values, respectively. The tojson
builtin differs from tostring in that tostring returns strings unmodified, while tojson encodes strings as JSON
strings.
jq '[.[]|tostring]'
[1, "foo", ["foo"]]
=> ["1","foo","[\"foo\"]"]
jq '[.[]|tojson]'
[1, "foo", ["foo"]]
=> ["1","\"foo\"","[\"foo\"]"]
jq '[.[]|tojson|fromjson]'
[1, "foo", ["foo"]]
=> [1,"foo",["foo"]]
Format strings and escaping
The @foo syntax is used to format and escape strings, which is useful for building URLs, documents in a language like
HTML or XML, and so forth. @foo can be used as a filter on its own, the possible escapings are:
@text:
Calls tostring, see that function for details.
@json:
Serializes the input as JSON.
@html:
Applies HTML/XML escaping, by mapping the characters <>&'" to their entity equivalents &lt;, &gt;, &amp;, &#39;, &quot;.
@uri:
Applies percent-encoding, by mapping all reserved URI characters to a %XX sequence.
@csv:
The input must be an array, and it is rendered as CSV with double quotes for strings, and quotes escaped by
repetition.
@tsv:
The input must be an array, and it is rendered as TSV (tab-separated values). Each input array will be printed
as a single line. Fields are separated by a single tab (ascii 0x09). Input characters line-feed (ascii 0x0a),
carriage-return (ascii 0x0d), tab (ascii 0x09) and backslash (ascii 0x5c) will be output as escape sequences
\n, \r, \t, \\ respectively.
@sh:
The input is escaped suitable for use in a command-line for a POSIX shell. If the input is an array, the output
will be a series of space-separated strings.
@base64:
The input is converted to base64 as specified by RFC 4648.
@base64d:
The inverse of @base64, input is decoded as specified by RFC 4648. Note: If the decoded string is not UTF-8,
the results are undefined.
This syntax can be combined with string interpolation in a useful way. You can follow a @foo token with a string literal. The contents of the string literal will not be escaped. However, all interpolations made inside that string literal will be escaped. For instance,
@uri "https://www.google.com/search?q=\(.search)"
will produce the following output for the input {"search":"what is jq?"}:
"https://www.google.com/search?q=what%20is%20jq%3F"
Note that the slashes, question mark, etc. in the URL are not escaped, as they were part of the string literal.
jq '@html'
"This works if x < y"
=> "This works if x < y"
jq '@sh "echo \(.)"'
"O'Hara's Ale"
=> "echo 'O'\\''Hara'\\''s Ale'"
jq '@base64'
"This is a message"
=> "VGhpcyBpcyBhIG1lc3NhZ2U="
jq '@base64d'
"VGhpcyBpcyBhIG1lc3NhZ2U="
=> "This is a message"
Dates
jq provides some basic date handling functionality, with some high-level and low-level builtins. In all cases these
builtins deal exclusively with time in UTC.
The fromdateiso8601 builtin parses datetimes in the ISO 8601 format to a number of seconds since the Unix epoch
(1970-01-01T00:00:00Z). The todateiso8601 builtin does the inverse.
The fromdate builtin parses datetime strings. Currently fromdate only supports ISO 8601 datetime strings, but in the
future it will attempt to parse datetime strings in more formats.
The todate builtin is an alias for todateiso8601.
The now builtin outputs the current time, in seconds since the Unix epoch.
Low-level jq interfaces to the C-library time functions are also provided: strptime, strftime, strflocaltime, mktime, gmtime, and localtime. Refer to your host operating system's documentation for the format strings used by strptime and strftime. Note: these are not necessarily stable interfaces in jq, particularly as to their localization functionality.
The gmtime builtin consumes a number of seconds since the Unix epoch and outputs a "broken down time" representation of Greenwich Mean Time as an array of numbers representing (in this order): the year, the month (zero-based), the day of the month (one-based), the hour of the day, the minute of the hour, the second of the minute, the day of the week, and the day of the year -- all one-based unless otherwise stated. The day of the week number may be wrong on some systems for dates before March 1st 1900, or after December 31 2099.
The localtime builtin works like the gmtime builtin, but using the local timezone setting.
The mktime builtin consumes "broken down time" representations of time output by gmtime and strptime.
The strptime(fmt) builtin parses input strings matching the fmt argument. The output is in the "broken down time" representation output by gmtime and consumed by mktime.
The strftime(fmt) builtin formats a time (GMT) with the given format. The strflocaltime does the same, but using the
local timezone setting.
The format strings for strptime and strftime are described in typical C library documentation. The format string for
ISO 8601 datetime is "%Y-%m-%dT%H:%M:%SZ".
jq may not support some or all of this date functionality on some systems. In particular, the %u and %j specifiers for
strptime(fmt) are not supported on macOS.
jq 'fromdate'
"2015-03-05T23:51:47Z"
=> 1425599507
jq 'strptime("%Y-%m-%dT%H:%M:%SZ")'
"2015-03-05T23:51:47Z"
=> [2015,2,5,23,51,47,4,63]
jq 'strptime("%Y-%m-%dT%H:%M:%SZ")|mktime'
"2015-03-05T23:51:47Z"
=> 1425599507
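strftime has no example above; a minimal round-trip sketch (2015-03-05 was a Thursday):
jq 'strptime("%Y-%m-%dT%H:%M:%SZ") | strftime("%A, %B %d, %Y")'
"2015-03-05T23:51:47Z"
=> "Thursday, March 05, 2015"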
SQL-Style Operators
jq provides a few SQL-style operators.
INDEX(stream; index_expression):
This builtin produces an object whose keys are computed by the given index expression applied to each value
from the given stream.
JOIN($idx; stream; idx_expr; join_expr):
This builtin joins the values from the given stream to the given index. The index's keys are computed by applying the given index expression to each value from the given stream. An array of the value in the stream and the corresponding value from the index is fed to the given join expression to produce each result.
JOIN($idx; stream; idx_expr):
Same as JOIN($idx; stream; idx_expr; .).
JOIN($idx; idx_expr):
This builtin joins the input . to the given index, applying the given index expression to . to compute the index key. The join operation is as described above.
IN(s):
This builtin outputs true if . appears in the given stream, otherwise it outputs false.
IN(source; s):
This builtin outputs true if any value in the source stream appears in the second stream, otherwise it outputs
false.
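No examples are given above; the following minimal sketches (inputs invented for illustration) show the one-argument INDEX and IN forms:
jq 'INDEX(.id)'
[{"id":"a","n":1}, {"id":"b","n":2}]
=> {"a":{"id":"a","n":1},"b":{"id":"b","n":2}}
jq '.[] | IN(0, 2, 4)'
[0, 1, 2]
=> true, false, true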
builtins
Returns a list of all builtin functions in the format name/arity. Since functions with the same name but different arities are considered separate functions, all/0, all/1, and all/2 would all be present in the list.
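No example is given above; a minimal sketch (output normalized with sort; the exact list varies by jq version):
jq '[builtins[] | select(startswith("all/"))] | sort'
null
=> ["all/0","all/1","all/2"]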
CONDITIONALS AND COMPARISONS
==, !=
The expression 'a == b' will produce 'true' if the result of a and b are equal (that is, if they represent equivalent
JSON documents) and 'false' otherwise. In particular, strings are never considered equal to numbers. If you're coming
from Javascript, jq's == is like Javascript's === - considering values equal only when they have the same type as well
as the same value.
!= is "not equal", and 'a != b' returns the opposite value of 'a == b'
jq '.[] == 1'
[1, 1.0, "1", "banana"]
=> true, true, false, false
if-then-else
if A then B else C end will act the same as B if A produces a value other than false or null, but act the same as C
otherwise.
Checking for false or null is a simpler notion of "truthiness" than is found in Javascript or Python, but it means that you'll sometimes have to be more explicit about the condition you want: you can't test whether, e.g. a string is empty using if .name then A else B end; you'll need something more like if (.name | length) > 0 then A else B end instead.
If the condition A produces multiple results, then B is evaluated once for each result that is not false or null, and
C is evaluated once for each false or null.
More cases can be added to an if using elif A then B syntax.
jq 'if . == 0 then "zero" elif . == 1 then "one" else "many" end'
2
=> "many"
>, >=, <=, <
The comparison operators >, >=, <=, < return whether their left argument is greater than, greater than or equal to,
less than or equal to or less than their right argument (respectively).
The ordering is the same as that described for sort, above.
jq '. < 5'
2
=> true
and/or/not
jq supports the normal Boolean operators and/or/not. They have the same standard of truth as if expressions - false
and null are considered "false values", and anything else is a "true value".
If an operand of one of these operators produces multiple results, the operator itself will produce a result for each
input.
not is in fact a builtin function rather than an operator, so it is called as a filter to which things can be piped
rather than with special syntax, as in .foo and .bar | not.
These three only produce the values "true" and "false", and so are only useful for genuine Boolean operations, rather
than the common Perl/Python/Ruby idiom of "value_that_may_be_null or default". If you want to use this form of "or",
picking between two values rather than evaluating a condition, see the "//" operator below.
jq '42 and "a string"'
null
=> true
jq '(true, false) or false'
null
=> true, false
jq '(true, true) and (true, false)'
null
=> true, false, true, false
jq '[true, false | not]'
null
=> [false, true]
Alternative operator: //
A filter of the form a // b produces the same results as a, if a produces results other than false and null. Otherwise, a // b produces the same results as b.
This is useful for providing defaults: .foo // 1 will evaluate to 1 if there's no .foo element in the input. It's similar to how or is sometimes used in Python (jq's or operator is reserved for strictly Boolean operations).
jq '.foo // 42'
{"foo": 19}
=> 19
jq '.foo // 42'
{}
=> 42
try-catch
Errors can be caught by using try EXP catch EXP. The first expression is executed, and if it fails then the second is executed with the error message. The output of the handler, if any, is output as if it had been the output of the expression to try.
The try EXP form uses empty as the exception handler.
jq 'try .a catch ". is not an object"'
true
=> ". is not an object"
jq '[.[]|try .a]'
[{}, true, {"a":1}]
=> [null, 1]
jq 'try error("some exception") catch .'
true
=> "some exception"
Breaking out of control structures
A convenient use of try/catch is to break out of control structures like reduce, foreach, while, and so on.
For example:
# Repeat an expression until it raises "break" as an
# error, then stop repeating without re-raising the error.
# But if the error caught is not "break" then re-raise it.
try repeat(exp) catch if . == "break" then empty else error end;
jq has a syntax for named lexical labels to "break" or "go (back) to":
label $out | ... break $out ...
The break $label_name expression will cause the program to act as though the nearest (to the left) label $label_name produced empty.
The relationship between the break and corresponding label is lexical: the label has to be "visible" from the break.
To break out of a reduce, for example:
label $out | reduce .[] as $item (null; if .==false then break $out else ... end)
The following jq program produces a syntax error:
break $out
because no label $out is visible.
Error Suppression / Optional Operator: ?
The ? operator, used as EXP?, is shorthand for try EXP.
jq '[.[]|(.a)?]'
[{}, true, {"a":1}]
=> [null, 1]
REGULAR EXPRESSIONS (PCRE)
jq uses the Oniguruma regular expression library, as do php, ruby, TextMate, Sublime Text, etc, so the description
here will focus on jq specifics.
The jq regex filters are defined so that they can be used using one of these patterns:
STRING | FILTER( REGEX )
STRING | FILTER( REGEX; FLAGS )
STRING | FILTER( [REGEX] )
STRING | FILTER( [REGEX, FLAGS] )
where: * STRING, REGEX and FLAGS are jq strings and subject to jq string interpolation; * REGEX, after string interpolation, should be a valid PCRE regex; * FILTER is one of test, match, or capture, as described below.
FLAGS is a string consisting of one or more of the supported flags:
- g - Global search (find all matches, not just the first)
- i - Case insensitive search
- m - Multi line mode ('.' will match newlines)
- n - Ignore empty matches
- p - Both s and m modes are enabled
- s - Single line mode ('^' -> '\A', '$' -> '\Z')
- l - Find longest possible matches
- x - Extended regex format (ignore whitespace and comments)
To match whitespace in an x pattern use an escape such as \s, e.g.
- test( "a\sb", "x" ).
Note that certain flags may also be specified within REGEX, e.g.
- jq -n '("test", "TEst", "teST", "TEST") | test( "(?i)te(?-i)st" )'
evaluates to: true, true, false, false.
test(val), test(regex; flags)
Like match, but does not return match objects, only true or false for whether or not the regex matches the input.
jq 'test("foo")'
"foo"
=> true
jq '.[] | test("a b c # spaces are ignored"; "ix")'
["xabcd", "ABC"]
=> true, true
match(val), match(regex; flags)
match outputs an object for each match it finds. Matches have the following fields:
- offset - offset in UTF-8 codepoints from the beginning of the input
- length - length in UTF-8 codepoints of the match
- string - the string that it matched
- captures - an array of objects representing capturing groups.
Capturing group objects have the following fields:
- offset - offset in UTF-8 codepoints from the beginning of the input
- length - length in UTF-8 codepoints of this capturing group
- string - the string that was captured
- name - the name of the capturing group (or null if it was unnamed)
Capturing groups that did not match anything return an offset of -1
jq 'match("(abc)+"; "g")'
"abc abc"
=> {"offset": 0, "length": 3, "string": "abc", "captures": [{"offset": 0, "length": 3, "string": "abc", "name": null}]}, {"offset": 4, "length": 3, "string": "abc", "captures": [{"offset": 4, "length": 3, "string": "abc", "name": null}]}
jq 'match("foo")'
"foo bar foo"
=> {"offset": 0, "length": 3, "string": "foo", "captures": []}
jq 'match(["foo", "ig"])'
"foo bar FOO"
=> {"offset": 0, "length": 3, "string": "foo", "captures": []}, {"offset": 8, "length": 3, "string": "FOO", "captures": []}
jq 'match("foo (?<bar123>bar)? foo"; "ig")'
"foo bar foo foo foo"
=> {"offset": 0, "length": 11, "string": "foo bar foo", "captures": [{"offset": 4, "length": 3, "string": "bar", "name": "bar123"}]}, {"offset": 12, "length": 8, "string": "foo foo", "captures": [{"offset": -1, "length": 0, "string": null, "name": "bar123"}]}
jq '[ match("."; "g")] | length'
"abc"
=> 3
capture(val), capture(regex; flags)
Collects the named captures in a JSON object, with the name of each capture as the key, and the matched string as the
corresponding value.
jq 'capture("(?<a>[a-z]+)-(?<n>[0-9]+)")'
"xyzzy-14"
=> { "a": "xyzzy", "n": "14" }
scan(regex), scan(regex; flags)
Emit a stream of the non-overlapping substrings of the input that match the regex in accordance with the flags, if any
have been specified. If there is no match, the stream is empty. To capture all the matches for each input string, use
the idiom [ expr ], e.g. [ scan(regex) ].
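A minimal sketch (input invented for illustration):
jq '[scan("ab.")]'
"abc abd xyz"
=> ["abc","abd"]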
split(regex; flags)
For backwards compatibility, the one-argument split splits on a plain string, not a regex; the two-argument form shown here treats its first argument as a regex.
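A sketch of the two-argument (regex) form, passing null for the flags:
jq 'split(", *"; null)'
"a, b,c"
=> ["a","b","c"]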
splits(regex), splits(regex; flags)
These provide the same results as their split counterparts, but as a stream instead of an array.
sub(regex; tostring) sub(regex; string; flags)
Emit the string obtained by replacing the first match of regex in the input string with tostring, after interpolation. tostring should be a jq string, and may contain references to named captures. The named captures are, in effect, presented as a JSON object (as constructed by capture) to tostring, so a reference to a captured variable named "x" would take the form: "\(.x)".
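A minimal sketch using a named capture (input invented for illustration):
jq 'sub("(?<x>[0-9]+)"; "<\(.x)>")'
"abc 123 456"
=> "abc <123> 456"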
gsub(regex; string), gsub(regex; string; flags)
gsub is like sub but all the non-overlapping occurrences of the regex are replaced by the string, after interpolation.
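A similar sketch where gsub replaces every match:
jq 'gsub("[0-9]+"; "N")'
"a1b22c333"
=> "aNbNcN"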
ADVANCED FEATURES
Variables are an absolute necessity in most programming languages, but they're relegated to an "advanced feature" in
jq.
In most languages, variables are the only means of passing around data. If you calculate a value, and you want to use
it more than once, you'll need to store it in a variable. To pass a value to another part of the program, you'll need
that part of the program to define a variable (as a function parameter, object member, or whatever) in which to place
the data.
It is also possible to define functions in jq, although this is a feature whose biggest use is defining jq's standard library (many jq functions such as map and find are in fact written in jq).
jq has reduction operators, which are very powerful but a bit tricky. Again, these are mostly used internally, to define some useful bits of jq's standard library.
It may not be obvious at first, but jq is all about generators (yes, as often found in other languages). Some utilities are provided to help deal with generators.
Some minimal I/O support (besides reading JSON from standard input, and writing JSON to standard output) is available.
Finally, there is a module/library system.
Variable / Symbolic Binding Operator: ... as $identifier | ...
In jq, all filters have an input and an output, so manual plumbing is not necessary to pass a value from one part of a
program to the next. Many expressions, for instance a + b, pass their input to two distinct subexpressions (here a and
b are both passed the same input), so variables aren't usually necessary in order to use a value twice.
For instance, calculating the average value of an array of numbers requires a few variables in most languages - at
least one to hold the array, perhaps one for each element or for a loop counter. In jq, it's simply add / length - the
add expression is given the array and produces its sum, and the length expression is given the array and produces its
length.
So, there's generally a cleaner way to solve most problems in jq than defining variables. Still, sometimes they do
make things easier, so jq lets you define variables using expression as $variable. All variable names start with $.
Here's a slightly uglier version of the array-averaging example:
length as $array_length | add / $array_length
We'll need a more complicated problem to find a situation where using variables actually makes our lives easier.
Suppose we have an array of blog posts, with "author" and "title" fields, and another object which is used to map author usernames to real names. Our input looks like:
{"posts": [{"title": "Frist psot", "author": "anon"},
{"title": "A well-written article", "author": "person1"}],
"realnames": {"anon": "Anonymous Coward",
"person1": "Person McPherson"}}
We want to produce the posts with the author field containing a real name, as in:
{"title": "Frist psot", "author": "Anonymous Coward"}
{"title": "A well-written article", "author": "Person McPherson"}
We use a variable, $names, to store the realnames object, so that we can refer to it later when looking up author
usernames:
.realnames as $names | .posts[] | {title, author: $names[.author]}
The expression exp as $x | ... means: for each value of expression exp, run the rest of the pipeline with the entire original input, and with $x set to that value. Thus "as" functions as something of a foreach loop.
Just as {foo} is a handy way of writing {foo: .foo}, so {$foo} is a handy way of writing {foo:$foo}.
Multiple variables may be declared using a single as expression by providing a pattern that matches the structure of
the input (this is known as "destructuring"):
. as {realnames: $names, posts: [$first, $second]} | ...
The variable declarations in array patterns (e.g., . as [$first, $second]) bind to the elements of the array from the element at index zero on up, in order. When there is no value at the index for an array pattern element, null is bound to that variable.
Variables are scoped over the rest of the expression that defines them, so
.realnames as $names | (.posts[] | {title, author: $names[.author]})
will work, but
(.realnames as $names | .posts[]) | {title, author: $names[.author]}
won't.
For programming language theorists, it's more accurate to say that jq variables are lexically-scoped bindings. In particular there's no way to change the value of a binding; one can only set up a new binding with the same name, but which will not be visible where the old one was.
jq '.bar as $x | .foo | . + $x'
{"foo":10, "bar":200}
=> 210
jq '. as $i|[(.*2|. as $i| $i), $i]'
5
=> [10,5]
jq '. as [$a, $b, {c: $c}] | $a + $b + $c'
[2, 3, {"c": 4, "d": 5}]
=> 9
jq '.[] as [$a, $b] | {a: $a, b: $b}'
[[0], [0, 1], [2, 1, 0]]
=> {"a":0,"b":null}, {"a":0,"b":1}, {"a":2,"b":1}
Defining Functions
You can give a filter a name using "def" syntax:
def increment: . + 1;
From then on, increment is usable as a filter just like a builtin function (in fact, this is how many of the builtins
are defined). A function may take arguments:
def map(f): [.[] | f];
Arguments are passed as filters (functions with no arguments), not as values. The same argument may be referenced multiple times with different inputs (here f is run for each element of the input array). Arguments to a function work more like callbacks than like value arguments. This is important to understand. Consider:
def foo(f): f|f;
5|foo(.*2)
The result will be 20 because f is .*2, and during the first invocation of f . will be 5, and the second time it will
be 10 (5 * 2), so the result will be 20. Function arguments are filters, and filters expect an input when invoked.
If you want the value-argument behaviour for defining simple functions, you can just use a variable:
def addvalue(f): f as $f | map(. + $f);
Or use the short-hand:
def addvalue($f): ...;
With either definition, addvalue(.foo) will add the current input's .foo field to each element of the array. Do note
that calling addvalue(.[]) will cause the map(. + $f) part to be evaluated once per value in the value of . at the
call site.
Multiple definitions using the same function name are allowed. Each re-definition replaces the previous one for the same number of function arguments, but only for references from functions (or main program) subsequent to the re-definition. See also the section below on scoping.
jq 'def addvalue(f): . + [f]; map(addvalue(.[0]))'
[[1,2],[10,20]]
=> [[1,2,1], [10,20,10]]
jq 'def addvalue(f): f as $x | map(. + $x); addvalue(.[0])'
[[1,2],[10,20]]
=> [[1,2,1,2], [10,20,1,2]]
Scoping
There are two types of symbols in jq: value bindings (a.k.a., "variables"), and functions. Both are scoped lexically,
with expressions being able to refer only to symbols that have been defined "to the left" of them. The only exception
to this rule is that functions can refer to themselves so as to be able to create recursive functions.
For example, in the following expression there is a binding which is visible "to the right" of it, ... | .*3 as
$times_three | [. + $times_three] | ..., but not "to the left". Consider this expression now, ... | (.*3 as
$times_three | [.+ $times_three]) | ...: here the binding $times_three is not visible past the closing parenthesis.
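A minimal sketch of the parenthesized case (invented input; $t is only visible inside the parentheses):
jq '(. * 3 as $t | . + $t), . + 1'
2
=> 8, 3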
Reduce
The reduce syntax in jq allows you to combine all of the results of an expression by accumulating them into a single
answer. As an example, we'll pass [3,2,1] to this expression:
reduce .[] as $item (0; . + $item)
For each result that .[] produces, . + $item is run to accumulate a running total, starting from 0. In this example,
.[] produces the results 3, 2, and 1, so the effect is similar to running something like this:
0 | (3 as $item | . + $item) |
(2 as $item | . + $item) |
(1 as $item | . + $item)
jq 'reduce .[] as $item (0; . + $item)'
[10,2,5,3]
=> 20
isempty(exp)
Returns true if exp produces no outputs, false otherwise.
jq 'isempty(empty)'
null
=> true
limit(n; exp)
The limit function extracts up to n outputs from exp.
jq '[limit(3;.[])]'
[0,1,2,3,4,5,6,7,8,9]
=> [0,1,2]
first(expr), last(expr), nth(n; expr)
The first(expr) and last(expr) functions extract the first and last values from expr, respectively.
The nth(n; expr) function extracts the nth value output by expr. This can be defined as def nth(n; expr): last(limit(n
+ 1; expr));. Note that nth(n; expr) doesn't support negative values of n.
jq '[first(range(.)), last(range(.)), nth(./2; range(.))]'
10
=> [0,9,5]
first, last, nth(n)
The first and last functions extract the first and last values from any array at ..
The nth(n) function extracts the nth value of any array at ..
jq '[range(.)]|[first, last, nth(5)]'
10
=> [0,9,5]
foreach
The foreach syntax is similar to reduce, but intended to allow the construction of limit and reducers that produce intermediate results (see example).
The form is foreach EXP as $var (INIT; UPDATE; EXTRACT). Like reduce, INIT is evaluated once to produce a state value,
then each output of EXP is bound to $var, UPDATE is evaluated for each output of EXP with the current state and with
$var visible. Each value output by UPDATE replaces the previous state. Finally, EXTRACT is evaluated for each new
state to extract an output of foreach.
This is mostly useful only for constructing reduce- and limit-like functions. But it is much more general, as it allows for partial reductions (see the example below).
jq '[foreach .[] as $item ([[],[]]; if $item == null then [[],.[0]] else [(.[0] + [$item]),[]] end; if $item == null then .[1] else empty end)]'
[1,2,3,4,null,"a","b",null]
=> [[1,2,3,4],["a","b"]]
Recursion
As described above, recurse uses recursion, and any jq function can be recursive. The while builtin is also implemented in terms of recursion.
Tail calls are optimized whenever the expression to the left of the recursive call outputs its last value. In practice this means that the expression to the left of the recursive call should not produce more than one output for each input.
For example:
def recurse(f): def r: ., (f | select(. != null) | r); r;
def while(cond; update):
def _while:
if cond then ., (update | _while) else empty end;
_while;
def repeat(exp):
def _repeat:
exp, _repeat;
_repeat;
Generators and iterators
Some jq operators and functions are actually generators in that they can produce zero, one, or more values for each
input, just as one might expect in other programming languages that have generators. For example, .[] generates all
the values in its input (which must be an array or an object), range(0; 10) generates the integers between 0 and 10,
and so on.
Even the comma operator is a generator, generating first the values generated by the expression to the left of the comma, then for each of those, the values generated by the expression on the right of the comma.
The empty builtin is the generator that produces zero outputs. The empty builtin backtracks to the preceding generator
expression.
All jq functions can be generators just by using builtin generators. It is also possible to define new generators using only recursion and the comma operator. If the recursive call(s) is(are) "in tail position" then the generator will be efficient. In the example below the recursive call by _range to itself is in tail position. The example shows off three advanced topics: tail recursion, generator construction, and sub-functions.
jq 'def range(init; upto; by): def _range: if (by > 0 and . < upto) or (by < 0 and . > upto) then ., ((.+by)|_range) else . end; if by == 0 then init else init|_range end | select((by > 0 and . < upto) or (by < 0 and . > upto)); range(0; 10; 3)'
null
=> 0, 3, 6, 9
jq 'def while(cond; update): def _while: if cond then ., (update | _while) else empty end; _while; [while(.<100; .*2)]'
1
=> [1,2,4,8,16,32,64]
FUNCTIONS
paths # outputs all paths
path(..) # same as paths, but also outputs the empty path [] (the root) first
.foo, .foo.bar # take hierarchical arguments in structure
.foo? # just like .foo, but does not output even an error when . is not an array or an object
.[] # returns all elements contained
.[]? # returns all elements contained, nothing & no error if not exists
.[<string>], .[2], .[-2:-1] # returns array contains selected indexes for array & string
| # pipe on output of each select
+ # Addition, takes two filters, applies them both to the same input, and adds the results together
- # Subtraction; the operator can also be used on arrays to remove all occurrences of the second array's elements from the first
* / % # Multiplication, division, modulo
length # gets the length of various different types of value
keys, keys_unsorted # returns its keys in an array
has(key) # returns whether the input object has the given key, or the input array has an element at the given index
in # returns whether the input key is in the given object, or the input index corresponds to an element in the given array
path(path_expression) # outputs array representations of the given path expression in .
del(path_expression) # the builtin function del removes a key and its corresponding value from an object
to_entries, from_entries, with_entries # these functions convert between an object and an array of key-value pairs
select(boolean_expression) # select(foo) produces its input unchanged if foo returns true for that input
arrays, objects, iterables, booleans, numbers, normals, finites, strings, nulls, values, scalars # these built-ins select only inputs that are arrays, objects, iterables (arrays or objects), booleans, numbers, normal numbers, finite numbers, strings, null, non-null values, and non-iterables, respectively
empty # returns no results. None at all. Not even null
map(x) # for any filter x, map(x) will run that filter for each element of the input array, and produce the outputs in a new array
map_values(x) # similarly, map_values(x) will run that filter for each element, but it will return an object when an object is passed
add # takes as input an array, and produces as output the elements of the array added together
any, any(condition), any(generator; condition) # takes as input an array of boolean values, and produces true as output if any of the elements of the array is true
all, all(condition), all(generator; condition) # takes as input an array of boolean values, and produces true as output if all of the elements of the array are true
flatten, flatten(depth) # takes as input an array of nested arrays, and produces a flat array in which all arrays inside the original array have been recursively replaced by their values. You can pass an argument to it to specify how many levels of nesting to flatten
range(upto), range(from;upto) range(from;upto;by) # produces a range of numbers. range(4;10) produces 6 numbers, from 4 (inclusive) to 10 (exclusive). The numbers are produced as separate outputs. Use [range(4;10)] to get a range as an array
sort, sort_by(path_expression) # sorts its input, which must be an array
group_by(path_expression) # group_by(.foo) takes as input an array, groups the elements having the same .foo field into separate arrays, and produces all of these arrays as elements of a larger array, sorted by the value of the .foo field
min, max, min_by(path_exp), max_by(path_exp) # find the minimum or maximum element of the input array
unique, unique_by(path_exp) # takes as input an array and produces an array of the same elements, in sorted order, with duplicates removed
reverse # reverses an array
contains(element) # contains(b) will produce true if b is completely contained within the input. A string B is contained in a string A if B is a substring of A. An array B is contained in an array A if all elements in B are contained in any element in A. An object B is contained in object A if all of the values in B are contained in the value in A with the same key. All other types are assumed to be contained in each other if they are equal
indices(s) # outputs an array containing the indices in . where s occurs. The input may be an array, in which case if s is an array then the indices output will be those where all elements in . match those of s
index(s), rindex(s) # outputs the index of the first (index) or last (rindex) occurrence of s in the input
inside # inside(b) will produce true if the input is completely contained within b. It is, essentially, an inverted version of contains
startswith(str) # outputs true if . starts with the given string argument
ltrimstr(str) # outputs its input with the given prefix string removed, if it starts with it
rtrimstr(str) # outputs its input with the given suffix string removed, if it ends with it
explode # converts an input string into an array of the string's codepoint numbers
implode # the inverse of explode
split(str) # splits an input string on the separator argument
join(str) # joins the array of elements given as input, using the argument as separator. It is the inverse of split: that is, running split("foo") | join("foo") over any input string returns said input string
ascii_downcase, ascii_upcase # emit a copy of the input string with its alphabetic characters (a-z and A-Z) converted to the specified case
recurse(f), recurse, recurse(f; condition), recurse_down # allows you to search through a recursive structure, and extract interesting data from all levels
env # outputs an object representing jq's environment
transpose # transpose a possibly jagged matrix (an array of arrays). Rows are padded with nulls so the result is always rectangular
bsearch(x) # bsearch(x) conducts a binary search for x in the input array. If the input is sorted and contains x, then bsearch(x) will return its index in the array; otherwise, if the array is sorted, it will return (-1 - ix) where ix is an insertion point such that the array would still be sorted after the insertion of x at ix. If the array is not sorted, bsearch(x) will return an integer that is probably of no interest
\(foo) - string interpolation # inside a string, you can put an expression inside parens after a backslash. Whatever the expression returns will be interpolated into the string
Convert to/from JSON # tojson and fromjson builtins dump values as JSON texts or parse JSON texts into values, respectively. The tojson builtin differs from tostring in that tostring returns strings unmodified, while tojson encodes strings as JSON strings
DATE
builtins deal exclusively with time in UTC
fromdateiso8601 # parses datetimes in the ISO 8601 format to a number of seconds since the Unix epoch (1970-01-01T00:00:00Z). The todateiso8601 builtin does the inverse
fromdate # parses datetime strings. Currently fromdate only supports ISO 8601 datetime strings, but in the future it will attempt to parse datetime strings in more formats
todate # is an alias for todateiso8601
now # outputs the current time, in seconds since the Unix epoch
gmtime # consumes a number of seconds since the Unix epoch and outputs a "broken down time" representation of time as an array of numbers representing (in this order): the year, the month (zero-based), the day of the month, the hour of the day, the minute of the hour, the second of the minute, the day of the week, and the day of the year -- all one-based unless otherwise stated
mktime # consumes "broken down time" representations of time output by gmtime and strptime
strptime(fmt) # parses input strings matching the fmt argument. The output is in the "broken down time" representation consumed by gmtime and output by mktime
strftime(fmt) # formats a time with the given format
MATH
floor # returns the floor of its numeric input (integer part)
sqrt # returns the square root of its numeric input
tonumber # parses its input as a number. It will convert correctly-formatted strings to their numeric equivalent, leave numbers alone, and give an error on all other input
type # returns the type of its argument as a string, which is one of null, boolean, number, string, array or object
LOOP
while(cond; update) # allows you to repeatedly apply an update to . until cond is false
until(cond; next) # allows you to repeatedly apply the expression next, initially to . then to its own output, until cond is true
ASSIGNMENT
= # filter .foo = 1 will take as input an object and produce as output an object with the "foo" field set to 1
|= # jq provides the "update" operator '|=', which takes a filter on the right-hand side and works out the new value for the property of . being assigned to by running the old value through this expression
+=, -=, *=, /=, %=, //= # which are all equivalent to a |= . op b. So, += 1 can be used to increment values
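A minimal illustration of the update-assignment form:
jq '.foo += 1'
{"foo": 41}
=> {"foo": 42}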
CONDITIONALS-COMPARISONS
==, != # produce 'true' if the result of a and b are equal (that is, if they represent equivalent JSON documents) and 'false' otherwise
== (like Javascript's ===) # considers values equal only when they have the same type as well as the same value
>, >=, <=, < # return whether their left argument is greater than, greater than or equal to, less than or equal to, or less than their right argument (respectively)
if then else # if A then B else C end will act the same as B if A produces a value other than false or null, but act the same as C otherwise
and, or, not # They have the same standard of truth
// - alternative operator # a filter of the form a // b produces the same results as a, if a produces results other than false and null. Otherwise, a // b produces the same results as b
try-catch # errors can be caught by using try EXP catch EXP. The first expression is executed, and if it fails then the second is executed with the error message. The output of the handler, if any, is output as if it had been the output of the expression to try
? operator # used as EXP?, is shorthand for try EXP
REGEXP
test(val), test(regex; flags) # like match, but does not return match objects, only true or false for whether or not the regex matches the input
match(val), match(regex; flags) # match outputs an object for each match it finds
capture(val), capture(regex; flags) # collects the named captures in a JSON object, with the name of each capture as the key, and the matched string as the corresponding value
scan(regex), scan(regex; flags) # emit a stream of the non-overlapping substrings of the input that match the regex in accordance with the flags, if any have been specified
split(regex; flags) # for backwards compatibility, split splits on a string, not a regex
splits(regex), splits(regex; flags) # these provide the same results as their split counterparts, but as a stream instead of an array
sub(regex; tostring) sub(regex; string; flags) # emit the string obtained by replacing the first match of regex in the input string with tostring, after interpolation. tostring should be a jq string, and may contain references to named captures
gsub(regex; string), gsub(regex; string; flags) # gsub is like sub but all the non-overlapping occurrences of the regex are replaced by the string, after interpolation
Reduce # allows you to combine all of the results of an expression by accumulating them into a single answer
limit(n; exp) # extracts up to n outputs from exp
first(expr), last(expr) # The first(expr) and last(expr) functions extract the first and last values from expr, respectively
nth(n; expr) # extracts the nth value output by expr
first, last # extract the first and last values from any array at .
nth(n) # extracts the nth value of any array at .
foreach # is similar to reduce, but intended to allow the construction of limit and reducers that produce intermediate results
TELNET
telnet localhost smtp # open an SMTP session on port 25
ehlo ns22218.ovh.net
auth plain bWFuYWdlckBkYXRhc291ay5uZXQAbWFuYWdlckBkYXRhc291ay5uZXQAaGxtMTIwMQ==
mail from:<manager@datasouk.net>
rcpt to:<aguemoun@free.fr>
data
Subject: Petit Bonjour
Yeeeeeeeeeeeeeeeeeeeees
from oimeuuuuuh smtp 465
. # a single '.' on its own line ends the DATA section
quit
SENDMAIL
mail_from="admin@17112018.fr"
mail_to="eloise.corre@hotmail.fr"
mail_subject="test d'envoi from VPS"
mail_body="Voilà c'est juste un test\n pour voir"
mail_content_type="text/plain" # referenced in the Content-Type header below
(
echo "From: $mail_from"
echo "To: $mail_to"
echo "Subject: $mail_subject"
echo "MIME-Version: 1.0"
echo "Content-Type: $(! [ "$mail_content_type" ] && echo "text/plain" || echo "$mail_content_type"); charset=utf-8"
echo -e "$mail_body"
) | sendmail -t
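To send HTML instead of plain text, override the content-type variable (defined above) before running the pipeline:
mail_content_type="text/html"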
SYSTEMCTL
use TAB to select options & values !! (slow for systemctl)
TIPS
systemctl list-units --type=mount # list services which mounts devices
OPTIONS
systemctl [OPTIONS...] {COMMAND} ...
-t, --type= # limit display to certain unit types
--state= # when listing units, show only those in the specified LOAD, SUB, or ACTIVE states
-p, --property= # when showing unit/job/manager properties with the show command, limit display to properties specified
-a, --all # show all properties in listing unit/job/manager properties
-r, --recursive # also show units of local containers
--reverse # show reverse dependencies between units, with list-dependencies
--after # show the units that are ordered before the specified unit, with list-dependencies
--before # show the units that are ordered after the specified unit, with list-dependencies
-l, --full # do not ellipsize unit names, process tree entries, journal output, or truncate unit descriptions in the output of status, list-units, list-jobs, and list-timers
--value # only print the value, and skip the property name and "=" when printing properties
--show-types # show the type of the socket when showing sockets
--job-mode= # controls how to deal with already queued jobs. It takes one of "fail", "replace", "replace-irreversibly", "isolate", "ignore-dependencies", "ignore-requirements" or "flush"
--fail # shorthand for --job-mode=fail
-i, --ignore-inhibitors # when system shutdown or a sleep state is requested, ignore inhibitor locks
--dry-run # just print what would be done
-q, --quiet # suppress printing of the results of various commands and also the hints about truncated log lines
--no-block # do not synchronously wait for the requested operation to finish
--wait # synchronously wait for started units to terminate again
--user # talk to the service manager of the calling user, rather than the service manager of the system
--system # talk to the service manager of the system. This is the implied default
--failed # list units in failed state. This is equivalent to --state=failed
--no-wall # do not send wall message before halt, power-off and reboot
--global # operate on the global user configuration directory when used with enable and disable
--no-reload # do not implicitly reload daemon configuration after executing the changes when used with enable and disable
--no-ask-password # disables asking for passwords when used with start and related commands
--kill-who= # choose which processes to send a signal to when used with kill
-s, --signal= # choose which signal to send to selected processes when used with kill
-f, --force # overwrite any existing conflicting symlinks when used with enable
--message= # set a short message explaining the reason for the operation when used with halt, poweroff, or reboot
--now # the units will also be started when used with enable
--root= # use the specified root path when looking for unit files when used with enable/disable/is-enabled (and related commands)
--runtime # make changes only temporarily, so that they are lost on the next reboot when used with enable, disable, edit, (and related commands)
--preset-mode= # takes one of "full" (the default), "enable-only", "disable-only"
-n, --lines= # controls the number of journal lines to show when used with status
-o, --output= # controls the formatting of the journal entries that are shown when used with status
--firmware-setup # indicate to the system's firmware to boot into setup mode when used with the reboot command
--plain # output is printed as a list instead of a tree when used with list-dependencies, list-units or list-machines
-H, --host= # execute the operation remotely
-M, --machine= # execute operation on a local container
--no-pager # do not pipe output into a pager
--no-legend # do not print the legend
UNIT commands
INFORMATION
list-units [PATTERN...] # List units currently in memory
list-sockets [PATTERN...] # List socket units currently in memory, ordered by address
list-timers [PATTERN...] # List timer units currently in memory, ordered by next elapse
is-active PATTERN... # Check whether units are active
is-failed PATTERN... # Check whether units are failed
status [PATTERN...|PID...] # Show runtime status of one or more units
show [PATTERN...|JOB...] # Show properties of one or more units/jobs or the manager
cat PATTERN... # Show files and drop-ins of specified units
set-property UNIT PROPERTY=VALUE... # Sets one or more properties of a unit
help PATTERN...|PID... # Show manual for one or more units
list-dependencies [UNIT...] # Recursively show units which are required
ACTION
start UNIT... # Start (activate) one or more units
stop UNIT... # Stop (deactivate) one or more units
reload UNIT... # Reload one or more units
restart UNIT... # Start or restart one or more units
try-restart UNIT... # Restart one or more units if active
reload-or-restart UNIT... # Reload one or more units if possible, otherwise start or restart
try-reload-or-restart UNIT... # If active, reload one or more units, if supported, otherwise restart
isolate UNIT # Start one unit and stop all others
kill UNIT... # Send signal to processes of a unit
clean UNIT... # Clean runtime, cache, state, logs or configuration of unit
freeze PATTERN... # Freeze execution of unit processes
thaw PATTERN... # Resume execution of a frozen unit
reset-failed [PATTERN...] # Reset failed state for all, one, or more units
UNIT FILE commands
list-unit-files [PATTERN...] # List installed unit files
enable [UNIT...|PATH...] # Enable one or more unit files
disable UNIT... # Disable one or more unit files
reenable UNIT... # Reenable one or more unit files
preset UNIT... # Enable/disable one or more unit files based on preset configuration
preset-all # Enable/disable all unit files based on preset configuration
is-enabled UNIT... # Check whether unit files are enabled
mask UNIT... # Mask one or more units
unmask UNIT... # Unmask one or more units
link PATH... # Link one or more unit files into the search path
revert UNIT... # Revert one or more unit files to vendor version
add-wants TARGET UNIT... # Add 'Wants' dependency for the target on specified one or more units
add-requires TARGET UNIT... # Add 'Requires' dependency for the target on specified one or more units
edit UNIT... # Edit one or more unit files
get-default # Get the name of the default target
set-default TARGET # Set the default target
MACHINE Commands
list-machines [PATTERN...] # List local containers and host
JOB Commands
list-jobs [PATTERN...] # List jobs
cancel [JOB...] # Cancel all, one, or more jobs
ENVIRONMENT commands
show-environment # Dump environment
set-environment VARIABLE=VALUE... # Set one or more environment variables
unset-environment VARIABLE... # Unset one or more environment variables
import-environment [VARIABLE...] # Import all or some environment variables
MANAGER STATE commands
daemon-reload # Reload systemd manager configuration
daemon-reexec # Reexecute systemd manager
log-level [LEVEL] # Get/set logging threshold for manager
log-target [TARGET] # Get/set logging target for manager
service-watchdogs [BOOL] # Get/set service watchdog state
SYSTEM commands
is-system-running # Check whether system is fully running
default # Enter system default mode
rescue # Enter system rescue mode
emergency # Enter system emergency mode
halt # Shut down and halt the system
poweroff # Shut down and power-off the system
reboot # Shut down and reboot the system
kexec # Shut down and reboot the system with kexec
exit [EXIT_CODE] # Request user instance or container exit
switch-root ROOT [INIT] # Change to a different root file system
suspend # Suspend the system
hibernate # Hibernate the system
hybrid-sleep # Hibernate and suspend the system
suspend-then-hibernate # Suspend the system, wake after a period of time, and hibernate it
RUNLEVEL
Runlevel 0 # Shut down and power off the system
Runlevel 1 # Rescue / maintenance mode
Runlevel 3 # Multi-user, non-graphical system
Runlevel 4 # Multi-user, non-graphical system
Runlevel 5 # Multi-user, graphical system
Runlevel 6 # Shut down and reboot the machine
use TAB to complete options & values !! (completion is slow for systemctl)
EXAMPLES
ps -eaf | grep systemd # Check systemd running
systemctl list-unit-files # List all the available units
systemctl list-unit-files --type=service # List all available services
systemctl list-unit-files --type=mount # List all system mount points
systemctl list-unit-files --type=socket # List all available system sockets
systemctl list-unit-files sys* # List all available units starting with 'sys'
systemctl list-units *fs* # List all running units containing 'fs'
systemctl --failed # List all failed units
mask a service, making it impossible to start (links the unit to /dev/null)
systemctl mask ssh.service
systemctl unmask ssh.service
auto start service at system boot
systemctl is-active ssh.service
systemctl enable ssh.service
systemctl disable ssh.service
Get the current CPU Shares of a Service
systemctl show -p CPUShares ssh.service
systemctl set-property ssh.service CPUShares=2000 # Limit the CPU share of a service (here ssh.service) to 2000
enter emergency mode (after logging in, type "journalctl -xb" to view the boot log)
systemctl emergency
systemctl reboot
systemctl default # to try again to boot into default mode.
How to start Runlevel 5 aka graphical mode
systemctl isolate runlevel5.target
systemctl isolate graphical.target
How to start Runlevel 3 aka multiuser mode
systemctl isolate runlevel3.target
systemctl isolate multi-user.target
How to set multi-user mode or graphical mode as default runlevel
systemctl set-default runlevel3.target
systemctl set-default runlevel5.target
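combining options with commands
a minimal sketch combining the options listed above (ssh.service is an illustrative unit)
systemctl enable --now ssh.service # enable at boot & start immediately
systemctl status -n 20 ssh.service # status with the last 20 journal lines
systemctl list-dependencies --plain ssh.service # dependencies as a flat list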
ENV
env # list environment variables
env [options] name=value [cmd [arg]] # Set name to value in the temporary environment & run COMMAND
printenv # list environment variables
printenv $var # return value of environment variables $var if exists
( set -o posix ; set ) # list declared + environment variables
compgen -v # list available variable for completion
compgen -v $pat # list available variable for completion matched for a simple pattern $pat
DECLARE
declare [options]
If no NAMEs are given, display the attributes and values of all variables
declare [-aAfFgilnrtux] [-p]
-f # restrict action or display to function names and definitions
-F # restrict display to function names only (plus line number and source file when debugging)
-g # create global variables when used in a shell function; otherwise ignored
-p # display the attributes and value of each NAME
declare [options] [name]
Declare variables and give them attributes
declare [-aAfFgilnrtux] [-p] [name[=value] ...]
-a # to make NAMEs indexed arrays (if supported)
-A # to make NAMEs associative arrays (if supported)
-i # to make NAMEs have the `integer' attribute
-l # to convert NAMEs to lower case on assignment
-n # make NAME a reference to the variable named by its value
-r # to make NAMEs readonly
-t # to make NAMEs have the `trace' attribute
-u # to convert NAMEs to upper case on assignment
-x # to make NAMEs export
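a short sketch of how the attributes behave (variable names are illustrative)
declare -i num=7+3; echo $num # integer attribute: the expression evaluates to 10
declare -l low="ABC"; echo $low # lowercase on assignment: prints abc
declare -r const=1; const=2 # readonly: the second assignment fails with an error
declare -p num low const # display the attributes & values just set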
SCOPE
scope is general
terminal <-> function
terminal <-> sourced file
terminal <-> script / for exported variables : 'export'
script <-> function
script <-> sourced file
script <-> script / for exported variables : 'export'
scope is local
terminal <-> script
terminal <-> function / for localized variables : 'local'
terminal <-> sourced file / for localized variables : 'local'
script <-> script
script <-> function / for localized variables : 'local'
script <-> sourced file / for localized variables : 'local'
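a minimal sketch of the scope rules above (names are illustrative)
var_global="visible in the whole script"
myfunc() { local var_local="visible only inside myfunc"; echo "$var_global / $var_local"; }
myfunc # prints both values
echo "$var_local" # empty: 'local' keeps it out of the caller's scope
export var_exported="visible in child scripts too"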
EXPANSION
value
${parameter:-[word]} # use default values if parameter is unset or null
${parameter:=[word]} # assign default values if parameter is unset or null
${parameter:?[word]} # indicate error if null or unset if parameter is unset or null
${parameter:+[word]} # use alternative value if parameter is not null
parameter | Set and Not Null | Set But Null | Unset |
---|---|---|---|
${parameter:-word} | substitute parameter | substitute word | substitute word |
${parameter-word} | substitute parameter | substitute null | substitute word |
${parameter:=word} | substitute parameter | assign word | assign word |
${parameter=word} | substitute parameter | substitute null | assign word |
${parameter:?word} | substitute parameter | error, exit | error, exit |
${parameter?word} | substitute parameter | substitute null | error, exit |
${parameter:+word} | substitute word | substitute null | substitute null |
${parameter+word} | substitute word | substitute word | substitute null |
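a quick sketch of the forms in the table above (var is illustrative)
unset var; echo "${var:-fallback}" # prints 'fallback', var stays unset
unset var; echo "${var:=fallback}" # prints 'fallback' & assigns it to var
var=""; echo "${var+set}" # prints 'set': var is set, even if null
var=""; echo "${var:+set}" # prints nothing: var is null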
case
${var^} # First char in Uppercase
${var^^} # All chars in UPPERCASE
${var,} # First char in lowercase
${var,,} # All chars in lowercase
remove
${parameter%[word]} # remove smallest suffix pattern. the word shall be expanded to produce a pattern
${parameter%%[word]} # remove largest suffix pattern. the word shall be expanded to produce a pattern
${parameter#[word]} # remove smallest prefix pattern. the word shall be expanded to produce a pattern
${parameter##[word]} # remove largest prefix pattern. the word shall be expanded to produce a pattern
position
${#string} # the length in characters of the value of parameter shall be substituted. the result is unspecified for parameter '*' or '@'
${string:position} # extract string from position
${string:position:length} # extract string from position to length
${string#substring} # strip shortest match of $substring from front of $string
${string##substring} # strip longest match of $substring from front of $string
${string%substring} # strip shortest match of $substring from back of $string
${string%%substring} # strip longest match of $substring from back of $string
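the remove operators give a pure-bash dirname/basename; a quick sketch (the path is illustrative)
path=/usr/local/bin/script.sh
echo "${path##*/}" # script.sh : strip the longest prefix up to the last '/'
echo "${path%/*}" # /usr/local/bin : strip the shortest suffix from the last '/'
echo "${path##*.}" # sh : keep only the extension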
replace
${string/substring/replacement} # replace first match of $substring with $replacement
${string//substring/replacement} # replace all matches of $substring with $replacement
${string/#substring/replacement} # if $substring matches front end of $string, substitute $replacement for $substring
${string/%substring/replacement} # if $substring matches back end of $string, substitute $replacement for $substring
reference
${!var} # display the value referenced by var (name of variable)
${!varprefix*} / ${!varprefix@} # matches all previously declared variables beginning with varprefix
${string:${#string}<3?0:-3} # take the last 3 characters if string length > 3
str=$(</dev/stdin)
# quoted: keeps the line breaks
echo "$str"
# unquoted: line breaks are replaced by spaces
echo $str
SEQUENCE
{0..10..2} / seq 0 2 10 # 0 2 4 6 8 10
{2..-3..1} # 2 1 0 -1 -2 -3
{U..b} # U V W X Y Z [ \ ] ^ _ ` a b
{b..U} # b a ` _ ^ ] \ [ Z Y X W V U
SYSTEM VARIABLES
$PWD # Actual working directory
$PATH # Path to binaries /usr/bin ...
$USER # User name
$EUID # "effective" user ID number
$UID # User ID number
$GROUPS # Groups current user belongs to
$HOME # Home directory of the user
$HOSTNAME # The hostname assigned at bootup in an init script
$HOSTTYPE # host type
$IFS # internal field separator
$BASH # The path to the Bash binary itself
$BASH_ENV # An environmental variable pointing to a Bash startup file
$BASH_SUBSHELL # A variable indicating the subshell level
$BASHPID # Process ID of the current instance of Bash, not same as $$
$BASH_VERSION # The version of Bash installed on the system
${BASH_VERSINFO[n]} # A 6-element array containing version information
[0] # Major version no
[1] # Minor version no
[2] # Patch level
[3] # Build version
[4] # Release status
[5] # Architecture
$'' # permit embedded \n in variables
$CDPATH # A colon-separated list of search paths available to the cd command
$DIRSTACK # The top value in the directory stack
$EDITOR # The default editor invoked by a script, usually vi or emacs.
$GLOBIGNORE # A list of filename patterns to be excluded from matching in globbing
$IGNOREEOF # number of EOF(control-D) the shell will ignore before logging out
$LC_COLLATE # collation order in filename expansion and pattern matching
$LC_CTYPE # controls character interpretation in globbing and pattern matching
$MACHTYPE # machine type
$OLDPWD # Old working directory ("OLD-Print-Working-Directory")
$OSTYPE # operating system type
$REPLY # The default value when a variable is not supplied to read
$SHELLOPTS # The list of enabled shell options
$SHLVL # Shell level, how deeply Bash is nested
$TMOUT # the shell prompt will time out after $TMOUT seconds
$PIPESTATUS # Array variable holding exit status(es) of last executed foreground pipe
[0] # holds the exit status of the first command in the pipe
[1] # the exit status of the second command, and so on.
$PROMPT_COMMAND # holding a command to be executed just before the primary prompt
$PS1 # This is the main prompt, seen at the command-line
$PS2 # The 2nd prompt, when additional input is expected. It displays as ">"
$PS3 # The 3rd prompt, displayed in a select loop
$PS4 # The 4th prompt, shown at the beginning of each line of output when invoking a script with the -x [verbose trace] option. It displays as "+"
FUNCTION VARIABLES
$PPID # process ID (PID) of the parent process
$FUNCNAME # Name of the current function
${BASH_SOURCE[0]} # BASH_SOURCE array with hierarchical calls: level => filename
$LINENO # Line number of the shell script in which this variable appears
$SECONDS # The number of seconds the script has been running
$0 # name of script itself (called)
$1, $2, ... # Positional parameters
${*:2} / ${@:2} # return second & following positional parameters
${*:2:3} # return three positional parameters, starting at second
$# # Number of command-line arguments or positional parameters
$* # All of the positional parameters, must be quoted "$*"
$@ # Same as $*, but each parameter is a quoted string
$- # Flags passed to script (using set)
$! # PID (process ID) of last job run in background
$_ # Special variable set to final argument of previous command executed
$? # Exit status of a command, function, or the script itself
$$ # Process ID (PID) of the script itself. The $$ variable often finds use in scripts to construct "unique" temp file names
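a tiny sketch exercising a few of these (the script body is illustrative)
echo "script $0 called with $# arguments: $*"
sleep 1 & echo "background pid: $!"
false; echo "exit status of last command: $?" # prints 1
echo "own pid: $$"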
GENERAL
create filesystem
mkfs.btrfs -f -L $LABEL $DEVICE1 $DEVICE2 ...
# mount subvolume with ssd options & lzo compression
UUID=$UUID $PATHTMP btrfs defaults,noatime,ssd,discard,autodefrag,compress=lzo,space_cache,inode_cache,subvol=save 0 2
# mount device with noauto mounting
UUID=$UUID $PATHTMP btrfs defaults,noauto,noatime 0 2
BTRFS
btrfs filesystem
command group that primarily does work on the whole filesystem
btrfs filesystem <subcommand> <args>
show # show global configuration
show $vol # show information
usage $vol # show detailed information
df $vol # list occupied spaces
defragment $vol # defragment
label $vol $label # get or set label to filesystem
btrfs subvolume
manage btrfs subvolumes
btrfs subvolume <subcommand> [<args>]
create $path # create subvolume
delete $path # delete subvolume
list $path # list major properties of subvolume
set-default $ID $path # set a subvolume as the default of the filesystem
mount a subvolume
mount [-o ...] subvol=$path <device> <path-system>
btrfs property
get/set/list properties for given filesystem object
btrfs property <subcommand> <args>
list $PATH # list properties of $PATH
list -ts $PATH # list properties, treating $PATH as a subvolume (-t s)
get $PATH ro # return the ro property of $PATH
set -ts $PATH ro false # make the subvolume read-write
set -ts $PATH ro true # make the subvolume read-only
UUID
btrfstune -u /dev/sdaX # set a new random UUID for device
btrfstune -U $UUID /dev/sdaX # set the UUID for device
INTEGRITY
btrfs device
manage devices of btrfs filesystems
btrfs device <subcommand> <args>
stats $path-device # show history stats
ready <device> # Wait until all devices of a multiple-device filesystem are scanned and registered within the kernel module
scan <device> # Scan devices for a btrfs filesystem and register them with the kernel module
btrfs scrub
scrub btrfs filesystem, verify block checksums
btrfs scrub <subcommand> <args>
start $path-device # start a scrub
status $path-device # return stats on scrub launched
btrfs check
check or repair an unmounted btrfs filesystem
btrfs check [options] <device>
btrfs check $vol # check an unmounted btrfs filesystem
-b|--backup # use the first valid set of backup roots stored in the superblock
--check-data-csum # verify checksums of data blocks
--chunk-root <bytenr> # use the given offset bytenr for the chunk tree root
-E|--subvol-extents <subvolid> # show extent state for the given subvolume
-p|--progress # indicate progress at various checking phases
--repair # enable the repair mode and attempt to fix problems where possible
SNAPSHOT
btrfs subvolume
manage btrfs subvolumes
btrfs subvolume <subcommand> [<args>]
snapshot $vol $snap # create a RWrite snapshot
snapshot -r $vol $snap # create a ROnly snapshot
delete $vol/$snap # delete snapshot
create [-i <qgroupid>] [<dest>/]<name> # Create a subvolume <name> in <dest>
-i <qgroupid> # add the newly created subvolume to a qgroup
delete [options] <subvolume> [<subvolume>...] # Delete the subvolume(s) from the filesystem
-c|--commit-after # wait for transaction commit at the end of the operation
-C|--commit-each # wait for transaction commit after deleting each subvolume
find-new <subvolume> <last_gen> # List the recently modified files in a subvolume, after <last_gen> ID
get-default <path> # Get the default subvolume of the filesystem <path>
list [options] [-G [+|-]<value>] [-C [+|-]<value>] [--sort=rootid,gen,ogen,path] <path> # List the subvolumes present in the filesystem <path>
-p # print parent ID
-a # print all the subvolumes in the filesystem and distinguish between absolute and relative path with respect to the given <path>
-c # print the ogeneration of the subvolume, aliases: ogen or origin generation
-g # print the generation of the subvolume
-o # print only subvolumes below specified <path>
-u # print the UUID of the subvolume
-q # print the parent uuid of subvolumes (and snapshots)
-R # print the UUID of the sent subvolume, where the subvolume is the result of a receive operation
-t # print the result as a table
-s # only snapshot subvolumes in the filesystem will be listed
-r # only readonly subvolumes in the filesystem will be listed
-G [+|-]<value> # list subvolumes whose generation is >= (+value), <= (-value) or = (value)
--sort=rootid,gen,ogen,path # list subvolumes in order by specified items
QGROUP
https://linuxfr.org/users/ar7/journaux/btrfs-et-opensuse-episode-5-les-quotas
BACKUP/RESTORE
https://ramsdenj.com/2016/04/05/using-btrfs-for-easy-backup-and-rollback.html
https://wiki.evolix.org/HowtoBTRFS
https://github.com/digint/btrbk
basic backup
btrfs subvolume snapshot -r $vol $snap # create a ro snapshot
btrfs send <snapshot> | gzip -c > $path # backup to compressed file
gzip -dc $path | btrfs receive <pathto/> # restore from compressed file
btrfs property set -ts <pathto> ro false # make the restored subvolume writable
incremental backup over ssh
btrfs subvolume snapshot -r / /my/snapshot-YYYY-MM-DD && sync
btrfs send /my/snapshot-YYYY-MM-DD | ssh user@host btrfs receive /my/backups
btrfs subvolume snapshot -r / /my/incremental-snapshot-YYYY-MM-DD && sync
btrfs send -p /my/snapshot-YYYY-MM-DD /my/incremental-snapshot-YYYY-MM-DD | ssh user@host btrfs receive /backup/home
save the streams
btrfs subvolume snapshot -r / /my/snapshot-YYYY-MM-DD && sync
btrfs send /my/snapshot-YYYY-MM-DD | ssh user@host 'cat >/backup/home/snapshot-YYYY-MM-DD.btrfs'
btrfs subvolume snapshot -r / /my/incremental-snapshot-YYYY-MM-DD && sync
btrfs send -p /my/snapshot-YYYY-MM-DD /my/incremental-snapshot-YYYY-MM-DD | ssh user@host 'cat >/backup/home/incremental-snapshot-YYYY-MM-DD.btrfs'
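restore from a saved stream (a sketch; the paths are illustrative, btrfs receive reads the stream on stdin)
cat /backup/home/snapshot-YYYY-MM-DD.btrfs | btrfs receive /my/restore
cat /backup/home/incremental-snapshot-YYYY-MM-DD.btrfs | btrfs receive /my/restore # incremental: the parent snapshot must already exist at the destination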
PARTCLONE
# clone, backup & restore partition
https://manpages.ubuntu.com/manpages/disco/man8/partclone.btrfs.8.html
# clone /dev/hda1 to hda1.img and display debug information.
partclone.ext3 -c -d -s /dev/hda1 -o hda1.img
# restore /dev/hda1 from hda1.img and display debug information.
partclone.extfs -r -d -s hda1.img -o /dev/hda1
# restore image from clonezilla(split, gzip,) with stdin source
cat sda1.ext3-ptcl-img.gz.a* | gunzip -c | partclone.ext3 -d -r -s - -o /dev/sda1
recover/rescue
https://ownyourbits.com/2019/03/03/how-to-recover-a-btrfs-partition/
CONFIG
snapper list-configs # show the list of configurations
snapper -c $config
create-config $file # create configuration file
delete-config # delete configuration file
get-config # show config value
set-config $key="$value" # set a key/value for a config
SNAPSHOT
snapper -c $config
# list snapshot refer to a config
list
# create a snapshot with description & print id
create -d $DESC -p
# create a 'pre' snapshot
create -t pre -d $DESC -p
# create a 'post' snapshot that refers to the pre id
create -t post --pre-number $ID
# create a snapshot
create -d $DESC
# create a pre/post snapshot pair around a command
create --command $CMD
# delete snapshot id1
delete $ID
# delete snapshot between id1 & idn
delete $ID1-$ID2
# modify values for snapshot id
modify -$OPTION $value $ID
# give a list of changed files between $ID1 & $ID2
status $ID1..$ID2
# show diff of files between $ID1 & $ID2
diff $ID1..$ID2 $files
# compare extended attributes of files between $ID1 & $ID2
xadiff $ID1..$ID2 $files
# Undo changes on files listed in $file between $ID1 & $ID2
undochange $ID1..$ID2 -i $file
# Undo changes on files between $ID1 & $ID2
undochange $ID1..$ID2 $files
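typical pre/post workflow, as a sketch ($config & the upgrade command are illustrative)
pre=$(snapper -c $config create -t pre -p -d "before upgrade") # keep the printed id
apt full-upgrade
post=$(snapper -c $config create -t post --pre-number $pre -p -d "after upgrade")
snapper -c $config status $pre..$post # review what the command changed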
BASH
https://ss64.com/bash/
https://www.tldp.org/LDP/abs/html/special-chars.html # symbols
COMPGEN
compgen [option] [word] # Generate possible completion matches for word according to the options, which may be any option accepted by the complete builtin with the exception of -p and -r, and write the matches to the standard output
-a # alias
-b # shell builtins
-c # all commands
-d # directory
-e # exported shell variables
-f # file names
-g # groups
-j # job
-k # Shell reserved words
-s # service
-u # user names
-v # shell variables
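compgen output pipes like any command; a small sketch
compgen -c | grep '^git' # commands starting with 'git'
compgen -A function # list defined shell functions
compgen -u | sort # user names, sorted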
ARITHMETIC
expr expression
Print the result of the expression (no assignment)
expr 18 / 2 # returns 9; operator & operands must be separated by spaces (expr 18/2 prints the literal string, expr 18 /2 is an error)
expr 35 / 5 + 4 # return 11
let expression
Assign value to a variable
let a=3+1 / let 'a = 3 + 1' # assign the result of '3+1' to variable a
let a++ # add 1 to variable a
$(( expression ))
Print the result of the expression; assignment is allowed
echo $((a = 50 / 3)) / echo $((a=50 / 3)) # assign 16 to variable a & print 16
b=$(( a = 100 % 12 )) # assign 4 to a & b
EXPANSION
ls */path/*/
mv $dir/!(gnome-*) $dir/ # move files except those matching the pattern (requires shopt -s extglob)
MULTIPLE ASSIGNMENT
read a b c <<<"1 2 3"; echo "$a|$b|$c"
read a[{1..3}] <<<"$(echo 2 4 6)"; echo "${a[1]}|${a[2]}|${a[3]}"
str="Learn-to-Split-a-String-in-Bash-Scripting"
IFS='-'
read -ra ADDR <<< "$str"
IFS=' ' # reset to default value after usage
MULTIPLE COMMAND TO A FILE
{
echo "contents of home directory"
ls ~
} > output.txt
FILE DESCRIPTOR
test file descriptor
"$(true 2>/dev/null >&6; echo $?)" # return 0 if a file descriptor 6 is open for output
"$(true 2>/dev/null <&6; echo $?)" # return 0 if a file descriptor 6 is open for input
PROCESS SUBSTITUTION
<( command )
replace a file
sort -k 9 <(ls -l /bin) <(ls -l /usr/bin)|column -t
diff <(ls -l /bin) <(ls -l /usr/bin)
>( command_list )
redirect an entry
tar cf >(bzip2 -c > $file.tar.bz2) $directory
examples
compress through a named pipe
mkfifo pipe
bzip2 -c < pipe > $file.tar.bz2 &
tar cf pipe $directory
rm pipe
redirection
ls /dfdf 2>&1 >> /tmp/1| tee -a /tmp/2 # duplicate stderr in files & redirect stdout to file
exec 2> >(tee -a "$file_error" "$file_log") # duplicate stderr in 2 files
exec 2> >(tee -a /tmp/2) > >(tee -a /tmp/1) 4>&1 # duplicate stderr & stdout in files & 4 in 1
exec 2> >(tee -a /tmp/2) > >(tee -a /tmp/1) 4> >(tee /dev/null) # duplicate stderr & stdout in files & &4 in null
exec 2> >(tee -a /tmp/2) > >(tee -a /tmp/1) 4> >(tee -a /tmp/4) # duplicate stderr & stdout in files & &4
read lines
while read line; do cmd $line; done < $file
while read line; do cmd $line; done < <( command )
while read line; do cmd $line; done <<< "$( command )"
while read line; do cmd $line; done <<< "$text"
ls | while read line; do echo $line; done
find / -type f | while read line; do echo $line; done
find / -type f | xargs -I {} echo {}
LIST
# return sorted & uniq
printf "%q\n" ${S_IPS_ADMIN} ${S_IPS_DEV}| sort -u
# list contains of current path
echo *
# list only directories, including hidden ones
ls -I. -I.. -ap /opt|grep /|sed 's|/$||'|xargs
# list only directories, excluding hidden ones
ls -p /opt|grep /|sed 's|/$||'|xargs
# list in color & with full time
ls -al --color --time-style=full-iso $PATHTMP
FIND
exclude path in name finding
sudo find / -not \( -regex '/\(proc\|run\|sys\)' -prune \) -name .Trash*
sudo find / -not \( -path /proc -prune -o -path /run -prune -o -path /sys -prune \) -name .Trash*
multiple exec
find . -name "*.txt" -exec echo {} \; -exec grep banana {} \;
delete & report the count of deleted
find "$path" -not \( -regex '/\(proc\|run\|sys\)' -prune \) -type f -name "$str" -exec echo "{}" \; -exec rm -f "{}" \; | wc -l
find directories without cover
find Music/ -mindepth 2 -maxdepth 2 -type d ! -exec test -e "{}/cover.jpg" ';' -print
find Music/ -mindepth 2 -maxdepth 2 -type d ! -exec sh -c "ls -1 '{}'|grep -qE '^cover\.(jpg|png)$'" \; -print
rename files; xargs -n2 passes two arguments (original & new name) to each mv
find . -name "*_test.rb" | sed -e "p;s/test/spec/" | xargs -n2 mv
find Music/ -depth -empty -delete # find & delete empty directories & files
find Music/ -depth -type d -empty -delete # find & delete empty directories
SSH
display all configuration parameters
sshd -T
ssh keepalive
echo -e "\nServerAliveInterval 60\nServerAliveCountMax 1200" >> /etc/ssh/ssh_config # client
echo -e "\nClientAliveInterval 240\nClientAliveCountMax 3" >> /etc/ssh/sshd_config # server
Running a command on a remote server
ssh [user]@[server] 'command'
ssh -t [user]@[server] 'command' # force a pseudo-terminal for interactive commands
ssh [user]@[server] 'bash -s' < [local_script]
ssh [user]@[server] << EOF
...
EOF
SYSTEM
env # print environment variables
cat /proc/cpuinfo # cpu info
cat /proc/meminfo # mem info
free # memory
vmstat # memory
sudo dmidecode --type memory # physical information about memory
sudo dmidecode -t 17 # physical information about memory
getfacl /home/shared # access control / get special right
setfacl -b /home/shared # access control / set special right
hdparm -tT $DEVICE # read speed benchmark for a disk
stat -c %u $file # get owner uid of file/folder
stat -c %a $file # get access right on file/folder
lshw -short -C memory # return details about RAM
filesystem
fuser -m $DEVICE # list all processes using the filesystem
fuser $FILE # list all processes using the file
lsof # list opened files of active processes
lsof $FILE # List processes which opened the file
lsof +D $PATHTMP # List opened files under a directory
lsof $DEVICE # List processes using a mount point
lsof -u $USER # List files opened by a specific user
lsof -p $PID # List all open files by a specific process
lsof -u $USER -a -i # list all network connections used by user
lsof -u $USER -a $DEVICE # list processes using a mount point and owned by user
lsof -i | grep ssh # list ssh processes
kill -9 $(lsof -t $DEVICE) # kill all processes using a device
udisksctl loop-setup -r -f $files_iso # mount an iso image without root privileges
RANDOM
# print an uuid
cat /proc/sys/kernel/random/uuid
# generate random string with 14 characters
cat /proc/sys/kernel/random/uuid | sha256sum | head -c14
< /dev/urandom tr -dc _A-Z-a-z-0-9 | head -c14
# generate random string
openssl rand -base64 24
openssl rand -hex 12
TRICKS
comm
compare two sorted files line by line
With no options, produce three-column output. Column one contains lines unique to FILE1, column two contains lines unique to FILE2, and column three contains lines common to both files
comm [OPTION]... FILE1 FILE2
-1 suppress column 1 (lines unique to FILE1)
-2 suppress column 2 (lines unique to FILE2)
-3 suppress column 3 (lines that appear in both files)
--check-order # check that the input is correctly sorted, even if all input lines are pairable
--nocheck-order # do not check that the input is correctly sorted
--output-delimiter=STR # separate columns with STR
--total # output a summary
-z, --zero-terminated # line delimiter is NUL, not newline
example
# return only variables
comm -3 <(comm -3 <(declare | sort) <(declare -f | sort)) <(env | sort)
convert lines to columns, 4 per row
awk 'ORS=NR%4?" ":"\n"' $file
cat $file | paste - - - - > $fileto
merge 2 results or 2 files in 2 columns
paste $file1 $file2
paste <($command1) <($command2)|column -tn
mount without root privileges
udisksctl loop-setup -r -f $FILE_ISO
assign values to the positional parameters $*
set -- var1 var2 var3; echo "$1"; echo "$2"
process
$cmd & # create a subprocess & detach process
nohup $cmd & # create a subprocess & detach process, logging stdout to nohup.out
hex to dec
echo $((16#$hexNum))
time & date
time ( $cmd ) # return the time to execute cmd
date -d @1267619929 +'%Y%m%d' # convert timestamp to format
cut -d' ' -f5- $file # get fields from the 5th to the last, separated by ' '
tr -cs 'A-Za-z' '\n' < $file |grep -c "aaa" # count number of "aaa"
user
useradd -g $gid -u $uid $USER -p "$(mkpasswd "$pwd")" -d </home/user> -c <comment> -s /bin/sh # user add
usermod -aG $gid $USER # add a supplementary group to user (-a avoids overwriting existing groups)
compress
tar czf # compress with gzip
tar cjf # compress with bzip2
XZ_OPT=-9 tar cvJfh # compress with following symlinks & xz & max compression
xz -z9c file > file.xz # compress with max compression
xz -dc file.xz > file # decompress
who
who -a # list all connections
who -q # all login names and number of users logged on
who -u # list users logged in
ps
ps -e / ps -A # all processes
ps ax # all processes with the command used to start them
ps -ef # all process & full informations
dd
dd $cmd status=progress # verbose mode
OCC
occ
path2occ="/var/share/www/cloud/occ"
sudo -u apache php8 $path2occ <command_name>
help
Displays help for a command
help [options] [--] [<command_name>]
list
Lists commands
list [options] [--] [<namespace>]
Global options
--format=FORMAT # the output format (txt, xml, json, or md) [default: "txt"]
--raw # To output raw command help
-h, --help # Display this help message
-q, --quiet # Do not output any message
-V, --version # Display this application version
--ansi # Force ANSI output
--no-ansi # Disable ANSI output
-n, --no-interaction # Do not ask any interactive question
--no-warnings # Skip global warnings, show command output only
-v|vv|vvv, --verbose # Increase the verbosity of messages: 1 for normal output, 2 for more verbose output and 3 for debug
Available commands
check # check dependencies of the server environment
help # Displays help for a command
list # Lists commands
status # show some status information
upgrade # run upgrade routines after installation of a new release. The release has to be installed before
activity
activity:send-mails # Sends the activity notification mails
app
app:check-code # check code to be compliant
app:disable # disable an app
app:enable # enable an app
app:getpath # Get an absolute path to the app directory
app:install # install an app
app:list # List all available apps
app:remove # remove an app
app:update # update an app or all apps
audioplayer
audioplayer:reset # reset audio player library
audioplayer:scan # scan for new audio files; use -v for debugging
background
background:ajax # Use ajax to run background jobs
background:cron # Use cron to run background jobs
background:webcron # Use webcron to run background jobs
config
config:app:delete # Delete an app config value
config:app:get # Get an app config value
config:app:set # Set an app config value
config:import # Import a list of configs
config:list # List all configs
config:system:delete # Delete a system config value
config:system:get # Get a system config value
config:system:set # Set a system config value
dav
dav:create-addressbook # Create a dav addressbook
dav:create-calendar # Create a dav calendar
dav:list-calendars # List all calendars of a user
dav:move-calendar # Move a calendar from one user to another
dav:remove-invalid-shares # Remove invalid dav shares
dav:send-event-reminders # Sends event reminders
dav:sync-birthday-calendar # Synchronizes the birthday calendar
dav:sync-system-addressbook # Synchronizes users to the system addressbook
db
db:add-missing-indices # Add missing indices to the database tables
db:convert-filecache-bigint # Convert the ID columns of the filecache to BigInt
db:convert-mysql-charset # Convert charset of MySQL/MariaDB to use utf8mb4
db:convert-type # Convert the Nextcloud database to the newly configured one
deck
deck:export # Export a JSON dump of user data
encryption
encryption:change-key-storage-root # Change key storage root
encryption:decrypt-all # Disable server-side encryption and decrypt all files
encryption:disable # Disable encryption
encryption:enable # Enable encryption
encryption:encrypt-all # Encrypt all files for all users
encryption:list-modules # List all available encryption modules
encryption:set-default-module # Set the encryption default module
encryption:show-key-storage-root # Show current key storage root
encryption:status # Lists the current status of encryption
federation
federation:sync-addressbooks # Synchronizes addressbooks of all federated clouds
files
files:cleanup # cleanup filecache
files:recommendations:recommend
files:scan # rescan filesystem
files:scan-app-data # rescan the AppData folder
files:transfer-ownership # All files and folders are moved to another user - shares are moved as well
group
group:add # Add a group
group:adduser # add a user to a group
group:delete # Remove a group
group:list # list configured groups
group:removeuser # remove a user from a group
integrity
integrity:check-app # Check integrity of an app using a signature
integrity:check-core # Check integrity of core code using a signature
integrity:sign-app # Signs an app using a private key
integrity:sign-core # Sign core using a private key
l10n
l10n:createjs # Create javascript translation files for a given app
log
log:file # manipulate logging backend
log:manage # manage logging configuration
log:tail # Tail the nextcloud logfile
log:watch # Watch the nextcloud logfile
maintenance
maintenance:data-fingerprint # update the systems data-fingerprint after a backup is restored
maintenance:mimetype:update-db # Update database mimetypes and update filecache
maintenance:mimetype:update-js # Update mimetypelist.js
maintenance:mode # set maintenance mode
maintenance:repair # repair this installation
maintenance:theme:update # Apply custom theme changes
maintenance:update:htaccess # Updates the .htaccess file
migrations
migrations:execute # Execute a single migration version manually.
migrations:generate #
migrations:generate-from-schema #
migrations:migrate # Execute a migration to a specified version or the latest available version.
migrations:status # View the status of a set of migrations.
notification
notification:generate # Generate a notification for the given user
security
security:certificates # list trusted certificates
security:certificates:import # import trusted certificate
security:certificates:remove # remove trusted certificate
sharing
sharing:cleanup-remote-storages # Cleanup shared storage entries that have no matching entry in the shares_external table
social
social:account:create # Create a new social account
social:account:following # Following a new account
social:cache:refresh # Update the cache
social:check:install # Check the integrity of the installation
social:fediverse # Allow or deny access to the fediverse
social:note:boost # Boost a note
social:note:create # Create a new note
social:note:like # Like a note
social:queue:process # Process the request queue
social:queue:status # Return status on the request queue
social:reset # Reset ALL data related to the Social App
social:stream # Get stream by timeline and viewer
talk
talk:command:add # Add a new command
talk:command:add-samples # Adds some sample commands: /wiki, …
talk:command:delete # Remove an existing command
talk:command:list # List all available commands
talk:command:update # Update an existing command
talk:signaling:add # Add an external signaling server.
talk:signaling:delete # Remove an existing signaling server.
talk:signaling:list # List external signaling servers.
talk:stun:add # Add a new STUN server.
talk:stun:delete # Remove an existing STUN server.
talk:stun:list # List STUN servers.
talk:turn:add # Add a TURN server.
talk:turn:delete # Remove an existing TURN server.
talk:turn:list # List TURN servers.
trashbin
trashbin:cleanup # Remove deleted files
trashbin:expire # Expires the users trashbin
twofactorauth
twofactorauth:cleanup # Clean up the two-factor user-provider association of an uninstalled/removed provider
twofactorauth:disable # Disable two-factor authentication for a user
twofactorauth:enable # Enable two-factor authentication for a user
twofactorauth:enforce # Enabled/disable enforced two-factor authentication
twofactorauth:state # Get the two-factor authentication (2FA) state of a user
update
update:check # Check for server and app updates
user
user:add # adds a user
user:delete # deletes the specified user
user:disable # disables the specified user
user:enable # enables the specified user
user:info # show user info
user:lastseen # shows when the user was logged in last time
user:list # list configured users
user:report # shows how many users have access
user:resetpassword # Resets the password of the named user
user:setting # Read and modify user settings
versions
versions:cleanup # Delete versions
versions:expire # Expires the users file versions
TRICK
clean lock files
-
put Nextcloud in maintenance mode
sudo -u www-data php occ maintenance:mode --on
or edit config/config.php and change this line:
'maintenance' => true,
-
Empty table oc_file_locks
DELETE FROM oc_file_locks WHERE 1; # or simply: DELETE FROM oc_file_locks;
-
disable maintenance mode (undo first step)
scan user files
sudo -u www-data php $path2occ files:scan --path user/files
sudo -u www-data php $path2occ files:scan --all
scan audio player
sudo -u www-data php $path2occ audioplayer:scan aguy --debug
sudo -u www-data php $path2occ audioplayer:reset --all
transfer all files and shares from one user to another
sudo -u www-data php occ files:transfer-ownership <source-user> <destination-user>
sudo -u www-data php occ files:transfer-ownership --path="path_to_dir" <source-user> <destination-user>
SQL
SELECT * FROM `oc_share` WHERE share_with LIKE 'lcherid';
SELECT SUBSTRING(file_target, 6) AS filetarget FROM `oc_share` WHERE share_with LIKE 'lcherid' AND file_target LIKE '/tmp/%';
SELECT name,fileid FROM `oc_filecache` WHERE path LIKE '%/hd/%' AND name IN (SELECT SUBSTRING(file_target, 6) AS filetarget FROM `oc_share` WHERE share_with LIKE 'lcherid' AND file_target LIKE '/tmp/%');
SELECT 0,'lcherid',null,'aguy','aguy',null,'file',fileid,null,fileid,CONCAT('/',name),3,1534492000,0,null,null,0,null FROM `oc_filecache` WHERE path LIKE '%/hd/%' AND name IN (SELECT SUBSTRING(file_target, 6) AS filetarget FROM `oc_share` WHERE share_with LIKE 'lcherid' AND file_target LIKE '/tmp/%');
INSERT INTO `oc_share` (share_type,share_with,password,uid_owner,uid_initiator,parent,item_type,item_source,item_target,file_source,file_target,permissions,stime,accepted,expiration,token,mail_send,share_name)
SELECT 0,'lcherid',null,'aguy','aguy',null,'file',fileid,null,fileid,CONCAT('/',name),3,1534492000,0,null,null,0,null FROM `oc_filecache` WHERE path LIKE '%/hd/%' AND name IN (SELECT SUBSTRING(file_target, 6) AS filetarget FROM `oc_share` WHERE share_with LIKE 'lcherid' AND file_target LIKE '/tmp/%');
INSERT INTO `oc_share` (`share_type`, `share_with`, `password`, `uid_owner`, `uid_initiator`, `parent`, `item_type`, `item_source`, `item_target`, `file_source`, `file_target`, `permissions`, `stime`, `accepted`, `expiration`, `token`, `mail_send`, `share_name`)
SELECT 0,'teichmann',NULL,'aguy','aguy',NULL,'file',fileid,NULL,fileid,concat('/',name),1,1534155903,0,NULL,NULL,0,NULL FROM `oc_filecache` WHERE path LIKE 'files/perso/photos/paris/1992-%' ORDER BY path
;
# add hd
INSERT INTO `oc_share` (`share_type`, `share_with`, `password`, `uid_owner`, `uid_initiator`, `parent`, `item_type`, `item_source`, `item_target`, `file_source`, `file_target`, `permissions`, `stime`, `accepted`, `expiration`, `token`, `mail_send`, `share_name`)
SELECT 0,'elyazid',NULL,'aguy','aguy',NULL,'file',fileid,NULL,fileid,concat('/tmp/',name),3,1534179000,0,NULL,NULL,0,NULL FROM `oc_filecache` WHERE path LIKE '%/hd/%' AND CONCAT('/',name) IN (SELECT file_target FROM `oc_share` WHERE share_with LIKE 'elyazid' AND file_target NOT LIKE '/algerie/%');
;
show privileges in sql
pt-show-grants --only myuser
ARRAY
declaration
# declares and initializes an empty indexed array
myarray=()
# sets the first element of an indexed array; creates the array if it does not exist
myarray[0]=
# declares an indexed array; an existing array is not reinitialized
declare -a myarray
# declares an associative array
declare -A myarray
value setting
# sets the element N of the indexed array
myarray[N]=VALUE
# sets the element indexed by STRING of the associative array
myarray[STRING]=VALUE
# set the zeroth element of indexed array, for associative array index with string "0"
myarray=VALUE
# sets array with given elements, starting at zero. existing array is unset before
myarray=(E1 E2 …)
# sets index-value pairs for indexed or associative array
myarray=([X]=E1 [Y]=E2 …)
# append to array
myarray+=(E1 E2 …)
# concat two arrays
myarray3=( ${myarray1[@]} ${myarray2[@]} )
# concat a slice of an array with additional elements
myarray3=( ${myarray1[@]:0:2} "str" "str2" )
value getting
# return value of array at index; a negative N counts back from the end of the array
${myarray[N]}
# return list of indexes in array
${!myarray[@]}
# return list of values in array, preserves breaks between elements
${myarray[@]}
# when you want join all array elements to a string
${myarray[*]}
# return M elements starting from N, preserves breaks between elements
${myarray[@]:N:M}
${myarray[*]:N:M}
# return M elements starting N positions from the end (note the required space before -N)
${myarray[*]: -N:M}
# return stringlength of value at index N (or key)
${#myarray[N]}
# return count of indexes in array, preserves breaks between elements
${#myarray[@]}
${#myarray[*]}
# return list of indexes in array, preserves breaks between elements
${!myarray[@]}
${!myarray[*]}
# test if key N exists in associative array: returns _ if the key exists, nothing otherwise
${myarray[N]+_}
substring removing
# removes shortest match from front of string(s)
${myarray[@]#*sch}
# longest match from front of string(s)
${myarray[@]##*sch}
# shortest match from back of string(s)
${myarray[@]%sch*}
# Longest match from back of string(s)
${myarray[@]%%sch*}
substring replacement
# replace first occurrence of substring with replacement.
${myarray[@]/sch/str}
# replace all occurrences of substring.
${myarray[@]//sch/str}
# delete all occurrences of substring.
${myarray[@]//str/}
# replace front-end occurrences of substring.
${myarray[@]/#sch/str}
# replace back-end occurrences of substring.
${myarray[@]/%sch/str}
unset
# destroys the entire array
unset -v myarray
unset -v myarray[@]
unset -v myarray[*]
# destroys array element at index N (or key)
unset -v myarray[N]
create
# create array from file or readarray
mapfile -t myarray < FILE
# print table with 2 columns
printf "%s\n" "${!myarray[@]}" "${myarray[@]}" | pr -2t
# copy an array
myarray2=("${myarray1[@]}")
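a short sketch tying the operations together (array contents are illustrative)
declare -A ages=([alice]=30 [bob]=25)
for name in "${!ages[@]}"; do echo "$name is ${ages[$name]}"; done # iterate over keys
ages[carol]=40 # add a key
[[ ${ages[carol]+_} ]] && echo "carol exists" # key-existence test from above
unset -v 'ages[bob]' # remove one key (quoted to avoid globbing)
echo "${#ages[@]} entries left" # prints: 2 entries left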
GENERAL
user
modify user
mv /etc/lightdm/lightdm.conf /etc/lightdm/lightdm.conf.keep # xubuntu autologin
# reboot / alt+f1 -> root
old=virt
new=foralyse
usermod -l $new -d /home/$new -m $old
#usermod -d /home/$new -m $new
#mv /home/$old /home/$new
groupmod -n $new $old
sed -i "/^$new/ s|$old|$new|" /etc/passwd
sed -i "/^$old/ s|$old|$new|" /etc/subuid
sed -i "/^$old/ s|$old|$new|" /etc/subgid
sed -i "s|/$old/|/$new/|" /home/$new/.config/gtk*/bookmarks
mv /etc/lightdm/lightdm.conf.keep /etc/lightdm/lightdm.conf # xubuntu autologin
sed -i "s|$old|$new|" /etc/lightdm/lightdm.conf # xubuntu autologin
grep $old /etc -r
reboot
color
echo $LS_COLORS # ls colors
~/.dir_colors # file define ls colors
gnome-terminal
Gnome-terminal: unbind F1 & F10, edit preferences
- preferences tab : unselect F10
- shortcut tab : in the last entry unselect F1
Launch a few tabs with commands
gnome-terminal --tab --tab -e 'ssh-connect ns398616' --tab -e 'ssh-connect ns398616'
autostart
~/.config/autostart # home path
/etc/xdg/autostart # system path
sudo sed -i 's|^\(X-GNOME-Autostart-enabled=\).*$|\1false|' $path.desktop # deactivate system autostart
UPDATE-ALTERNATIVES
update-alternatives creates, removes, maintains and displays information about the symbolic links comprising the Debian alternatives system
update-alternatives [<option> ...] <command>
--get-selections # get all configuration
--display $NAME # display information about $NAME group
--install $LINK $NAME $PATH $PRIORITY # add an alternative for a group
--config $NAME # define the alternative for a group by chosing defined alternatives
--set $NAME $PATH # set $PATH as the alternative for group $NAME
example for sublime:
update-alternatives --get-selections
update-alternatives --display gnome-text-editor
sudo update-alternatives --install /usr/bin/gnome-text-editor gnome-text-editor /usr/bin/sublime-text 100
update-alternatives --display gnome-text-editor
sudo update-alternatives --config gnome-text-editor
MIME type
https://help.ubuntu.com/community/AddingMimeTypes
player
aplay / arecord
sound player & recorder for alsa soundcard driver
paplay
play sound from pulseaudio-utils
TEST
global
syntax
( $exp ) # test expression
! $exp # negation
$exp1 -a $exp2 # and
$exp1 -o $exp2 # or
string
-z "$str" # the length of $str is zero (quotes protects few values for str)
-n "$str" # the length of $str is nonzero, str have to be quoted
file
-e # file exists
-f # file is a regular file (not a directory or device file)
-s # file is not zero size
-d # file is a directory
-b # file is a block device (device0="/dev/sda2")
-c # file is a character device (device1="/dev/ttyS1")
-p # file is a pipe
-h # file is a symbolic link
-L # file is a symbolic link
-S # file is a socket
-t # file (descriptor) is associated with a terminal device. This test option may be used to check whether the stdin [ -t 0 ] or stdout [ -t 1 ] in a given script is a terminal.
-r # file has read permission (for the user running the test)
-w # file has write permission (for the user running the test)
-x # file has execute permission (for the user running the test)
-g # set-group-id (sgid) flag set on file or directory. If a directory has the sgid flag set, then a file created within that directory belongs to the group that owns the directory, not necessarily to the group of the user who created the file. This may be useful for a directory shared by a workgroup.
-u # set-user-id (suid) flag set on file.
-k # sticky bit set. Commonly known as the sticky bit, the save-text-mode flag is a special type of file permission. If a file has this flag set, that file will be kept in cache memory, for quicker access.
-O # file exists and is owned by the effective user ID
-G # file exists and is owned by the effective group ID
-N # file modified since it was last read
-nt # f1 -nt f2 file f1 is newer than f2
-ot # f1 -ot f2 file f1 is older than f2
-ef # f1 -ef f2 files f1 and f2 have the same device and inode numbers (are hard links to the same file)
numeric
[]
-eq # is equal to
-ne # is not equal to
-gt # is greater than
-ge # is greater than or equal to
-lt # is less than
-le # is less than or equal to
(( )) # arithmetic evaluation
< # is less than
<= # is less than or equal to
> # is greater than
>= # is greater than or equal to
string comparison
[ -z "${var}" ] # true if var is not defined or is null
[ -z ${var+x} ] # true if var is not defined, var+x : expand var with x only if var are unset
= # is equal to
[ "$var" = str* ] # if $var are equal to 'str'* expansion , only if $var does not contains space & $var are not empty
[ -z "${var##*str*}" ] # if the expansion of str replace all content of var
[[ ]] # unsensible with empty values: no need quotes on values
[[ $a = z* ]] # True if $a starts with an "z" (pattern matching). use
[[ $a = "z*" ]] # True if $a is equal to 'z*' (literal matching).
[[ $str =~ $regexp ]] # test with regexp
# very useful for testing membership in a list/array
[[ " $var " == *" str "* ]]
[[ " $var " =~ " str " ]]
[ "${var/str}" != "$var" ]
http://www.commentcamarche.com/faq/9536-sed-introduction-a-sed-part-i
https://likegeeks.com/sed-linux/#Understand-sed-Linux-Command
TOC
chapter |
---|
SYNOPSIS |
OPTIONS |
SUBSTITUTION |
BASIC |
PRE-COMMAND |
ADVANCED |
PATTERN-SPACE |
LOOP |
SYNOPSIS
sed [OPTION]... {script-only-if-no-other-script} [input-file]...
sed [-options] [command] [file] / sed [-n] [-e command] [-f script] [-i[.extension]] [-l [wrap-length]] [-rsu] [file...]
[address[,address]][!]command[arguments]
[address[,address]]{
command1
command2;command3
}
OPTIONS
-n, --quiet, --silent # suppress automatic printing of pattern space
-e script, --expression=script # add the script to the commands to be executed
-f script-file, --file=script-file # add the contents of script-file to the commands to be executed
--follow-symlinks # follow symlinks when processing in place
-i[SUFFIX], --in-place[=SUFFIX] # edit files in place (makes backup if SUFFIX supplied)
-l N, --line-length=N # specify the desired line-wrap length for the `l' command
--posix # disable all GNU extensions
-E, -r, --regexp-extended # use extended regular expressions in the script (for portability use POSIX -E)
-s, --separate # consider files as separate rather than as a single, continuous long stream
--sandbox # operate in sandbox mode
-u, --unbuffered # load minimal amounts of data from the input files and flush the output buffers more often
-z, --null-data # separate lines by NUL characters
--help # display this help and exit
--version # output version information and exit
SUBSTITUTION
's/pattern/replacement/flag(s)'
While the left part (the search) accepts full BRE (Basic Regular Expression) syntax, the right part (the replacement) only accepts three kinds of interpolated values:
- the & character (ampersand, the whole match)
- back-references \1 (1 to 9)
- the options \U,\u,\L,\l and \E
matching options from extended regular expressions
\w # word
\W # non-word
\s # whitespace
\S # non-whitespace
replacement options (\d or \D are not supported)
\u # uppercase the next character of the match
\l # lowercase the next character of the match
\U # uppercase all characters of the match
\L # lowercase all characters of the match
g # global - replace every occurrence matched by the pattern or regular expression
N # occurrence - replace only the Nth occurrence matched by the pattern or regular expression
p # print - if a substitution was made, print the current record. Requires the "-n" option
w # if a substitution was made, write the current record to the given file. Exactly one space is allowed between "w" and the file name
e # evaluate - run the pattern space as a shell command and substitute its output, only if a match was made
I # case-Insensitive - ignore case when matching the pattern
M # multiline - normally the pattern space holds one input line; commands such as N, G or x can add more lines, separated by "\n", but sed sees them as a single line starting before the first one and ending after the last one. With the "M" flag, "^" and "$" match the start and end of each individual line inside the pattern space
BASIC
'#' # comment; if the first characters of the script are "#n", the "-n" (no-autoprint) option is forced
q # quit - exit sed without running any further command or reading more input. The current line held in the pattern space is printed unless "-n" was used
d # delete - delete the current record and start a new cycle
p # print - print the current record (the pattern space). It does not clear the pattern space nor alter the script flow. Almost always used together with "-n", otherwise each line is printed twice
n # nextline - replace the current record in the pattern space with the next input line without starting a new cycle. The replaced line is sent to standard output
{ ... } # braces group commands to be applied to one address or an address range (no need to escape them with \)
PRE-COMMAND
y # transliterate - convert each character listed in the source string to the character at the same position in the destination string
a\ # append the text after the line matched by its line number, pattern or regular expression, before the next line is read. "text" is a single line of text, which may however contain line breaks preceded by "\" (backslash)
i\ # insert the text "text" before the line matched by its line number, pattern or regular expression. Same single-line rule as for "a\"
c\ # change - replace the line matched by line number, pattern or regular expression with "text". Same single-line rule as for "a\"
r # read - insert the contents of "file" after the specified address. Exactly one space between the command and the file name; everything from that space to the end of the line is taken as the file name, so any space or tab becomes part of the name. If the file does not exist, no warning is emitted anywhere. If the file is not in the directory the command is run from, give its full path
w # write the line being processed to the file named after "w". As for "r", exactly one space between the command and the file name, and everything after it is part of the name. An existing file is overwritten without warning at each invocation of the script, but several "w" instructions writing to the same file within one script append to it
= # print the current line number
l [N] # display non-printable characters - N specifies the desired line-wrap length
ADVANCED
- multi-lignes (N,D,P)
- utilisant la mémoire annexe (h,H,g,G,x)
- tests faisant appel à des étiquettes (:,b,t,T)
Multi-lines
N # Next, positionne le caractère "nouvelle ligne" (\n) à la fin du contenu de l'espace de travail et ajoute la ligne suivante du flux d'entrée à l'espace de travail. Si la fin du fichier d'entrée est atteinte, sed termine son exécution sans procéder au traitement d'une nouvelle commande. Le caractère "nouvelle ligne" incorporé dans l'espace de travail peut être matché par la séquence d'échappement "\n". Dans un espace de travail "multilignes", les méta-caractères "^" et "$" matchent respectivement le début et la fin de cet espace de travail et non pas les débuts et fins de lignes précédents ou suivants le caractère nouvelle ligne incorporé
D # Delete, efface le contenu de l'espace de travail jusqu'au 1er caractère délimitant une nouvelle ligne (\n). S'il reste encore des données dans l'espace de travail , un nouveau cycle est redémarré avec ce contenu (sans lire une nouvelle ligne en entrée), sinon un nouveau cycle est démarré avec la ligne suivante
P # Print, affiche le contenu de l'espace de travail jusqu'au 1er caractère délimitant une nouvelle ligne (\n). Quand la dernière commande du script est atteinte, le contenu de l'espace de travail est automatiquement affiché sur la sortie standard (à moins que l'option "-n" ou "#n" n'ait été employée)
Mémoires tampons
- h > Copie l'espace de travail dans la mémoire annexe
- H >> Ajoute l'espace de travail dans la mémoire annexe
- g Échange le contenu des 2 mémoires
h hold pattern space # copie le contenu du motif courant (pattern space) dans la mémoire secondaire, écrasant le contenu précédemment copié si présent. Le tampon courant reste inchangé
H Hold pattern space # ajoute le contenu du motif courant (pattern space) au contenu de la mémoire secondaire. L'ancien contenu et le nouveau sont séparés par une nouvelle ligne matérialisée par le caractère "\n". Le tampon courant reste inchangé. Une nouvelle ligne (\n) est ajoutée à l'espace de travail même si celui-ci est vide
g get contents # copie le contenu de la mémoire secondaire vers le motif courant, écrasant le contenu de celui-ci
G Get contents # ajoute le contenu de la mémoire secondaire au motif courant. L'ancien contenu et le nouveau sont séparés par une nouvelle ligne matérialisée par le caractère "\n"
x eXchange # échange le contenu des deux mémoires tampons (principale et secondaire). Il faut savoir que la mémoire secondaire démarre son cycle avec une ligne vide. Si vous appliquez la commande "x" à la 1ère ligne d'un fichier, cette ligne est donc placée dans la mémoire annexe et est remplacée par le contenu de cette mémoire annexe, autrement dit une ligne vide. Sachez encore que suivant ce principe, la dernière ligne d'un fichier est placée dans la mémoire annexe mais n'est jamais restituée dans l'espace de travail et de ce fait ne sera jamais affichée à moins d'en faire une demande implicite
Labels
:label # define a label
b branch # unconditionally jumps to the label given as argument. With no argument, jumps to the end of the script: the current pattern space is then printed (unless "-n" is active) and the script resumes with the next input line
t test # jumps to the label given as argument if a substitution succeeded since the current line was read or since the last conditional branch was taken. With no argument, jumps to the end of the script
T test # jumps to the label given as argument if no substitution succeeded since the current line was read or since the last conditional branch was taken. With no argument, jumps to the end of the script
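example: a label & a conditional branch - 't x' loops back to ':x' as long as the substitution keeps succeeding, squeezing every run of a repeated character (illustrative input)
echo 'aaabbb' | sed ':x;s/\(.\)\1/\1/;tx' # -> ab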
examples
sed '/^#\?relayhost/h;$!d;x' # print the last matched line
sed '/^#\?relayhost/h;$!d;x;s|=.*|= toto|' # print the last matched line, modified
sed -n '\|str|p' # use '|' as the address delimiter & print lines matching str
sed '=' # insert line numbers before lines
sed -r 's/\w+/\u&/g' <<< "Now is the time for all good men..." # Now Is The Time For All Good Men...
sed "a\\$str" # append after each line a line with $str
sed "5i\\$str" # insert before th 5rd lines the line contains $str
sed "5,$i\str" # insert between 5rd lines and end of file, the line contains $str
sed "5,/$pat/$i\\$str" # insert between 5rd lines and line matched by $pat, the line contains $str
sed '/$pat/!c\str' # replace line not macthed $pat with $str
sed 'y/$str1/$str2/' # replace str1 with str2, length of $str1 & $str2 have to be the same
sed 's/^.\{10\}//' # remove the first 10 characters
sed 's/.\{10\}$//' # remove the last 10 characters
sed 's/.//5g' <<< "defn-test" # remove every character from the 5th onward -> defn
sed 's/.*0//' # remove everything up to & including the last '0' (greedy .*)
sed 's/0.*//' # remove everything from the first '0' onward
sed -e 's/^\(.\{12\}\).*/\1/' # keep the first 12 characters
cut -c 1-12 # keep the first 12 characters
sed -n "/$pat/ {p;q}" # print only the first matched line with $pat
sed -e '/^$/d;/^#$/d' # delete empty lines & lines containing only a '#'
sed -n 3p # print line 3
sed -n '3p;6p' # print line 3 & 6
sed '6,$d' # delete lines from 6 to end
sed 1~3d # delete every 3rd line starting from line 1 (lines 1, 4, 7, ...) - GNU extension
sed "\|$pat|d" # delete only matched line with $pat
sed -n '/str1/,/str2/p' # print lines between the line matching str1 & the line matching str2; if str2 matches before str1 or never matches, lines are printed from the str1 match to the end of file; after the range closes, a new str1 match opens it again
sed -n '/str1/,10p' # print lines from the line matching str1 to line 10; if str1 matches after line 10, only the matching line itself is printed
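demo of the two behaviours above (illustrative input): the range reopens at each new match of the start address & runs to the end of input when the end address never matches again
printf '%s\n' a X b X c X d | sed -n '/X/,/X/p' # -> X b X X d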
sed '3q' # print lines from the beginning to the 3rd, then quit
sed -n '1,3p' # print lines from the beginning to the 3rd
sed -n '3{p;q}' # print the 3rd line & quit: the following lines are not read, much faster on long files
echo -e "AAA\nBBB\nCCC\nDDD" | sed -n '/BBB/ {n;p;q}' # print the next line after matched line & quit
echo -e "AAA\nBBB\nCCC\nDDD" | sed -n '/BBB/ {n;s/C/Z/2p}' # print the next line after matched line with the 2nd character C replaced by Z
# line number
sed -n '/$pat/=' # show only the line numbers of matched lines
sed -n '/$pat1/,/$pat2/{=;p}' # print line numbers & lines for a range of lines: beginning with $pat1 & ending with $pat2
sed -n '$=' $file # print the total number of lines in $file
# read
sed '/test/r newfile' file # insert the contents of newfile after each line matching 'test'
sed -e "/$pat/ {r newfile" -e "d}" file # replace each line matching $pat with the contents of newfile ('r' takes the rest of the expression as the filename, so 'd' needs its own -e)
advanced examples
echo "AAAAA" | sed 's/A/B/' -> BAAAA
echo "AAAAA" | sed 's/A/B/g' -> BBBBB
echo "AAAAA" | sed 's/A/B/3' -> AABAA
var="ligne1\nligne2\nligne3"
echo -e $var
ligne1
ligne2
ligne3
sed '2 s/e2/e n° 2/' # on line 2, replace 'e2' with 'e n° 2'
ligne1
ligne n° 2
ligne3
sed '2 s/e2/e n° 2/p' # replace 'e2' with 'e n° 2' on line 2 & print the substituted line twice ('p' flag + auto-print)
ligne1
ligne n° 2
ligne n° 2
ligne3
sed -n '2 s/e2/e n° 2/p' # print only line 2, with 'e2' replaced by 'e n° 2'
ligne n° 2
a="Bonjour"
sed 's/.*2/echo '$a'/e' # replace lines matching '.*2' with "echo $a" & execute the pattern space as a shell command (GNU 'e' flag)
ligne1
Bonjour
ligne3
a="Bonjour"
sed '2 s/.\{5\}/echo '$a'/e' # on line 2, replace the first 5 characters with "echo $a" & execute the result
echo "BonJouR" | sed 's/bONjOUr/Salut/' -> BonJouR
echo "BonJouR" | sed 's/bONjOUr/Salut/I' -> Salut
echo -e "foo\nbar" | sed 'N;s/^.*$//'
echo -e "foo\nbar" | sed 'N;s/^.*$//M'
bar
echo -e "foo\nbar\nfoobar\nbarfoo" | sed -e ':boucle; N; $! b boucle; s/^.*$//M3'
foo
bar
barfoo
$
echo -e "foo\nfoo\nfoo\nbar\nfoo" | sed 'N;/bar$/s/^/>/Mg;P;D'
foo
foo
>foo
>bar
foo
$
$ echo -e "foo\nfoo" | sed 'N;s/^/>/;s/\n/\n>/g'
>foo
>foo
$
echo -e "foo\nfoo" | sed 'N;s/^/>/Mg'
>foo
>foo
$
sed '
y/àâéèêëîïôöùûü/aaeeeeiioouuu/
y/ÀÂÉÈÊËÎÏÔÖÙÛÜ/AAEEEEIIOOUUU/
' fichier.txt # strip accents from vowels
sed '/Ligne n° 5/ a\
Bonjour
' fich.txt # append a line "Bonjour" after the matching line
sed '/Ligne n° 4/ i\
Bonjour
' fich.txt # insert a line "Bonjour" before the matching line
sed '/Ligne n° 2/,/Ligne n° 6/ c\
Annulé\
Pour cause\
de travaux
' fich.txt # replace the whole range with the 3 given lines
sed -s '$ r signature.txt' fich*.txt # append signature.txt at the end of each file (-s treats files separately)
sed -f foo.sed # read the sed script from the file foo.sed
PATTERN-SPACE
sed '/^a test$/{
$!{ N # append the next line when not on the last line
s/^a test\nPlease do not$/not a test\nBe/
# now test for a successful substitution, otherwise
#+ unpaired "a test" lines would be mis-handled
t sub-yes # branch_on_substitute (goto label :sub-yes)
:sub-not # a label (not essential; here to self document)
# if no substitution, print only the first line
P # pattern_first_line_print
D # pattern_ltrunc(line+nl)_top/cycle
:sub-yes # a label (the goto target of the 't' branch)
# fall through to final auto-pattern_print (2 lines)
}
}' alpha.txt
Here is the same script, condensed into something harder to read & work with - what some would dubiously call a one-liner
sed '/^a test$/{$!{N;s/^a test\nPlease do not$/not a test\nBe/;ty;P;D;:y}}' alpha.txt
Here is my command "cheat-sheet"
: # label
= # line_number
a # append_text_to_stdout_after_flush
b # branch_unconditional
c # range_change
d # pattern_delete_top/cycle
D # pattern_ltrunc(line+nl)_top/cycle
g # pattern=hold
G # pattern+=nl+hold
h # hold=pattern
H # hold+=nl+pattern
i # insert_text_to_stdout_now
l # pattern_list
n # pattern_flush=nextline_continue
N # pattern+=nl+nextline
p # pattern_print
P # pattern_first_line_print
q # flush_quit
r # append_file_to_stdout_after_flush
s # substitute
t # branch_on_substitute
w # append_pattern_to_file_now
x # swap_pattern_and_hold
y # transform_chars
LOOP
sed ':a;N;$!ba;s/\n/ /g' file
This reads the whole file in a loop, then replaces all newlines with spaces
:a # define a label named 'a'
N # append the next line to the pattern space
$!ba # if not on the last line, branch back to label 'a' ($! excludes the last line, where there is no next line to append)
s/\n/ /g # finally the substitution replaces every newline with a space on the pattern space (which is the whole file)
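usage (illustrative input):
printf 'a\nb\nc\n' | sed ':a;N;$!ba;s/\n/ /g' # -> a b c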
EXAMPLE
set the field separator to ':'
awk -F : '{print $1}'
awk 'BEGIN { FS=":" } { print $1 }'
call a shell command from awk
awk '{("date -d @"$2" +%Y%m%d-%H%M%S")|getline d;$2=d;print($0);}' > $FILE # replace the epoch in field 2 with a date formatted by the 'date' command
DPKG
package manager for Debian
dpkg [option...] action
-i # install a Debian package file
-c # list the contents of a package file
-I # extract information from a package file
-r # remove an installed package (keeping its configuration files)
-P # remove & purge an installed package (including its configuration files)
-L # list all the files installed by a package
-s # show information on an installed package
-S # search the package database for the packages owning a file
--get-selections # get the current selection state of packages
example
echo "$pkg hold" | dpkg --set-selections # put $pkg on hold
DPKG-RECONFIGURE
dpkg-reconfigure # Reconfigures an installed package (see -f --frontend options)
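example (tzdata is just a common illustrative target)
dpkg-reconfigure tzdata # rerun the timezone configuration dialog
dpkg-reconfigure -f noninteractive tzdata # reapply the stored answers without prompting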
APT
add repository
apt-add-repository ppa:lubuntu-desktop/ppa
add-apt-repository 'deb http://extras.ubuntu.com/ubuntu quantal main'
add-apt-repository 'deb-src http://extras.ubuntu.com/ubuntu quantal main'
apt-get install $pkg # install packages
apt-get source $pkg # download the source code of a package
apt-get remove $pkg # remove packages
apt-cache depends $pkg # find the dependencies of a package
apt-cache search $pkg # search in the names & descriptions of packages
apt-cache show $pkg # show the description & versions of available packages
apt-cache showpkg $pkg # show the full description of a package
apt-cache policy $pkg # show the source repositories & candidate version of a package
apt-cache madison $pkg # show all available versions of a package
APT-MARK
apt-mark auto $pkg # Mark the given packages as automatically installed
apt-mark manual $pkg # Mark the given packages as manually installed
apt-mark hold $pkg # Mark a package as held back
apt-mark unhold $pkg # Unset a package set as held back
apt-mark showauto $pkg # Print the list of automatically installed packages
apt-mark showmanual $pkg # Print the list of manually installed packages
apt-mark showhold $pkg # Print the list of packages on hold
example
apt-mark hold php7.4* # hold install & update for all packages starting with 'php7.4'
APT-FILE
apt-get install apt-file && apt-file update # install apt-file & update its file database
apt-file search paplay # search which packages contain 'paplay'
BACKPORTS
apt-get install -t trusty-backports $PACKAGE # use backports to install package
APACHE
control
enable/disable
a2ensite $site_name # enable site
a2dissite $site_name # disable site
a2enconf $conf_name # enable configuration file
a2disconf $conf_name # disable configuration file
a2enmod $module_name # enable module
a2dismod $module_name # disable module
a2ctl
a2ctl configtest # test configurations
a2ctl status # show status
a2ctl fullstatus # shows fullstatus
a2ctl -l # list compiled modules
a2ctl -L # list available configuration directives
a2query
a2query -s $site_name # checks whether the site is enabled or returns the list of enabled sites
a2query -c $conf_name # checks whether the configuration is enabled or returns the list of enabled configurations
a2query -m $module_name # checks whether the module is enabled or returns the list of enabled modules
benchmark: ab / siege
Simulate 2000 users, each making requests every 0 to 5 seconds, for 10 minutes
# 5000 requests from 5 concurrent clients
ab -c 5 -n 5000 $url
# 2000 concurrent clients, 0-5s random delay between requests, during 10mn
siege -d5 -r500 -c2000 -t10M $url
log
# ambau.ovh
<VirtualHost *:80>
ServerName ambau.ovh
ServerAlias www.ambau.ovh
ServerAdmin webmaster@ambau.ovh
<FilesMatch ".+\.ph(p[3457]?|t|tml)$">
SetHandler "proxy:unix:/run/php/php7.3-fpm.sock|fcgi://localhost"
</FilesMatch>
# PHP: SetEnv PHP_VALUE | PHP_ADMIN_VALUE: admin values cannot be overridden by the user
#SetEnv PHP_ADMIN_VALUE "session.gc_maxlifetime = 14400"
#SetEnv PHP_VALUE "upload_max_filesize = 100M"
#php_value upload_max_filesize 100M
DocumentRoot /var/share/www/ambau.ovh/html
<Directory /var/share/www/ambau.ovh/html>
Options -Indexes -MultiViews +FollowSymLinks
AllowOverride All
Require all granted
</Directory>
# Possible values include: debug, info, notice, warn, error, crit, alert, emerg.
LogLevel warn
#SetEnvIf Remote_Addr "::1" nolog
#SetEnvIf Remote_Addr "127\.0\.0\.1" nolog
#SetEnvIf Remote_Addr "10\.0\.0\.1/24" nolog
#SetEnvIf Remote_Addr "176\.31\.255\.134" nolog
#SetEnvIf Request_Method OPTIONS nolog
SetEnvIf X-Forwarded-For ".+" forwarded
CustomLog "|/usr/bin/logger -p local7.info -t ambau.ovh/apache" combined env=forwarded
#CustomLog "|/usr/bin/logger -p local7.info -t ambau.ovh/apache" combined env=!nolog
#CustomLog "|/usr/bin/logger --rfc3164 -p local7.info -t ambau.ovh/apache -n 176.31.255.134 --udp -P 514" combined env=!nolog
#CustomLog "|/usr/bin/logger --rfc3164 -t ambau.ovh/apache -p info -n 176.31.255.134 --udp -P 514" combined env=!nolog
#CustomLog ${APACHE_LOG_DIR}/ambau.ovh.log combined env=!nolog
ErrorLog "|/usr/bin/logger -p local7.err -t ambau.ovh/apache"
#ErrorLog "|/usr/bin/logger --rfc3164 -p local7.err -t ambau.ovh/apache -n 176.31.255.134 --udp -P 514"
#ErrorLog ${APACHE_LOG_DIR}/ambau.ovh.err
#Include conf-available/serve-cgi-bin.conf
</VirtualHost>
IP
ip a / ip addr show # print information about all interfaces
ip addr show dev <device> # print information about the specified device
ip -br addr show dev <device> # print brief information about the specified device
ip -4 a / ip -4 addr show # print information about all interfaces for the inet family
ip -6 a / ip -6 addr show # print information about all interfaces for the inet6 family
ip -4 -o route show to default # get the device name for the default IPv4 route
ip -6 -o route show to default # get the device name for the default IPv6 route
ip -br -4 -o address show dev <interface> | sed 's|.*\s\+\([0-9\.]\+\)/.*|\1|' # get the IPv4 of a given interface
ip -br -6 -o address show dev <interface> | sed 's|.*\s\+\([0-9a-z:]\+\)/128.*|\1|' # get the IPv6 of a given interface
NETSTAT
Show open ports & connections
options
-a # all sockets (default connected)
-l # listening ports
-p # display the process
-n # numerical addresses (no DNS resolution)
-t # only TCP ports
-u # only UDP ports
tricks
netstat -pl / netstat -pla # listening processes
netstat -plu # listening processes for UDP
netstat -plt # listening processes for TCP
netstat -pln # listening processes with IP (no DNS resolution)
netstat -patn # processes (listening or not) for TCP with IP (no DNS resolution)
netstat -plutn # show listening processes for UDP & TCP with IP (no DNS resolution)
netstat -antup # all TCP & UDP sockets with processes, numeric addresses
NMAP
Scan network
tricks
# Scan hosts from 192.168.1.0 to 192.168.1.10
nmap 192.168.1.0-10
# Scan with specified range of ports
nmap -p 81-1024 192.168.1.3
# Ping scan: discover live hosts with ICMP ECHO (ping) requests, no port scan
nmap -sP 192.168.1.*
# return all open TCP ports by sending SYN messages
nmap -sS $IP
# return all open UDP ports
nmap -sU $IP
# return information about the OS
nmap -O $IP
nmap -A -T4 $IP # -A: OS & version detection, script scan & traceroute; -T4: faster timing
# return the potential version of the OS when detection is not conclusive
nmap -O --osscan-guess $IP
# Scan with a random mac address
nmap --spoof-mac B0:65:BD:01:01:01 192.168.1.3
TRICKS
show open ssh connections
netstat -n --protocol inet | grep ':22'
lsof -i -n | egrep 'ssh'
lsof -i -n | egrep 'sshd'
create a random mac address
mac=$(< /dev/urandom tr -dc a-f0-9 | head -c10 |sed 's/\(..\)\(..\)\(..\)\(..\)\(..\)/02:\1:\2:\3:\4:\5/') # hex digits only; the 02: prefix marks a locally administered unicast address
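apply it (a sketch; wlan0 is a placeholder & the link must be down before changing the address)
ip link set dev wlan0 down && ip link set dev wlan0 address $mac && ip link set dev wlan0 up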
resolution of domain name
nslookup $DOMAIN
get ip for eth0
ifconfig eth0 | sed -n 's|^[[:space:]]\+inet \(addr:\)\?\([0-9\.]\+\) .*|\2|p'
get ctid for eth0 (openVZ)
ifconfig eth0 | sed -n 's|^[[:space:]]\+inet \(addr:\)\?[0-9\.]\+\.\([0-9]\+\) .*|\2|p'
SCAN
arp
arp -ne -i wlan0 # print the arp cache for interface wlan0 without DNS resolution, in Linux display style
arp-scan -lI wlan0 # scan the entire local network on interface wlan0
arp-scan -I eth0 129.20.228.1/24 # scan the 129.20.228.0/24 network via interface eth0
arp-scan -I wlan0 192.168.0.100-192.168.0.200 # scan ips between 192.168.0.100 & 192.168.0.200 on interface wlan0
nmap
nmap -sP 192.168.0.1/24 # ping-scan the whole /24 network (mask 255.255.255.0)
nmap -e wlan0 -sP 192.168.0.100-199 # ping-scan ips 192.168.0.100 to 192.168.0.199 via interface wlan0
nmap -e wlan0 -sP 192.168.0.1,101 -oG $file # ping-scan ips 192.168.0.1 & 192.168.0.101 via wlan0 & write the result in grepable format to $file
nmap -e wlan0 -sL 192.168.0.5,10-20 # list targets without probing them (reverse DNS only): 192.168.0.5 & 192.168.0.10 to 192.168.0.20
nc
nc -v -u -z -w 3 91.121.112.140 514 # test whether UDP port 514 is open (3s timeout)
WIFI
iwconfig wlo1 / iw dev wlo1 link # print technical information about the wireless link
lshw -C network # list network hardware
iwlist scan # scan & print information about wifi networks
nmcli connection show # show available connections
nmcli dev wifi # list wifi networks in range