LIST
ffmpeg -codecs # list all codecs
ffmpeg -encoders # list all encoders
ffmpeg -decoders # list all decoders
ffmpeg -formats # list all formats
TRIM
ffmpeg -accurate_seek -ss $SECONDS -i $FILE -frames:v 1 -quality 100 image.png # Extract frame to image
ffmpeg -i "${file}" -ss 00:00:30 -t 00:00:05 -codec copy ${fileout} # Extract a part a video from -ss for a duration -t
ffmpeg -i "${file}" -ss 00:00:00 -to 00:56:33 -c copy "${fileout}" # trim outside of -ss & -to
ffmpeg -f concat -safe 0 -i <(echo -e "file \"${file1}\"\nfile \"${file2}\"") -c copy ${fileout}
ffmpeg -i "concat:${file1}|${file2}" -codec copy "${fileout}" # join files
AUDIO & VIDEO
ffmpeg -i "${file}" -c:v libx265 -codec:a libopus -b:a 64k -vbr on -compression_level 10 "${path}/${file%.*}.mp4"
batch to encode audio & video
path="/ext/shared/Videos/ffmpeg" && path2="/tmp/ffmpeg-batch-$(date +%s)" && while read file; do echo "ffmpeg -i \"${file}\" -c:v libx265 -codec:a libopus -b:a 64k -vbr on -compression_level 10 \"${file%.*}-resized.mp4\"" >> "${path2}"; done < <(find "${path}" -name "*.*") && chmod +x "${path2}" && echo "Launch: ${path2}"
batch to encode audio video with crop & scale
crop="W:H:X:Y"
scale="800x720"
scale="800:-1"
path="/home/nikita/Downloads/.tmp/ffmpeg" && path2="/tmp/ffmpeg-batch-$(date +%s)" && while read file; do echo "ffmpeg -i \"${file}\" -filter:v crop=${crop},scale=${scale} -c:v libx265 -codec:a libopus -b:a 64k -vbr on -compression_level 10 \"${file%.*}-resized.mp4\"" >> "${path2}"; done < <(find "${path}" -name "*.webm") && chmod +x "${path2}" && echo "Launch: ${path2}"
AUDIO
# replace audio in video
ffmpeg -i "$file" -i "${file%.mp4}.opus" -c:v copy -c:a copy -map 0:v:0 -map 1:a:0 "${file%.mp4}-audio.mp4"
# batch to replace audio
path="/home/nikita/Downloads/.tmp/ffmpeg" && path2="/tmp/ffmpeg-batch-$(date +%s)" && while read file; do echo "ffmpeg -i \"${file}\" -i \"${file%.mp4}.opus\" -c:v copy -c:a copy -map 0:v:0 -map 1:a:0 \"${file%.mp4}-audio.mp4\"" >> "${path2}"; done < <(find "${path}" -name "*.mp4") && chmod +x "${path2}" && echo "Launch: ${path2}"
# compress audio
ffmpeg -i "$file" -codec:a libopus -b:a 64k -vbr on -compression_level 10 "${file%.*}.opus"
# batch to compress audio
path="/home/nikita/Downloads/.tmp/ffmpeg" && path2="/tmp/ffmpeg-batch-$(date +%s)" && while read file; do echo "ffmpeg -i \"$file\" -c:v copy -codec:a libopus -b:a 64k -vbr on -compression_level 10 \"${file%.mp4}-audio.mp4\"" >> "${path2}"; done < <(find "${path}" -name "*.mp4") && chmod +x "${path2}" && echo "Launch: ${path2}"
ENCODE
ffmpeg -i "$file" -vn -acodec copy $file_out # extract audio
ffmpeg -i "$file" -filter:v scale=720:-1 -c:a copy "$file_out" # resize video, -1 asks to ffmpeg to keep proportion
ffmpeg -i "$file" -filter:v crop=w:h:x:y -c:a copy "$file_out" # crop to w-width:h-height:x-left:y-top, passthru audio
ffmpeg -i "$file" -filter:v crop=w:h:x:y -c:v libx265 -c:a copy "$file_out" # crop & encode with h265, passthru audio
ffmpeg -i "$file" -filter:v "crop=w:h:x:y,scale=w_max:h_max" -c:v libx265 -c:a copy "$file_out" # crop > scale to max w_max/h_max (-1 keeps proportion), encode h265, passthru audio
OTHERS
ffmpeg -i $file -hide_banner # info
ffmpeg -accurate_seek -ss $SECONDS -i $FILE -frames:v 1 image.bmp # Extract frame to image
FFPROBE
get info from file
ffprobe -i $file
examples
create batch & launch it to crop, scale & encode files from file list
crop="W:H:X:Y"
scale="800x720"
scale="800:-1"
path="/home/nikita/_new/ffmpeg" && path2="/tmp/ffmpeg-batch-$(date +%s)" && while read file; do name="${file##*/}"; path="${file%/*}"; echo "ffmpeg -i \"${file}\" -filter:v "crop=${crop},scale=${scale}" -c:v libx265 -c:a copy \"${path}/${name%.*}-resized.mp4\"" >> "${path2}"; done < <(find "${path}" -name "*.mp4") && chmod +x "${path2}" && echo "Launch: ${path2}"
create batch & launch it to only encode files searching
path="/home/nikita/_new/ffmpeg" && path2="/tmp/ffmpeg-batch-$(date +%s)" && while read file; do name="${file##*/}"; path="${file%/*}"; echo "ffmpeg -i \"${file}\" -c:v libx265 -c:a copy \"${path}/${name%.*}.mp4\"" >> "${path2}"; done < <(find "${path}" -name "*.mkv") && chmod +x "${path2}" && echo "Launch: ${path2}"
AUDACITY
Export /Export Audio/(external program)
ffmpeg -i - -codec:a libopus -b:a 64k -vbr on -compression_level 10 "%f" # without spaces in file path
start
log mimikatz.log
lsadump
cd {$path_hive}
log c:\lsadump.log
lsadump::sam /system:SYSTEM /sam:SAM
exit
hivexsh
hivexsh [-options] [hivefile]
Provides a simple shell for navigating Windows Registry 'hive' files
options
-d # Enable lots of debug messages. If you find a Registry file that this program cannot parse, please enable this option and post the complete output and the Registry hive file in your bug report.
-f filename # Read commands from "filename" instead of stdin. To write a hivexsh script, use: #!/usr/bin/hivexsh -f
-u # Use heuristics to tolerate certain levels of corruption within hives. This is unsafe but may allow to export/merge valid keys/values in an otherwise corrupted hive.
-w # If this option is given, then writes are allowed to the hive (see "commit" command below, and the discussion of modifying hives in "WRITING TO HIVE FILES" in hivex(3)). Important Note: Even if you specify this option, nothing is written to a hive unless you call the "commit" command. If you exit the shell without committing, all changes will be discarded. If this option is not given, then write commands are disabled.
commands
add name # Add a subkey named "name" below the current node. The name may contain spaces and punctuation characters, and does not need to be quoted.
cd path # Change to the subkey "path". Use Windows-style backslashes to separate path elements, and start with a backslash in order to start from the root of the hive. For example:
close | unload # Close the currently loaded hive. If you modified the hive, all uncommitted writes are lost when you call this command (or if the shell exits). You have to call "commit" to write changes.
commit [newfile] # Commit changes to the hive. If the optional "newfile" parameter is supplied, then the hive is written to that file, else the original file is overwritten.
del # Delete the current node and everything beneath it. The current directory is moved up one level (as if you did "cd ..") after this command.
exit | quit # Exit the shell.
load hivefile # Load the binary hive named "hivefile". The currently loaded hive, if any, is closed. The current directory is changed back to the root node.
ls # List the subkeys of the current hive Registry key. Note this command does not take any arguments.
lsval [key] # List the (key, value) pairs of the current hive Registry key. If no argument is given then all pairs are displayed. If "key" is given, then the value of the named key is displayed. If "@" is given, then the value of the default key is displayed.
setval nrvals # This command replaces all (key, value) pairs at the current node with the values in subsequent input. "nrvals" is the number of values (ie. (key, value) pairs), and any existing values at this node are deleted. So "setval 0" just deletes any values at the current node.
hivexget
hivexget hivefile PATH [NAME]
Get subkey from a Windows Registry binary "hive" file
example
hivexget ${path_hive}/SAM "SAM\Domains\Account\Users\000003E9" V
hivexml
hivexml [-dk] HIVE > FILE
Convert Windows Registry binary "hive" into XML
options
-d # Enable lots of debug messages. If you find a Registry file that this program cannot parse, please enable this option and post the complete output and the Registry file in your bug report.
-k # Keep going even if we find errors in the Registry file. This skips over any parts of the Registry that we cannot read.
-u # Use heuristics to tolerate certain levels of corruption within hives. This is unsafe but may allow to export/merge valid keys/values in an otherwise corrupted hive.
Install
sudo apt install -y libhivex-bin
https://helpmanual.io/man8/chntpw/
chntpw
chntpw [options] <samfile> [systemfile] [securityfile] [otherreghive] [...]
Utility to overwrite passwords of Windows systems
usage
chntpw -i $hive
options
-u username # Username or username ID (RID) to change. The default is 'Administrator'.
-l # List all users in the SAM database and exit.
-i # Interactive Menu system: list all users (as per -l option) and then ask for the user to change.
-e # Registry editor with limited capabilities (but it does include write support). For a slightly more powerful editor see reged
-d # Use buffer debugger instead (hex editor)
-L # Log all changed filenames to /tmp/changed. When this option is set the program automatically saves the changes in the hive files without prompting the user. Be careful when using the -L option as a root user in a multiuser system. The filename is fixed and this can be used by malicious users (dropping a symlink with the same name) to overwrite system files.
-N # Do not allocate more information, only allow the editing of existing values with same size.
-E # Do not expand the hive file (safe mode).
commands
hive [<n>] # list loaded hives or switch to hive number n
cd <key> # change current key
ls | dir [<key>] # show subkeys & values,
cat | type <value> # show key value
dpi <value> # show decoded DigitalProductId value
hex <value> # hexdump of value data
ck [<keyname>] # Show keys class data, if it has any
nk <keyname> # add key
dk <keyname> # delete key (must be empty)
ed <value> # Edit value
nv <type#> <valuename> # Add value
dv <valuename> # Delete value
delallv # Delete all values in current key
rdel <keyname> # Recursively delete key & subkeys
ek <filename> <prefix> <keyname> # export key to <filename> (Windows .reg file format)
debug # enter buffer hexeditor
st [<hexaddr>] # debug function: show struct info
q # quit
reged
reged [options] -x<registryhivefile><prefixstring><key><output.reg>
reged [options] -I<registryhivefile><prefixstring><input.reg>
reged [options] -e<registryhivefile>
Utility to export/import and edit Windows registry hives
usage
reged -x SYSTEM 'HKEY_LOCAL_MACHINE\SYSTEM' 'ControlSet001\Control\Lsa\Skew1' test.reg
modes
-x <registryhivefile> <prefixstring> <key> <output.reg> # Xport. Where <prefixstring> for example is HKEY_LOCAL_MACHINE\SOFTWARE <key> is key to dump (recursively), \ or \\ means all keys in hive. Only one .reg and one hive file supported at the same time
-I <registryhivefile> <prefixstring> <input.reg> # Import from .reg file. Where <prefixstring> for example is HKEY_LOCAL_MACHINE\SOFTWARE. Only one .reg and one hive file supported at the same time
-e <registryhive> ... # Interactive edit one or more of registry files
options
-L # Log changed filenames to /tmp/changed, also auto-saves
-C # Auto-save (commit) changed hives without asking
-N # No allocate mode, only allow edit of existing values with same size
-E # No expand mode, do not expand hive file (safe mode)
-t # Debug trace of allocated blocks
-v # Some more verbose messages
sampasswd
sampasswd [options] -uuser <samfile>
Reset passwords of users in the SAM user database
options
-r # Reset the user's password.
-a # Reset all the users. If this option is used there is no need to specify the next option.
-u <user> # User to change. The user value can be provided as a username, or a RID number in hexadecimal (if the username is preceded with '0x').
-l # Lists the users in the SAM database.
-H # Output human readable output. The program by default will print a parsable table unless this option is used.
-N # Do not allocate more information, only allow the editing of existing values with same size.
-E # Do not expand the hive file (safe mode).
-t # Print debug information of allocated blocks.
-v # Print verbose information and debug messages.
Install
sudo apt install -y chntpw
xubuntu 20.04 - focal
virt-manager
host
<filesystem type="mount" accessmode="mapped" fmode="0660" dmode="0770">
<source dir="/vms/share"/>
<target dir="/hostshare"/>
<address type="pci" domain="0x0000" bus="0x07" slot="0x00" function="0x0"/>
</filesystem>
#sudo usermod -G libvirtd -a $USER
sudo usermod -G libvirt-qemu -a $USER
hostpath=/vms/share
sudo chown -R libvirt-qemu:libvirt-qemu $hostpath
sudo setfacl -Rm g:libvirt-qemu:rwx $hostpath
sudo setfacl -d -Rm g:libvirt-qemu:rwx $hostpath
guest
sudo sh -c 'echo "9p
9pnet
9pnet_virtio" >> /etc/initramfs-tools/modules'
sudo update-initramfs -u
sudo sh -c 'echo "# qemu share
hostshare /share 9p trans=virtio,version=9p2000.L,rw,umask=002 0 0" >> /etc/fstab'
global
install
update
sudo apt remove -y gimp* libreoffice-* thunderbird* transmission-gtk
sudo apt update
sudo apt list --upgradable
sudo apt -y dist-upgrade
sudo apt -y autoremove
system
sudo apt install -y binutils-common bsdmainutils curl debconf-utils exfat git gnupg2 gparted hfsprogs htop kpartx lnav most net-tools p7zip-full p7zip-rar pv rar sysstat testdisk tmux tree unrar vim xsysinfo # openssh-server
sudo apt install -y dconf-editor firefox-locale-fr galculator gpicview meld plank qt5ct qt5-gtk2-platformtheme thunar-media-tags-plugin tumbler-plugins-extra
conf
qt5-ct to fusion
global
sudo swapoff -av && sudo sh -c 'echo vm.swappiness=10 > /etc/sysctl.d/99-swappiness.conf' # limit swap
sudo rm /etc/localtime && sudo ln -sv /usr/share/zoneinfo/Etc/UTC /etc/localtime
software-properties-gtk # add canonical partners
export QT_QPA_PLATFORMTHEME=gtk2
echo -e "\n# QT\nexport QT_QPA_PLATFORMTHEME=gtk2" >> ~/.profile
echo -e "\n#JAVA\nexport _JAVA_OPTIONS=\"-Dawt.useSystemAAFontSettings=on -Dswing.aatext=true -Dswing.defaultlaf=com.sun.java.swing.plaf.gtk.GTKLookAndFeel -Dswing.crossplatformlaf=com.sun.java.swing.plaf.gtk.GTKLookAndFeel \${_JAVA_OPTIONS}\"" >> ~/.profile
menulibre # edit menu
path=~/.config/autostart
[ -d ${path} ] || mkdir ${path}
echo '[Desktop Entry]
Encoding=UTF-8
Version=0.9.4
Type=Application
Name=plank
Comment=plank
Exec=plank
OnlyShowIn=XFCE;
RunHook=0
StartupNotify=false
Terminal=false
Hidden=false' > ${path}/plank.desktop
plank --preferences &
trans
# HOST
path=/vms/share/trans; [ -d ${path} ] || mkdir -p ${path}
cp -r ~/dev/ /vms/share/trans/
# GUEST
path=~/.local/share/icons; [ -d ${path} ] || mkdir -p ${path}
path=~/.local/share/applications; [ -d ${path} ] || mkdir -p ${path}
path=/share/trans/dev
path_conf=${path}/install-desktop/conf
cp ${path_conf}/foralyse/.bashrc ~/
cp ${path_conf}/foralyse/.bash_alias ~/
sudo cp ${path_conf}/foralyse/.bashrc /root/
sudo cp ${path_conf}/foralyse/.bash_alias /root/
cp ${path}/install/conf/foralyse/.vimrc ~/
sudo cp ${path}/install/conf/vim/* /usr/share/vim/vim*/colors/
sudo cp ${path_conf}/soft/meld-dark.xml /usr/share/meld/styles/
sudo cp ${path_conf}/wp/* /usr/share/xfce4/backdrops/
sudo cp ${path_conf}/bash-completion/* /usr/share/bash-completion/completions/
sudo cp ${path_conf}/icons/tmux.svg /usr/share/icons/default/
sudo cp ${path_conf}/foralyse/xfce4-terminal-tmux.desktop ~/.local/share/applications/
cp ${path_conf}/foralyse/xfce4-terminal-tmux.desktop ~/.local/share/applications/
cp ${path_conf}/icons/* ~/.local/share/icons
sudo ln -sv /usr/share/bash-completion/completions/tmux.git /usr/share/bash-completion/completions/tmux
sudo chmod +r /usr/share/icons/default/tmux.svg
sudo chmod +r /usr/share/bash-completion/completions/tmux*
sudo chmod +r /usr/share/xfce4/backdrops/*
sublime text
file="/etc/hosts"
sudo sh -c "echo '\n# sublime-text hack\n127.0.0.1\tsublimetext.com\n127.0.0.1\twww.sublimetext.com\n127.0.0.1\tlicense.sublimehq.com' >> ${file}"
ips="45.55.255.55"
for ip in ${ips}; do sudo iptables -A OUTPUT -d ${ip} -j DROP; done
path=/etc/iptables
[ -d "${path}" ] || sudo mkdir "${path}"
sudo sh -c 'iptables-save > /etc/iptables/rules.v4'
cat ${S_PATH_INSTALL_CONF}/soft/sublime-text.license
forensic
global
# network
sudo apt install -y whois
# pwd & evtx & process
sudo apt install -y john libscca-utils pev radare2
# hive
sudo apt install -y libhivex-bin chntpw reglookup
# gui
sudo apt install -y bless geany ghex gpicview gtkhash wxhexeditor
conf
bless
cp /usr/share/bless/*.layout ~/.config/bless/layouts/
kali
#sudo sh -c "echo '# kali\ndeb http://http.kali.org/kali kali-rolling main non-free contrib' > /etc/apt/sources.list.d/kali.list
#wget -q -O - archive.kali.org/archive-key.asc | sudo apt-key add -
#sudo apt update
#sed -i '/^deb/ s|^|#|' /etc/apt/sources.list.d/kali.list
#sudo apt update
python
sudo apt-get install -y python3 python3-pip
. ~/.profile
sudo apt-get install -y python2 # python2-dev
cd /tmp && curl -sSL https://bootstrap.pypa.io/pip/2.7/get-pip.py -o get-pip.py
python2 get-pip.py
pip2
python2 -m pip install -U balbuzard
pip3
python3 -m pip install -U malcarve regrippy
binwalk
dependencies
sudo apt install mtd-utils gzip bzip2 tar arj lhasa p7zip p7zip-full cabextract cramfsswap squashfs-tools lzop srecord
python3 -m pip install -U nose coverage pycryptodome pyqtgraph capstone matplotlib
. ~/.profile
github
# Install sasquatch to extract non-standard SquashFS images
sudo apt install -y zlib1g-dev liblzma-dev liblzo2-dev
cd /tmp && git clone https://github.com/devttys0/sasquatch
cd sasquatch && ./build.sh
# Install jefferson to extract JFFS2 file systems
python3 -m pip install -U cstruct
cd /tmp && git clone https://github.com/sviehb/jefferson
cd jefferson && sudo python3 setup.py install
# Install ubi_reader to extract UBIFS file systems
sudo apt install -y liblzo2-dev
python3 -m pip install -U python-lzo
cd /tmp && git clone https://github.com/jrspruitt/ubi_reader
cd ubi_reader && sudo python3 setup.py install
# Install yaffshiv to extract YAFFS file systems
cd /tmp && git clone https://github.com/devttys0/yaffshiv
cd yaffshiv && sudo python3 setup.py install
# Install unstuff (closed source) to extract StuffIt archive files
cd /tmp && curl -sS http://downloads.tuxfamily.org/sdtraces/stuffit520.611linux-i386.tar.gz | tar -zxv
sudo cp bin/unstuff /usr/local/bin/
pandoc
# sudo apt install pandoc texlive-latex-base texlive-latex-recommended texlive-latex-extra
# pandoc -s -o $fileout $filein
binwalk
cd /tmp && git clone https://github.com/ReFirmLabs/binwalk
cd binwalk && sudo python3 setup.py install
regripper
sudo apt-get install -y libparse-win32registry-perl
path=$(find /usr/share -name Win32Registry)
cd /usr/share && sudo git clone https://github.com/keydet89/RegRipper3.0.git
sudo mv RegRipper3.0 regripper
for file in WinNT/File.pm WinNT/Key.pm Base.pm; do sudo mv ${path}/${file} ${path}/${file}.$(date +%s); sudo ln -sv /usr/share/regripper/${file##*/} ${path}/${file}; done
cd regripper
sudo cp -a rip.pl rip.pl.$(date +%s)
sudo sed -i '/^my @alerts = ();/a my \$plugindir = "/usr/share/regripper/plugins/";' rip.pl
sudo sed -i "1c #! $(which perl)\nuse lib qw(/usr/lib/perl5/);" rip.pl
sudo chmod +x rip.pl
sudo ln -sv /usr/share/regripper/rip.pl /usr/bin/regripper
sudo ln -sv /usr/share/regripper/rip.pl /usr/bin/rip
volatility
volatility3
python3 -m pip install -U pefile yara-python capstone pycryptodome jsonschema leechcorepyc python-snappy
python3 -m pip install -U volatility3
cd ~/.local/bin && ln -sv vol vol3
volatility2
https://github.com/volatilityfoundation/volatility/wiki/Installation
sudo apt -y install pcregrep libpcre++-dev python-dev
python2 -m pip install distorm3 ipython openpyxl pycrypto pytz ujson yara-python
libforensic1394
sudo apt install -y cmake
cd /tmp
git clone https://github.com/FreddieWitherden/libforensic1394
cd libforensic1394
mkdir build && cd build
cmake -G"Unix Makefiles" ../
sudo make install
cd ../python
sudo python setup.py install
sudo ln -sv /usr/local/lib/libforensic1394.so.0.3.0 /usr/lib/libforensic1394.so.2
cd
sudo rm -fR /tmp/libforensic1394
sudo apt remove cmake
sudo apt autoremove
volatility
cd /opt
git clone https://github.com/volatilityfoundation/volatility.git
cd volatility
rm -fR .git
sudo python setup.py install
cd /usr/local/bin
sudo ln -sv vol.py vol2
vol2 -h
wireshark
sudo add-apt-repository -y ppa:wireshark-dev/stable
sudo apt update
sudo apt install -y tshark wireshark
autopsy
global
path_share=/share
sudo apt-get update
sudo apt install -y afflib-tools testdisk ewf-tools xmount fdupes java-common
sudo apt-get install -y imagemagick libde265-0 libheif1
java
java_file=$(ls ${path_share}/jdk-8*linux-x64.tar.gz)
file=/usr/local/bin/oracle-java-installer.sh
sudo curl -sS https://raw.githubusercontent.com/labcif/oracle-java-installer/master/oracle-java-installer.sh -o ${file}
#sudo sed -i s'/update-java-alternatives -a/update-alternatives --auto java/' /usr/local/bin/oracle-java-installer.sh
#sudo sed -i s'/update-java-alternatives -l/update-alternatives --list java/' /usr/local/bin/oracle-java-installer.sh
sudo sed -i 's|tar -xvzf|tar -xzf|' /usr/local/bin/oracle-java-installer.sh
sudo chmod +x ${file}
sudo ${file} --install ${java_file}
. /etc/profile.d/jdk.sh
${file} --status ${java_file}
base64sha
file=/usr/local/bin/b64sha
sudo curl -sS https://raw.githubusercontent.com/labcif/Base64SHA/master/b64sha -o ${file}
sudo chmod +x ${file}
sleuthkit
sleuthkit_file=$(ls ${path_share}/sleuthkit-java_*_amd64.deb)
read sleuthkit_version_major sleuthkit_version_minor <<<$(echo ${sleuthkit_file}|sed 's|^.*/sleuthkit-java_\([0-9_\.]\+\)-\([0-9]\)_amd64.deb|\1 \2|')
sudo apt install ${sleuthkit_file}
autopsy
file=$(ls ${path_share}/autopsy-*.zip)
path=${file%.zip} && path=/opt/${path##*/}
sudo unzip -q -d /opt/ ${file}
sudo chown -R ${USER}:${USER} ${path}
cd /opt && sudo ln -sv ${path##*/} autopsy
cd ${path}
sh unix_setup.sh
ln -sv ${path}/bin/autopsy ~/.local/bin/autopsy
autopsy --nosplash
launcher
echo "[Desktop Entry]
Version=1.0
Type=Application
Terminal=false
Icon=/opt/autopsy/icon.ico
Name=Autopsy
Exec=autopsy" > ~/.local/share/applications/autopsy.desktop
addons
ReportModules / ForensicExpertWitnessReport
https://github.com/chriswipat/forensic_expert_witness_report_module
IngestModules / FileHistory
https://medium.com/@markmckinnon_80619/windows-file-history-plugin-a6208da4efa5
IngestModules / Volatility
https://markmckinnon-80619.medium.com/volatility-autopsy-plugin-module-8beecea6396
install
python3 -m pip install -U pip
python3 -m pip install -U volatility3
cd /usr/local/bin && sudo ln -sv vol vol3; cd
help
volatility [-h] [-c CONFIG] [--parallelism [{processes,threads,off}]] [-e EXTEND] [-p PLUGIN_DIRS] [-s SYMBOL_DIRS] [-v] [-l LOG] [-o OUTPUT_DIR] [-q]
[-r RENDERER] [-f FILE] [--write-config] [--clear-cache] [--cache-path CACHE_PATH] [--offline] [--single-location SINGLE_LOCATION]
[--stackers [STACKERS [STACKERS ...]]] [--single-swap-locations [SINGLE_SWAP_LOCATIONS [SINGLE_SWAP_LOCATIONS ...]]]
plugin ...
An open-source memory forensics framework
-c CONFIG, --config CONFIG # Load the configuration from a json file
--parallelism [{processes,threads,off}] # Enables parallelism (defaults to off if no argument given)
-e EXTEND, --extend EXTEND # Extend the configuration with a new (or changed) setting
-p PLUGIN_DIRS, --plugin-dirs PLUGIN_DIRS # Semi-colon separated list of paths to find plugins
-s SYMBOL_DIRS, --symbol-dirs SYMBOL_DIRS # Semi-colon separated list of paths to find symbols
-v, --verbosity # Increase output verbosity
-l LOG, --log LOG # Log output to a file as well as the console
-o OUTPUT_DIR, --output-dir OUTPUT_DIR # Directory in which to output any generated files
-q, --quiet # Remove progress feedback
-r RENDERER, --renderer RENDERER # Determines how to render the output (quick, csv, pretty, json, jsonl)
-f FILE, --file FILE # Shorthand for --single-location=file:// if single-location is not defined
--write-config # Write configuration JSON file out to config.json
--clear-cache # Clears out all short-term cached items
--cache-path CACHE_PATH # Change the default path (/home/tsurugi/.cache/volatility3) used to store the cache
--offline # Do not search online for additional JSON files
--single-location SINGLE_LOCATION # Specifies a base location on which to stack
--stackers [STACKERS [STACKERS ...]] # List of stackers
--single-swap-locations [SINGLE_SWAP_LOCATIONS [SINGLE_SWAP_LOCATIONS ...]] # Specifies a list of swap layer URIs for use with single-location
windows
windows.bigpools.BigPools # List big page pools
windows.cachedump.Cachedump # Dumps lsa secrets from memory
windows.callbacks.Callbacks # Lists kernel callbacks and notification routines
windows.cmdline.CmdLine # Lists process command line arguments
windows.crashinfo.Crashinfo
windows.dlllist.DllList # Lists the loaded modules in a particular windows memory image
windows.driverirp.DriverIrp # List IRPs for drivers in a particular windows memory image
windows.driverscan.DriverScan # Scans for drivers present in a particular windows memory image
windows.dumpfiles.DumpFiles # Dumps cached file contents from Windows memory samples
windows.envars.Envars # Display process environment variables
windows.filescan.FileScan # Scans for file objects present in a particular windows memory image
windows.getservicesids.GetServiceSIDs # Lists process token sids
windows.getsids.GetSIDs # Print the SIDs owning each process
windows.handles.Handles # Lists process open handles
windows.hashdump.Hashdump # Dumps user hashes from memory
windows.info.Info # Show OS & kernel details of the memory sample being analyzed
windows.lsadump.Lsadump # Dumps lsa secrets from memory
windows.malfind.Malfind # Lists process memory ranges that potentially contain injected code
windows.memmap.Memmap # Prints the memory map
windows.modscan.ModScan # Scans for modules present in a particular windows memory image.
windows.modules.Modules # Lists the loaded kernel modules
windows.mutantscan.MutantScan # Scans for mutexes present in a particular windows memory image
windows.netscan.NetScan # Scans for network objects present in a particular windows memory image
windows.netstat.NetStat # Traverses network tracking structures present in a particular windows memory image.
windows.poolscanner.PoolScanner # A generic pool scanner plugin
windows.privileges.Privs # Lists process token privileges
windows.pslist.PsList # Lists the processes present in a particular windows memory image
windows.psscan.PsScan # Scans for processes present in a particular windows memory image
windows.pstree.PsTree # Plugin for listing processes in a tree based on their parent process ID
windows.registry.certificates.Certificates # Lists the certificates in the registry's Certificate Store
windows.registry.hivelist.HiveList # Lists the registry hives present in a particular memory image
windows.registry.hivescan.HiveScan # Scans for registry hives present in a particular windows memory image.
windows.registry.printkey.PrintKey # Lists the registry keys under a hive or specific key value
windows.registry.userassist.UserAssist # Print userassist registry keys and information
windows.skeleton_key_check.Skeleton_Key_Check # Looks for signs of Skeleton Key malware
windows.ssdt.SSDT # Lists the system call table
windows.statistics.Statistics
windows.strings.Strings # Reads output from the strings command and indicates which process(es) each string belongs to
windows.svcscan.SvcScan # Scans for windows services
windows.symlinkscan.SymlinkScan # Scans for links present in a particular windows memory image
windows.vadinfo.VadInfo # Lists process memory ranges
windows.vadyarascan.VadYaraScan # Scans all the Virtual Address Descriptor memory maps using yara
windows.verinfo.VerInfo # Lists version information from PE files
windows.virtmap.VirtMap # Lists virtual mapped sections
linux
linux.bash.Bash # Recovers bash command history from memory
linux.check_afinfo.Check_afinfo # Verifies the operation function pointers of network protocols
linux.check_creds.Check_creds # Checks if any processes are sharing credential structures
linux.check_idt.Check_idt # Checks if the IDT has been altered
linux.check_modules.Check_modules # Compares module list to sysfs info, if available
linux.check_syscall.Check_syscall # Check system call table for hooks
linux.elfs.Elfs # Lists all memory mapped ELF files for all processes
linux.keyboard_notifiers.Keyboard_notifiers # Parses the keyboard notifier call chain
linux.kmsg.Kmsg # Kernel log buffer reader
linux.lsmod.Lsmod # Lists loaded kernel modules
linux.lsof.Lsof # Lists all memory maps for all processes
linux.malfind.Malfind # Lists process memory ranges that potentially contain injected code
linux.proc.Maps # Lists all memory maps for all processes
linux.pslist.PsList # Lists the processes present in a particular linux memory image
linux.pstree.PsTree # Plugin for listing processes in a tree based on their parent process ID
linux.tty_check.tty_check # Checks tty devices for hooks
mac
mac.bash.Bash # Recovers bash command history from memory
mac.check_syscall.Check_syscall # Check system call table for hooks
mac.check_sysctl.Check_sysctl # Check sysctl handlers for hooks
mac.check_trap_table.Check_trap_table # Check mach trap table for hooks
mac.ifconfig.Ifconfig # Lists loaded kernel modules
mac.kauth_listeners.Kauth_listeners # Lists kauth listeners and their status
mac.kauth_scopes.Kauth_scopes # Lists kauth scopes and their status
mac.kevents.Kevents # Lists event handlers registered by processes
mac.list_files.List_Files # Lists all open file descriptors for all processes
mac.lsmod.Lsmod # Lists loaded kernel modules
mac.lsof.Lsof # Lists all open file descriptors for all processes
mac.malfind.Malfind # Lists process memory ranges that potentially contain injected code
mac.mount.Mount # A module containing a collection of plugins that produce data typically found in Mac's mount command
mac.netstat.Netstat # Lists all network connections for all processes
mac.proc_maps.Maps # Lists process memory ranges that potentially contain injected code
mac.psaux.Psaux # Recovers program command line arguments
mac.pslist.PsList # Lists the processes present in a particular mac memory image
mac.pstree.PsTree # Plugin for listing processes in a tree based on their parent process ID
mac.socket_filters.Socket_filters # Enumerates kernel socket filters
mac.timers.Timers # Check for malicious kernel timers
mac.trustedbsd.Trustedbsd # Checks for malicious trustedbsd modules
mac.vfsevents.VFSevents # Lists processes that are filtering file system events
others
banners.Banners # Attempts to identify potential linux banners in an image
configwriter.ConfigWriter # Runs the automagics and both prints and outputs configuration in the output directory
frameworkinfo.FrameworkInfo # Plugin to list the various modular components of Volatility
isfinfo.IsfInfo # Determines information about the currently available ISF files, or a specific one
layerwriter.LayerWriter # Runs the automagics and writes out the primary layer produced by the stacker
timeliner.Timeliner # Runs all relevant plugins that provide time related information and orders the results by time
yarascan.YaraScan # Scans kernel memory using yara rules (string or file)
windows notifications
file=/vol6/Users/Angela/AppData/Local/Microsoft/Windows/Notifications/wpndatabase.db
sqlitebrowser ${file}
SELECT datetime((ArrivalTime/10000000)-11644473600, 'unixepoch') AS ArrivalTime,
datetime((ExpiryTime/10000000)-11644473600, 'unixepoch') AS ExpiryTime,
Type, HandlerId, Notification.Id, Payload, Tag, 'Group', 'Order', PrimaryId, HandlerType, WNFEventName, CreatedTime as HandlerCreatedTime, ModifiedTime as HandlerModifiedTime
FROM Notification LEFT JOIN NotificationHandler ON Notification.HandlerId = NotificationHandler.RecordId
regripper [-r Reg hive file] [-f profile] [-p plugin] [options]
Parse Windows Registry files, using either a single module, or a profile
Special
regripper -l -c|sort|column -t -s, # show plugins list in table sorted by plugins
regripper -l -c|sort -t, -k3 -k1|column -t -s, # show plugins list in table sorted by hive/plugins
regripper -p winver -r SOFTWARE # get version of windows
regripper -p timezone -r SYSTEM # get timezone information about SYSTEM hive
regripper -a -r SYSTEM # get full analyse for SYSTEM hive
Useful
-a # Automatically run hive-specific plugins
-l # list all plugins
-f [profile] # use the profile
-p [plugin] # use the plugin
All
-r [hive] # Registry hive file to parse
-d # Check to see if the hive is dirty
-g # Guess the hive file type
-a # Automatically run hive-specific plugins
-aT # Automatically run hive-specific TLN plugins
-f [profile] # use the profile
-p [plugin] # use the plugin
-l # list all plugins
-c # Output plugin list in CSV format (use with -l)
-s systemname # system name (TLN support)
-u username # User name (TLN support)
-uP # Update default profiles
Plugins
adobe 20200522 NTUSER.DAT Gets user's Adobe app cRecentFiles values
allowedenum 20200511 NTUSER.DAT Software Extracts AllowedEnumeration values to determine hidden special folders
amcache 20200515 amcache Parse AmCache.hve file
amcache_tln 20180311 amcache Parse AmCache.hve file
appassoc 20200515 NTUSER.DAT Gets contents of user's ApplicationAssociationToasts key
appcertdlls 20200427 System Get entries from AppCertDlls key
appcompatcache 20200428 System Parse files from System hive AppCompatCache
appcompatcache_tln 20190112 System Parse files from System hive AppCompatCache
appcompatflags 20200525 NTUSER.DAT Software Extracts AppCompatFlags for Windows.
appinitdlls 20200427 Software Gets contents of AppInit_DLLs value
appkeys 20200517 NTUSER.DAT Software Extracts AppKeys entries.
appkeys_tln 20180920 NTUSER.DAT Software Extracts AppKeys entries.
applets 20200525 NTUSER.DAT Gets contents of user's Applets key
applets_tln 20120613 NTUSER.DAT Gets contents of user's Applets key (TLN)
apppaths 20200511 NTUSER.DAT Software Gets content of App Paths subkeys
apppaths_tln 20130429 NTUSER.DAT Software Gets content of App Paths subkeys (TLN)
appspecific 20200515 NTUSER.DAT Gets contents of user's Intellipoint\AppSpecific subkeys
appx 20200427 NTUSER.DAT USRCLASS.DAT Checks for persistence via Universal Windows Platform Apps
appx_tln 20191014 NTUSER.DAT USRCLASS.DAT Checks for persistence via Universal Windows Platform Apps
arpcache 20200515 NTUSER.DAT Retrieves CurrentVersion\App Management\ARPCache entries
at 20200525 Software Checks Software hive for AT jobs
attachmgr 20200525 NTUSER.DAT Checks user's keys that manage the Attachment Manager functionality
attachmgr_tln 20130425 NTUSER.DAT Checks user's keys that manage the Attachment Manager functionality (TLN)
at_tln 20140821 Software Checks Software hive for AT jobs
audiodev 20200525 Software Gets audio capture/render devices
auditpol 20200515 Security Get audit policy from the Security hive file
backuprestore 20200517 System Gets the contents of the FilesNotToSnapshot, KeysNotToRestore, and FilesNotToBackup keys
bam 20200427 System Parse files from System hive BAM Services
bam_tln 20180225 System Parse files from System hive BAM Services
base 20200427 All Parse base info from hive
baseline 20130211 All Scans a hive file, checking sizes of binary value data
btconfig 20200526 Software Determines BlueTooth devices 'seen' by BroadComm drivers
bthenum 20200515 System Get BTHENUM subkey info
bthport 20200517 System Gets Bluetooth-connected devices from System hive
bthport_tln 20180705 System Gets Bluetooth-connected devices from System hive; TLN output
cached 20200525 NTUSER.DAT Gets cached Shell Extensions from NTUSER.DAT hive
cached_tln 20150608 NTUSER.DAT Gets cached Shell Extensions from NTUSER.DAT hive (TLN)
calibrator 20200427 Software Checks DisplayCalibrator value (possible bypass assoc with LockBit ransomware)
clsid 20200526 Software USRCLASS.DAT Get list of CLSID/registered classes
clsid_tln 20200526 Software USRCLASS.DAT Get list of CLSID/registered classes
cmdproc 20200515 NTUSER.DAT Autostart - get Command Processor\AutoRun value from NTUSER.DAT hive
cmdproc_tln 20130425 NTUSER.DAT Autostart - get Command Processor\AutoRun value from NTUSER.DAT hive (TLN)
cmd_shell 20200515 Software Gets shell open cmds for various file types
codepage 20200519 system Checks codepage value
comdlg32 20200517 NTUSER.DAT Gets contents of user's ComDlg32 key
compdesc 20200511 NTUSER.DAT Gets contents of user's ComputerDescriptions key
compname 20090727 System Gets ComputerName and Hostname values from System hive
cred 20200427 system Checks for UseLogonCredential value
cred_tln 20200402 system Checks UseLogonCredential value
dafupnp 20200525 System Parses data from networked media streaming devices
dcom 20200525 Software Check DCOM Ports
ddo 20140414 NTUSER.DAT Gets user's DeviceDisplayObjects key contents
defender 20200427 Software Get Windows Defender settings
del 20200515 All Parse hive, print deleted keys/values
del_tln 20190506 All Parse hive, print deleted keys/values
devclass 20200525 System Get USB device info from the DeviceClasses keys in the System hive
direct 20200515 Software Searches Direct* keys for MostRecentApplication subkeys
direct_tln 20190911 Software Searches Direct* keys for MostRecentApplication subkeys (TLN)
disablelastaccess 20200517 System Get NTFSDisableLastAccessUpdate value
disablemru 20190924 NTUSER.DAT Software Checks settings disabling user's MRUs
disableremotescm 20200513 System Gets DisableRemoteScmEndpoints value from System hive
disablesr 20200515 Software Gets the value that turns System Restore either on or off
drivers32 20200525 Software Get values from the Drivers32 key
emdmgmt 20200511 Software Gets contents of EMDMgmt subkeys and values
environment 20200512 System NTUSER.DAT Get environment vars from NTUSER.DAT & System hives
execpolicy 20200517 Software Gets PowerShell Execution Policy
featureusage 20200511 NTUSER.DAT Extracts user's FeatureUsage data.
fileless 20200525 All Scans a hive file looking for fileless malware entries
findexes 20200525 All Scans a hive file looking for binary value data that contains MZ
gpohist 20200525 Software NTUSER.DAT Collects system/user GPO history
gpohist_tln 20150529 Software NTUSER.DAT Collects system/user GPO history (TLN)
heap 20200427 Software Checks HeapLeakDetection\DiagnosedApplications Subkeys
heidisql 20201227 NTUSER.DAT Gets user's heidisql data
ica_sessions 20200528 Software ARETE ONLY - Extracts Citrix ICA Session info
identities 20200525 NTUSER.DAT Extracts values from Identities key; NTUSER.DAT
imagedev 20140104 System --
imagefile 20200515 Software Checks ImageFileExecutionOptions subkeys values
injectdll64 20200427 NTUSER.DAT Software Retrieve values set to weaken Chrome security
inprocserver 20200427 Software Checks CLSID InProcServer32 values for indications of malware
installer 20200517 Software Determines product install information
ips 20200518 System Get IP Addresses and domains (DHCP, static)
jumplistdata 20200517 NTUSER.DAT Gets contents of user's JumpListData key
killsuit 20200427 Software Check for indications of Danderspritz Killsuit installation
killsuit_tln 20200414 Software Check for indications of Danderspritz Killsuit installation
knowndev 20200515 NTUSER.DAT Gets user's KnownDevices key contents
landesk 20200517 Software Get list of programs monitored by LANDESK - Software hive
landesk_tln 20130214 Software Get list of programs monitored by LANDESK from Software hive
lastloggedon 20200517 Software Gets LastLoggedOn* values from LogonUI key
licenses 20200526 Software Get contents of HKLM/Software/Licenses key
listsoft 20200517 NTUSER.DAT Lists contents of user's Software key
load 20200517 NTUSER.DAT Gets load and run values from user hive
logonstats 20200517 NTUSER.DAT Gets contents of user's LogonStats key
lsa 20200517 System Lists specific contents of LSA key
lxss 20200511 NTUSER.DAT Gets WSL config.
lxss_tln 20140723 NTUSER.DAT Gets WSL config.
macaddr 20200515 System Software --
mixer 20200517 NTUSER.DAT Checks user's audio mixer settings
mixer_tln 20141112 NTUSER.DAT Checks user's audio mixer info
mmc 20200517 NTUSER.DAT Get contents of user's MMC\Recent File List key
mmc_tln 20120828 NTUSER.DAT Get contents of user's MMC\Recent File List key (TLN)
mmo 20200517 NTUSER.DAT Checks NTUSER for Multimedia\Other values [malware]
mndmru 20200517 NTUSER.DAT Get contents of user's Map Network Drive MRU
mndmru_tln 20120829 NTUSER.DAT Get user's Map Network Drive MRU (TLN)
mountdev 20200517 System Return contents of System hive MountedDevices key
mountdev2 20200517 System Return contents of System hive MountedDevices key
mp2 20200526 NTUSER.DAT Gets user's MountPoints2 key contents
mp2_tln 20200525 NTUSER.DAT Gets user's MountPoints2 key contents
mpmru 20200517 NTUSER.DAT Gets user's Media Player RecentFileList values
msis 20200517 Software Determine MSI packages installed on the system
msoffice 20200518 NTUSER.DAT Get user's MSOffice content
msoffice_tln 20200518 NTUSER.DAT Get user's MSOffice content
muicache 20200525 NTUSER.DAT USRCLASS.DAT Gets EXEs from user's MUICache key
muicache_tln 20130425 NTUSER.DAT USRCLASS.DAT Gets EXEs from user's MUICache key (TLN)
nation 20200517 ntuser.dat Gets region information from HKCU
netlogon 20200515 System Parse values for machine account password changes
netsh 20200515 Software Gets list of NetSH helper DLLs
networkcards 20200518 Software Get NetworkCards Info
networklist 20200518 Software Collects network info from NetworkList key
networklist_tln 20150812 Software Collects network info from NetworkList key (TLN)
networksetup2 20191004 System Get NetworkSetup2 subkey info
nic2 20200525 System Gets NIC info from System hive
ntds 20200427 System Parse Services NTDS key for specific persistence values
null 20160119 All Check key/value names in a hive for leading null char
oisc 20091125 NTUSER.DAT Gets contents of user's Office Internet Server Cache
onedrive 20200515 NTUSER.DAT Gets contents of user's OneDrive key
onedrive_tln 20190823 NTUSER.DAT Gets contents of user's OneDrive key
osversion 20200511 NTUSER.DAT Checks for OSVersion value
osversion_tln 20120608 NTUSER.DAT Checks for OSVersion value (TLN)
outlook_homepage 20201002 NTUSER.DAT Software Retrieve values set to attack Outlook WebView Homepage
pagefile 20140505 System Get info on pagefile(s)
pending 20130711 System Gets contents of PendingFileRenameOperations value
pendinggpos 20200427 NTUSER.DAT Gets contents of user's PendingGPOs key
photos 20200525 USRCLASS.DAT Shell/BagMRU traversal in Win7 USRCLASS.DAT hives
Plugin Version Hive Description
portdev 20090118 Software Parses Windows Portable Devices key contents
powershellcore 20200525 Software Extracts PowerShellCore settings
prefetch 20200515 System Gets the Prefetch Parameters
printdemon 20200514 Software Gets value assoc with printer ports and descriptions
printmon 20200427 System Lists installed Print Monitors
printmon_tln 20191122 System Lists installed Print Monitors
processor_architecture 20140505 System Get from the processor architecture from the System's environment key
profilelist 20200518 Software Get content of ProfileList key
profiler 20200525 NTUSER.DAT System Environment profiler information
pslogging 20200515 NTUSER.DAT Software Extracts PowerShell logging settings
psscript 20200525 Software NTUSER.DAT Get PSScript.ini values
putty 20200515 NTUSER.DAT Extracts the saved SshHostKeys for PuTTY.
rdpport 20200526 System Queries System hive for RDP Port
recentapps 20200515 NTUSER.DAT Gets contents of user's RecentApps key
recentapps_tln 20190513 NTUSER.DAT Gets contents of user's RecentApps key
recentdocs 20200427 NTUSER.DAT Gets contents of user's RecentDocs key
recentdocs_tln 20140220 NTUSER.DAT Gets contents of user's RecentDocs key (TLN)
remoteaccess 20200517 System Get RemoteAccess AccountLockout settings
rlo 20200517 All Parse hive, check key/value names for RLO character
routes 20200526 System Get persistent routes from the Registry
run 20200511 Software NTUSER.DAT [Autostart] Get autostart key contents from Software hive
runmru 20200525 NTUSER.DAT Gets contents of user's RunMRU key
runmru_tln 20120828 NTUSER.DAT Gets contents of user's RunMRU key (TLN)
runonceex 20200427 Software Gets contents of RunOnceEx values
runvirtual 20200427 NTUSER.DAT Software Gets RunVirtual entries
runvirtual_tln 20191211 NTUSER.DAT Software Gets RunVirtual entries
ryuk_gpo 20200427 Software Get GPO policy settings from Software hive related to Ryuk
samparse 20200825 SAM Parse SAM file for user & group mbrshp info
samparse_tln 20200826 SAM Parse SAM file for user acct info (TLN)
ScanButton 20131210 System Get Scan Button information
schedagent 20200518 Software Get SchedulingAgent key contents
scriptleturl 20200525 Software USRCLASS.DAT Check CLSIDs for ScriptletURL subkeys
searchscopes 20200517 NTUSER.DAT Gets contents of user's SearchScopes key
secctr 20200517 Software Get data from Security Center key
secrets 20200517 Security Get the last write time for the Policy\Secrets key
secrets_tln 20140814 Security Get the last write time for the Policy\Secrets key
securityproviders 20200526 System Gets SecurityProvider value from System hive
services 20191024 System Lists services/drivers in Services key by LastWrite times
sevenzip 20210329 NTUSER.DAT Gets records of histories from 7-Zip keys
sfc 20200517 Software Get SFC values
shares 20200525 System Get list of shares from System hive file
shc 20200427 NTUSER.DAT Gets SHC entries from user hive
shellbags 20200428 USRCLASS.DAT Shell/BagMRU traversal in Win7+ USRCLASS.DAT hives
shellbags_tln 20180702 USRCLASS.DAT Shell/BagMRU traversal in Win7 USRCLASS.DAT hives
shellfolders 20200515 NTUSER.DAT Gets user's shell folders values
shelloverlay 20100308 Software Gets ShellIconOverlayIdentifiers values
shimcache 20200428 System Parse file refs from System hive AppCompatCache data
shimcache_tln 20190112 System Parse file refs from System hive AppCompatCache data
shutdown 20200518 System Gets ShutdownTime value from System hive
sizes 20200517 All Scans a hive file looking for binary value data of a min size (5000)
slack 20200517 All Parse hive, print slack space, retrieve keys/values
slack_tln 20190506 All Parse hive, print slack space, retrieve keys/values
source_os 20200511 System Parse Source OS subkey values
speech 20200427 NTUSER.DAT Get values from user's Speech key
speech_tln 20191010 NTUSER.DAT Get values from user's Speech key
spp_clients 20130429 Software Determines volumes monitored by VSS
srum 20200518 Software Gets contents of SRUM subkeys
ssid 20200515 Software Get WZCSVC SSID Info
susclient 20200518 Software Extracts SusClient* info, including HDD SN (if avail)
svc 20200525 System Lists Services key contents by LastWrite time (CSV)
svcdll 20200525 System Lists Services keys with ServiceDll values
svc_tln 20130911 System Lists Services key contents by LastWrite time (CSV)
syscache 20200515 syscache Parse SysCache.hve file
syscache_csv 20200515 syscache
syscache_tln 20190516 syscache
sysinternals 20080324 NTUSER.DAT Checks for SysInternals apps keys
sysinternals_tln 20080324 NTUSER.DAT Checks for SysInternals apps keys (TLN)
systemindex 20200518 Software Gets systemindex\..\Paths info from Windows Search key
taskcache 20200427 Software Checks TaskCache\Tree root keys (not subkeys)
taskcache_tln 20200416 Software Checks TaskCache\Tree root keys (not subkeys)
tasks 20200427 Software Checks TaskCache\Tasks subkeys
tasks_tln 20200416 Software Checks TaskCache\Tasks subkeys
termcert 20200526 System Gets Terminal Server certificate
termserv 20200506 System Software Gets Terminal Server settings from System and Software hives
thispcpolicy 20200511 Software Gets ThisPCPolicy values
timezone 20200518 System Get TimeZoneInformation key contents
tracing 20200511 Software Gets list of apps that can be traced
tracing_tln 20120608 Software Gets list of apps that can be traced (TLN)
tsclient 20200518 NTUSER.DAT Displays contents of user's Terminal Server Client\Default key
tsclient_tln 20120827 NTUSER.DAT Displays contents of user's Terminal Server Client key (TLN)
typedpaths 20200526 NTUSER.DAT Gets contents of user's typedpaths key
typedpaths_tln 20120828 NTUSER.DAT Gets contents of user's typedpaths key (TLN)
typedurls 20200526 NTUSER.DAT Returns contents of user's TypedURLs key.
typedurlstime 20200526 NTUSER.DAT Returns contents of user's TypedURLsTime key.
typedurlstime_tln 20120613 NTUSER.DAT Returns contents of Win8 user's TypedURLsTime key (TLN).
typedurls_tln 20120827 NTUSER.DAT Returns MRU for user's TypedURLs key (TLN)
uac 20200427 Software Get Select User Account Control (UAC) Values from HKLM\SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System
uacbypass 20200511 USRCLASS.DAT Software Get possible UAC bypass settings
uninstall 20200525 Software NTUSER.DAT Gets contents of Uninstall keys from Software, NTUSER.DAT hives
uninstall_tln 20120523 Software NTUSER.DAT Gets contents of Uninstall keys from Software, NTUSER.DAT hives(TLN format)
usb 20200515 System Get USB key info
usbdevices 20200525 System Parses Enum\USB key for USB & WPD devices
usbstor 20200515 System Get USBStor key info
userassist 20170204 NTUSER.DAT Displays contents of UserAssist subkeys
userassist_tln 20180710 NTUSER.DAT Displays contents of UserAssist subkeys in TLN format
volinfocache 20200518 Software Gets VolumeInfoCache from Windows Search key
wab 20200427 Software Get WAB DLLPath settings
wab_tln 20191122 Software Get WAB DLLPath settings
watp 20200427 Software Gets contents of Windows Advanced Threat Protection key
wbem 20200511 Software Get some contents from WBEM key
wc_shares 20200515 NTUSER.DAT Gets contents of user's WorkgroupCrawler/Shares subkeys
winlogon_tln 20130429 Software Alerts on values from the WinLogon key (TLN)
winrar 20200526 NTUSER.DAT Get WinRAR\ArcHistory entries
winrar_tln 20120829 NTUSER.DAT Get WinRAR\ArcHistory entries (TLN)
winscp 20201227 NTUSER.DAT Gets user's WinSCP 2 data
winver 20200525 Software Get Windows version & build info
winzip 20200526 NTUSER.DAT Get WinZip extract and filemenu values
wordwheelquery 20200823 NTUSER.DAT Gets contents of user's WordWheelQuery key
wordwheelquery_tln 20200824 NTUSER.DAT Gets contents of user's WordWheelQuery key
wow64 20200515 Software Gets contents of WOW64\x86 key
wpdbusenum 20200515 System Get WpdBusEnum subkey info
wsh_settings 20200517 Software Gets WSH Settings
Install
see foralyse
reglookup
reglookup [-v] [-s] [-p <PATH_FILTER>] [-t <TYPE_FILTER>] <REGISTRY_FILE>
Print windows registry elements to stdout in a CSV-like format
Special
for hive in SAM SECURITY SOFTWARE SYSTEM $(find /vol6/ -iname ntuser.dat); do echo $hive; reglookup -i $hive > /share/examen/disk/hive/reglookup_${hive//\//_}; done
Usefull
-p # restrict output to elements below this path.
-H # disables header row.
-s # enables security descriptor output.
All
-v # sets verbose mode.
-h # enables header row. (default)
-H # disables header row.
-s # enables security descriptor output.
-S # disables security descriptor output. (default)
-p # restrict output to elements below this path.
-t # restrict results to this specific data type.
-i # includes parent key modification times with child values.
reglookup-timeline
reglookup-timeline [-H] [-V] <REGISTRY_FILE> [<REGISTRY_FILE> ...]
Builds timelines for forensic investigations, a wrapper for reglookup
Special
cd /vol6/Windows/System32/config && hives="SAM SECURITY SOFTWARE SYSTEM $(find /vol6/ -iname ntuser.dat)" && reglookup-timeline -V $hives > /share/examen/disk/hive/reglookup-tl # complete timeline
sed -n '/^2021-09-09 18:1/,$p' reglookup-tl > reglookup-tl-select # select part of timeline
All
-H # Omit header line
-V # Include values with parent timestamps
reglookup-recover
reglookup-recover [options] <REGISTRY_FILE>
Attempts to scour a Windows registry hive for deleted data structures and outputs those found in a CSV-like format
All
-v # sets verbose mode.
-h # enables header row. (default)
-H # disables header row.
-l # enables leftover(raw) cell output.
-L # disables leftover(raw) cell output. (default)
-r # enables raw cell output for parsed cells.
-R # disables raw cell output for parsed cells. (default)
Install
sudo apt install reglookup
theme
windows version
regripper -p winver -r $path_hive/SOFTWARE
reglookup -p Software/Microsoft $path_hive/SOFTWARE | column -t -s,
user password
path_hive=/vol6/Windows/System32/config
path2=/cases/examen/artefacts
# get user id
reglookup -p SAM/Domains/Account/Users ${path_hive}/SAM | grep -i angela # select 0x.....
# data
uid=000003E9
hivexget ${path_hive}/SAM "SAM\Domains\Account\Users\000003E9" V | hexdump -ve '8/1 "%02X"' > ${path2}/sam-user-v.hexdump
hivexget ${path_hive}/SAM "SAM\Domains\Account" F | hexdump -ve '8/1 "%02X"' > ${path2}/sam-f.hexdump
hivexget ${path_hive}/SYSTEM "ControlSet001\Control\Lsa\JD" lookup | hexdump -ve '8/1 "%02X"' > ${path2}/system-jd.hexdump
hivexget ${path_hive}/SYSTEM "ControlSet001\Control\Lsa\Skew1" SkewMatrix | hexdump -ve '8/1 "%02X"' > ${path2}/system-skew.hexdump
hivexget ${path_hive}/SYSTEM "ControlSet001\Control\Lsa\GBG" GrafBlumGroup | hexdump -ve '8/1 "%02X"' > ${path2}/system-gbg.hexdump
hivexget ${path_hive}/SYSTEM "ControlSet001\Control\Lsa\Data" Pattern | hexdump -ve '8/1 "%02X"' > ${path2}/system-data.hexdump
for file in $(ls ${path2}); do echo $file; cat $file; echo; done
regripper
reglookup
reglookup-timeline
pathhive=$device/Windows/System32/config
pathreport=/share/examen/disk
cd $pathhive
reglookup-timeline SAM SECURITY SOFTWARE SYSTEM > $pathreport/reglookup-timeline
windows
security center
disable
[HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\SecurityHealthService]"Start"=dword:00000004
[HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\wscsvc]"Start"=dword:00000004
enable
[HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\SecurityHealthService]"Start"=dword:00000002
[HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\wscsvc]"Start"=dword:00000002
cmp [OPTION]... FILE1 [FILE2 [SKIP1 [SKIP2]]]
Compare two files byte by byte
Special
cmp $file1 $file2 # compare 2 binary files
cmp -l $file1 $file2 | wc -l # get number of differences
Usefull
-b, --print-bytes # print differing bytes
-n, --bytes=LIMIT # compare at most LIMIT bytes
All
-b, --print-bytes # print differing bytes
-i, --ignore-initial=SKIP # skip first SKIP bytes of both inputs
-i, --ignore-initial=SKIP1:SKIP2 # skip first SKIP1 bytes of FILE1 and first SKIP2 bytes of FILE2
-l, --verbose # output byte numbers and differing byte values
-n, --bytes=LIMIT # compare at most LIMIT bytes
-s, --quiet, --silent # suppress all normal output
mount
info
file ${file} # show informations
fdisk -x ${file} # show informations
qemu-img info ${file} # show informations on virtual disk
guestfish --rw -a $file
run
list-filesystems
sudo modprobe nbd
sudo qemu-nbd -c /dev/nbd0 ${file} -f qcow2
sudo fdisk /dev/nbd0 -l
sudo qemu-nbd -d /dev/nbd0
parted ${file}
print
losetup -a # show mounted devices in /dev/loopX
resize
qemu-img resize -f raw ${file} 20972568064 # resize disk to 20972568064 bytes (correct disk size)
parted ${file}
select # select disk (interactive menu)
resizepart # resize partition (interactive menu)
mount/umount
guestmount --add %f --mount /dev/sda1 /vms/data
guestunmount /vms/data
sudo modprobe nbd
sudo qemu-nbd -c /dev/nbd0 ${file} -f qcow2
sudo fdisk /dev/nbd0 -l
sudo mount /dev/nbd0p1 /vms/data
sudo umount /vms/data
sudo qemu-nbd -d /dev/nbd0
sudo mount -o ro,loop,offset=$((1126400*512)) ${file} /mnt # mount disk partition with the partition offset
sudo mount -o ro,loop,offset=$((1126400*512)) ${file} /mnt # mount disk partition with the partition offset
sudo umount /mnt # umount disk
sudo losetup --find --show ${file} # mount disk in /dev/loopX and show /dev/loopX
sudo losetup --find --show --offset ${offset} ${file} # mount partition/disk with offset in /dev/loopX and show /dev/loopX
sudo losetup -d /dev/loopX # umount disk
info
https://opensource.com/article/18/3/creating-bash-completion-script
COMPREPLY
an array variable used to store the completions. The completion mechanism uses this variable to display its contents as completions
COMPREPLY=( $(compgen -W "now tomorrow never" -- ${COMP_WORDS[COMP_CWORD]}) ) # complete the word under the cursor against the given word list
COMPREPLY=( $(compgen -W "now tomorrow never" "${COMP_WORDS[1]}") ) # let choose the first completion from given words and repeat it after (replace)
complete
complete command to register this list for completion
complete -A directory $cmd # provide completion for directory
complete -d $cmd # provide completion for directory
complete -D $cmd # provide completion for directory
complete -f $cmd # provide completion for file
complete -W "$words" $cmd # Wordlist, provide the list of words for completion to command $cmd
complete -F _foo $cmd # use function _foo_comp to register completions for command $cmd
compopt
https://helpmanual.io/man1/bash/
variables
COMP_WORDS # an array of all the words typed after the name of the program the compspec belongs to
COMP_CWORD # an index into the COMP_WORDS array pointing to the word the cursor is currently on
COMP_LINE # the current command line
tricks
exec bash # reload completions
examples
qemu-img
#!/usr/bin/env bash
#
# Bash completion function for the 'qemu-img' command.
# Completes: subcommand (arg 1), image format (after -o/-O/-f),
# file names (everything else).
_qemuimg_comp()
{
  COMPREPLY=()
  local cur=${COMP_WORDS[COMP_CWORD]}
  local prev="${COMP_WORDS[COMP_CWORD-1]}"
  # qemu-img subcommands, offered as the first argument
  local opts='amend bench bitmap check commit compare convert create dd info map measure snapshot rebase resize'
  # image formats offered after -o/-O/-f
  # (fix: 'null-co' was previously broken across two lines into 'null' + '-co')
  local formats='blkdebug blklogwrites blkverify bochs cloop compress copy-before-write copy-on-read dmg file ftp ftps gluster host_cdrom host_device http https iscsi iser luks nbd nfs null-aio null-co nvme parallels preallocate qcow qcow2 qed quorum raw rbd replication snapshot-access ssh throttle vdi vhdx vmdk vpc vvfat'
  if (( COMP_CWORD == 1 )); then
    # first argument: complete a subcommand
    COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") )
    return 0
  elif [[ $prev =~ ^-[oOf]$ ]]; then
    # anchored match (fix: unanchored regex also matched words like '--force')
    COMPREPLY=( $(compgen -W "${formats}" -- "${cur}") )
  else
    # anything else: complete file names
    COMPREPLY=( $(compgen -f -- "${cur}") )
    if [[ -d ${COMPREPLY[0]} ]]; then
      # directory: append '/' and keep the cursor on it
      compopt -o nospace
      COMPREPLY[0]=${COMPREPLY[0]}/
    fi
  fi
} &&
complete -F _qemuimg_comp qemu-img
haconf
#!/usr/bin/env bash
#
# Bash completion function for the 'haconf' command.
_haconf()
{
  local cur prev path_enabled path_available opts
  path_enabled="/etc/haproxy/conf-enabled"
  path_available="/etc/haproxy/conf-available"
  # configurations available but not yet symlinked into conf-enabled
  __disabled() {
    local available entry unused
    available="$(ls "${path_available}")"
    for entry in ${available}; do
      ! [ -h "${path_enabled}/${entry}" ] && unused="${unused} ${entry}"
    done
    echo ${unused}
  }
  # configurations currently symlinked into conf-enabled
  __enabled() {
    ls ${path_enabled}
  }
  COMPREPLY=()
  cur=${COMP_WORDS[COMP_CWORD]}
  prev=${COMP_WORDS[COMP_CWORD-1]}
  # primary commands
  opts='check clear enable disable list reload'
  # level 1: complete a primary command, then stop
  if (( COMP_CWORD == 1 )); then
    COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") )
    return 0
  fi
  # level 2: complete a configuration name, depending on the command
  case $prev in
    enable)  COMPREPLY=( $(compgen -W "$(__disabled)" -- "$cur") ); return 0 ;;
    disable) COMPREPLY=( $(compgen -W "$(__enabled)" -- "$cur") ); return 0 ;;
  esac
}
complete -F _haconf haconf
xxd [options] [infile [outfile]]
xxd -r [-s [-]offset] [-c cols] [-ps] [infile [outfile]]
ASCII, decimal, hexadecimal, octal dump
Special
xxd -p -c 10000 # export in hexa with 10000 octets by column
xxd -p -u -c 10000 # export in hexa with 10000 octets by column and in uppercase
xxd -s 0x200 -l 0x200 dump.vmdk| xxd -r # print readable content
Usefull
-s [+][-]seek # start at <seek> bytes abs. (or +: rel.) infile offset
-l len # stop after <len> octets
-r # reverse operation: convert (or patch) hexdump into binary
-r -s off # revert with <off> added to file positions found in hexdump
-u # use upper case hex letters
All
-a # toggle autoskip: A single '*' replaces nul-lines. Default off
-b # binary digit dump (incompatible with -ps,-i,-r). Default hex
-C # capitalize variable names in C include file style (-i)
-c cols # format <cols> octets per line. Default 16 (-i: 12, -ps: 30)
-E # show characters in EBCDIC. Default ASCII
-e # little-endian dump (incompatible with -ps,-i,-r)
-g bytes # number of octets per group in normal output. Default 2 (-e: 4)
-i # output in C include file style
-l len # stop after <len> octets
-o off # add <off> to the displayed file position
-ps # output in postscript plain hexdump style
-r # reverse operation: convert (or patch) hexdump into binary
-r -s off # revert with <off> added to file positions found in hexdump
-d # show offset in decimal instead of hex
-s [+][-]seek # start at <seek> bytes abs. (or +: rel.) infile offset
-u # use upper case hex letters
Install
sudo apt install xxd
system
sudo sh -c "echo 'fs.file-max=3253172' > /etc/sysctl.d/90-cuckoo.conf"
file=/etc/security/limits.conf
sudo cp -a ${file} ${file}.$(date +%s)
sudo sh -c "echo '
# cuckoo
* soft nofile 4096
* hard nofile 16384' >> ${file}"
logout / login
mongodb
service
service=mongodb.service
systemctl is-enabled ${service} || sudo systemctl enable ${service}
systemctl is-active ${service} || sudo systemctl start ${service}
systemctl status ${service}
ss -ltn|grep 27017
users
mongo
db.createUser({ user: "admin", pwd: "7Yt_Gi-sYgCsr", roles:[{ role: "userAdminAnyDatabase", db: "admin" }] })
db.getUsers()
use cuckoo
db.createUser({ user: "cuckoo", pwd: "8hm6_FevpUA5od", roles:[{ role: "dbOwner", db: "cuckoo" }] })
db.getUsers()
show dbs
exit
conf
file=/etc/mongodb.conf
while read str val; do
sudo sed -i "s|#\?\(${str}\) *=.*$|\1 = ${val}|" ${file}
done <<< "port 27017
journal true
auth true
verbose true"
sudo systemctl restart ${service}
postgresql
service=postgresql.service
systemctl is-enabled ${service} || sudo systemctl enable ${service}
systemctl is-active ${service} || sudo systemctl start ${service}
systemctl status ${service}
ss -ltn|grep 5432
sudo -u postgres psql
psql
\du
CREATE DATABASE cuckoo;
CREATE USER cuckoo WITH ENCRYPTED PASSWORD '8hm6_FevpUA5od';
GRANT ALL PRIVILEGES ON DATABASE cuckoo TO cuckoo;
\du
\q
guacd
service=guacd.service
systemctl is-enabled ${service} || sudo systemctl enable ${service}
systemctl is-active ${service} || sudo systemctl start ${service}
systemctl status ${service}
ss -ltn|grep 4822
cuckoo
create
[ -d ~/.cuckoo ] || cuckoo -d
cp -a ~/.cuckoo ~/.cuckoo.$(date +%s)
cuckoo
file=~/.cuckoo/conf/cuckoo.conf
while read str val; do
sed -i "/${str} =/ s|=.*$|= ${val}|" ${file}
done <<< "machinery kvm
memory_dump yes
ip 192.168.122.1
connection postgresql://cuckoo:8hm6_FevpUA5od@localhost:5432/cuckoo
guacd_host localhost
guacd_port 4822"
auxiliary
file=~/.cuckoo/conf/auxiliary.conf
while read str val; do
sed -i "/${str} =/ s|=.*$|= ${val}|" ${file}
done <<< "tcpdump /usr/sbin/tcpdump
mitmdump /usr/local/bin/mitmdump"
kvm
update VMs in ~/.cuckoo/conf/kvm.conf
memory
file=~/.cuckoo/conf/memory.conf
while read str val; do
sed -i "/${str} =/ s|=.*$|= ${val}|" ${file}
done <<< "guest_profile Win7SP1x64
delete_memdump no"
processing
file=~/.cuckoo/conf/processing.conf
sed -i "/^.memory.$/,/^$/ s|^enabled = .*$|enabled = yes|" ${file}
reporting
file=~/.cuckoo/conf/reporting.conf
# singlefile
sed -i "/^.singlefile.$/,/^$/ s|^enabled = .*$|enabled = yes|" ${file}
sed -i "/^.singlefile.$/,/^$/ s|^html = .*$|html = yes|" ${file}
sed -i "/^.singlefile.$/,/^$/ s|^pdf = .*$|pdf = yes|" ${file}
# mongodb
db_name=cuckoo
db_user=cuckoo
db_pwd=8hm6_FevpUA5od
sed -i "/^.mongodb.$/,/^$/ s|^enabled = .*$|enabled = yes|" ${file}
sed -i "/^.mongodb.$/,/^$/ s|^db = .*$|db = ${db_name}|" ${file}
sed -i "/^.mongodb.$/,/^$/ s|^username = .*$|username = ${db_user}|" ${file}
sed -i "/^.mongodb.$/,/^$/ s|^password = .*$|password = ${db_pwd}|" ${file}
interface
user=nikita
hostbr=virbr0
vms="win7 "
for vm in win7; do
sudo tunctl -b -u ${user} -t tap_${vm}
sudo ip link set tap_${vm} master ${hostbr}
sudo ip link set dev tap_${vm} up
sudo ip link set dev ${hostbr} up
done
python
time
disable time settings from internet
set static IP address (disable DHCP)
address 192.168.122.101
gateway 192.168.122.1 / 255.255.255.0
DNS 208.67.222.222, 208.67.222.220
windows
add ;c:\python27;c:\python27\scripts;C:\Program Files (x86)\GnuWin32\bin to PATH
install python-2.7.10.amd64.msi
install wget-1.11.4-1-setup.exe
wget https://bootstrap.pypa.io/pip/2.7/get-pip.py
python.exe get-pip.py
pip install -U setuptools
pillow
pip install -U Pillow # fix: '--U' is not a valid option; '-U' (--upgrade) installs/upgrades Pillow
agent
get from host: $CWD/agent.py
rename, put in guest: C:\ProgramData\Microsoft\Start Menu\programs\Startup\agent.pyw
https://cuckoo.sh/docs/installation/host/requirements.html
xubuntu 18.04 "bionic"
global
sudo apt update
sudo apt-get install -y git swig libjpeg-dev zlib1g-dev libffi-dev libssl-dev
sudo apt-get install -y virt-win-reg libhivex-bin # registry
python
Requirement
local vs global
# pip for user installation
pip=pip
# pip for root installation
pip="sudo -H pip"
sudo apt install -y python python-pip python-dev
# sudo apt-get install -y python-virtualenv
$pip install -U pip setuptools
balbuzard
$pip install -U balbuzard
pydeep
# ssdeep
sudo apt install -y ssdeep libfuzzy-dev
sudo ldconfig
# pydeep
$pip install pydeep
# sudo apt install -y git
# cd /tmp && git clone https://github.com/kbandla/pydeep && cd pydeep
# python setup.py build && python setup.py test
# sudo python setup.py install
m2crypto
$pip install m2crypto # $pip install m2crypto==0.24.0
volatility
https://github.com/volatilityfoundation/volatility/wiki/Installation
libforensic1394.so.2
sudo apt install -y cmake
cd /tmp
git clone https://github.com/FreddieWitherden/libforensic1394
cd libforensic1394
mkdir build && cd build
cmake -G"Unix Makefiles" ../
sudo make install
cd ../python
sudo python setup.py install
cd
sudo rm -fR /tmp/libforensic1394
sudo ln -sv /usr/local/lib/libforensic1394.so.2 /usr/lib/libforensic1394.so.2
pip packages
sudo apt -y install pcregrep libpcre++-dev python-dev
$pip install pycrypto distorm3 yara-python ujson openpyxl pytz ipython
volatility
cd /opt
git clone https://github.com/volatilityfoundation/volatility.git
cd volatility
rm -fR .git
sudo python setup.py install
cd /usr/local/bin
sudo ln -sv vol.py vol2
vol2 -h
bash completion
sudo cp -a /home/shared/dev/install-desktop/conf/cuckoo/vol2 /usr/share/bash-completion/completions/
opts=$(vol2 --info|sed -n '/^Plugins/,/^$/ p'|tail -n+3|cut -f1 -d' '|xargs)
sudo sed -i "s|^\( *opts=\).*$|\1'${opts}'|" /usr/share/bash-completion/completions/vol2
exec bash # reload completions
DB
django
sudo apt-get install -y mongodb
postgresql
sudo apt-get install -y postgresql libpq-dev
$pip install psycopg2
packages
guacd
sudo apt install -y libguac-client-rdp0 libguac-client-vnc0 libguac-client-ssh0 guacd
tcpdump
sudo apt-get install -y tcpdump
# sudo apt-get install -y tcpdump apparmor-utils
# sudo aa-disable /usr/sbin/tcpdump
sudo groupadd pcap
sudo chgrp pcap /usr/sbin/tcpdump
sudo setcap cap_net_raw,cap_net_admin=eip /usr/sbin/tcpdump
# verification
getcap /usr/sbin/tcpdump # /usr/sbin/tcpdump = cap_net_admin,cap_net_raw+eip
mitmproxy
https://mitmproxy.org/downloads/
version=3.0.4
path=/opt/mitmproxy
[ -d ${path} ] || mkdir ${path}
cd ${path}
wget https://snapshots.mitmproxy.org/${version}/mitmproxy-v${version}-linux.tar.gz
wget https://snapshots.mitmproxy.org/${version}/pathod-v${version}-linux.tar.gz
tar xzf mitmproxy-v${version}-linux.tar.gz
tar xzf pathod-v${version}-linux.tar.gz
files="mitmdump mitmproxy mitmweb pathoc pathod"
for file in $files; do sudo ln -sv ${path}/${file} /usr/local/bin/${file}; done
# verify the symlinks where they were actually created (/usr/local/bin, not /usr/bin)
for file in $files; do sudo ls -al /usr/local/bin/${file}; done
qemu/kvm
sudo apt-get install -y qemu-kvm libvirt-bin ubuntu-vm-builder bridge-utils python-libvirt virt-manager libguestfs-tools uml-utilities
sudo adduser ${USER} libvirt-qemu
for path in /vms/data /vms/iso; do sudo [ -d ${path} ] || sudo mkdir ${path}; done
sudo setfacl -R -m u:$USER:rw /vms/data /vms/iso
sudo setfacl -R -m d:$USER:rw /vms/data /vms/iso
thunar
$HOME/.config/Thunar/uca.xml
<action>
<icon>edit-copy</icon>
<name>Duplicate root</name>
<unique-id>1635257948652123-2</unique-id>
<command>pkexec cp -a %f %f.copy</command>
<description>Duplicate root</description>
<patterns>*</patterns>
<directories/>
<audio-files/>
<image-files/>
<other-files/>
<text-files/>
<video-files/>
</action>
<action>
<icon>media-import-audio-cd</icon>
<name>iso</name>
<unique-id>1653055089123473-10</unique-id>
<command>mkisofs -Jro /vms/iso/tmp.iso %F</command>
<description>make iso in vms/iso/tmp.iso</description>
<patterns>*</patterns>
<startup-notify/>
<directories/>
<audio-files/>
<image-files/>
<other-files/>
<text-files/>
<video-files/>
</action>
<action>
<icon>go-bottom</icon>
<name>mount data</name>
<unique-id>1653055065395840-9</unique-id>
<command>guestmount --add %f --mount /dev/sda1 /vms/data</command>
<description>guest mount in /vms/data</description>
<patterns>*.qcow2;*.img;*.raw</patterns>
<other-files/>
</action>
<action>
<icon>go-top</icon>
<name>unmount data</name>
<unique-id>1653055105839871-11</unique-id>
<command>guestunmount /vms/data</command>
<description>guest unmount /vms/data</description>
<patterns>*</patterns>
<startup-notify/>
<directories/>
<audio-files/>
<image-files/>
<other-files/>
<text-files/>
<video-files/>
</action>
cuckoo
direct
$pip install -U cuckoo
virtualenv
cd /opt
virtualenv venv
. venv/bin/activate
$pip install -U cuckoo
xubuntu 18.04 bionic
update
sudo apt update
sudo apt list --upgradable
sudo apt dist-upgrade
sudo apt autoremove
sudo apt autoclean
sudo apt clean
system
https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/tree/rtl_nic/
cd /tmp
wget https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/plain/rtl_nic/rtl8168fp-3.fw
wget https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/plain/rtl_nic/rtl8125a-3.fw
sudo cp rtl81* /lib/firmware/rtl_nic/
sudo update-initramfs -u
prepare
. /server/server.conf
. /server/install.conf
sudo mount /btrfs/sys
sdate=$(date +%s)
btrfs_sys=focal
btrfs_user=user-focal
grub
# update /etc/grub/40_custom
sudo sed -i '/^GRUB_TIMEOUT=/ s|=.*|=2|' /etc/default/grub
sudo update-grub
init
path_install_conf=${S_PATH_INSTALL_CONF/install-desktop/install}
path_install_bash_completion=${S_PATH_INSTALL_CONF}/bash-completion
file_env=${S_PATH_SCRIPT_CONF}/env
file_bash_aliases=${S_PATH_SCRIPT_CONF}/.bash_aliases
file_bash_functions=${S_PATH_SCRIPT_CONF}/.bash_functions
file_vimrc="${S_PATH_SCRIPT_CONF}/.vimrc"
sudo software-properties-gtk
sudo adduser ${USER} users; sudo adduser ${USER} www-data
sudo adduser ${USER} audio; sudo adduser ${USER} video
file=~/.bash_aliases
[ -e ${file} ] && _eval rm ${file}
ln -s ${file_bash_aliases} ${file}
file=~/.bash_functions
[ -e ${file} ] && _eval rm ${file}
ln -s ${file_bash_functions} ${file}
sudo cp -a ${S_PATH_INSTALL_CONF}/bash-completion/* /usr/share/bash-completion/completions/
file=~/.bashrc
cp -a ${file} ${file}.${sdate}
sed -i 's|^\(HISTSIZE\)=.*$|\1=10000|' ${file}
sed -i 's|^\(HISTFILESIZE\)=.*$|\1=20000|' ${file}
sed -i '/^#force_color_prompt/ s|^#||' ${file}
color='\\[\\033[01;34m\\]'
sed -i 's|^\( *\)\(PS1.*033.*32m.*\)$|\1PS1="\${debian_chroot:+(\$debian_chroot)}\\['${color}'\\]\\u\\[\\e[1;37m\\]@\\['${color}'\\]\\h\\[\\e[1;37m\\]:\\W\\['${color}'\\]\$\\[\\e[0;0m\\]\"|' ${file}
sed -i 's|^#\?\(force_color_prompt\).*$|\1=yes|' ${file}
grep -q "${file_env}" ${file} || echo "
# source global variables
[ -f ${file_env} ] && . ${file_env}
[ -f ~/.bash_functions ] && . ~/.bash_functions
" >> ${file}
grep -q '. ~/.bash_aliases' ${file} || echo "[ -f ~/.bash_aliases ] && . ~/.bash_aliases
" >> ${file}
. ${file}
sudo cp -a /btrfs/sys/${btrfs_sys}/etc/hosts /etc/hosts
/home/shared/dev/keep/share-link nikita
root
file="/root/.bashrc"
# timestamped backup; reuse ${file} and a '.' separator, consistent with the other backups in this file
sudo cp -a ${file} ${file}.$(date +%s)
color_root="\033[01;31m"
# pick prompt color per server type; default branch also recolors root
# fix: kvm branch had a stray '"' inside the quoted value, which would corrupt PS1
case "$S_SERVER_TYPE" in home) color='\\[\\033[01;34m\\]' ;; ovh) color='\\[\\033[01;32m\\]' ;; vz) color='\\[\\033[01;33m\\]' ;; lxd) color='\\[\\033[01;33m\\]' ;; kvm) color='\\[\\033[01;38;5;172m\\]' ;; *) color='\\[\\033[01;34m\\]'; color_root=$color ;; esac
# force color
sudo sed -i '/^#force_color_prompt=/ s|#||' ${file}
# PS1
ps1='${debian_chroot:+($debian_chroot)}'${color}'\\h\\[\\033[00m\\]\\w\\[\\033[01;31m\\]\\$\\[\\033[00m\\]'
# no root
#ps1='${debian_chroot:+($debian_chroot)}\\[\\033[01;31m\\]\\u\\[\\033[00m\\]@\\[\\033[01;32m\\]\\h\\[\\033[00m\\]:\\w\\[\\033[01;31m\\]\\$\\[\\033[00m\\]'
sudo sed -i "\|if \[ \"\$color_prompt\" = yes \]|{n;s|=.*|='$ps1'|}" ${file}
! sudo grep -q "${S_PATH_SCRIPT_CONF}/env" ${file} && sudo sh -c "echo '
# source global variables
[ -f ${S_PATH_SCRIPT_CONF}/env ] && . ${S_PATH_SCRIPT_CONF}/env
# aliases
[ -f ~/.bash_aliases ] && . ~/.bash_aliases
# functions
[ -f ~/.bash_functions ] && . ~/.bash_functions
' >> ${file}"
file=/root/.bash_aliases
sudo [ -f ${file} ] && sudo rm ${file}
sudo ln -s "$file_bash_aliases" ${file}
file=/root/.bash_functions
sudo [ -f ${file} ] && sudo rm ${file}
sudo ln -s "$file_bash_functions" ${file}
file=/root/.vimrc
sudo [ -f ${file} ] && sudo rm ${file}
sudo ln -sv "${file_vimrc}" ${file}
snap
snap list --all
# sudo snap remove --revision ${rev} ${pck}
install
sudo apt install -y curl debconf-utils gnupg2 htop net-tools p7zip-full p7zip-rar pv rar testdisk tree unrar xsysinfo
sudo apt install -y meld most lnav dconf-editor galculator
sudo apt install -y binutils-common bsdmainutils pev wxhexeditor # binwalk
sudo apt install -y gpicview thunar-media-tags-plugin tumbler-plugins-extra
forensic
sudo apt install -y binutils-common bsdmainutils pev radare2 bless wxhexeditor # binwalk
vim
sudo apt install -y vim
cd
ln -sv "${file_vimrc}" .vimrc
sudo cp /home/shared/dev/install/conf/vim/* /usr/share/vim/vim*/colors/
tmux
sudo apt install -y tmux
ln -vs /usr/local/bs/conf/.tmux.conf .tmux.conf
ln -sv /home/shared/.tmux.tmux
cd /usr/share/bash-completion/completions/
sudo rm tmux
sudo ln -sv tmux.git tmux
tmux a
thunar
sudo cp -a /btrfs/sys/user-pahvo/.config/Thunar/uca.xml ~/.config/Thunar/
qt5
sudo apt install -y qt5ct qt5-gtk-platformtheme qt5-style-plugins
sudo sh -c 'echo "QT_QPA_PLATFORMTHEME=qt5ct" >> /etc/environment' # fix: -c runs the string as a command; -e would treat it as a script filename
export QT_QPA_PLATFORMTHEME=qt5ct
plank
sudo apt install -y plank
path=~/.config/autostart
[ -d ${path} ] || mkdir ${path}
echo '[Desktop Entry]
Encoding=UTF-8
Version=0.9.4
Type=Application
Name=plank
Comment=plank
Exec=plank
OnlyShowIn=XFCE;
RunHook=0
StartupNotify=false
Terminal=false
Hidden=false' > ${path}/plank.desktop
plank --preferences &
sublimetext
file="/etc/hosts"
sudo sh -c "echo '\n# sublime-text hack\n127.0.0.1\tsublimetext.com\n127.0.0.1\twww.sublimetext.com\n127.0.0.1\tlicense.sublimehq.com' >> ${file}"
ips="45.55.255.55"
for ip in ${ips}; do sudo iptables -A OUTPUT -d ${ip} -j DROP; done
path=/etc/iptables
[ -d "${path}" ] || sudo mkdir "${path}"
sudo sh -c 'iptables-save > /etc/iptables/rules.v4'
cat ${S_PATH_INSTALL_CONF}/soft/sublime-text.license
path=~/.local/share/applications
[ -d "${path}" ] || sudo mkdir "${path}"
tar xzf /ext/shared/Soft/linux/backup/sublime_text_20220516-1652694297.tar.gz -C /tmp/
cd /tmp/
mv opt/sublime_text/ /opt/
mv home/nikita/.config/sublime-text-3/ ~/.config/
mv home/nikita/.sublime-project/ ~/
# ensure the target dir exists BEFORE the move; the original ran the same mv twice
# (the second one always failed because the file had already been moved)
[ -e ~/.local/share/applications ] || mkdir -p ~/.local/share/applications
mv home/nikita/.local/share/applications/sublime-text.desktop ~/.local/share/applications/
mozilla
sudo add-apt-repository -y ppa:mozillateam/ppa # fix: PPA name is 'mozillateam/ppa' ('ppa:ppa-mozillateam' does not exist)
sudo apt remove --purge firefox
sudo snap remove --purge firefox
sudo sh -c "echo 'Package: *
Pin: release o=LP-PPA-mozillateam
Pin-Priority: 1001' > /etc/apt/preferences.d/mozilla-firefox"
apt policy firefox
sudo apt install -y firefox
cd
cp -a /home/shared/.mozilla.ubu /home/shared/.mozilla.ubu.${sdate}
ln -sv /home/shared/.mozilla.ubu .mozilla
cp -a /home/shared/.thunderbird.ubu /home/shared/.thunderbird.ubu.${sdate}
ln -sv /home/shared/.thunderbird.ubu .thunderbird
mudita24
sudo apt install -y mudita24
echo "[Desktop Entry]
Encoding=UTF-8
Version=0.9.4
Type=Application
Name=mudita24
Comment=mudita24
Exec=mudita24
OnlyShowIn=XFCE;
StartupNotify=false
Terminal=false
Hidden=true" > ~/.config/autostart/mudita24.desktop
gmusicbrowser
sudo add-apt-repository ppa:tomtomtom/gmusicbrowser
sudo apt update
sudo apt install -y gmusicbrowser
sudo cp -a /btrfs/sys/user-pahvo/.config/gmusicbrowser ~/.config/
gmusicbrowser &
background
# background desktop
sudo cp /home/shared/dev/install-desktop/conf/wp/xubuntu-development-dark.jpg /usr/share/xfce4/backdrops/
# shortcut keyboard
# xfce4-terminal / shift+ctrl+alt-t
# xfce4-terminal -e "tmux a" / ctrl+alt-t
# xfce4-popup-whiskermenu / Super L
meld
sudo cp /home/shared/dev/install-desktop/conf/soft/meld-dark.xml /usr/share/meld/styles/
end
sudo apt update
sudo apt autoremove
sudo apt autoclean
sudo apt clean
install
dev
sudo apt install wxhexeditor tmux most libscca-utils galculator
IDA
sudo apt-get install libc6-i686:i386 libexpat1:i386 libffi7:i386 libfontconfig1:i386 libfreetype6:i386 libgcc1:i386 libglib2.0-0:i386 libice6:i386 libpcre3:i386 libpng16-16:i386 libsm6:i386 libstdc++6:i386 libuuid1:i386 libx11-6:i386 libxau6:i386 libxcb1:i386 libxdmcp6:i386 libxext6:i386 libxrender1:i386 zlib1g:i386 libx11-xcb1:i386 libdbus-1-3:i386 libxi6:i386 libsm6:i386 libcurl4:i386
conf
sudo swapoff -av
sudo sh -c 'echo vm.swappiness=5 > /etc/sysctl.d/99-swappiness.conf'
volatility
https://github.com/volatilityfoundation/volatility/wiki/Installation
pip
# pip for user installation
#pip=pip2
# pip for root installation
pip="sudo -H pip2"
$pip install -U pip setuptools
$pip install pycrypto distorm3 yara-python ujson openpyxl pytz ipython
libforensic1394.so.2
sudo apt install -y cmake
cd /tmp
git clone https://github.com/FreddieWitherden/libforensic1394
cd libforensic1394
mkdir build && cd build
cmake -G"Unix Makefiles" ../
sudo make install
cd ../python
sudo python setup.py install
cd
sudo rm -fR /tmp/libforensic1394
sudo ln -sv /usr/local/lib/libforensic1394.so.2 /usr/lib/libforensic1394.so.2
volatility 2.6
cd /opt
sudo [ -d volatility ] && sudo rm -fR volatility
sudo git clone https://github.com/volatilityfoundation/volatility.git
cd volatility
sudo rm -fR .git
sudo python setup.py install
cd /usr/local/bin
sudo [ -e vol2 ] && sudo rm vol2
sudo ln -sv vol.py vol2
vol2 -h
# host
cp /home/shared/dev/install-desktop/conf/cuckoo/vol2 /vms/share/
#guest
file=/usr/share/bash-completion/completions/vol2
sudo mv /share/vol2 ${file}
opts=$(vol2 --info|sed -n '/^Plugins/,/^$/ p'|tail -n+3|cut -f1 -d' '|xargs)
sudo sed -i "/^ *opts=/ s|=.*|='${opts}'|" ${file}
grep opts= ${file}
exec bash
bash completion
sudo cp -a /home/shared/dev/install-desktop/conf/cuckoo/vol2 /usr/share/bash-completion/completions/
opts=$(vol2 --info|sed -n '/^Plugins/,/^$/ p'|tail -n+3|cut -f1 -d' '|xargs)
sudo sed -i "s|^\( *opts=\).*$|\1'${opts}'|" /usr/share/bash-completion/completions/vol2
exec bash # reload completions
trick
df -h; sudo find / -type d -name .git -exec rm -fR {} \; ; df -h # fix: a ';' was missing after '\;', so the trailing 'df -h' was parsed as find arguments
https://pev.sourceforge.io/doc/manual/en_us/ch06.html
ofs2rva
ofs2rva <offset> FILE
Convert raw file offset to RVA
Example
ofs2rva 0x1b9b8 calc.exe
pedis
pedis OPTIONS FILE
PE sections and functions (by default, until found a RET or LEAVE instruction)
--att # set AT&T syntax
-e, --entrypoint # disassemble entrypoint
-f, --format <text|csv|xml|html> # change output format (default: text)
-m, --mode <16|32|64> # disassembly mode (default: auto)
-i, <number> # number of instructions to be disassembled
-n, <number> # number of bytes to be disassembled
-o, --offset <offset> # disassemble at specified file offset
-r, --rva <rva> # disassemble at specified RVA
-s, --section <section_name> # disassemble entire section given
pehash
pehash OPTIONS FILE
Calculate hashes of PE pieces
-f, --format <text|csv|xml|html> # change output format (default: text)
-a, --all # hash file, sections and headers with md5, sha1, sha256, ssdeep and imphash
-c, --content # hash only the file content (default)
-h, --header <dos|coff|optional> # hash only the header with the specified name
-s, --section <section_name> # hash only the section with the specified name
--section-index <section_index> # hash only the section at the specified index (1..n)
peres
peres OPTIONS FILE
Show information about resource section and extract it
-a, --all # Show all information, statistics and extract resources
-i, --info # Show resources information
-s, --statistics # Show resources statistics
-x, --extract # Extract resources
-v, --file-version # Show File Version from PE resource directory
pesec
pesec [OPTIONS] FILE
Check for security features in PE files
-f, --format <text|csv|xml|html> # change output format (default: text)
-c, --certoutform <text|pem> # specifies the certificate output format (default: text)
-o, --certout <filename> # specifies the output filename to write certificates to (default: stdout)
pescan
pescan OPTIONS FILE
Search for suspicious things in PE files
-f, --format <text|html|xml|csv|json> # change output format (default: text)
-v, --verbose # show more info about items found
readpe
readpe OPTIONS FILE
Show PE file headers
-A, --all # full output (default)
-H, --all-headers # print all PE headers
-S, --all-sections # print all PE sections headers
-f, --format <text|csv|xml|html> # change output format (default: text)
-d, --dirs # show data directories
-h, --header <dos|coff|optional> # show specific header
-i, --imports # show imported functions
-e, --exports # show exported functions
rva2ofs
rva2ofs <rva> FILE
Convert RVA to raw file offset
Example
rva2ofs 0x12db cards.dll
Install
sudo apt install binwalk
binwalk [OPTIONS] [FILE1] [FILE2] ...
tool for searching binary images for embedded files and executable code
Special
binwalk $file # Get signatures (same as -B)
binwalk --hexdump --red $file1 $file2 # show only different lines
binwalk --raw $str $file # search string $str in file. use "\x00" for hexa character ("\x37" for 7)
binwalk --entropy $file # calculate file entropy (no search string argument)
binwalk --signature $file # scan file for common file signatures (same as -B; takes no search string)
binwalk --extract $file && tree _${file}.extracted/ # extract files and show files in tree
Useful
-W, --hexdump # Perform a hexdump / diff of a file or files
-i, --red # Only show lines containing bytes that are different among all files
-U, --blue # Only show lines containing bytes that are different among some files
-e, --extract # Automatically extract known file types
-E, --entropy # Calculate file entropy
All
Signature Scan Options:
-B, --signature # Scan target file(s) for common file signatures
-R, --raw=<str> # Scan target file(s) for the specified sequence of bytes
-A, --opcodes # Scan target file(s) for common executable opcode signatures
-m, --magic=<file> # Specify a custom magic file to use
-b, --dumb # Disable smart signature keywords
-I, --invalid # Show results marked as invalid
-x, --exclude=<str> # Exclude results that match <str>
-y, --include=<str> # Only show results that match <str>
Extraction Options:
-e, --extract # Automatically extract known file types
-D, --dd=<type:ext:cmd> # Extract <type> signatures, give the files an extension of <ext>, and execute <cmd>
-M, --matryoshka # Recursively scan extracted files
-d, --depth=<int> # Limit matryoshka recursion depth (default: 8 levels deep)
-C, --directory=<str> # Extract files/folders to a custom directory (default: current working directory)
-j, --size=<int> # Limit the size of each extracted file
-n, --count=<int> # Limit the number of extracted files
-r, --rm # Delete carved files after extraction
-z, --carve # Carve data from files, but don't execute extraction utilities
-V, --subdirs # Extract into sub-directories named by the offset
Entropy Options:
-E, --entropy # Calculate file entropy
-F, --fast # Use faster, but less detailed, entropy analysis
-J, --save # Save plot as a PNG
-Q, --nlegend # Omit the legend from the entropy plot graph
-N, --nplot # Do not generate an entropy plot graph
-H, --high=<float> # Set the rising edge entropy trigger threshold (default: 0.95)
-L, --low=<float> # Set the falling edge entropy trigger threshold (default: 0.85)
Binary Diffing Options:
-W, --hexdump # Perform a hexdump / diff of a file or files
-G, --green # Only show lines containing bytes that are the same among all files
-i, --red # Only show lines containing bytes that are different among all files
-U, --blue # Only show lines containing bytes that are different among some files
-u, --similar # Only display lines that are the same between all files
-w, --terse # Diff all files, but only display a hex dump of the first file
Raw Compression Options:
-X, --deflate # Scan for raw deflate compression streams
-Z, --lzma # Scan for raw LZMA compression streams
-P, --partial # Perform a superficial, but faster, scan
-S, --stop # Stop after the first result
General Options:
-l, --length=<int> # Number of bytes to scan
-o, --offset=<int> # Start scan at this file offset
-O, --base=<int> # Add a base address to all printed offsets
-K, --block=<int> # Set file block size
-g, --swap=<int> # Reverse every n bytes before scanning
-f, --log=<file> # Log results to file
-c, --csv # Log results to file in CSV format
-t, --term # Format output to fit the terminal window
-q, --quiet # Suppress output to stdout
-v, --verbose # Enable verbose output
-h, --help # Show help output
-a, --finclude=<str> # Only scan files whose names match this regex
-p, --fexclude=<str> # Do not scan files whose names match this regex
-s, --status=<int> # Enable the status server on the specified port
Install
sudo apt install binwalk
balbuzard
balbuzard [options] <filename> [filename2 ...]
malware analysis tools to extract patterns of interest and crack obfuscation such as XOR
Special
balbuzard $file # resume all founds
balbuzard $file -v|grep ^---- -A2 # show all sections
Useful
-c CSV, --csv=CSV # export results to a CSV file
-r # find files recursively in subdirectories.
All
-h, --help # show this help message and exit
-c CSV, --csv=CSV # export results to a CSV file
-v # verbose display, with hex view.
-r # find files recursively in subdirectories.
-z ZIP_PASSWORD, --zip=ZIP_PASSWORD # if the file is a zip archive, open first file from it, using the provided password (requires Python 2.6+)
-f ZIP_FNAME, --zipfname=ZIP_FNAME # if the file is a zip archive, file(s) to be opened within the zip. Wildcards * and ? are supported. (default:*)
Install
pip2 install -U balbuzard
bbcrack
bbcrack [options] <filename>
uses a new algorithm based on patterns of interest to bruteforce typical malware obfuscation such as XOR, ROL, ADD and various combinations, in order to guess which algorithms/keys have been used
All
-l LEVEL, --level=LEVEL # select transforms with level 1, 2 or 3 and below
-i INCLEVEL, --inclevel=INCLEVEL # select transforms only with level 1, 2 or 3 (incremental)
-k KEEP, --keep=KEEP # number of transforms to keep after stage 1
-s SAVE, --save=SAVE # number of transforms to save to files after stage 2
-t TRANSFORM, --transform=TRANSFORM # only check specific transforms (comma separated list, or "-t list" to display all available transforms)
-z ZIP_PASSWORD, --zip=ZIP_PASSWORD # if the file is a zip archive, open first file from it, using the provided password (requires Python 2.6+)
-p # profiling: measure time spent on each pattern.
bbharvest
bbharvest [options] <filename>
extracts all patterns of interest found when applying typical malware obfuscation transforms such as XOR, ROL, ADD and various combinations, trying all possible keys. It is especially useful when several keys or several transforms are used in a single file
All
-l LEVEL, --level=LEVEL # select transforms level 1, 2 or 3
-i INCLEVEL, --inclevel=INCLEVEL # select transforms only with level 1, 2 or 3 (incremental)
-c CSV, --csv=CSV # export results to a CSV file
-t TRANSFORM, --transform=TRANSFORM # only check specific transforms (comma separated list, or "-t list" to display all available transforms)
-z ZIP_PASSWORD, --zip=ZIP_PASSWORD # if the file is a zip archive, open first file from it, using the provided password (requires Python 2.6+)
-p # profiling: measure time spent on each pattern.
bbtrans
bbtrans [options] <filename>
can apply any of the transforms from bbcrack (XOR, ROL, ADD and various combinations) to a file
All
-t TRANSFORM, --transform=TRANSFORM # transform to be applied (or "-t list" to display all available transforms)
-p PARAMS, --params=PARAMS # parameters for transform (comma separated list)
-z ZIP_PASSWORD, --zip=ZIP_PASSWORD # if the file is a zip archive, open first file from it, using the provided password (requires Python 2.6+)
hexdump [-bcCdovx] [-e format_string] [-f format_file] [-n length] [-s offset] file ...
ASCII, decimal, hexadecimal, octal dump
Special
hexdump -v # do not use * to replace duplicate lines
hexdump -ve '"%02X"' # convert in uppercase hexadecimal
hexdump -ve '8/1 "%02X"' # convert in uppercase hexadecimal in classic format 8bytes
hexdump -e '"%08_ax""|"' -e '16/1 "%02x ""|"' -e '16/1 "%_p""|\n"' # 1 byte
hexdump -e '"%08_ax""|"' -e '8/2 "%04x ""|"' -e '16/1 "%_p""|\n"' # 2 bytes
hexdump -e '"%08_ax""|"' -e '4/4 "%08x ""|"' -e '16/1 "%_p""|\n"' # 4 bytes
Useful
-C # Canonical hex+ASCII display. Display the input offset in hexadecimal, followed by sixteen space-separated, two column, hexadecimal bytes, followed by the same sixteen bytes in %_p format enclosed in ``|'' characters. Calling the command hd implies this option.
-n length # Interpret only length bytes of input.
-s offset # Skip offset bytes from the beginning of the input. By default, offset is interpreted as a decimal number.
-v # Cause hexdump to display all input data. Without the -v option, any number of groups of output lines, which would be identical to the immediately preceding group of output lines (except for the input offsets), are replaced with a line comprised of a single asterisk.
All
-b # One-byte octal display. Display the input offset in hexadecimal, followed by sixteen space-separated, three column, zero-filled, bytes of input data, in octal, per line.
-c # One-byte character display. Display the input offset in hexadecimal, followed by sixteen space-separated, three column, space-filled, characters of input data per line.
-C # Canonical hex+ASCII display. Display the input offset in hexadecimal, followed by sixteen space-separated, two column, hexadecimal bytes, followed by the same sixteen bytes in %_p format enclosed in ``|'' characters. Calling the command hd implies this option.
-d # Two-byte decimal display. Display the input offset in hexadecimal, followed by eight space-separated, five column, zero-filled, two-byte units of input data, in unsigned decimal, per line.
-e format_string # Specify a format string to be used for displaying data.
-f format_file # Specify a file that contains one or more newline separated format strings. Empty lines and lines whose first non-blank character is a hash mark (#) are ignored.
-n length # Interpret only length bytes of input.
-o # Two-byte octal display. Display the input offset in hexadecimal, followed by eight space-separated, six column, zero-filled, two byte quantities of input data, in octal, per line.
-s offset # Skip offset bytes from the beginning of the input. By default, offset is interpreted as a decimal number. With a leading 0x or 0X, offset is interpreted as a hexadecimal number, otherwise, with a leading 0, offset is interpreted as an octal number. Appending the character b, k, or m to offset causes it to be interpreted as a multiple of 512, 1024, or 1048576, respectively.
-v # Cause hexdump to display all input data. Without the -v option, any number of groups of output lines, which would be identical to the immediately preceding group of output lines (except for the input offsets), are replaced with a line comprised of a single asterisk.
-x # Two-byte hexadecimal display. Display the input offset in hexadecimal, followed by eight, space separated, four column, zero-filled, two-byte quantities of input data, in hexadecimal, per line.
Install
sudo apt install bsdmainutils
rabin2 [-AceghHiIsSMzlpRrLxvhqQTuUwV] [-a arch] [-b bits] [-B addr] [-C fmt:C:[D]] [-D lang sym|-] [-f subbin] [-k query] [-K algo] [-O binop] [-o str] [-m addr] [-@ addr] [-n str] [-X fmt file ...] file
Binary program info extractor
Special
rabin2 -H $file | grep -i timedate # compilation date
rabin2 -H $file | grep -i sizeofcode # size of code
rabin2 -i $file | grep -i " $dll " | wc -l # count imported functions in specific dll
rabin2 -i $file | awk '{print $5}' | grep -v '^\(lib\|\)$' | sort -u # show all imported libs (dll)
rabin2 -s $file | grep -i " $dll " | wc -l # count symbols functions in specific dll
rabin2 -H $file|grep -iA2 debug # debuger detection present
rabin2 -g Program|grep -i debug # details about debuger detection present
rabin2 -z $file | sed -n "/$str1/,/$str2/p" | sed 's/^.* ascii *//' > $fileout # extract data between 2 strings in file
Useful
-H # Show header fields (see ih command in r2)
-g # Show all possible information
-I # Show binary info (iI in r2)
-i # Show imports (symbols imported from libraries) (ii)
-R # Show relocations
-s # Show exported symbols
-S # Show sections
-SS # Show segments
-t # Show file hashes
-T # Show Certificates
-U # Show Resources
-z # Show strings inside .data section (like gnu strings does)
-x # Extract all sub binaries from a fat binary (f.ex: fatmach0)
-X format file ... # Package a fat or zip containing all the files passed (fat, zip)
-l # List linked libraries to the binary
-e # Show entrypoints for disk and on-memory
All
-@ addr # Show information (symbol, section, import) of the given address
-A # List sub-binaries and their associated arch-bits pairs
-a arch # Set arch (x86, arm, .. accepts underscore for bits x86_32)
-b bits # Set bits (32, 64, ...)
-B addr # Override baddr
-c # List classes
-cc # List classes in header format
-C [fmt:C[:D]] Create [elf,mach0,pe] # for arm and x86-32/64 tiny binaries where 'C' is an hexpair list of the code bytes and ':D' is an optional concatenation to describe the bytes for the data section.
-d # Show debug/dwarf information
-D lang symbolname # - Demangle symbol name (or - to read from stdin) for lang (cxx, swift, java, cxx, ..)
-e # Show entrypoints for disk and on-memory
-ee # Show constructor/destructors (extended entrypoints)
-f subbin # Select sub-binary architecture. Useful for fat-mach0 binaries
-F binfmt # Force to use that bin plugin (ignore header check)
-g # Show all possible information
-G addr # Load address . offset to header
-h # Show usage help message.
-H # Show header fields (see ih command in r2)
-I # Show binary info (iI in r2)
-i # Show imports (symbols imported from libraries) (ii)
-j # Output in json
-k query # Perform SDB query on loaded file
-K algo # Select a rahash2 checksum algorithm to be performed on sections listing (and maybe others in the future) i.e 'rabin2 -K md5 -S /bin/ls'
-l # List linked libraries to the binary
-L # List supported bin plugins
-M # Show address of 'main' symbol
-m addr # Show source line reference from a given address
-N minlen:maxlen # Force minimum and maximum number of chars per string (see -z and -zz). if (strlen>minlen && (!maxlen || strlen<=maxlen))
-n str # Show information (symbol, section, import) at string offset
-o str # Output file/folder for write operations (out by default)
-O binop # Perform binary operation on target binary (dump, resize, change sections, ...) see '-O help' for more information
-p # Disable VA. Show physical addresses
-P # Show debug/pdb information
-PP # Download pdb file for binary
-q # Be quiet, just show fewer data
-qq # Show less info (no offset/size for -z for ex.)
-Q # Show load address used by dlopen (non-aslr libs)
-r # Show output in radare format
-R # Show relocations
-s # Show exported symbols
-S # Show sections
-SS # Show segments
-t # Show file hashes
-T # Show Certificates
-u # Unfiltered (no rename duplicated symbols/sections)
-U # Show Resources
-v # Show version information
-V # Show binary version information
-w # Show try/catch blocks
-x # Extract all sub binaries from a fat binary (f.ex: fatmach0)
-X format file ... # Package a fat or zip containing all the files passed (fat, zip)
-z # Show strings inside .data section (like gnu strings does)
-Z # Guess size of binary program
-zz # Shows strings from raw bins
-zzz # Dump raw strings to stdout (for huge files)
Install
sudo apt install radare2
objdump <option(s)> <file(s)>
Display information from object <file(s)>
Usefull
objdump Program -x|sed -n '1,/.rdata section/p'
objdump Program -s|grep -A1 ^Contents
objdump Program -sj $section # section=".data"
-a, --archive-headers # Display archive header information
-f, --file-headers # Display the contents of the overall file header
-h, --[section-]headers Display the contents of the section headers
-x, --all-headers # Display the contents of all headers
-s, --full-contents # Display the full contents of all sections requested
All
At least one of the following switches must be given:
-a, --archive-headers # Display archive header information
-f, --file-headers # Display the contents of the overall file header
-p, --private-headers # Display object format specific file header contents
-P, --private=OPT,OPT... Display object format specific contents
-h, --[section-]headers Display the contents of the section headers
-x, --all-headers # Display the contents of all headers
-d, --disassemble # Display assembler contents of executable sections
-D, --disassemble-all # Display assembler contents of all sections
--disassemble=<sym> Display assembler contents from <sym>
-S, --source # Intermix source code with disassembly
--source-comment[=<txt>] Prefix lines of source code with <txt>
-s, --full-contents # Display the full contents of all sections requested
-g, --debugging # Display debug information in object file
-e, --debugging-tags # Display debug information using ctags style
-G, --stabs # Display (in raw form) any STABS info in the file
-W[lLiaprmfFsoRtUuTgAckK] or --dwarf[=rawline,=decodedline,=info,=abbrev,=pubnames,=aranges,=macro,=frames, =frames-interp,=str,=loc,=Ranges,=pubtypes, =gdb_index,=trace_info,=trace_abbrev,=trace_aranges, =addr,=cu_index,=links,=follow-links] # Display DWARF info in the file
--ctf=SECTION # Display CTF info from SECTION
-t, --syms # Display the contents of the symbol table(s)
-T, --dynamic-syms # Display the contents of the dynamic symbol table
-r, --reloc # Display the relocation entries in the file
-R, --dynamic-reloc # Display the dynamic relocation entries in the file
@<file> # Read options from <file>
-v, --version # Display this program's version number
-i, --info # List object formats and architectures supported
-H, --help # Display this information
The following switches are optional:
-b, --target=BFDNAME # Specify the target object format as BFDNAME
-m, --architecture=MACHINE # Specify the target architecture as MACHINE
-j, --section=NAME # Only display information for section NAME
-M, --disassembler-options=OPT Pass text OPT on to the disassembler
-EB --endian=big # Assume big endian format when disassembling
-EL --endian=little # Assume little endian format when disassembling
--file-start-context # Include context from start of file (with -S)
-I, --include=DIR # Add DIR to search list for source files
-l, --line-numbers # Include line numbers and filenames in output
-F, --file-offsets # Include file offsets when displaying information
-C, --demangle[=STYLE] # Decode mangled/processed symbol names. The STYLE, if specified, can be `auto', `gnu', `lucid', `arm', `hp', `edg', `gnu-v3', `java' or `gnat'
--recurse-limit # Enable a limit on recursion whilst demangling. [Default]
--no-recurse-limit # Disable a limit on recursion whilst demangling
-w, --wide # Format output for more than 80 columns
-z, --disassemble-zeroes # Do not skip blocks of zeroes when disassembling
--start-address=ADDR # Only process data whose address is >= ADDR
--stop-address=ADDR # Only process data whose address is < ADDR
--prefix-addresses # Print complete address alongside disassembly
--[no-]show-raw-insn # Display hex alongside symbolic disassembly
--insn-width=WIDTH # Display WIDTH bytes on a single line for -d
--adjust-vma=OFFSET # Add OFFSET to all displayed section addresses
--special-syms # Include special symbols in symbol dumps
--inlines # Print all inlines for source line (with -l)
--prefix=PREFIX # Add PREFIX to absolute paths for -S
--prefix-strip=LEVEL # Strip initial directory names for -S
--dwarf-depth=N # Do not display DIEs at depth N or greater
--dwarf-start=N # Display DIEs starting with N, at the same depth or deeper
--dwarf-check # Make additional dwarf internal consistency checks.
--ctf-parent=SECTION # Use SECTION as the CTF parent
--visualize-jumps # Visualize jumps by drawing ASCII art lines
--visualize-jumps=color # Use colors in the ASCII art
--visualize-jumps=extended-color # Use extended 8-bit color codes
--visualize-jumps=off # Disable jump visualization
Install
sudo apt install binutils-common
clamscan [options] [file/directory/-]
Scan files and directories for viruses
Usefull
-i --infected # Only print infected files
-r --recursive[=yes/no(*)] # Scan subdirectories recursively
-f --file-list=FILE FILE # Scan files from FILE
All
-a --archive-verbose # Show filenames inside scanned archives
--stdout # Write to stdout instead of stderr. Does not affect 'debug' messages.
--no-summary # Disable summary at end of scanning
-i --infected # Only print infected files
--suppress-ok-results -o # Skip printing OK files
--bell # Sound bell on virus detection
--tempdir=DIRECTORY # Create temporary files in DIRECTORY
--leave-temps[=yes/no(*)] # Do not remove temporary files
--gen-json[=yes/no(*)] # Generate JSON description of scanned file(s). JSON will be printed and also dropped to the temp directory if --leave-temps is enabled.
-d --database=FILE/DIR FILE/DIR # Load virus database from FILE or load all supported db files from DIR
--official-db-only[=yes/no(*)] # Only load official signatures
-l --log=FILE FILE # Save scan report to FILE
-r --recursive[=yes/no(*)] # Scan subdirectories recursively
-z --allmatch[=yes/no(*)] # Continue scanning within file after finding a match
--cross-fs[=yes(*)/no] # Scan files and directories on other filesystems
--follow-dir-symlinks[=0/1(*)/2] # Follow directory symlinks (0 = never, 1 = direct, 2 = always)
--follow-file-symlinks[=0/1(*)/2] # Follow file symlinks (0 = never, 1 = direct, 2 = always)
-f --file-list=FILE FILE # Scan files from FILE
--remove[=yes/no(*)] # Remove infected files. Be careful!
--move=DIRECTORY # Move infected files into DIRECTORY
--copy=DIRECTORY # Copy infected files into DIRECTORY
--exclude=REGEX # Don't scan file names matching REGEX
--exclude-dir=REGEX # Don't scan directories matching REGEX
--include=REGEX # Only scan file names matching REGEX
--include-dir=REGEX # Only scan directories matching REGEX
--bytecode[=yes(*)/no] # Load bytecode from the database
--bytecode-unsigned[=yes/no(*)] # Load unsigned bytecode **Caution**: You should NEVER run bytecode signatures from untrusted sources. Doing so may result in arbitrary code execution.
--bytecode-timeout=N # Set bytecode timeout (in milliseconds)
--statistics[=none(*)/bytecode/pcre] # Collect and print execution statistics
--detect-pua[=yes/no(*)] # Detect Possibly Unwanted Applications
--exclude-pua=CAT # Skip PUA sigs of category CAT
--include-pua=CAT # Load PUA sigs of category CAT
--detect-structured[=yes/no(*)] # Detect structured data (SSN, Credit Card)
--structured-ssn-format=X # SSN format (0=normal,1=stripped,2=both)
--structured-ssn-count=N # Min SSN count to generate a detect
--structured-cc-count=N # Min CC count to generate a detect
--structured-cc-mode=X # CC mode (0=credit debit and private label, 1=credit cards only)
--scan-mail[=yes(*)/no] # Scan mail files
--phishing-sigs[=yes(*)/no] # Enable email signature-based phishing detection
--phishing-scan-urls[=yes(*)/no] # Enable URL signature-based phishing detection
--heuristic-alerts[=yes(*)/no] # Heuristic alerts
--heuristic-scan-precedence[=yes/no(*)] # Stop scanning as soon as a heuristic match is found
--normalize[=yes(*)/no] # Normalize html, script, and text files. Use normalize=no for yara compatibility
--scan-pe[=yes(*)/no] # Scan PE files
--scan-elf[=yes(*)/no] # Scan ELF files
--scan-ole2[=yes(*)/no] # Scan OLE2 containers
--scan-pdf[=yes(*)/no] # Scan PDF files
--scan-swf[=yes(*)/no] # Scan SWF files
--scan-html[=yes(*)/no] # Scan HTML files
--scan-xmldocs[=yes(*)/no] # Scan xml-based document files
--scan-hwp3[=yes(*)/no] # Scan HWP3 files
--scan-archive[=yes(*)/no] # Scan archive files (supported by libclamav)
--alert-broken[=yes/no(*)] # Alert on broken executable files (PE & ELF)
--alert-broken-media[=yes/no(*)] # Alert on broken graphics files (JPEG, TIFF, PNG, GIF)
--alert-encrypted[=yes/no(*)] # Alert on encrypted archives and documents
--alert-encrypted-archive[=yes/no(*)] # Alert on encrypted archives
--alert-encrypted-doc[=yes/no(*)] # Alert on encrypted documents
--alert-macros[=yes/no(*)] # Alert on OLE2 files containing VBA macros
--alert-exceeds-max[=yes/no(*)] # Alert on files that exceed max file size, max scan size, or max recursion limit
--alert-phishing-ssl[=yes/no(*)] # Alert on emails containing SSL mismatches in URLs
--alert-phishing-cloak[=yes/no(*)] # Alert on emails containing cloaked URLs
--alert-partition-intersection[=yes/no(*)] # Alert on raw DMG image files containing partition intersections
--nocerts # Disable authenticode certificate chain verification in PE files
--dumpcerts # Dump authenticode certificate chain in PE files
--max-scantime=#n # Scan time longer than this will be skipped and assumed clean (milliseconds)
--max-filesize=#n # Files larger than this will be skipped and assumed clean
--max-scansize=#n # The maximum amount of data to scan for each container file (**)
--max-files=#n # The maximum number of files to scan for each container file (**)
--max-recursion=#n # Maximum archive recursion level for container file (**)
--max-dir-recursion=#n # Maximum directory recursion level
--max-embeddedpe=#n # Maximum size file to check for embedded PE
--max-htmlnormalize=#n # Maximum size of HTML file to normalize
--max-htmlnotags=#n # Maximum size of normalized HTML file to scan
--max-scriptnormalize=#n # Maximum size of script file to normalize
--max-ziptypercg=#n # Maximum size zip to type reanalyze
--max-partitions=#n # Maximum number of partitions in disk image to be scanned
--max-iconspe=#n # Maximum number of icons in PE file to be scanned
--max-rechwp3=#n # Maximum recursive calls to HWP3 parsing function
--pcre-match-limit=#n # Maximum calls to the PCRE match function.
--pcre-recmatch-limit=#n # Maximum recursive calls to the PCRE match function.
--pcre-max-filesize=#n # Maximum size file to perform PCRE subsig matching.
--disable-cache # Disable caching and cache checks for hash sums of scanned files.
-h --help # Show this help
--debug # Enable libclamav's debug messages
--quiet # Only output error messages
-v --verbose # Be verbose
-V --version # Print version number
Install
sudo apt install clamav
pescan OPTIONS FILE
Search for suspicious things in PE files
Usefull
-f, --format <text|html|xml|csv|json> # change output format (default: text)
All
-f, --format <text|html|xml|csv|json> # change output format (default: text)
-v, --verbose # show more info about items found
-V, --version # show version and exit
--help # show this help and exit
Install
sudo apt install pev
strings [option(s)] [file(s)]
Display printable strings in [file(s)] (stdin by default)
Usefull
-s --output-separator=<string> String used to separate strings in output.
All
-a - --all # Scan the entire file, not just the data section [default]
-d --data # Only scan the data sections in the file
-f --print-file-name # Print the name of the file before each string
-n --bytes=[number], -<number> # Locate & print any NUL-terminated sequence of at least [number] characters (default 4)
-t --radix={o,d,x} # Print the location of the string in base 8, 10 or 16
-w --include-all-whitespace Include all whitespace as valid string characters
-o # An alias for --radix=o
-T --target=<BFDNAME> # Specify the binary file format
-e --encoding={s,S,b,l,B,L} Select character size and endianness: s = 7-bit, S = 8-bit, {b,l} = 16-bit, {B,L} = 32-bit
@<file> # Read options from <file>
-h --help # Display this information
-v -V --version # Print the program's version number
file [OPTION...] [FILE...]
Determine type of FILEs.
Usefull
-z, --uncompress # try to look inside compressed files
-F, --separator STRING # use string as separator instead of `:'
All
-m, --magic-file LIST # use LIST as a colon-separated list of magic number files
-z, --uncompress # try to look inside compressed files
-Z, --uncompress-noreport only print the contents of compressed files
-b, --brief # do not prepend filenames to output lines
-c, --checking-printout # print the parsed form of the magic file, use in conjunction with -m to debug a new magic file before installing it
-e, --exclude TEST # exclude TEST from the list of test to be performed for file. Valid tests are: apptype, ascii, cdf, compress, csv, elf, encoding, soft, tar, json, text, tokens
-f, --files-from FILE # read the filenames to be examined from FILE
-F, --separator STRING # use string as separator instead of `:'
-i, --mime # output MIME type strings (--mime-type and --mime-encoding)
--apple # output the Apple CREATOR/TYPE
--extension # output a slash-separated list of extensions
--mime-type # output the MIME type
--mime-encoding # output the MIME encoding
-k, --keep-going # don't stop at the first match
-l, --list # list magic strength
-L, --dereference # follow symlinks (default if POSIXLY_CORRECT is set)
-h, --no-dereference # don't follow symlinks (default if POSIXLY_CORRECT is not set) (default)
-n, --no-buffer # do not buffer output
-N, --no-pad # do not pad output
-0, --print0 # terminate filenames with ASCII NUL
-p, --preserve-date # preserve access times on files
-P, --parameter # set file engine parameter limits
indir 15 recursion limit for indirection
name 30 use limit for name/use magic
elf_notes 256 max ELF notes processed
elf_phnum 128 max ELF prog sections processed
elf_shnum 32768 max ELF sections processed
-r, --raw # don't translate unprintable chars to \ooo
-s, --special-files # treat special (block/char devices) files as ordinary ones
-S, --no-sandbox # disable system call sandboxing
-C, --compile # compile file specified by -m
-d, --debug # print debugging messages
--help # display this help and exit
-v, --version # output version information and exit
Install
see foralyse in https://code.ambau.fr
info
variables
file=/share/memory/dump
profile=Win7SP0x86
vol2 -f $file --profile $profile
# Options
--output dot/greptext/html/json/sqlite/text/xlsx
vol2 --info # get all informations from volatility
vol2 --info|sed -n '/^Profiles/,/^$/ p' # available profiles
vol2 --info|sed -n '/^Address/,/^$/ p' # available address spaces
vol2 --info|sed -n '/^Scanner/,/^$/ p' # available scanner
vol2 --info|sed -n '/^Plugins/,/^$/ p' # available plugins
vol2 --info|sed -n '/^Plugins/,/^$/ p'|grep -v '^mac_\|^linux_' # windows plugins
vol2 --info|sed -n '/^Plugins/,/^$/ p'|grep '^linux_' # linux plugins
vol2 --info|sed -n '/^Plugins/,/^$/ p'|grep '^mac_' # mac plugins
special
hash
vol2 hashdump -f ${dump} --profile=${profile} -y ${offset_system} -s ${offset_sam}
plugins
cmd
clipboard # Extract the contents of the windows clipboard
cmdline # Display process command-line arguments
cmdscan # Extract command history by scanning for _COMMAND_HISTORY
consoles # Extract command history by scanning for _CONSOLE_INFORMATION
device
devicetree # Show device tree
mbrparser # Scans for and parses potential Master Boot Records (MBRs)
dll
dlldump -D PATH # Dump DLLs from a process address space to PATH
-p PID # specify a process by its PID
-o OFFSET # specify a process by its Virtual OFFSET
dlllist # Print list of loaded dlls for each process
-p PID # specify a process by its PID
ldrmodules # Detect unlinked DLLs
dump
cachedump # Dumps cached domain hashes from memory
dumpcerts # Dump RSA private and public SSL keys
dlldump -D PATH # Dump DLLs from a process address space to PATH
-p PID # specify a process by its PID
-o OFFSET # specify a process by its Virtual OFFSET
dumpfiles # Extract memory mapped and cached files
hashdump # Dumps passwords hashes (LM/NTLM) from memory
hivedump # Prints out a hive
lsadump # Dump (decrypted) LSA secrets from the registry
procdump # Dump a process to an executable file sample
-o OFFSET, --offset=OFFSET # EPROCESS offset (in hex) in the physical address space
-p PID, --pid=PID # Operate on these Process IDs (comma-separated)
-n NAME, --name=NAME # Operate on these process names (regex)
-D DUMP_DIR, --dump-dir=DUMP_DIR # Directory in which to dump executable files
executable
impscan # Scan for calls to imported functions
-p PID, --pid=PID # Process ID (leave off to scan kernel memory)
-o OFFSET, --offset=OFFSET # EPROCESS offset (in hex) in the physical address space
-b BASE, --base=BASE # Base address in process memory if --pid is supplied, otherwise an address in kernel space
-s SIZE, --size=SIZE # Size of memory to scan
joblinks # Print process job link information
malfind # Find hidden and injected code
privs # Display process privileges
shimcache # Parses the Application Compatibility Shim Cache registry key
verinfo # Prints out the version information from PE images
file
dumpfiles # Extract memory mapped and cached files
filescan # Pool scanner for file objects
mftparser # Scans for and parses potential Master File Table (MFT) entries
notepad # List currently displayed notepad text
hive
amcache # Print AmCache information
hivescan # Pool scanner for registry hives
hivedump # Prints out a hive
hivelist # Print list of registry hives
printkey # Print a registry key, and its subkeys and values
shimcache # Parses the Application Compatibility Shim Cache registry key
shutdowntime # Print ShutdownTime of machine from registry
userassist # Print userassist registry keys and information
hook
apihooks # Detect API hooks in process and kernel memory
driverirp # Driver IRP hook detection
eventhooks # Print details on windows event hooks
messagehooks # List desktop and thread window message hooks
image
imageinfo # get info from OS and profiles
kdbgscan # Search for and dump potential KDBG values
memory
bigpools # Dump the big page pools using BigPagePoolScanner
cachedump # Dumps cached domain hashes from memory
hpakextract # Extract physical memory from an HPAK file
hpakinfo # Info on an HPAK file
memdump # Dump the addressable memory for a process
memmap # Print the memory map
patcher # Patches memory based on page scans
raw2dmp # Converts a physical memory sample to a windbg crash dump
module
drivermodule # Associate driver objects to kernel modules
moddump # Dump a kernel driver to an executable file sample
modscan # Pool scanner for kernel modules
modules # Print list of loaded modules
timers # Print kernel timers and associated module DPCs
unloadedmodules # Print list of unloaded modules
network
connections # Print list of open connections [Windows XP and 2003 Only]
connscan # Pool scanner for tcp connections
netscan # list of connections
sockets # Print list of open sockets
sockscan # Pool scanner for tcp socket objects
password
dumpcerts # Dump RSA private and public SSL keys
hashdump # Dumps passwords hashes (LM/NTLM) from memory
truecryptmaster # Recover TrueCrypt 7.1a Master Keys
truecryptpassphrase # TrueCrypt Cached Passphrase Finder
truecryptsummary # TrueCrypt Summary
process
envars # Display process environment variables
getsids # Print the SIDs owning each process
handles # Print list of open handles for each process
privs # Display process privileges
procdump # Dump a process to an executable file sample
pslist # Print all running processes by following the EPROCESS lists
-P # print for physical offset
psscan # Pool scanner for process objects
pstree # Print process list as a tree
psxview # Find hidden processes with various process listings
thrdscan # Pool scanner for thread objects
threads # Investigate _ETHREAD and _KTHREADs
service
getservicesids # Get the names of services in the Registry and return Calculated SID
servicediff # List Windows services (ala Plugx)
svcscan # Scan for Windows services
system
auditpol # Prints out the Audit Policies from HKLM\SECURITY\Policy\PolAdtEv
bioskbd # Reads the keyboard buffer from Real Mode memory
callbacks # Print system-wide notification routines
crashinfo # Dump crash-dump information
driverirp # Driver IRP hook detection
driverscan # Pool scanner for driver objects
envars # Display process environment variables
evtlogs # Extract Windows Event Logs (XP/2003 only)
kpcrscan # Search for and dump potential KPCR values
machoinfo # Dump Mach-O file format information
mutantscan # Pool scanner for mutex objects
objtypescan # Scan for Windows object type objects
screenshot # Save a pseudo-screenshot based on GDI windows (require PIL)
shutdowntime # Print ShutdownTime of machine from registry
symlinkscan # Pool scanner for symlink objects
timeline
timeliner # Creates a timeline from various artifacts in memory
timers # Print kernel timers and associated module DPCs
user
atoms # Print session and window station atom tables
atomscan # Pool scanner for atom tables
clipboard # Extract the contents of the windows clipboard
deskscan # Pool scanner for tagDESKTOP (desktops)
gahti # Dump the USER handle type information
sessions # List details on _MM_SESSION_SPACE (user logon sessions)
userassist # Print userassist registry keys and information
userhandles # Dump the USER handle tables
vad
vaddump # Dumps out the vad sections to a file
vadinfo # Dump the VAD info
vadtree # Walk the VAD tree and display in tree format
vadwalk # Walk the VAD tree
virtual
qemuinfo # Dump Qemu information
vboxinfo # Dump virtualbox information
vmwareinfo # Dump VMware VMSS/VMSN information
volshell
Use addrspace() for Kernel/Virtual AS
Use addrspace().base for Physical AS
Use proc() to get the current process object
proc().get_process_address_space() for the current process AS
proc().get_load_modules() for the current process DLLs
addrspace() # Get the current kernel/virtual address space.
cc(offset=None, pid=None, name=None, physical=False) # Change current shell context.
db(address, length=128, space=None) # Print bytes as canonical hexdump.
dd(address, length=128, space=None) # Print dwords at address.
dis(address, length=128, space=None, mode=None) # Disassemble code at a given address.
dq(address, length=128, space=None) # Print qwords at address.
dt(objct, address=None, space=None, recursive=False, depth=0) # Describe an object or show type info.
find(needle, max=1, shift=0, skip=0, count=False, length=128)
getmods() # Generator for kernel modules (scripting).
getprocs() # Generator of process objects (scripting).
hh(cmd=None) # Get help on a command.
list_entry(head, objname, offset=-1, fieldname=None, forward=True, space=None) # Traverse a _LIST_ENTRY.
modules() # Print loaded modules in a table view.
proc() # Get the current process object.
ps() # Print active processes in a table view.
sc() # Show the current context.
For help on a specific command, type 'hh(<command>)'
windows
windows # Print Desktop Windows (verbose details)
wintree # Print Z-Order Desktop Windows Tree
wndscan # Pool scanner for window stations
others
editbox # Displays information about Edit controls. (Listbox experimental.)
gditimers # Print installed GDI timers and callbacks
gdt # Display Global Descriptor Table
idt # Display Interrupt Descriptor Table
hibinfo # Dump hibernation file information
imagecopy -f $file --profile=$profile -O ${file}-converted # copy a physical address space out as a raw DD image
iehistory # Reconstruct Internet Explorer cache / history
poolpeek # Configurable pool scanner plugin
shellbags # Prints ShellBags info
strings # Match physical offsets to virtual addresses (may take a while, VERY verbose)
yarascan # Scan process or kernel memory with Yara signatures
Operators
See wireshark
Usefull
select
tshark -r ${dump} -Tfields -e ip.src # get all source ip addresses
tshark -r ${dump} -e ip.proto -Tfields
tshark -r ${file} -e http.user_agent -Tfields
filter
tshark -r ${dump} -Tfields -e ip.src # get all source ip addresses
tshark -r ${dump} -Y http -w filtered.pcapng # filter all http streams to file
tshark -r ${dump} -Y "not ip.addr == 93.184.221.240" -w filtered.pcapng # filter by IP address
tshark -r ${dump} -Y "frame.time_epoch >= 1631211000" -w filtered.pcapng # filter by frame time epoch
help
tshark [ -i <capture interface>|- ] [ -f <capture filter> ] [ -2 ] [ -r <infile> ] [ -w <outfile>|- ] [ options ] [ <filter> ]
tshark -G [ <report type> ] [ --elastic-mapping-filter <protocols> ]
Dump and analyze network traffic
Capture interface
-i <interface>, --interface <interface> # name or idx of interface (def: first non-loopback)
-f <capture filter> # packet filter in libpcap filter syntax
-s <snaplen>, --snapshot-length <snaplen> # packet snapshot length (def: appropriate maximum)
-p, --no-promiscuous-mode # don't capture in promiscuous mode
-I, --monitor-mode # capture in monitor mode, if available
-B <buffer size>, --buffer-size <buffer size> # size of kernel buffer (def: 2MB)
-y <link type>, --linktype <link type> # link layer type (def: first appropriate)
--time-stamp-type <type> # timestamp method for interface
-D, --list-interfaces # print list of interfaces and exit
-L, --list-data-link-types # print list of link-layer types of iface and exit
--list-time-stamp-types # print list of timestamp types for iface and exit
Capture stop conditions
-c <packet count> stop after n packets (def: infinite)
-a <autostop cond.> ..., --autostop <autostop cond.> ...
duration:NUM - stop after NUM seconds
filesize:NUM - stop this file after NUM KB
files:NUM - stop after NUM files
packets:NUM - stop after NUM packets
Capture output
-b <ringbuffer opt.> ..., --ring-buffer <ringbuffer opt.>
duration:NUM - switch to next file after NUM secs
filesize:NUM - switch to next file after NUM KB
files:NUM - ringbuffer: replace after NUM files
packets:NUM - switch to next file after NUM packets
interval:NUM - switch to next file when the time is an exact multiple of NUM secs
Input file
-r <infile>, --read-file <infile>
Processing
-2 # perform a two-pass analysis
-M <packet count> # perform session auto reset
-R <read filter>, --read-filter <read filter> # packet Read filter in Wireshark display filter syntax (requires -2)
-Y <display filter>, --display-filter <display filter> # packet displaY filter in Wireshark display filter syntax
-n # disable all name resolutions (def: all enabled)
-N <name resolve flags> # enable specific name resolution(s): "mnNtdv"
-d <layer_type>==<selector>,<decode_as_protocol> ... # "Decode As", see the man page for details Example: tcp.port==8888,http
-H <hosts file> # read a list of entries from a hosts file, which will then be written to a capture file. (Implies -W n)
--enable-protocol <proto_name> # enable dissection of proto_name
--disable-protocol <proto_name> # disable dissection of proto_name
--enable-heuristic <short_name> # enable dissection of heuristic protocol
--disable-heuristic <short_name> # disable dissection of heuristic protocol
Output
-w <outfile|-> # write packets to a pcapng-format file named "outfile" (or '-' for stdout)
--capture-comment <comment> # set the capture file comment, if supported
-C <config profile> # start with specified configuration profile
-F <output file type> # set the output file type, default is pcapng an empty "-F" option will list the file types
-V # add output of packet tree # (Packet Details)
-O <protocols> # Only show packet details of these protocols, comma separated
-P, --print # print packet summary even when writing to a file
-S <separator> # the line separator to print between packets
-x # add output of hex and ASCII dump (Packet Bytes)
-T pdml|ps|psml|json|jsonraw|ek|tabs|text|fields|? # format of text output (def: text)
-j <protocolfilter> # protocols layers filter if -T ek|pdml|json selected (e.g. "ip ip.flags text", filter does not expand child nodes, unless child is specified also in the filter)
-J <protocolfilter> # top level protocol filter if -T ek|pdml|json selected (e.g. "http tcp", filter which expands all child nodes)
-e <field> # field to print if -Tfields selected (e.g. tcp.port, _ws.col.Info) this option can be repeated to print multiple fields
-E<fieldsoption>=<value> set options for output when -Tfields selected:
bom=y|n # print a UTF-8 BOM
header=y|n # switch headers on and off
separator=/t|/s|<char> # select tab, space, printable character as separator
occurrence=f|l|a # print first, last or all occurrences of each field
aggregator=,|/s|<char> # select comma, space, printable character as aggregator
quote=d|s|n # select double, single, no quotes for values
-t a|ad|adoy|d|dd|e|r|u|ud|udoy # output format of time stamps (def: r: rel. to first)
-u s|hms # output format of seconds (def: s: seconds)
-l # flush standard output after each packet
-q # be more quiet on stdout (e.g. when using statistics)
-Q # only log true errors to stderr (quieter than -q)
-g # enable group read access on the output file(s)
-W n # Save extra information in the file, if supported. n = write network address resolution information
-X <key>:<value> # eXtension options, see the man page for details
-U tap_name # PDUs export mode, see the man page for details
-z <statistics> # various statistics, see the man page for details
--export-objects <protocol>,<destdir> # save exported objects for a protocol to a directory named "destdir"
--color # color output text similarly to the Wireshark GUI, requires a terminal with 24-bit color support Also supplies color attributes to pdml and psml formats (Note that attributes are nonstandard)
--no-duplicate-keys # If -T json is specified, merge duplicate keys in an object into a single key with as value a json array containing all values
--elastic-mapping-filter <protocols> # If -G elastic-mapping is specified, put only the specified protocols within the mapping file
Miscellaneous
-h, --help # display this help and exit
-v, --version # display version info and exit
-o <name>:<value> ... # override preference setting
-K <keytab> # keytab file to use for kerberos decryption
-G [report] # dump one of several available reports and exit; default report="fields"; use "-G help" for more help
Operators
https://www.wireshark.org/docs/man-pages/wireshark-filter.html
Logical expressions
and, && # Logical AND
or, || # Logical OR
not, ! # Logical NOT
Comparison
eq, == # Equal
ne, != # Not Equal
gt, > # Greater Than
lt, < # Less Than
ge, >= # Greater than or Equal to
le, <= # Less than or Equal to
Search
contains # Does the protocol, field or slice contain a value (case-sensitive)
matches, ~ # Does the protocol or text string match the given case-insensitive Perl-compatible regular expression (case-insensitive)
In
tcp.port in {80, 443, 8080} # <=> tcp.port == 80 or tcp.port == 443 or tcp.port == 8080
http.request.method in {"HEAD", "GET"}
# ranges
tcp.port in {443, 4430..4434}
ip.addr in {10.0.0.5 .. 10.0.0.9, 192.168.1.1..192.168.1.9}
frame.time_delta in {10 .. 10.5}
Functions
upper(string-field) # converts a string field to uppercase
lower(string-field) # converts a string field to lowercase
len(field) # returns the byte length of a string or bytes field
count(field) # returns the number of field occurrences in a frame
string(field) # converts a non-string field to string
Encoding
frame.len > 10
frame.len > 012
frame.len > 0xa
frame.len > '\n'
frame.len > '\x0a'
frame.len > '\012'
Slice
[i:j] # i = start_offset, j = length
[i-j] # i = start_offset, j = end_offset, inclusive.
[i] # i = start_offset, length = 1
[:j] # start_offset = 0, length = j
[i:] # start_offset = i, end_offset = end_of_field
# examples
eth.src[0:3] == 00:00:83
http.content_type[0:4] == "text"
token[0:5] ne 0.0.0.1.1
llc[0] eq aa
frame[100-199] contains "wireshark"
Protocol
sip # Session Initiation Protocol
rtp # Real-time Transport Protocol
http.request # only requests
http.response == # only response
http.response.code == # only response code
tcp.port == # both source and destination port
tcp.srcport == # source port
tcp.dstport == # destination port
tcp.analysis.flags # complete analysis about flags
tcp.contains # search text in any fields
tcp.flags.syn == # tcp syn flag
tcp.flags.reset == # tcp reset flag
udp.contains # search text in any fields
ip.addr == # both source and destination addresses
ip.src == # source addresses
ip.dst == # destination addresses
Capture Filter
install
https://computingforgeeks.com/install-kvm-qemu-virt-manager-arch-manjar/
sudo pacman -S qemu virt-manager virt-viewer libguestfs # dnsmasq vde2 bridge-utils openbsd-netcat
# see virt-sandbox
sudo systemctl enable libvirtd.service
sudo systemctl start libvirtd.service
systemctl status libvirtd.service
conf
file=/etc/libvirt/libvirtd.conf
sudo cp -a $file $file$(date +%s)
sudo sed -i 's|^#\?\(unix_sock_group =\).*$|\1 "libvirt"|' $file
sudo sed -i 's|^#\?\(unix_sock_rw_perms =\).*$|\1 "0770"|' $file
sudo usermod -a -G libvirt $USER
sudo systemctl restart libvirtd.service
mod=$(lsmod|awk '/^kvm_/ {print $1}')
sudo modprobe -r $mod
sudo modprobe $mod nested=1
echo "options ${mod/_/-} nested=1" | sudo tee /etc/modprobe.d/${mod/_/-}.conf
systool -m ${mod} -v | grep nested
share
host
hostpath=/vms/share
sudo mkdir $hostpath
#sudo usermod -G libvirtd -a $USER
sudo usermod -G libvirt-qemu -a $USER
hostpath=/vms/share
#sudo chown -R libvirt-qemu:libvirt-qemu $hostpath
sudo setfacl -d -Rm g:libvirt-qemu:rwx $hostpath
virt-manager
hostshare
<filesystem type="mount" accessmode="mapped" fmode="0660" dmode="0770">
<source dir="/vms/share"/>
<target dir="/hostshare"/>
<address type="pci" domain="0x0000" bus="0x07" slot="0x00" function="0x0"/>
</filesystem>
guest
guestpath=/share
#sudo chmod -R 777 $guestpath
sudo chown -R $USER:$USER $guestpath
mount
sudo mount -t 9p -o trans=virtio,rw,version=9p2000.L /hostshare $guestpath
sudo sh -c "echo '
# qemu share
/hostshare /share 9p trans=virtio,version=9p2000.L,rw,noauto 0 0' >> /etc/fstab"
disk
shrink
# linux
sudo dd if=/dev/zero of=/zero bs=4M
sudo rm /zero
# windows
sdelete -z C
qemu-img convert -O qcow2 $file $file2
reduce
qemu-img info $file
qemu-img resize -f qcow2 --shrink $file -10G
expand
qemu-img info $file
qemu-img resize $file +2G
import / export
virsh --connect qemu:///system dumpxml $guest > $guest.xml
virsh --connect qemu:///system define $guest.xml
ova
vm=tsurugi_linux_2022.1_vmdk
mkdir tmp
tar xf ${vm}.ova -C tmp/
qemu-img convert -f vmdk tmp/${vm}.vmdk tmp/${vm}.qcow2
qemu-img convert -O qcow2 tmp/${vm}-disk001.qcow2 ${vm}.qcow2
Install
https://forum.manjaro.org/t/how-to-get-cisco-packet-tracer-on-manjaro/25506/3
wget https://aur.archlinux.org/cgit/aur.git/snapshot/packettracer.tar.gz
tar xzf packettracer.tar.gz
cd packettracer
# get CiscoPacketTracer_810_Ubuntu_64bit.deb from cisco
makepkg
sudo pacman -U packettracer-8.1.0-1-x86_64.pkg.tar.zst
Commands
Global
? # show possible commands / options
enable # get in administrative area
Hub
hostname $hostname # modify device name
Switch
clear mac-address-table dynamic
copy running-config startup-config # save actual configuration to memory for next startup
hostname $hostname # modify device name
show interfaces trunk
show ip interface
show mac-address-table
show startup-config # show if startup configuration have been recorded
show vlan
configure terminal
interface $inter $p/$p
ip proxy-arp # set proxy-arp for $inter $p/$p
no ip proxy-arp # unset proxy-arp for $inter $p/$p
vlan switchport access vlan $v # set port $p/$p to vlan $v (create vlan if not exists)
vlan $v
name $name # set vlan name for $v
no vlan $v # remove vlan $v
Router
hostname $hostname # modify device name
configure terminal # enter in configuration terminal mode
ip route $network_address $mask $next_hop # add a route to router
end devices
hostname $hostname # modify device name
COLOR
base
# normal
m=0; for i in {16..128}; do echo -en "\e[${m};${i}m${i}\e[0m" ; done; echo
# bold
m=1; for i in {16..128}; do echo -en "\e[${m};${i}m${i}\e[0m" ; done; echo
# all
for j in {0..5}; do echo "- ${j}"; for i in {16..256}; do echo -en "\e[${j};${i}m${i}\e[0m" ; done; echo; done; echo
256
# Show the 256-color palette: each index followed by a '#' glyph rendered in that color.
i=16
while (( i <= 255 )); do
  printf '%d \e[38;5;%dm#\e[0m' "$i" "$i"
  (( i++ ))
done
printf '\n'
SETTINGS
main.cf
mydomain # the base of domain FQDN
myhostname # the full domain FQDN which serve mails
myorigin # the domain added to sender
RESTRICTIONS
http://www.postfix.org/SMTPD_ACCESS_README.html
Restriction list name | Version | Status | Effect of REJECT or DEFER result |
---|---|---|---|
smtpd_client_restrictions | All | Optional | Reject all client commands |
smtpd_helo_restrictions | All | Optional | Reject HELO/EHLO information |
smtpd_sender_restrictions | All | Optional | Reject MAIL FROM information |
smtpd_relay_restrictions | ≥ 2.10 | Required if smtpd_recipient_restrictions does not enforce relay policy | Reject RCPT TO information |
smtpd_data_restrictions | ≥ 2.0 | Optional | Reject DATA command |
smtpd_end_of_data_restrictions | ≥ 2.2 | Optional | Reject END-OF-DATA command |
smtpd_etrn_restrictions | All | Optional | Reject ETRN command |
https://wiki.centos.org/HowTos/postfix_restrictions
telnet 192.168.0.2 25 # Comments
Trying 192.168.0.2...
Connected to 192.168.0.2 (192.168.0.2).
Escape character is '^]'.
220 mail.example.com ESMTP Postfix # <-smtp_client_restrictions
HELO mail.example.com # <-smtp_helo_restrictions
250 mail.example.com #
MAIL FROM:<ned@example.com> # <-smtp_sender_restrictions
250 2.1.0 Ok #
RCPT TO:<ned@example.com> # <-smtp_recipient_restrictions
250 2.1.5 Ok #
DATA # <-smtp_data_restrictions
354 End data with <CR><LF>.<CR><LF> #
To:<ned@example.com> # <-header_checks
From:<ned@example.com> #
Subject:SMTP Test #
This is a test message # <-body_checks
. #
250 2.0.0 Ok: queued as 301AE20034
QUIT
221 2.0.0 Bye
Connection closed by foreign host.
ReST
https://rest-sphinx-memo.readthedocs.io/en/latest/
Syntax
code
module,class,method
:var <TYPE> <NOM>: <Description de la variable>
:var <NOM>: <Description de la variable>
:param <TYPE> <NOM>: <Description du paramètre>.
:param <NOM>: <Description d'un autre paramètre. Ici on ne précise pas le type, c'est optionnel>.
:returns: <Description de ce qui est retourné (si la fonction retourne quelque chose)>.
:rtype: <Type de ce qui est retourné>
:raises <Exception>: <Description de l'exception>.
:myindex: (int): deep
reference
`link vers ce siteb <https://code.ambau.ovh/>`_
:class:`mymodule.MyClass` About my class.
:meth:`mymodule.MyClass.myfunction` And my cool function
format
*italique*
**gras**
``code inline``
directives
https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html
.. NOTE:: Ceci est une note.
.. WARNING:: Ceci est un avertissement !
.. IMPORTANT:: Ceci est important
.. TODO:: Liste de choses à faire
.. IMAGE:: files/graph1.png # from source path & html path
title
Titre principal
===============
Titre de niveau 2
-----------------
Titre de niveau 3
~~~~~~~~~~~~~~~~~
Un autre titre de niveau 2
--------------------------
list
* Ceci est une liste
* un autre élément
* Une sous-liste
* notez bien le saut de ligne avec la liste principale,
ça ne marchera pas si vous l'oubliez !
* un dernier élément
1. Un
2. Deux
3. Trois
https://rest-sphinx-memo.readthedocs.io/en/latest/ReST.html
#. Un
#. Deux
#. Trois
table
+-----------+-----------+-----------+
| Heading 1 | Heading 2 | Heading 3 |
+===========+===========+===========+
| Hello | World | |
+-----------+-----------+-----------+
| foo | |
+-----------+ bar |
| baz | |
+-----------+-----------------------+
rst files
index.rst
TITLE 1
=======
.. toctree::
:maxdepth: 2
:caption: Contents:
./AMatrix.rst
name.rst
include
include:: ../README.rst
module
.. module:: parrot
:platform: Unix, Windows
:synopsis: Analyze and reanimate dead parrots.
.. moduleauthor:: Eric Cleese <eric@python.invalid>
.. moduleauthor:: John Idle <john@python.invalid>
auto
.. automodule:: package.module # or just .. automodule:: module
.. autoclass:: classname
.. autofunction:: functionname
.. automethod:: myproject.foomodule.Foo.__contains__
options
:show-inheritance:
:members:
:private-members:
:undoc-members:
:special-members:
:exclude-members: __doc__
examples
.. automodule:: modulename
:members:
:undoc-members:
:exclude-members: __dict__,__weakref__
Installation
# manjaro
yay -S python-sphinx python-sphinx_rtd_theme
Use
https://deusyss.developpez.com/tutoriels/Python/SphinxDoc/
https://blog.flozz.fr/2020/09/07/introduction-a-sphinx-un-outil-de-documentation-puissant/
https://romanvm.pythonanywhere.com/post/autodocumenting-your-python-code-sphinx-part-i-5/
https://samnicholls.net/2016/06/15/how-to-sphinx-readthedocs/
sphinx-quickstart
Go to your project path
cd $path2myproject
mkdir docs && cd docs
sphinx-quickstart
export PYTHONPATH=$path2myproject
make html
sphinx-apidoc
sphinx-apidoc [OPTIONS] -o <OUTPUT_PATH> <MODULE_PATH> [EXCLUDE_PATTERN …]
Configuration
# Enable sphinx autodoc + the RTD theme in an existing source/conf.py, then rebuild.
cd "${path2myproject:-.}"
file="source/conf.py"
# Uncomment the 'import os' / 'import sys' lines generated by sphinx-quickstart
sed -i "s|.*\(import os\)$|\1|" "${file}"
sed -i "s|.*\(import sys\)$|\1|" "${file}"
# Point sys.path at the project root so autodoc can import the package
sed -i "s|.*sys.path.insert.*|sys.path.insert(0, os.path.abspath('../../'))|" "${file}"
# '[' must be escaped in a sed BRE ('\['); an unescaped '[' opens an
# unterminated bracket expression and makes sed reject the whole script.
sed -i "s|^\(extensions = \[.*\)$|\1\n\t'sphinx.ext.autodoc',|" "${file}"
sed -i "s|^\(extensions = \[.*\)$|\1\n\t'sphinx_rtd_theme',|" "${file}"
# Append an override line; the last html_theme assignment in conf.py wins.
sed -i "s|^\(html_theme = .*\)$|\1\nhtml_theme = 'sphinx_rtd_theme'|" "${file}"
make html
conf.py
path
import os
import sys
# sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../../'))
extensions
extensions = [
'sphinx.ext.autodoc',
'sphinx_rtd_theme',
'sphinx.ext.todo',
'sphinx.ext.autosummary',
'sphinx_autopackagesummary',
]
[extensions]
todo_include_todos=True
#autosummary_generate = True
autodoc_member_order = 'bysource' # alphabetical (alphabetical), groupwise(member type), bysource (source order)
Installation
yay -S maxima # xmaxima
yay -S ttf-mathtype wxmaxima # wxmaxima
DOT
BASH
echo "digraph G {Hello->World}" | dot -Tpng >hello.png
GRAPHVIZ
https://fr.wikipedia.org/wiki/Prolog
http://gecif.net/articles/linux/prolog.html#exemple1
https://www.tutorialspoint.com/prolog/index.htm
https://www.swi-prolog.org/pldoc/man?section=termrw
Présentation
Terms
Atomes
Les textes constants constituent des atomes. Un atome est ordinairement constitué d'une chaîne de lettres, nombres et traits bas (_), commençant par une lettre minuscule. Pour introduire un atome non alphanumérique, on l'entoure d'apostrophes (ainsi '+' est un atome, + un opérateur).
Nombres
Les implémentations courantes de Prolog ne distinguent pas les nombres entiers des flottants.
Chaînes de caractères
Les chaînes de caractères sont en général écrites comme une séquence de caractères entourés par des apostrophes. Elles sont souvent représentées en interne par une liste de codes ASCII.
Variables
Les variables sont indiquées en utilisant un ensemble de lettres, nombres et caractères de soulignement et commençant avec une lettre majuscule.
Termes composés
Prolog ne peut représenter des données complexes que par termes composés. Un terme composé consiste en une tête (aussi appelée foncteur), qui doit être un atome, et des paramètres sans restriction de type. Le nombre de paramètres, nommé arité du terme, est en revanche significatif. Un terme composé est identifié par sa tête et son arité, et habituellement écrit comme foncteur/arité.
Listes
Une liste n’est pas un type de données isolé, mais est définie par une construction récursive (utilisant le foncteur . d'arité 2, c'est donc au niveau de la représentation interne un terme composé):
l'atome [] est une liste vide ;
si T est une liste et H est un élément, alors le terme '.'(H, T) est une liste.
Prédicats
La programmation en Prolog est très différente de la programmation dans un langage impératif. En Prolog, on alimente une base de connaissances de faits et de règles ; il est alors possible de faire des requêtes à la base de connaissances.
L’unité de base de Prolog est le prédicat, qui est défini comme étant vrai. Un prédicat consiste en une tête et un nombre d’arguments. exemple :
chat(tom).
Prédéfinis
Quelques prédicats sont bâtis dans le langage et permettent à un programme Prolog des activités de routine (comme de l'évaluation numérique, les entrée/sortie, les fonctionnalités de l'interface graphique et généralement communiquer avec le système de l’ordinateur). Par exemple, le prédicat write peut être utilisé pour l’affichage à l’écran.
% print Bonjour
write('Bonjour').
Règles
Le second type d’instructions en Prolog est la règle. Un exemple de règle est :
lumière(on) :- interrupteur(on).
Évaluation
Quand l’interpréteur reçoit une requête, il recherche les règles (faits inclus) dont la partie gauche peut être unifiée avec la requête, et effectue cette unification avec la première règle trouvée. Par exemple ayant ce code Prolog :
frère_ou_sœur(X,Y) :- parent(Z,X), parent(Z,Y), X \= Y.
parent(X,Y) :- père(X,Y).
parent(X,Y) :- mère(X,Y).
mère(trude, sally).
père(tom, sally).
père(tom, erica).
père(mike, tom).
On peut alors questionner:
?- frère_ou_sœur(F,S).
F = sally,
S = erica ;
F = erica,
S = sally ;
false.
?- frère_ou_sœur(sally, erica).
oui.
Négation par l'échec
La négation logique pure n'existe pas en Prolog, on se repose sur la négation par l'échec, qui se note différemment suivant les implémentations de Prolog
Exécution
Prolog est un langage logique, aussi en théorie, on n'a pas à se préoccuper de la façon dont il s’exécute. Cependant il est parfois prudent de prendre en compte comment l’algorithme d’inférence agit, pour éviter qu’un programme Prolog ne dure trop longtemps.
% compter le nombre d’éléments d’une liste.
elems([],0).
elems([H|T], X) :- elems(T, Y), X is Y + 1.
pour miser X, ou j'ai cet argent d'emblée, ou sinon j'ai le crédit nécessaire.
miser(X) :- avoirargent(X), ! ; avoircrédit(X).
Syntax
% # comments
! # Opérateur d’arrêt vert. Le ! dit à l’interpréteur de ne plus chercher d’alternative
_ # récupère un résultat sans affectation, un atome qui commence par _ est équivalent
Operators
Comparison Operators
X > Y % X is greater than Y
X < Y % X is less than Y
X >= Y % X is greater than or equal to Y
X =< Y % X is less than or equal to Y
X =:= Y X == Y % the X and Y values are equal
X =\= Y X \= Y % the X and Y values are not equal
Arithmetic Operators
+ % Addition
- % Subtraction
* % Multiplication
/ % Division
** % Power
// % Integer Division
mod % Modulus
Usage
Base
consult(program). % load program named 'program.pl'
listing. % Lists all predicates defined in the calling module
listing(predicate). % List matching clauses
trace. % start trace
notrace.
nodebug.
trace(predicate). % traces program
trace(predicate/2, +fail). % Trace failures of foo/2 in any module
trace(predicate, -all). % Stop tracing predicate
List
Print elements
% print_list(+List): print each element of List on its own line.
print_list([]).                                     % base case: an empty list succeeds
print_list([X|L]) :- writeln(X), print_list(L).     % print head, recurse on tail
Examples
statement
test(A) :-
( A =:= 2 ->
write('A is 2')
; A =:= 3 ->
write('A is 3')
; write('HAhahahahaah')
).
start_w
start_w(X,[X|L]).
dce
dce([X,X|_]).
dce([X|S]):- dce(S).
belong_to
belong_to(X,[X|_]):- !.
belong_to(X,[_|L]):- belong_to(X,L).
belong_to(X,[H|L]):- X=H, !; belong_to(X,L).
ended_w
ended_w(X,[X]).
ended_w(X,[_|L]):- ended_w(X,L).
ended_w(X,[_|L]):- [X]=L, !; ended_w(X,L).
remove0
remove0(X,[X|L],L).
remove1
remove1(E,[E|L],L):- !. % set L2 = L when first element of list are E
remove1(E,[X|L],[X|L2]):- remove1(E,L,L2). % resolve & add X to L2 while unstacking
remove1(E,[E|L],L):- write('&'), writeln(L), !. % set L2 = L when first element of list are E
remove1(E,[X|L],[X|L2]):- write(L), write('-'), writeln(L2), remove1(E,L,L2), write('>'), writeln(L2).
remove1(E,[E|L],[5|L]):- write('&'), writeln(L), !. % set L2 = L when first element of list are E
remove1(E,[_|L],[1|L2]):- write(L), write('-'), writeln(L2), remove1(E,L,L2), write('>'), writeln(L2).
min in list
min_get([R],R):- !.
min_get([X,Y|L],R):-
X < Y -> min_get([X|L],R); min_get([Y|L],R).
min_get_t([],R,R):- !.
min_get_t([H|L],T,R):-
H < T -> min_get_t(L,H,R); min_get_t(L,T,R).
min_get_t([H|L],R):- min_get_t(L,H,R).
plus_petit([X],X).
plus_petit([X,Y|L],R):- X =< Y, plus_petit([X|L],R).
plus_petit([X,Y|L],R):- X > Y, plus_petit([Y|L],R).
?- numlist(0,1000000,L), time(min_get(L, 1)).
% 2,000,001 inferences, 0.775 CPU in 0.777 seconds (100% CPU, 2579609 Lips)
?- numlist(0,1000000,L), time(min_get_t(L, 1)).
% 3,000,002 inferences, 0.196 CPU in 0.197 seconds (100% CPU, 15268446 Lips)
?- numlist(0,1000000,L), time(plus_petit(L, 1)).
% 3,000,001 inferences, 5.512 CPU in 5.521 seconds (100% CPU, 544250 Lips)
factoriel
facto(0,1).
facto(1,1):- !.
facto(N,R):- N>1, N1 is N-1, facto(N1,R1), R is R1*N.
facto_t(1,R,R):- !.
facto_t(N,T,R):- N>1, N1 is N-1, R1 is T*N, facto_t(N1,R1,R).
facto_t(0,1).
facto_t(N,R):- facto_t(N,1,R).
fibonacci
get the result
fibo(1,1,1):- !.
fibo(N,R,S):-
N1 is N-1,
fibo(N1,R1,S1),
R is R1+S1,
S is R1.
fibo(N,R):-
N<2 -> R = 1;
fibo(N,R,_).
fibo_t(1,R,R,_):- !. % fibo_t(1,R,X,_):- R = X, !; !. / minus N>1
fibo_t(N,RF,R,S):-
N>1,
N1 is N-1,
R1 is R+S,
fibo_t(N1,RF,R1,R).
fibo_t(N,R):-
N<2 -> R = 1;
fibo_t(N,R,1,1).
fibo(X,1):- X<2. % simple but fat version
fibo(N,R):- N1 is N-1, N2 is N-2, fibo(N1, R1), fibo(N2, R2), R is R1+R2.
% 299,998 inferences, 3.693 CPU in 3.700 seconds (100% CPU, 81244 Lips)
?- time(fibo_t(100000, _)).
% 200,000 inferences, 0.521 CPU in 0.525 seconds (99% CPU, 383935 Lips)
get values in list
fibo_l(1,1,1,[1]):- !.
fibo_l(N,S,R,L):-
N1 is N-1,
fibo_l(N1,S1,R1,L1),
R is R1+S1,
S is R1,
L = [R|L1].
fibo_l(N,L):- fibo_l(N,_,_,L).
fibo_lt(1,L,L,_,_):- !.
fibo_lt(N,L,LT,R,S):-
N>1,
N1 is N-1,
R1 is R+S,
Lt = [R1|LT],
fibo_lt(N1,L,Lt,R1,R).
fibo_lt(N,L):- fibo_lt(N,L,[1],1,1).
?- time(fibo_l(100000, _)).
% 299,998 inferences, 3.876 CPU in 3.883 seconds (100% CPU, 77390 Lips)
?- time(fibo_lt(100000, _)).
% 299,998 inferences, 0.781 CPU in 0.783 seconds (100% CPU, 384010 Lips)
list is sorted
% sort_list(+L,-R): selection sort — repeatedly extract the minimum with
% min_get/2 and remove one occurrence of it with remove1/3 (both defined above).
% NOTE(review): the original called min_is/2, which is defined nowhere in this
% file; min_get/2 is the minimum predicate available here — confirm intended helper.
sort_list([],[]).
sort_list(L,R):- min_get(L,M), remove1(M, L, L2), sort_list(L2,R1), R = [M|R1].
% is_sorted(+L,+R): succeeds when R is identical (==) to the sorted form of L.
is_sorted(L,R):- sort_list(L,LS), R == LS.
USB
RTL8821CU
https://github.com/brektrou/rtl8821CU
RTL88x2BU / RTL8822BU
manjaro
https://github.com/cilynx/rtl88x2BU_WiFi_linux_v5.3.1_27678.20180430_COEX20180427-5959
ubuntu
INSTALL
MANJARO
yay -S autofs sshfs
CONFIGURATION
SSHFS
SSH KEY
Do not forget to put root key in remote server !!
autofs use root rights to connect
MANJARO
/etc/autofs/auto.master.d/cluster.autofs
# Master map entry: mount-point, map file, then autofs options.
# NOTE(review): the embedded ", " before --timeout/--ghost looks suspect for the
# auto.master options field (options are usually space- or comma-separated with
# no stray commas) — confirm autofs parses this line as intended.
echo "/home/cluster /etc/autofs/auto.sshfs uid=1000,gid=1000, --timeout=30, --ghost" | sudo tee /etc/autofs/auto.master.d/cluster.autofs
/etc/autofs/auto.sshfs
# Map entry: key 'node1' mounted via sshfs as root over port 2002; '#' and ':'
# are backslash-escaped so autofs does not treat them as comment/field separators.
echo "node1 -fstype=fuse,port=2002,rw,allow_other :sshfs\#root@node1\:/" | sudo tee /etc/autofs/auto.sshfs
TEST SSHFS
path=/tmp/node1
mkdir -p ${path}
sshfs root@node1:/ ${path}
MAN
systemd-resolve [OPTIONS...] HOSTNAME|ADDRESS...
systemd-resolve [OPTIONS...] --service [[NAME] TYPE] DOMAIN
systemd-resolve [OPTIONS...] --openpgp EMAIL@DOMAIN...
systemd-resolve [OPTIONS...] --statistics
systemd-resolve [OPTIONS...] --reset-statistics
Resolve domain names, IPv4 and IPv6 addresses, DNS records, and services.
-h --help # Show this help
--version # Show package version
--no-pager # Do not pipe output into a pager
-4 # Resolve IPv4 addresses
-6 # Resolve IPv6 addresses
-i --interface=INTERFACE # Look on interface
-p --protocol=PROTO|help # Look via protocol
-t --type=TYPE|help # Query RR with DNS type
-c --class=CLASS|help # Query RR with DNS class
--service # Resolve service (SRV)
--service-address=BOOL # Resolve address for services (default: yes)
--service-txt=BOOL # Resolve TXT records for services (default: yes)
--openpgp # Query OpenPGP public key
--tlsa # Query TLS public key
--cname=BOOL # Follow CNAME redirects (default: yes)
--search=BOOL # Use search domains for single-label names (default: yes)
--raw[=payload|packet] # Dump the answer as binary data
--legend=BOOL # Print headers and additional info (default: yes)
--statistics # Show resolver statistics
--reset-statistics # Reset resolver statistics
--status # Show link and server status
--flush-caches # Flush all local DNS caches
--reset-server-features # Forget learnt DNS server feature levels
--set-dns=SERVER # Set per-interface DNS server address
--set-domain=DOMAIN # Set per-interface search domain
--set-llmnr=MODE # Set per-interface LLMNR mode
--set-mdns=MODE # Set per-interface MulticastDNS mode
--set-dnsovertls=MODE # Set per-interface DNS-over-TLS mode
--set-dnssec=MODE # Set per-interface DNSSEC mode
--set-nta=DOMAIN # Set per-interface DNSSEC NTA
--revert # Revert per-interface configuration
TRICKS
LXC
bind DNS from host to containers
dynamically
# for selected interface
resolvectl --interface lxdbr0 dnssec set no
# add DNS configuration to lxd domain
resolvectl dns lxdbr0 "$(lxc network show lxdbr0 | sed -n 's|.*ipv4\.address: \(.*\)/.*|\1|p')"
resolvectl domain lxdbr0 '~lxd'
# old style
# systemd-resolve --interface lxdbr0 --set-dnssec no #~ old style
#sed -i 's|^.\?DNSSEC=.*$|DNSSEC=allow-downgrade|' /etc/systemd/resolved.conf # global / not too advisable
#systemd-resolve --interface lxdbr0 --set-domain '~lxd' --set-dns "$(lxc network show lxdbr0 | sed -n 's|.*ipv4\.address: \(.*\)/.*|\1|p')"
persistently
path="/etc/systemd/resolved.conf.d/"
[ -d "${path}" ] || mkdir -p "${path}"
cidr="$(lxc network show lxdbr0 | sed -n 's|.*ipv4\.address: \(.*\)/.*|\1|p')"
echo "# Configuration file for lxdbr0
[Resolve]
DNS=${cidr}
Domains=lxd
DNSSEC=no" > "${path}/lxd.conf"
start & enable service
# Start systemd-resolved only if it is not already running. `systemctl is-active`
# prints a single word (active/inactive/...) and sets the exit status, whereas
# `systemctl status` prints a multi-line report that could never equal "inactive".
systemctl is-active --quiet systemd-resolved.service || systemctl start systemd-resolved.service
# Enable it only when not already enabled. The old test `[ "$(systemctl is-enabled ...)" ]`
# was true for ANY non-empty output, including "enabled", so it always fired.
[ "$(systemctl is-enabled systemd-resolved.service 2>/dev/null)" = "enabled" ] || systemctl enable systemd-resolved.service
test
resolvectl query sp20-www.lxd
#systemd-resolve -i lxdbr0 sp20-www.lxd
Use TAB to select options & values !!
journalctl -p err|alert|crit|debug|emerg|err|info|notice|warning # print only level
-u $service # show logs for unit apache2
_PID=1 # show journal for one PID
<command> # show journal for one command (ex: /usr/sbin/apache2)
OTHERS
systemd-cgls [OPTIONS...] [CGROUP...]
Recursively show control group contents
-a --all Show all groups, including empty
-u --unit Show the subtrees of specified system units
--user-unit Show the subtrees of specified user units
-l --full Do not ellipsize output
-k Include kernel threads in output
-M --machine= Show container
systemd-cgtop [OPTIONS...] [CGROUP]
Show top control groups by their resource usage
-p --order=path Order by path
-t --order=tasks Order by number of tasks/processes
-c --order=cpu Order by CPU load (default)
-m --order=memory Order by memory load
-i --order=io Order by IO load
-r --raw Provide raw (not human-readable) numbers
--cpu=percentage Show CPU usage as percentage (default)
--cpu=time Show CPU usage as time
-P Count userspace processes instead of tasks (excl. kernel)
-k Count all processes instead of tasks (incl. kernel)
--recursive=BOOL Sum up process count recursively
-d --delay=DELAY Delay between updates
-n --iterations=N Run for N iterations before exiting
-b --batch Run in batch mode, accepting no input
--depth=DEPTH Maximum traversal depth (default: 3)
-M --machine= Show container
systemd-analyze [OPTIONS...] {COMMAND} ...
Profile systemd, show unit dependencies, check unit files
-h --help # Show this help
--version # Show package version
--no-pager # Do not pipe output into a pager
--system # Operate on system systemd instance
--user # Operate on user systemd instance
-H --host=[USER@]HOST # Operate on remote host
-M --machine=CONTAINER # Operate on local container
--order # Show only order in the graph
--require # Show only requirement in the graph
--from-pattern=GLOB # Show only origins in the graph
--to-pattern=GLOB # Show only destinations in the graph
--fuzz=SECONDS # Also print also services which finished SECONDS earlier than the latest in the branch
--man[=BOOL] # Do [not] check for existence of man pages
--generators[=BOOL] # Do [not] run unit generators (requires privileges)
Commands:
time # Print time spent in the kernel
blame # Print list of running units ordered by time to init
critical-chain [UNIT...] # Print a tree of the time critical chain of units
plot # Output SVG graphic showing service initialization
dot [UNIT...] # Output dependency graph in man:dot(1) format
log-level [LEVEL] # Get/set logging threshold for manager
log-target [TARGET] # Get/set logging target for manager
dump # Output state serialization of service manager
syscall-filter [NAME...] # Print list of syscalls in seccomp filter
verify FILE... # Check unit files for correctness
calendar SPEC... # Validate repetitive calendar time events
service-watchdogs [BOOL] # Get/set service watchdog state
path
// get path for a specific value
paths as $path | select(getpath($path) == "10.0.0.159") | $path
// get paths in string format
[paths|map(if type=="number" then "[]" else tostring end)|join(".")|split(".[]")|join("[]")]|unique|map("."+.)|.[]
insert data
data='{"ipv4":"192.168.0.1"}
{"ipv4":"192.168.0.100"}
{"ipv4":"192.168.0.102"}'
echo "$data" | jq -r 'select(.ipv4 == "192.168.0.1") |= . + {"hostname":"toto"}'
LXC
pretty print
# pretty print
lxc list --format=json $ctname$ | jq -C
search
# search in keys recursively & get values for key
lxc list --format json|jq -r '..|.hwaddr?|select(.!=null and .!="")'
# search in keys recursively & get flatten path
lxc list --format json | jq -c 'paths | select(.[-1] == "hwaddr")'
# search by type
jq 'path(recurse(if type|. == "array" or . =="object" then .[] else empty end))'
# search image by alias
lxc image list --format=json | jq -r '.[].aliases[] | select(.name | test("'-1635.*$'")).name'
name
# print name
lxc list --format=json | jq -r '.[].name'
# print selected name for exact name alpine
lxc list --format=json | jq -r '.[] | select(.name == "alpine311").name'
# print selected name for alpine.* in regexp format
lxc list --format=json | jq -r '.[] | select(.name | test("alpine.*")).name'
# display names of running containers
lxc list --format=json | jq -r '.[] | select(.status == "Running").name'
# display names of running containers with a selected name
lxc list --format=json | jq -r '.[] | select(.status == "Running") | select(.name == "alpine314").name'
# display name of containers which have attached profile $profile
lxc list --format=json | jq -r '.[] | select(.profiles | any(contains("'$profile'"))).name'
IP
Display first IP address for specified: interface
# Display IP address of the first network interface of containers which have an interface named 'eth0'
lxc list --format=json | jq -r '.[].state.network.eth0.addresses[0].address'
# Display IP mac address & IP address for containers which have eth0 named interface
lxc list --format json|jq '.[].state.network.eth0 | {(.hwaddr): .addresses[0].address}'
Display IP address for specified: name, scope, family
Display IP address for container named 'alpine311$' with network interface='eth0', with scope='global' & family='inet'
lxc list --format=json alpine311$ | jq -r '.[].state.network.eth0?.addresses[]
| select(.scope == "global" and .family == "inet")
| .address'
Display pairs 'name + Ip address' for specified: interface, family
Display 'name + address' for containers with network, interface='eth0', scope='global' & family='inet'
lxc list --format=json | jq -r '.[] |
select (.state.network != null) |
(.name + " " + (
.state.network.eth0.addresses[] |
select(.family == "inet") |
.address
)
)'
Display profile names and users in json format
lxc profile list -f json|jq -r '.[] | (.name, .used_by)'
Display name, ipv4, hwaddr for running containers with network in json format
lxc list --format=json | jq -r '.[] |
select (.state.network != null) |
{
"name": .name,
"ip": (
.state.network.eth0.addresses[] |
select(.family == "inet") |
.address
),
"hwaddr": .state.network.eth0.hwaddr
}'
EXAMPLES
CONTAINER
BACKUP
# backup server
CMD = /usr/local/bs/backup-server
OPTS = --vz --vz-dump --vz-cleanlog
LOG_FILE = /var/log/server/cron-backup-server
DATE = date +%Y%m%d-%T
# every week, the monday at 2h00
#*/1 * * * * root echo "$($DATE) /usr/local/bs/backup-server --vz --vz-dump" >> $LOG_FILE
0 2 * * 1 root echo "$($DATE) /usr/local/bs/backup-server --vz --vz-dump --vz-cleanlog" >> $LOG_FILE
# every week, the monday at 02h01
1 2 * * 1 root test -x $CMD && $CMD $OPTS >> $LOG_FILE
55 2 * * 1 root echo "$($DATE) /usr/sbin/vzctl stop 105" >> $LOG_FILE
56 2 * * 1 root /usr/sbin/vzctl stop 105 >> $LOG_FILE
# every hour
#0 */1 * * * root test -x $CMD && $CMD $OPT
MARIADB
# cron to dump mariadb databases
CMD = /usr/local/bs/mysql-dump-slave
OPTS = "db_pwd=txccR_OH2Goal9 path2=/var/share/mariadb/save"
LOG_FILE = /var/log/server/cron-mariadb
DATE = date +%Y%m%d-%T
0 1 * * * root echo "$($DATE) /usr/local/bs/mysql-dump-slave db_pwd= path2=/var/share/mariadb/save" >> $LOG_FILE # every day at 1h00
1 1 * * * root test -x $CMD && $CMD $OPTS >> $LOG_FILE # every day at 01h01
SERVER
BACKUP
# cron to sync server backup from HOST to local
HOST = ns388803
DATE = date +%Y%m%d-%T
LOG_FILE = /var/log/server/cron-ns388803-backup
# every day at 3h00
0 3 * * * root echo "$($DATE) rsync -e 'ssh -p2002' -av root@${HOST}:/save/backup/ /save/${HOST}/backup/" >> $LOG_FILE
# every day at 3h01
1 3 * * * root rsync -e 'ssh -p2002' -av root@${HOST}:/save/backup/ /save/${HOST}/backup/ >> $LOG_FILE
MARIADB
# cron to sync mariadb dump from HOST to local
HOST = ns388803
DATE = date +%Y%m%d-%T
LOG_FILE = /var/log/server/cron-ns388803-mariadb
# every day at 3h30
30 3 * * * root echo "$($DATE) rsync -e 'ssh -p2002' -av root@${HOST}:/save/mariadb/ /save/${HOST}/mariadb/" >> $LOG_FILE
# every day at 3h31
31 3 * * * root rsync -e 'ssh -p2002' -av root@${HOST}:/save/mariadb/ /save/${HOST}/mariadb/ >> $LOG_FILE
# cron to sync mail from HOST to local
HOST = ns388803
DATE = date +%Y%m%d-%T
LOG_FILE = /var/log/server/cron-ns388803-mail
# every day at 3h50
50 3 * * * root echo "$($DATE) rsync -e 'ssh -p2002' -av root@${HOST}:/vm/share/mail/ /save/${HOST}/mail/" >> $LOG_FILE
# every day at 3h51
51 3 * * * root rsync -e 'ssh -p2002' -av root@${HOST}:/vm/share/mail/ /save/${HOST}/mail/ >> $LOG_FILE
INSTALLATION
JAVA
For Eclipse source browsing and content assist, install openjdk-src && openjdk-doc
manjaro
sudo pacman -Sy openjdk-src openjdk-doc
JAVA CONFIGURATION
openjdk
After that go to preferences / Java / Installed JREs, edit the JRE and define the paths for:
- jre: /usr/lib/jvm/java-15-openjdk/
- Source path: give the path of src.zip: /usr/lib/jvm/java-15-openjdk/lib/src.zip
- javadoc location: give the path of api documentation: file:/usr/share/doc/java-openjdk/api
jdk
After that go to preferences / Java / Installed JREs, edit the JRE and define the paths for:
- jre: /usr/lib/jvm/java-16-jdk/
- Source path: give the path of src.zip: /usr/lib/jvm/java-16-jdk/lib/src.zip
- javadoc location: give the path of api documentation: file:/usr/share/doc/java-jdk/api
CONFIGURATION
opt=java
release=2021-03
path_opt="/opt/eclipse_${opt}_${release}"
user_path="/home/nikita/.eclipse_${opt}"
workspace_path="/home/nikita/dev/eclipse-workspaces-${opt}/${opt}"
file="${path_opt}/eclipse.ini"
# install
tar xzf /ext/shared/Soft/multi/eclipse-java-${release}-R-linux-gtk-x86_64.tar.gz -C /opt/
mv /opt/eclipse ${path_opt}
# configuration
cp -a ${file} ${file}-keep$(date +%s)
sed -i "/^-vmargs/ a-Duser.home=${user_path}" $file
sed -i "/^-Dosgi.instance.area.default/ s|=.*|=${workspace_path}|" $file
DOT
add lines in eclipse.ini
echo "-Dosgi.framework.extensions=org.eclipse.fx.osgi
-Defxclipse.java-modules.dir=/usr/lib/jvm/javafx-sdk-15/lib" >> "$file"
JAVAFX
https://openjfx.io/openjfx-docs/
https://openjfx.io/openjfx-docs/#IDE-Eclipse
sudo pacman -S java-openjfx
path="/usr/lib/jvm/java-*-openjdk"
pathfx="/usr/lib/jvm/java-*-openjdk/lib/javafx"
[ -d $pathfx ] || mkdir $pathfx
for jar in javafx.base.jar javafx.controls.jar javafx.fxml.jar javafx.graphics.jar javafx.media.jar javafx.swing.jar javafx-swt.jar javafx.web.jar; do [ -f $path/$jar ] && mv $path/$jar $pathfx/$jar; done
file=/etc/server/env.conf
! grep -q PATH_TO_FX $file && echo -e "\n######## JAVA\nexport JAVA_HOME=$path\nexport PATH_TO_FX=$pathfx" >> $file
BASH EDITOR
Manjaro
xfce4-terminal -x bash --login -c '${BE_CMD_CALL}'
Ubuntu
gnome-terminal -- bash --login -c '${BE_CMD_CALL}'
SOFTWARE SITES
Teams
Tuleap Mylyn http://eclipse.tuleap.net/mylyn.tasks/update/
CodeTogether https://www.codetogether.com/updates/ci/
Yatta Profiles & Launcher https://marketplace.yatta.de/update/profiles/client
tools
ColorTheme http://eclipse-color-theme.github.com/update
DevStyle http://www.genuitec.com/updates/devstyle/ci
DevUtils http://kangtae49.github.io/dev-utils-plugin/update
QuickRex http://netceteragroup.github.io/quickrex/updatesite
softs
DBeaver https://dbeaver.io/update/latest/
Papyrus 2020-06 https://download.eclipse.org/modeling/mdt/papyrus/updates/releases/2020-06
Papyrus Designer http://download.eclipse.org/modeling/mdt/papyrus/components/designer/
Papyrus Marte http://download.eclipse.org/modeling/mdt/papyrus/components/marte/
Papyrus Moka http://download.eclipse.org/modeling/mdt/papyrus/components/moka/
Papyrus Nightly https://download.eclipse.org/modeling/mdt/papyrus/updates/nightly/master
sysml16 releases https://download.eclipse.org/modeling/mdt/papyrus/components/sysml16/2019-06/
EMF 2.14 Release http://marketplace.yatta.de/eclipse-mirror/modeling/emf/emf/builds/release/2.14
languages
BashEditor https://dl.bintray.com/de-jcup/basheditor
CDT http://download.eclipse.org/tools/cdt/releases/10.0
PyDev http://pydev.org/updates
CLEAN PLUGINS
** Clean history before !!
** one
eclipse_path="/opt/eclipse_java_2021-03"
eclipse_profile=`sed -n '/^eclipse.p2.profile=.*/ s|.*=||p' "${eclipse_path}/configuration/config.ini"`
${eclipse_path}/eclipse -application org.eclipse.equinox.p2.garbagecollector.application -profile "${eclipse_profile}"
** all
for eclipse_path in `ls /opt/eclipse*/ -d`; do
eclipse_path="${eclipse_path%/}"
eclipse_profile=`sed -n '/^eclipse.p2.profile=.*/ s|.*=||p' "${eclipse_path}/configuration/config.ini"`
${eclipse_path}/eclipse -application org.eclipse.equinox.p2.garbagecollector.application -profile "${eclipse_profile}"
done
hiberfil.sys reduce
launch "cmd" as administrator
powercfg -h off # remove hibernation capability and file
powercfg -h -size 50 # reduce size
pagefile.sys reduce
Right click on Computer & Select Properties
Click on Advanced system settings on the left
Click on the Settings button under the Performance group
Select the Advanced tab of the Performance Options window
Click on the Change button
Uncheck Automatically manage paging file size for all drives
Select the C: drive in the list
Check the Custom size radio button
Enter the Initial and Maximum sizes you want to use. I would recommend 4096 MB for the Initial and 6144 MB for the Maximum
Click OK on the Virtual Memory dialog
Click OK on the Performance Options dialog
Click OK on the System Properties dialog
genuine
run as administrator in 'cmd'
SLMGR /REARM
root
boot unlock
developer mode
tap 8 times on "Build Number" in "about" settings
in "additional settings", select "developer options" & enable: "OEM unlocking" & "USB debugging"
MI account
create MI account & add device to account
install Drivers
install ADB & Fastboot Drivers on PC with file : "adb-setup-1.4.3.exe" & "Windows6.1-KB2999226-x64.msu"
Add driver manually with:
- Select device in "device manager"
- Select "update driver"
- Select "Install the hardware that I manually select from a list (Advanced)"
- Select "android device"
- Click on "Have Disk..."
- Locate the extracted driver manually on your computer (file "adb-setup-1.4.3.exe" extracted manualy)
Download & unzip software "mi unlock tools" from https://en.miui.com/unlock/
Unlock xiaomi device with launching "mi unlock tools" with administrator rights
connect xiaomi device in fastboot mode with USB cable (fastboot mode: turn off & hold "volume down" & "power" buttons)
Unlock device & wait 7 days to do it definitively
After 7 days unlock device
Launch "mi unlock tools" with administrator rights
log in to "mi unlock tools" & unlock device with confirmations...
install TWRP
adb devices -l # Get list of connected devices
adb reboot bootloader # reboot in fastboot
fastboot devices -l # Get list of connected devices
fastboot flash recovery /pathto/recovery.img # flash boot with TWRP
fastboot reboot
hold volume- during reboot until TWRP is displayed
TWRP/advanced/Disable replace TWRP
Fews commands with fastboot
fastboot reboot-bootloader # reboot in bootloader mode
fastboot erase cache # erase data cache in device
fastboot boot /pathto/recovery.img # ?
fastboot flash recovery recovery.img
fastboot flash vbmeta vbmeta.img
fastboot boot recovery.img
Password
cp -a $old_profile/key4.db $new_profile/ && cp -a $old_profile/logins.json $new_profile/
Thunderbird
message filters
find ~/.thunderbird/ -name msgFilterRules.dat
accounts order
in about:config or prefs.js, reorder "account number" in
mail.accountmanager.accounts
Create an archive of files from a named tree
Creates an archive of the specified format containing the tree structure for the named tree, and writes it out to the standard output. If <prefix> is specified it is prepended to the filenames in the archive
git archive behaves differently when given a tree ID versus when given a commit ID or tag ID. In the first case the current time is used as the modification time of each file in the archive. In the latter case the commit time as recorded in the referenced commit object is used instead. Additionally the commit ID is stored in a global extended pax header if the tar format is used; it can be extracted using git get-tar-commit-id. In ZIP files it is stored as a file comment
git archive [--format=<fmt>] [--list] [--prefix=<prefix>/] [<extra>] [-o <file> | --output=<file>] [--worktree-attributes] [--remote=<repo> [--exec=<git-upload-archive>]] <tree-ish> [<path>...]
--format=<fmt> # Format of the resulting archive: tar or zip. If this option is not given, and the output file is specified, the format is inferred from the filename if possible (e.g. writing to "foo.zip" makes the output to be in the zip format). Otherwise the output format is tar
-l, --list # Show all available formats
-v, --verbose # Report progress to stderr
--prefix=<prefix>/ # Prepend <prefix>/ to each filename in the archive
-o <file>, --output=<file> # Write the archive to <file> instead of stdout
--worktree-attributes # Look for attributes in .gitattributes files in the working tree as well (see the section called “ATTRIBUTES”)
<extra> # This can be any options that the archiver backend understands. See next section
--remote=<repo> # Instead of making a tar archive from the local repository, retrieve a tar archive from a remote repository. Note that the remote repository may place restrictions on which sha1 expressions may be allowed in <tree-ish>. See git-upload-archive(1) for details
--exec=<git-upload-archive> # Used with --remote to specify the path to the git-upload-archive on the remote side
<tree-ish> # The tree or commit to produce an archive for
<path> # Without an optional path parameter, all files and subdirectories of the current working directory are included in the archive. If one or more paths are specified, only these are included
EXAMPLES
# Create a tar archive that contains the contents of the latest commit on the current branch, and extract it in the /var/tmp/junk directory
git archive --format=tar --prefix=junk/ HEAD | (cd /var/tmp/ && tar xf -)
# Create a compressed tarball for v1.4.0 release
git archive --format=tar --prefix=git-1.4.0/ v1.4.0 | gzip >git-1.4.0.tar.gz
# Same as above, but using the builtin tar.gz handling
git archive --format=tar.gz --prefix=git-1.4.0/ v1.4.0 >git-1.4.0.tar.gz
# Same as above, but the format is inferred from the output file
git archive --prefix=git-1.4.0/ -o git-1.4.0.tar.gz v1.4.0
# Create a compressed tarball for v1.4.0 release, but without a global extended pax header
git archive --format=tar --prefix=git-1.4.0/ v1.4.0^{tree} | gzip >git-1.4.0.tar.gz
# Put everything in the current head’s Documentation/ directory into git-1.4.0-docs.zip, with the prefix git-docs/
git archive --format=zip --prefix=git-docs/ HEAD:Documentation/ > git-1.4.0-docs.zip
# Create a Zip archive that contains the contents of the latest commit on the current branch. Note that the output format is inferred by the extension of the output file
git archive -o latest.zip HEAD
git config tar.tar.xz.command "xz -c"
# Configure a "tar.xz" format for making LZMA-compressed tarfiles. You can use it specifying --format=tar.xz, or by creating an output file like -o foo.tar.xz
TRICKS
Create a zip containing only your changed files, keeping the directory tree that goes with them
git archive -o delta.zip develop $(git diff --name-only V1.0.0^)
Drawback of this method: deleted files will not be removed, and the command above will fail on them.
You therefore need to add a filter on the files. Example taking into account only files that were Added, Copied, Modified, Renamed or Type-changed:
git archive -o delta.zip develop $(git diff --name-only --diff-filter=ACMRT V1.0.0^)
Reapply commits on top of another base tip
If <branch> is specified, git rebase will perform an automatic git switch <branch> before doing anything else. Otherwise it remains on the current branch
A---B---C topic
/
D---E---F---G master
git checkout topic && git rebase master
git rebase master topic
# rebase (from) master (for) topic
A'--B'--C' topic
/
D---E---F---G master
----------------------------------------------------
o---o---o---o---o master
\
o---o---o---o---o next
\
o---o---o topic
git rebase --onto master next topic
# rebase (onto) master (from) next (for) topic
o---o---o---o---o master
| \
| o'--o'--o' topic
\
o---o---o---o---o next
git rebase [-i | --interactive] [<options>] [--exec <cmd>] [--onto <newbase> | --keep-base] [<upstream> [<branch>]]
git rebase [-i | --interactive] [<options>] [--exec <cmd>] [--onto <newbase>] --root [<branch>]
git rebase (--continue | --skip | --abort | --quit | --edit-todo | --show-current-patch)
--onto <newbase> # Starting point at which to create the new commits. If the --onto option is not specified, the starting point is <upstream>. May be any valid commit, and not just an existing branch name
--keep-base # Set the starting point at which to create the new commits to the merge base of <upstream> <branch>. Running git rebase --keep-base <upstream> <branch> is equivalent to running git rebase --onto <upstream>... <upstream>
<upstream> # Upstream branch to compare against. May be any valid commit, not just an existing branch name. Defaults to the configured upstream for the current branch
<branch> # Working branch; defaults to HEAD
--continue # Restart the rebasing process after having resolved a merge conflict
--abort # Abort the rebase operation and reset HEAD to the original branch. If <branch> was provided when the rebase operation was started, then HEAD will be reset to <branch>. Otherwise HEAD will be reset to where it was when the rebase operation was started
--quit # Abort the rebase operation but HEAD is not reset back to the original branch. The index and working tree are also left unchanged as a result
--keep-empty # Keep the commits that do not change anything from its parents in the result
--allow-empty-message # By default, rebasing commits with an empty message will fail. This option overrides that behavior, allowing commits with empty messages to be rebased
--skip # Restart the rebasing process by skipping the current patch
--edit-todo # Edit the todo list during an interactive rebase
--show-current-patch # Show the current patch in an interactive rebase or when rebase is stopped because of conflicts. This is the equivalent of git show REBASE_HEAD
-m, --merge # Use merging strategies to rebase. When the recursive (default) merge strategy is used, this allows rebase to be aware of renames on the upstream side
-s <strategy>, --strategy=<strategy> # Use the given merge strategy. If there is no -s option git merge-recursive is used instead. This implies --merge
-X <strategy-option>, --strategy-option=<strategy-option> # Pass the <strategy-option> through to the merge strategy. This implies --merge and, if no strategy has been specified, -s recursive. Note the reversal of ours and theirs as noted above for the -m option
--rerere-autoupdate, --no-rerere-autoupdate # Allow the rerere mechanism to update the index with the result of auto-conflict resolution if possible
-S[<keyid>], --gpg-sign[=<keyid>] # GPG-sign commits. The keyid argument is optional and defaults to the committer identity; if specified, it must be stuck to the option without a space
-q, --quiet # Be quiet. Implies --no-stat
-v, --verbose # Be verbose. Implies --stat
--stat # Show a diffstat of what changed upstream since the last rebase. The diffstat is also controlled by the configuration option rebase.stat
-n, --no-stat # Do not show a diffstat as part of the rebase process
--no-verify # This option bypasses the pre-rebase hook. See also githooks(5)
--verify # Allows the pre-rebase hook to run, which is the default. This option can be used to override --no-verify. See also githooks(5)
-C<n> # Ensure at least <n> lines of surrounding context match before and after each change. When fewer lines of surrounding context exist they all must match. By default no context is ever ignored
--no-ff, --force-rebase, -f # Individually replay all rebased commits instead of fast-forwarding over the unchanged ones. This ensures that the entire history of the rebased branch is composed of new commits
--fork-point, --no-fork-point # Use reflog to find a better common ancestor between <upstream> and <branch> when calculating which commits have been introduced by <branch>
--ignore-whitespace, --whitespace=<option> # These flags are passed to the git apply program (see git-apply(1)) that applies the patch
--committer-date-is-author-date, --ignore-date # These flags are passed to git am to easily change the dates of the rebased commits (see git-am(1))
--signoff # Add a Signed-off-by: trailer to all the rebased commits. Note that if --interactive is given then only commits marked to be picked, edited or reworded will have the trailer added
-i, --interactive # Make a list of the commits which are about to be rebased. Let the user edit that list before rebasing. This mode can also be used to split commits (see SPLITTING COMMITS below)
-r, --rebase-merges[=(rebase-cousins|no-rebase-cousins)] # By default, a rebase will simply drop merge commits from the todo list, and put the rebased commits into a single, linear branch. With --rebase-merges, the rebase will instead try to preserve the branching structure within the commits that are to be rebased, by recreating the merge commits
-p, --preserve-merges # [DEPRECATED: use --rebase-merges instead] Recreate merge commits instead of flattening the history by replaying commits a merge commit introduces. Merge conflict resolutions or manual amendments to merge commits are not preserved
-x <cmd>, --exec <cmd> # Append "exec <cmd>" after each line creating a commit in the final history. <cmd> will be interpreted as one or more shell commands. Any command that fails will interrupt the rebase, with exit code 1
--root # Rebase all commits reachable from <branch>, instead of limiting them with an <upstream>. This allows you to rebase the root commit(s) on a branch
--autosquash, --no-autosquash # When the commit log message begins with "squash! ..." (or "fixup! ..."), and there is already a commit in the todo list that matches the same ..., automatically modify the todo list of rebase -i so that the commit marked for squashing comes right after the commit to be modified, and change the action of the moved commit from pick to squash (or fixup)
--autostash, --no-autostash # Automatically create a temporary stash entry before the operation begins, and apply it after the operation ends. This means that you can run rebase on a dirty worktree. However, use with care: the final stash application after a # successful rebase might result in non-trivial conflicts
--reschedule-failed-exec, --no-reschedule-failed-exec
Automatically reschedule exec commands that failed. This only makes sense in interactive mode (or when an --exec option was provided)
INCOMPATIBLE OPTIONS
The following options:
--committer-date-is-author-date
--ignore-date
--whitespace
--ignore-whitespace
-C
are incompatible with the following options:
--merge
--strategy
--strategy-option
--allow-empty-message
--[no-]autosquash
--rebase-merges
--preserve-merges
--interactive
--exec
--keep-empty
--edit-todo
--root when used in combination with --onto
In addition, the following pairs of options are incompatible:
--preserve-merges and --interactive
--preserve-merges and --signoff
--preserve-merges and --rebase-merges
--keep-base and --onto
--keep-base and --root
Switch branches
Switch to a specified branch. The working tree and the index are updated to match the branch. All new commits will be added to the tip of this branch
Optionally a new branch could be created with either -c, -C, automatically from a remote branch of same name (see --guess), or detach the working tree from any branch with --detach, along with switching
Switching branches does not require a clean index and working tree (i.e. no differences compared to HEAD). The operation is aborted however if the operation leads to loss of local changes, unless told otherwise with --discard-changes or --merge
git switch [<options>] [--no-guess] <branch>
git switch [<options>] --detach [<start-point>]
git switch [<options>] (-c|-C) <new-branch> [<start-point>]
git switch [<options>] --orphan <new-branch>
<branch> # Branch to switch to
<new-branch> # Name for the new branch
<start-point> # The starting point for the new branch. Specifying a <start-point> allows you to create a branch based on some other point in history than where HEAD currently points. (Or, in the case of --detach, allows you to inspect and detach from some other point.)
-c <new-branch>, --create <new-branch> # Create a new branch named <new-branch> starting at <start-point> before switching to the branch
-C <new-branch>, --force-create <new-branch> # Similar to --create except that if <new-branch> already exists, it will be reset to <start-point>
-d, --detach # Switch to a commit for inspection and discardable experiments
--guess, --no-guess # If <branch> is not found but there does exist a tracking branch in exactly one remote (call it <remote>) with a matching name, treat as equivalent to
git switch -c <branch> --track <remote>/<branch>
-f, --force # An alias for --discard-changes
--discard-changes # Proceed even if the index or the working tree differs from HEAD. Both the index and working tree are restored to match the switching target
-m, --merge # If you have local modifications to one or more files that are different between the current branch and the branch to which you are switching, the command refuses to switch branches in order to preserve your modifications in context
--conflict=<style> # The same as --merge option above, but changes the way the conflicting hunks are presented, overriding the merge.conflictStyle configuration variable
--progress, --no-progress # Progress status is reported on the standard error stream by default when it is attached to a terminal, unless --quiet is specified. This flag enables progress reporting even if not attached to a terminal, regardless of --quiet
-t, --track # When creating a new branch, set up "upstream" configuration. -c is implied. See --track in git-branch(1) for details
--no-track # Do not set up "upstream" configuration, even if the branch.autoSetupMerge configuration variable is true
--orphan <new-branch> # Create a new orphan branch, named <new-branch>. All tracked files are removed
--ignore-other-worktrees # git switch refuses when the wanted ref is already checked out by another worktree. This option makes it check the ref out anyway. In other words, the ref can be held by more than one worktree
--recurse-submodules, --no-recurse-submodules # Using --recurse-submodules will update the content of all initialized submodules according to the commit recorded in the superproject
EXAMPLES
The following command switches to the "master" branch:
git switch master
After working in the wrong branch, switching to the correct branch would be done using:
git switch mytopic
However, your "wrong" branch and correct "mytopic" branch may differ in files that you have modified locally, in which case the above switch would fail like this:
git switch mytopic
error: You have local changes to 'frotz'; not switching branches
You can give the -m flag to the command, which would try a three-way merge:
git switch -m mytopic
Auto-merging frotz
After this three-way merge, the local modifications are not registered in your index file, so git diff would show you what changes you made since the tip of the new branch
To switch back to the previous branch before we switched to mytopic (i.e. "master" branch):
git switch -
You can grow a new branch from any commit. For example, switch to "HEAD~3" and create branch "fixup":
git switch -c fixup HEAD~3
Switched to a new branch 'fixup'
If you want to start a new branch from a remote branch of the same name:
git switch new-topic
Branch 'new-topic' set up to track remote branch 'new-topic' from 'origin'
Switched to a new branch 'new-topic'
To check out commit HEAD~3 for temporary inspection or experiment without creating a new branch:
git switch --detach HEAD~3
HEAD is now at 9fc9555312 Merge branch 'cc/shared-index-permbits'
git-restore - Restore working tree files
Restore specified paths in the working tree with some contents from a restore source. If a path is tracked but does not exist in the restore source, it will be removed to match the source
The command can also be used to restore the content in the index with --staged, or restore both the working tree and the index with --staged --worktree
By default, the restore sources for working tree and the index are the index and HEAD respectively. --source could be used to specify a commit as the restore source
git restore [<options>] [--source=<tree>] [--staged] [--worktree] [--] <pathspec>...
git restore [<options>] [--source=<tree>] [--staged] [--worktree] --pathspec-from-file=<file> [--pathspec-file-nul]
git restore (-p|--patch) [<options>] [--source=<tree>] [--staged] [--worktree] [--] [<pathspec>...]
-s <tree>, --source=<tree> # Restore the working tree files with the content from the given tree. It is common to specify the source tree by naming a commit, branch or tag associated with it.
-p, --patch # Interactively select hunks in the difference between the restore source and the restore location. See the “Interactive Mode” section of git-add(1) to learn how to operate the --patch mode.
-W, --worktree, -S, --staged # Specify the restore location. If neither option is specified, by default the working tree is restored. Specifying --staged will only restore the index. Specifying both restores both.
-q, --quiet # Quiet, suppress feedback messages. Implies --no-progress.
--progress, --no-progress # Progress status is reported on the standard error stream by default when it is attached to a terminal, unless --quiet is specified. This flag enables progress reporting even if not attached to a terminal, regardless of --quiet.
--ours, --theirs # When restoring files in the working tree from the index, use stage #2 (ours) or #3 (theirs) for unmerged paths.
-m, --merge # When restoring files on the working tree from the index, recreate the conflicted merge in the unmerged paths.
--conflict=<style> # The same as --merge option above, but changes the way the conflicting hunks are presented, overriding the merge.conflictStyle configuration variable. Possible values are "merge" (default) and "diff3" (in addition to what is shown by # "merge" style, shows the original contents).
--ignore-unmerged # When restoring files on the working tree from the index, do not abort the operation if there are unmerged entries and neither --ours, --theirs, --merge or --conflict is specified. Unmerged paths on the working tree are left alone.
--ignore-skip-worktree-bits # In sparse checkout mode, by default is to only update entries matched by <pathspec> and sparse patterns in $GIT_DIR/info/sparse-checkout. This option ignores the sparse patterns and unconditionally restores any files in <pathspec>.
--overlay, --no-overlay # In overlay mode, the command never removes files when restoring. In no-overlay mode, tracked files that do not appear in the --source tree are removed, to make them match <tree> exactly. The default is no-overlay mode.
--pathspec-from-file=<file> # Pathspec is passed in <file> instead of commandline args. If <file> is exactly - then standard input is used. Pathspec elements are separated by LF or CR/LF. Pathspec elements can be quoted as explained for the configuration variable core.quotePath (see git-config(1)). See also --pathspec-file-nul and global --literal-pathspecs.
--pathspec-file-nul # Only meaningful with --pathspec-from-file. Pathspec elements are separated with NUL character and all other characters are taken literally (including newlines and quotes).
-- # Do not interpret any more arguments as options.
<pathspec>... # Limits the paths affected by the operation.
EXAMPLES
The following sequence switches to the master branch, reverts the Makefile to two revisions back, deletes hello.c by mistake, and gets it back from the index
git switch master
git restore --source master~2 Makefile # take a file out of another commit
rm -f hello.c
git restore hello.c # restore hello.c from the index
If you want to restore all C source files to match the version in the index, you can say
git restore '*.c'
Note the quotes around *.c. The file hello.c will also be restored, even though it is no longer in the working tree, because the file globbing is used to match entries in the index (not in the working tree by the shell).
To restore all files in the current directory
git restore .
or to restore all working tree files with top pathspec magic (see gitglossary(7))
git restore :/
To restore a file in the index to match the version in HEAD (this is the same as using git-reset(1))
git restore --staged hello.c
or you can restore both the index and the working tree (this the same as using git-checkout(1))
git restore --source=HEAD --staged --worktree hello.c
or the short form which is more practical but less readable:
git restore -s@ -SW hello.c
https://medium.com/@cq94/zfs-vous-connaissez-vous-devriez-1d2611e7dad6
TOC
chapter | designation |
---|---|
ADD | Adds the specified virtual devices to the given pool |
ATTACH | Attaches new_device to the existing device |
CLEAR | Clears device errors in a pool |
CREATE | Creates a new storage pool containing the virtual devices specified on the command line |
DESTROY | Destroys the given pool, freeing up any devices for other use |
DETACH | Detaches device from a mirror |
EVENTS | Lists all recent events generated by the ZFS kernel modules |
EXPORT | Exports the given pools from the system |
GET | Retrieves the given list of properties (or all properties if all is used) for the specified storage pool(s) |
HISTORY | Displays the command history of the specified pool(s) or all pools if no pool is specified |
IMPORT-LIST | Lists pools available to import |
IMPORT-ALL | Imports all pools found in the search directories |
IMPORT | Imports a specific pool |
IOSTAT | Displays I/O statistics for the given pools/vdevs |
LABELCLEAR | Removes ZFS label information from the specified device |
LIST | Lists the given pools along with a health status and space usage |
OFFLINE | Takes the specified physical device offline |
ONLINE | Brings the specified physical device online |
REGUID | Generates a new unique identifier for the pool |
REOPEN | Reopen all the vdevs associated with the pool |
REMOVE | Removes the specified device from the pool |
REPLACE | Replaces old_device with new_device |
SCRUB | Begins a scrub or resumes a paused scrub |
SET | Sets the given property on the specified pool |
SPLIT | Splits devices off pool creating newpool |
STATUS | Displays the detailed health status for the given pools |
UPGRADE-DISPLAY-NOT | Displays pools which do not have all supported features enabled and pools formatted using a legacy ZFS version number |
UPGRADE-DISPLAY | Displays legacy ZFS versions supported by the current software |
UPGRADE | Enables all supported features on the given pool |
PROPERTIES | Available properties |
ADD
Adds the specified virtual devices to the given pool
The vdev specification is described in the Virtual Devices section. The behavior of the -f option, and the device checks performed are described in the zpool create subcommand
zpool add [-fgLnP] [-o property=value] pool vdev...
-f # Forces use of vdevs, even if they appear in use or specify a conflicting replication level
-g # Display vdev, GUIDs instead of the normal device names
-L # Display real paths for vdevs resolving all symbolic links
-n # Displays the configuration that would be used without actually adding the vdevs
-P # Display real paths for vdevs instead of only the last component of the path
-o property=value # Sets the given pool properties
ATTACH
Attaches new_device to the existing device
The existing device cannot be part of a raidz configuration. If device is not currently part of a mirrored configuration, device automatically transforms into a two-way mirror of device and new_device. If device is part of a two-way mirror, attaching new_device creates a three-way mirror, and so on. In either case, new_device begins to resilver immediately
zpool attach [-f] [-o property=value] pool device new_device
-f # Forces use of new_device, even if it appears to be in use
-o property=value # Sets the given pool properties
CLEAR
Clears device errors in a pool
If no arguments are specified, all device errors within the pool are cleared. If one or more devices is specified, only those errors associated with the specified device or devices are cleared
zpool clear pool [device]
CREATE
Creates a new storage pool containing the virtual devices specified on the command line
The pool name must begin with a letter, and can only contain alphanumeric characters as well as underscore ("_"), dash ("-"), colon (":"), space (" "), and period ("."). The pool names mirror, raidz, spare and log are reserved, as are names beginning with the pattern c[0-9]. The vdev specification is described in the Virtual Devices section
zpool create [-dfn] [-m mountpoint] [-o property=value]... [-o feature@feature=value]... [-O file-system-property=value]... [-R root] [-t tname] pool vdev...
-d # Do not enable any features on the new pool
-f # Forces use of vdevs, even if they appear in use or specify a conflicting replication level
-m mountpoint # Sets the mount point for the root dataset
-n # Displays the configuration that would be used without actually creating the pool
-o property=value # Sets the given pool properties
-o feature@feature=value # Sets the given pool feature
-O file-system-property=value # Sets the given file system properties in the root file system of the pool
-R root # Equivalent to -o cachefile=none -o altroot=root
-t tname # Sets the in-core pool name to tname while the on-disk name will be the name specified as the pool name pool
DESTROY
Destroys the given pool, freeing up any devices for other use
This command tries to unmount any active datasets before destroying the pool
zpool destroy [-f] pool
-f # Forces any active datasets contained within the pool to be unmounted
DETACH
Detaches device from a mirror
The operation is refused if there are no other valid replicas of the data
# Detaches device from a mirror
zpool detach pool device
EVENTS
Lists all recent events generated by the ZFS kernel modules
These events are consumed by the zed(8) and used to automate administrative tasks such as replacing a failed device with a hot spare. For more information about the subclasses and event payloads that can be generated see the zfs-events(5) man page
zpool events
-c # Clear all previous events
-f # Follow mode
-H # Scripted mode. Do not display headers, and separate fields by a single tab instead of arbitrary space
-v # Print the entire payload for each event
EXPORT
Exports the given pools from the system
All devices are marked as exported, but are still considered in use by other subsystems. The devices can be moved between systems (even those of different endianness) and imported as long as a sufficient number of devices are present
zpool export [-a] [-f] pool...
-a # Exports all pools imported on the system
-f # Forcefully unmount all datasets, using the unmount -f command
GET
Retrieves the given list of properties (or all properties if all is used) for the specified storage pool(s)
These properties are displayed with the following fields:
- name Name of storage pool
- property Property name
- value Property value
- source Property source, either 'default' or 'local'
zpool get [-Hp] [-o field[,field]...] all|property[,property]... pool...
-H # Scripted mode. Do not display headers, and separate fields by a single tab instead of arbitrary space
-o field # A comma-separated list of columns to display. name,property,value,source is the default value
-p # Display numbers in parsable (exact) values
HISTORY
Displays the command history of the specified pool(s) or all pools if no pool is specified
zpool history [-il] [pool]...
-i # Displays internally logged ZFS events in addition to user initiated events
-l # Displays log records in long format: + user name, the hostname, and the zone
IMPORT-LIST
Lists pools available to import
zpool import [-D] [-c cachefile|-d dir]
-c cachefile # Reads configuration from the given cachefile that was created with the cachefile pool property
-d dir # Searches for devices or files in dir
-D # Lists destroyed pools only
IMPORT-ALL
Imports all pools found in the search directories
Identical to the previous command, except that all pools with a sufficient number of devices available are imported. Destroyed pools, pools that were previously destroyed with the zpool destroy command, will not be imported unless the -D option is specified
zpool import -a [-DfmN] [-F [-n] [-T] [-X]] [-c cachefile|-d dir] [-o mntopts] [-o property=value]... [-R root] [-s]
-a # Searches for and imports all pools found
-c cachefile # Reads configuration from the given cachefile that was created with the cachefile pool property
-d dir # Searches for devices or files in dir
-D # Imports destroyed pools only
-f # Forces import, even if the pool appears to be potentially active
-F # Recovery mode for a non-importable pool
-m # Allows a pool to import when there is a missing log device
-n # Used with the -F recovery option
-N # Import the pool without mounting any file systems
-o mntopts # Comma-separated list of mount options to use when mounting datasets within the pool
-o property=value # Sets the specified property on the imported pool
-R root # Sets the cachefile property to none and the altroot property to root
-s # Scan using the default search path, the libblkid cache will not be consulted
-X # Used with the -F recovery option
-T # Specify the txg to use for rollback
IMPORT
Imports a specific pool
A pool can be identified by its name or the numeric identifier. If newpool is specified, the pool is imported using the name newpool. Otherwise, it is imported with the same name as its exported name
zpool import [-Dfm] [-F [-n] [-t] [-T] [-X]] [-c cachefile|-d dir] [-o mntopts] [-o property=value]... [-R root] [-s] pool|id [newpool]
-c cachefile # Reads configuration from the given cachefile that was created with the cachefile pool property
-d dir # Searches for devices or files in dir. The -d option can be specified multiple times. This option is incompatible with the -c option.
-D # Imports destroyed pool. The -f option is also required
-f # Forces import, even if the pool appears to be potentially active
-F # Recovery mode for a non-importable pool
-m # Allows a pool to import when there is a missing log device
-n # Used with the -F recovery option
-o mntopts # Comma-separated list of mount options to use when mounting datasets within the pool
-o property=value # Sets the specified property on the imported pool
-R root # Sets the cachefile property to none and the altroot property to root
-s # Scan using the default search path, the libblkid cache will not be consulted
-X # Used with the -F recovery option
-T # Specify the txg to use for rollback
-t # Used with newpool
IOSTAT
Displays I/O statistics for the given pools/vdevs
You can pass in a list of pools, a pool and list of vdevs in that pool, or a list of any vdevs from any pool. If no items are specified, statistics for every pool in the system are shown. When given an interval, the statistics are printed every interval seconds until ^C is pressed. If count is specified, the command exits after count reports are printed. The first report printed is always the statistics since boot regardless of whether interval and count are passed. However, this behavior can be suppressed with the -y flag. Also note that the units of K, M, ... that are printed in the report are in base 1024. To get the raw values, use the -p flag
zpool iostat [[[-c SCRIPT] [-lq]]|-rw] [-T u|d] [-ghHLpPvy] [[pool...]|[pool vdev...]|[vdev...]] [interval [count]]
-c [SCRIPT1[,SCRIPT2]...] # Run a script (or scripts) on each vdev and include the output as a new column in the zpool iostat output
-T u|d # Display a time stamp
-g # Display vdev GUIDs instead of the normal device names
-H # Scripted mode
-L # Display real paths for vdevs resolving all symbolic links
-p # Display numbers in parsable (exact) values
-P # Display full paths for vdevs instead of only the last component of the path
-r # Print request size histograms for the leaf ZIOs
-v # Verbose statistics Reports usage statistics for individual vdevs within the pool, in addition to the pool-wide statistics
-l # Include average latency statistics:
- total_wait: Average total IO time (queuing + disk IO time)
- disk_wait: Average disk IO time (time reading/writing the disk)
- syncq_wait: Average amount of time IO spent in synchronous priority queues. Does not include disk time
- asyncq_wait: Average amount of time IO spent in asynchronous priority queues. Does not include disk time
- scrub: Average queuing time in scrub queue. Does not include disk time
-q # Include active queue statistics
- syncq_read/write: Current number of entries in synchronous priority queues
- asyncq_read/write: Current number of entries in asynchronous priority queues
- scrubq_read: Current number of entries in scrub queue
LABELCLEAR
Removes ZFS label information from the specified device
The device must not be part of an active pool configuration
zpool labelclear [-f] device
-f # Treat exported or foreign devices as inactive
LIST
Lists the given pools along with a health status and space usage
If no pools are specified, all pools in the system are listed. When given an interval, the information is printed every interval seconds until ^C is pressed. If count is specified, the command exits after count reports are printed
zpool list [-HgLpPv] [-o property[,property]...] [-T u|d] [pool]... [interval [count]]
-g # print vdev GUIDs instead of the normal device names
-H # scripted mode. Do not display headers, and separate fields by a single tab instead of arbitrary space
-o property,... # print only specified properties. Default list is name, size, alloc, free, fragmentation, expandsize, capacity, dedupratio, health, altroot
-L # Display real paths for vdevs resolving all symbolic links
-p # Display numbers in parsable (exact) values
-P # Display full paths for vdevs instead of only the last component of the path
-T u|d # Display a time stamp
-v # Verbose statistics
OFFLINE
Takes the specified physical device offline
While the device is offline, no attempt is made to read or write to the device. This command is not applicable to spares
zpool offline [-f] [-t] pool device...
-f # Force fault. Instead of offlining the disk, put it into a faulted state
-t # Temporary. Upon reboot, the specified physical device reverts to its previous state
ONLINE
Brings the specified physical device online
This command is not applicable to spares or cache devices
zpool online [-e] pool device...
-e # Expand the device to use all available space
REGUID
Generates a new unique identifier for the pool
You must ensure that all devices in this pool are online and healthy before performing this action
zpool reguid pool
REOPEN
Reopen all the vdevs associated with the pool
zpool reopen pool
REMOVE
Removes the specified device from the pool
This command currently only supports removing hot spares, cache, and log devices. A mirrored log device can be removed by specifying the top-level mirror for the log. Non-log devices that are part of a mirrored configuration can be removed using the zpool detach command. Non-redundant and raidz devices cannot be removed from a pool
zpool remove pool device...
REPLACE
Replaces old_device with new_device
This is equivalent to attaching new_device, waiting for it to resilver, and then detaching old_device
The size of new_device must be greater than or equal to the minimum size of all the devices in a mirror or raidz configuration
zpool replace [-f] [-o property=value] pool device [new_device]
-f # Forces use of new_device, even if it appears to be in use
-o property=value # Sets the given pool properties. See the Properties section for a list of valid properties that can be set
SCRUB
Begins a scrub or resumes a paused scrub
The scrub examines all data in the specified pools to verify that it checksums correctly. For replicated (mirror or raidz) devices, ZFS automatically repairs any damage discovered during the scrub. The zpool status command reports the progress of the scrub and summarizes the results of the scrub upon completion
zpool scrub [-s | -p] pool...
-s # Stop scrubbing
-p # Pause scrubbing
SET
Sets the given property on the specified pool
zpool set property=value pool
SPLIT
Splits devices off pool creating newpool
All vdevs in pool must be mirrors and the pool must not be in the process of resilvering. At the time of the split, newpool will be a replica of pool. By default, the last device in each mirror is split from pool to create newpool
zpool split [-gLnP] [-o property=value]... [-R root] pool newpool [device ...]
-g # Display vdev GUIDs instead of the normal device names
-L # Display real paths for vdevs resolving all symbolic links
-n # Do dry run, do not actually perform the split
-P # Display full paths for vdevs instead of only the last component of the path
-o property=value # Sets the specified property for newpool
-R root # Set altroot for newpool to root and automatically import it
STATUS
Displays the detailed health status for the given pools
If no pool is specified, then the status of each pool in the system is displayed. For more information on pool and device health, see the Device Failure and Recovery section.
If a scrub or resilver is in progress, this command reports the percentage done and the estimated time to completion. Both of these are only approximate, because the amount of data in the pool and the other workloads on the system can change
zpool status [-c [SCRIPT1[,SCRIPT2]...]] [-gLPvxD] [-T u|d] [pool]... [interval [count]]
-c [SCRIPT1[,SCRIPT2]...] # Run a script (or scripts) on each vdev and include the output as a new column in the zpool status output
-g # Display vdev GUIDs instead of the normal device names
-L # Display real paths for vdevs resolving all symbolic links
-P # Display full paths for vdevs instead of only the last component of the path
-D # Display a histogram of deduplication statistics
-T u|d # Display a time stamp. -u for the internal representation of time, -d for standard date format
-v # Displays verbose data error information, printing out a complete list of all data errors since the last complete pool scrub
-x # Only display status for pools that are exhibiting errors or are otherwise unavailable
UPGRADE-DISPLAY-NOT
Displays pools which do not have all supported features enabled and pools formatted using a legacy ZFS version number
These pools can continue to be used, but some features may not be available. Use zpool upgrade -a to enable all features on all pools
zpool upgrade
UPGRADE-DISPLAY
Displays legacy ZFS versions supported by the current software
See zpool-features(5) for a description of feature flags features supported by the current software
zpool upgrade -v
UPGRADE
Enables all supported features on the given pool
Once this is done, the pool will no longer be accessible on systems that do not support feature flags. See zpool-features(5) for details on compatibility with systems that support feature flags, but do not support all features enabled on the pool
zpool upgrade [-V version] -a|pool...
-a # Enables all supported features on all pools.
-V version # Upgrade to the specified legacy version. If the -V flag is specified, no features will be enabled on the pool
PROPERTIES
available # Amount of storage available within the pool
capacity # Percentage of pool space used
expandsize # Amount of uninitialized space within the pool or device that can be used to increase the total capacity of the pool
fragmentation # The amount of fragmentation in the pool
free # The amount of free space available in the pool
freeing # After a file system or snapshot is destroyed, the space it was using is returned to the pool asynchronously. freeing is the amount of space remaining to be reclaimed. Over time freeing will decrease while free increases
health # The current health of the pool. Health can be one of ONLINE, DEGRADED, FAULTED, OFFLINE, REMOVED, UNAVAIL
guid # A unique identifier for the pool
size # Total size of the storage pool
unsupported@feature_guid # Information about unsupported features that are enabled on the pool. See zpool-features(5) for details
used # Amount of storage space used within the pool
The following property can be set at creation time and import time:
altroot # Alternate root directory. If set, this directory is prepended to any mount points within the pool
The following property can be set only at import time:
readonly=on|off # If set to on, the pool will be imported in read-only mode
The following properties can be set at creation time and import time, and later changed with the zpool set command:
ashift=ashift # Pool sector size exponent, to the power of 2 (internally referred to as ashift )
autoexpand=on|off # Controls automatic pool expansion when the underlying LUN is grown. If set to on, the pool will be resized according to the size of the expanded device
autoreplace=on|off # Controls automatic device replacement. If set to off, device replacement must be initiated by the administrator by using the zpool replace command. If set to on, any new device, found in the same physical location as a device that previously belonged to the pool, is automatically formatted and replaced. The default behavior is off
bootfs=(unset)|pool/dataset # Identifies the default bootable dataset for the root pool
cachefile=path|none # Controls the location of where the pool configuration is cached. Discovering all pools on system startup requires a cached copy of the configuration data that is stored on the root file system
comment=text # A text string consisting of printable ASCII characters that will be stored such that it is available even if the pool becomes faulted. An administrator can provide additional information about a pool using this property
dedupditto=number # Threshold for the number of block ditto copies
delegation=on|off # Controls whether a non-privileged user is granted access based on the dataset permissions defined on the dataset
failmode=wait|continue|panic # Controls the system behavior in the event of catastrophic pool failure
wait # Blocks all I/O access until the device connectivity is recovered and the errors are cleared
continue # Returns EIO to any new write I/O requests but allows reads to any of the remaining healthy devices. Any write requests that have yet to be committed to disk would be blocked
panic # Prints out a message to the console and generates a system crash dump
feature@feature_name=enabled # The value of this property is the current state of feature_name. The only valid value when setting this property is enabled which moves feature_name to the enabled state
listsnapshots=on|off # Controls whether information about snapshots associated with this pool is output when zfs list is run without the -t option. The default value is off
version=version # The current on-disk version of the pool. This can be increased, but never decreased
shaarli 0.11.1 modified for snippet usage
shaarli
differ
- application/http/UrlUtils.php
- composer.lock
- plugins/markdown/markdown.php
- vendor/composer/autoload_namespaces.php
- vendor/composer/autoload_static.php
- vendor/composer/installed.json
added
- tpl/perso
- vendor/erusev/parsedown
shaarli/tpl
tpl/perso / default
differ
- tpl/perso/img/apple-touch-icon.png
- tpl/perso/img/favicon.png
- tpl/perso/includes.html
- tpl/perso/linklist.html
added
- tpl/perso/css/highlight
- tpl/perso/css/monokai-sublime.css
- tpl/perso/css/perso.css
- tpl/perso/js/highlight.pack.js
- tpl/perso/README.md
REGEXP
sublime-text
# cut at the first .
^(\s+[-<\[].* # [^\.]+)\..*$
->
\1
# add link to TOC
^(#+) (.*)$
->
\1 \2 {#\2}
# add link to TOC
^\| \*\*(.+)\*\* \|
->
| **[\1](#APK-\1)** |
# add anchor to title in body
^(#{2,4}) (.*)$
->
\1 \2 {#APK-\2}
[^](#APK-TOP)
bash
# transform title to link
cd /vm/share/www/ambau.ovh/code/snippets
t=lxc; sed -n "/^#\{2,4\} / s/^#\+ \(.*\)$/| **[\1](#${t^^}-\1)** |/p" ${t}.md
https://www.tutorialspoint.com/yaml/index.htm
TOC
chapter |
---|
SCALAR |
COLLECTION |
COMMENT |
DOCUMENT |
REFERENCE |
DIRECTIVE |
PyYAML |
<br />
“YAML Ain’t Markup Language”
Le nom YAML veut dire “YAML Ain’t Markup Language”, soit “YAML n’est pas un langage de balises”. Si cela met d’emblée des distances avec XML, cela ne nous dit pas ce qu’est YAML. YAML est, d’après sa spécification, un langage de sérialisation de données conçu pour être lisible par des humains et travaillant bien avec les langage de programmation modernes pour les tâches de tous les jours
SCALAR
string
- Chaîne
- "3"
- Chaîne sur
une ligne
- "Guillemets doubles\t"
- 'Guillemets simples\t'
Le résultat de ce parsing nous amène aux commentaires suivants :
- Les caractères accentués sont gérés, en fait, l’Unicode est géré de manière plus générale
- Les retours à la ligne ne sont pas pris en compte dans les chaînes, ils sont gérés comme en HTML ou XML, à savoir qu’ils sont remplacés par des espaces
- Les guillemets doubles gèrent les caractères d’échappement, comme \t pour la tabulation par exemple
- Les guillemets simples ne gèrent pas les caractères d’échappement qui sont transcrits de manière littérale
- La liste des caractères d’échappement gérés par YAML comporte les valeurs classiques, mais aussi nombre d’autres que l’on pourra trouver dans la spécification YAML
UTF8 :
\xNN : pour écrire des caractères Unicode sur 8 bits, où NN est un nombre hexadécimal.
\uNNNN : pour des caractères Unicode sur 16 bits.
\UNNNNNNNN : pour des caractères Unicode sur 32 bits.
integer
canonique: 12345
decimal: +12_345
sexagesimal: 3:25:45
octal: 030071
hexadecimal: 0x3039
float
canonique: 1.23015e+3
exponentielle: 12.3015e+02
sexagesimal: 20:30.15
fixe: 1_230.15
infini negatif: -.inf
pas un nombre: .NaN
date
canonique: 2001-12-15T02:59:43.1Z
iso8601: 2001-12-14t21:59:43.10-05:00
espace: 2001-12-14 21:59:43.10 -5
date: 2002-12-14
others
nul: null
nul bis: ~
vrai: true
vrai bis: yes
vrai ter: on
faux: false
faux bis: no
faux ter: off
COLLECTION
List
- beans
- chocolat
- ham
[beans, chocolat, ham]
Associated Array
croissants: 2
chocolatines: 1
jambon: 0
{ croissants: 2, chocolatines: 1, jambon: 0}
COMMENT
# This is a comment
DOCUMENT
# this a first document stated by --- & ended by ...
---
first document
...
---
second document
...
REFERENCE
&ref : Defines the reference 'ref'
*ref : Link the reference 'ref'
# define the reference
monday: &p 'patatoes'
# uses the reference with pointeur
tuesday: *p
wednesday: *p
DIRECTIVE
Yaml
Give the version of YAML used
%YAML 1.1
---
Tag
Predefined Tag
tag is a data type
omap : is ordered map (list)
null: !!null
integer: !!int 3
float: !!float 1.2
string: !!str string
boolean: !!bool true
binary: !!binary dGVzdA==
map: !!map { key: value }
seq: !!seq [ element1, element2 ]
set: !!set { element1, element2 }
omap: !!omap [ key: value ]
Personnal Tag
Defines tag & use it
%TAG !person! tag:myfirst,2020:bar
---
- !person
nom: Simpson
prenom: Omer
PyYAML
http://sweetohm.net/article/introduction-yaml.html
Install yaml library like LibYaml
Read Yaml
Load file passed by argument & print the first document in file
#!/usr/bin/env python
# encoding: UTF-8
import sys
import yaml
print yaml.load(open(sys.argv[1]))
To load all documents (entire file) you can use the method 'yaml.load_all()'
for document in yaml.load_all(documents):
print document
Write Yaml
Uses method yaml.dump()
#!/usr/bin/env python
# encoding: UTF-8
import yaml
recette = {
'nom': 'sushi',
'ingredients': ['riz', 'vinaigre', 'sucre', 'sel', 'thon', 'saumon'],
'temps de cuisson': 10,
'difficulte': 'difficile'
}
print yaml.dump(recette)
Class Serialization
#!/usr/bin/env python
# encoding: UTF-8
import yaml
class Personne(object):
def __init__(self, nom, age):
self.nom = nom
self.age = age
def __repr__(self):
return "%s(nom=%r, age=%r)" % \
(self.__class__.__name__, self.nom, self.age)
print yaml.dump(Personne('Robert', 25), default_flow_style=False)
==>
!!python/object:__main__.Personne
age: 25
nom: Robert
Class Unserialization
#!/usr/bin/env python
# encoding: UTF-8
import yaml
class Personne(object):
def __init__(self, nom, age):
self.nom = nom
self.age = age
def __repr__(self):
return "%s(nom=%r, age=%r)" % \
(self.__class__.__name__, self.nom, self.age)
print yaml.load("""
!!python/object:__main__.Personne
nom: Robert
age: 25
""")
==>
Personne(nom='Robert', age=25)
https://mikefarah.gitbook.io/yq/
yq is a lightweight and portable command-line YAML processor
It aims to be the jq or sed of yaml files
yq [flags]
yq [command]
SUBCOMMANDS
alias | subcommand | Designation |
---|---|---|
x | COMPARE | Deeply compares two yaml files |
d | DELETE | Deletes the nodes matching the given path expression from the YAML file |
h | HELP | Help provides help for any command in the application |
m | MERGE | Updates the yaml file by adding/updating the path(s) and value(s) from additional yaml file(s) |
n | NEW | Creates a new yaml w.r.t the given path and value |
p | PREFIX | Prefixes w.r.t to the yaml file at the given path |
r | READ | Outputs the value of the given path in the yaml file to STDOUT |
v | VALIDATE | test syntax of file |
w | WRITE | Updates the yaml file w.r.t the given path and value |
TRICKS |
Global options
-h, --help # help for yq
-C, --colors # print with colors
-I, --indent int # sets indent level for output (default 2)
-P, --prettyPrint # pretty print
-j, --tojson # output as json. By default it prints a json document in one line, use the prettyPrint flag to print a formatted doc.
-v, --verbose # verbose mode
-V, --version # Print version information and quit
COMPARE
Deeply compares two yaml files, prints the difference
Use with prettyPrint flag to ignore formatting differences
yq compare [yaml_file_a] [yaml_file_b] [flags]
-D, --defaultValue string # default value printed when there are no results
-d, --doc string # process document index number (0 based, * for all documents) (default "0")
-h, --help # help for compare
-p, --printMode string # print mode (v (values, default), p (paths), pv (path and value pairs) (default "v")
examples
yq x - data2.yml # reads from stdin
yq x -pp dataA.yaml dataB.yaml '**' # compare paths
yq x -d1 dataA.yaml dataB.yaml 'a.b.c'
DELETE
Deletes the nodes matching the given path expression from the YAML file
Outputs to STDOUT unless the inplace flag is used, in which case the file is updated instead
yq delete [yaml_file] [path_expression] [flags]
-d, --doc string # process document index number (0 based, * for all documents) (default "0")
-h, --help # help for delete
-i, --inplace # update the yaml file inplace
examples
yq delete things.yaml 'a.b.c'
yq delete things.yaml 'a.*.c'
yq delete things.yaml 'a.(child.subchild==co*).c'
yq delete things.yaml 'a.**'
yq delete --inplace things.yaml 'a.b.c'
yq delete --inplace -- things.yaml '--key-starting-with-dash' # need to use '--' to stop processing arguments as flags
yq d -i things.yaml 'a.b.c'
HELP
Help provides help for any command in the application
Simply type yq help [path to command] for full details
yq help [command] [flags]
-h, --help # help for help
MERGE
Updates the yaml file by adding/updating the path(s) and value(s) from additional yaml file(s)
Outputs to STDOUT unless the inplace flag is used, in which case the file is updated instead.
If overwrite flag is set then existing values will be overwritten using the values from each additional yaml file.
If append flag is set then existing arrays will be merged with the arrays from each additional yaml file.
yq merge [initial_yaml_file] [additional_yaml_file]... [flags]
-a, --append # update the yaml file by appending array values
-c, --autocreate # automatically create any missing entries (default true)
-d, --doc string # process document index number (0 based, * for all documents) (default "0")
-h, --help # help for merge
-i, --inplace # update the yaml file inplace
-x, --overwrite # update the yaml file by overwriting existing values
examples
yq merge things.yaml other.yaml
yq merge --inplace things.yaml other.yaml
yq m -i things.yaml other.yaml
yq m --overwrite things.yaml other.yaml
yq m -i -x things.yaml other.yaml
yq m -i -a things.yaml other.yaml
yq m -i --autocreate=false things.yaml other.yaml
NEW
Creates a new yaml w.r.t the given path and value
Outputs to STDOUT
Create Scripts:
Note that you can give a create script to perform more sophisticated yaml This follows the same format as the update script
yq new [path] [value] [flags]
-h, --help # help for new
-s, --script string # yaml script for creating yaml
-t, --tag string # set yaml tag (e.g. !!int)
examples
yq new 'a.b.c' cat
yq n 'a.b.c' --tag '!!str' true # force 'true' to be interpreted as a string instead of bool
yq n 'a.b[+]' cat
yq n -- '--key-starting-with-dash' cat # need to use '--' to stop processing arguments as flags
yq n --script create_script.yaml
PREFIX
Prefixes w.r.t to the yaml file at the given path
Outputs to STDOUT unless the inplace flag is used, in which case the file is updated instead
yq prefix [yaml_file] [path] [flags]
-d, --doc string # process document index number (0 based, * for all documents) (default "0")
-h, --help # help for prefix
-i, --inplace # update the yaml file inplace
examples
yq prefix things.yaml 'a.b.c'
yq prefix --inplace things.yaml 'a.b.c'
yq prefix --inplace -- things.yaml '--key-starting-with-dash' # need to use '--' to stop processing arguments as flags
yq p -i things.yaml 'a.b.c'
yq p --doc 2 things.yaml 'a.b.d'
yq p -d2 things.yaml 'a.b.d'
READ
Outputs the value of the given path in the yaml file to STDOUT
yq read [yaml_file] [path_expression] [flags]
-c, --collect # collect results into array
-D, --defaultValue string # default value printed when there are no results
-d, --doc string # process document index number (0 based, * for all documents) (default "0")
-X, --explodeAnchors # explode anchors
-h, --help # help for read
-l, --length # print length of results
-p, --printMode string # print mode (v (values, default), p (paths), pv (path and value pairs) (default "v")
examples
yq read things.yaml 'a.b.c'
yq r - 'a.b.c' # reads from stdin
yq r things.yaml 'a.*.c'
yq r things.yaml 'a.**.c' # deep splat
yq r things.yaml 'a.(child.subchild==co*).c'
yq r -d1 things.yaml 'a.array[0].blah'
yq r things.yaml 'a.array[*].blah'
yq r -- things.yaml '--key-starting-with-dashes.blah'
VALIDATE
test syntax of file
yq v sample.yaml
yq validate [yaml_file] [flags]
-d, --doc string # process document index number (0 based, * for all documents) (default "0")
-h, --help # help for validate
examples
yq v - # reads from stdin
WRITE
Updates the yaml file w.r.t the given path and value
Outputs to STDOUT unless the inplace flag is used, in which case the file is updated instead
Append value to array adds the value to the end of array
Update Scripts:
Note that you can give an update script to perform more sophisticated update. Update script format is list of update commands (update or delete) like so:
- command: update
path: b.c
value:
#great
things: frog # wow!
- command: delete
path: b.d
yq write [yaml_file] [path_expression] [value] [flags]
-d, --doc string # process document index number (0 based, * for all documents) (default "0")
-f, --from string # yaml file for updating yaml (as-is)
-h, --help # help for write
-i, --inplace # update the yaml file inplace
-s, --script string # yaml script for updating yaml
-t, --tag string # set yaml tag (e.g. !!int)
examples
yq write things.yaml 'a.b.c' true
yq write things.yaml 'a.*.c' true
yq write things.yaml 'a.**' true
yq write things.yaml 'a.(child.subchild==co*).c' true
yq write things.yaml 'a.b.c' --tag '!!str' true # force 'true' to be interpreted as a string instead of bool
yq write things.yaml 'a.b.c' --tag '!!float' 3
yq write --inplace -- things.yaml 'a.b.c' '--cat' # need to use '--' to stop processing arguments as flags
yq w -i things.yaml 'a.b.c' cat
yq w -i -s update_script.yaml things.yaml
yq w things.yaml 'a.b.d[+]' foo # appends a new node to the 'd' array
yq w --doc 2 things.yaml 'a.b.d[+]' foo # updates the 3rd document of the yaml file
TRICKS
LXC
pretty print
# pretty print
lxc list --format=yaml $ctname$ | yq r - -C
name
# print name
lxc list --format=yaml $ctname$ | yq r - '.name'
# print selected name for alpine.* in regexp format
lxc list --format=yaml | yq r - 'name==alpine*'
# display names of running containers
lxc list --format yaml | yq r - 'status==Running.name'
# display name of containers which have attached profile $profile
?
https://www.atlassian.com/fr/git/tutorials
https://git-scm.com/docs
TOC
chapter | |
---|---|
REFERENCE | USED |
- URL | - ADD |
- VALUES | - ARCHIVE |
GITHUB | - BRANCH |
GPG | - CHECKOUT |
GITIGNORE | - CLONE |
TRICKS | - COMMIT |
- CONFIG | |
- LOG | |
- MERGE | |
- PULL | |
- PUSH | |
- REMOTE | |
- RESET | |
- SUBMODULE | |
- SWITCH | |
- TAG |
REFERENCE
URL
https://<fqdn>/<user>/<project> # https://github.com/aguytech/Shaarli
git@<fqdn>:<user>/<project>.git # git@github.com:aguytech/Shaarli.git
VALUES
git rev-parse --symbolic-full-name --abbrev-ref @{upstream} # print value for upstream
git rev-parse --symbolic-full-name --abbrev-ref @{push} # print value for push
git for-each-ref --format='%(refname:short) <- %(upstream:short)' refs/heads # show all upstream
git for-each-ref --format='%(upstream:short)' "$(git symbolic-ref -q HEAD)" # idem
git for-each-ref --format='%(refname:short) <- %(push:short)' refs/heads # show all upstream
git for-each-ref --format='%(push:short)' "$(git symbolic-ref -q HEAD)" # idem
USED
ADD
git add -i / --interactive # add with interactively mode
git add -u / --update # Update the index for already referred files (just where it already has an entry matching <pathspec>
git add -A / --all / --no-ignore-removal # add all files
ARCHIVE
git archive -l # list available formats
git archive --format tar.gz -9 -o "$(git br --show-current).$(date +%s).tar.gz" <branch> # create an archive from local <branch> with best compression -9 & in format tar.gz
BRANCH
https://stackoverflow.com/questions/11266478/git-add-remote-branch
list
git branch / git br # print list of local branches
git br -v # print informations about local branches
git br -vv # print full information about local branches
git branch -a -vv # print full information about all branches
git br --show-current # show name of current branch
git br -r # print list of remote branches for all repositories
git br -rlv <remote>/<pattern> # list remote branches for <remote> repository & with name matched <pattern>
create/delete
git br <branch> # create a local branch
git br <branch> <remote>/<remote_branch> # create local branch from remote
git br -u <remote>/<remote_branch> <branch> # attach a local branch to remote existing one
git br --set-upstream-to=<remote>/<remote_branch> <branch> # idem
# Equal to: git checkout <branch> && git push --set-upstream <remote> <branch>
git br -m <branch> <new_branch> # rename local branch && 'git push'
git br -d <branch> # delete local branch
git br -rd <remote>/<branch> # delete remote branch
CHECKOUT
git co -b <branch> # create a branch from HEAD and switch to it
git co -t <repo>/<branch> -b <branch> # create a local branch from <repo>/<branch>
git co --orphan=<branch> # create an orphan branch (without history)
git co --detach -b <branch> # check out a commit for inspection and discardable experiments
CLONE
git clone <url> # clone a repository
git clone <url> <alias> # clone a repository & give it an alias
git clone -b <branch> <url> # clone only one branch from repository
git clone -b v0.11-snippets --single-branch --no-tags git@github.com:aguytech/Shaarli.git shaarli-snippets # clone from a repository a single branch
COMMIT
# amend
git commit --amend --no-edit # amends a commit without changing its commit message
git commit --amend -m "message" # amends a commit with a new message
CONFIG
# amend
git config <variable> # show variable and his value
git config --global core.editor vim # set selected editor
git config -l # list all config variables
git config -l --show-origin # list all config variables with their origins
git config -l --name-only # list all names of system config variables
git config -l --local # list all config variables defined for user
git config -l --global # list all global config variables for global users
git config -l --system # list all system config variables for system
LOG
git log # show logs
git log -n3 <repo>/<branch> # show only the last 3 commits of logs
git log --pretty=format:'%h' -n1 <repo>/<branch> # show short sha of last commit
git log --name-only # with file names
git log --name-status # with file names with its status
git log --stat # with file names with its statisticals
git reflog # show logs with a reference (sha) view
MERGE
git merge -m "message" <branch> # merge branch with actual one with a message to committing
git merge --allow-unrelated-histories <branch> # allows to merge branch with no common history
PULL
If you tried a pull which resulted in complex conflicts and would want to start over, you can recover with git reset
git pull # Update actual local branch from current remote
git pull <remote> # Update actual local branch from a selected remote
git pull <remote> <branch> # Merge into the current branch the remote branch
<=>
git fetch origin
git merge origin/next
git pull --rebase # pull automatically last modifications on remote (with fetch + merge) & put your validation on head directly
pull all submodules
git submodule foreach git pull
git submodule foreach git pull origin master
git submodule foreach 'git pull origin master || true' # for some submodules without updates
PUSH
git push -u <remote> <branch> # set upstream for actual local branch & push it to remote (create one if needed), while you created a local branch to local and you want to push to another repository !
git push --tags # push tags also
git push -d <remote> <branch> # delete remote branch
REMOTE
git remote rename <name> <new_name> # rename a remote source
git remote add <name> <url> # add a remote source to repository
git remote add -t <branch> <name> <url> # add only a branch from a repository like source
git remote remove <name> # remove/delete a remote source to repository
RESET
git reset --merge # resets the index and updates the files in the working tree that are different between <commit> and HEAD
git reset --hard <commit_sha> # reset branch to commit_sha, 'git reflog' is a better way to find commit_sha
git reset --hard HEAD~1
SWITCH
TAG
git tag -l # List all tags
git tag -a -m "message" # Defines an unsigned, annoted tag
git tag -s "tag" -m "message" # Creates a signed tag with message (define the default key with git config --global user.signingkey before)
git tag -s <tag> -u <keyid> -m <message> # Creates a signed tag with a specified key user
git tag -d <tag> # Delete existing tags with the given names
git tag -v <tag> # Verify the GPG signature of the given tag names
git push --delete origin <tag> # delete tag in origin
# rename tag
git tag new old
git tag -d old
git push origin new :old
git pull --prune --tags # for coworkers
SUBMODULE
git submodule add <url> # add submodule to actual repository
GITHUB
create a local git repository and publish it to github
touch README.md
git init
git add *
git status
git commit -m "First commit"
git remote add origin <url>
git push -u origin master
GPG
https://kamarada.github.io/en/2019/07/14/using-git-with-ssh-keys/
GITHUB
-
import public to github
-
test
ssh -T git@github.com # test ssh connection
ssh -T -p 443 git@ssh.github.com # test ssh connection over https
-
set the default key
git config --global user.signingkey <keyid>
change remote url for remote existing repository
git remote -v # print https://github.com/user/project
git remote set-url origin git@github.com:user/project.git # change the connection url to use ssh
git remote -v # print git@github.com:user/project.git
delete tags
git tag -d [tag];
git push origin :[tag]
git tag -d [tag]
git push origin :refs/tags/[tag]
GITIGNORE
https://www.atlassian.com/git/tutorials/saving-changes/gitignore
pattern
**/path # match directories anywhere in the repository, relative definition
*.pattern # matches zero or more characters
!pattern # mark to a pattern negates it
/pattern # matches files only in the repository root
path/ # appending a slash indicates the pattern is a directory
debug?.log # a question mark matches exactly one character
debug[0-9].log # square brackets match a single character from a specified range like [01] [a-z] [A-Z]
debug[!01].log # an exclamation mark matches any character except one from the specified set
logs/**/debug.log # a double asterisk matches zero or more directories like logs/*day/debug.log
example
*.ba[kt]
*~
!myfile.a # include file in repo
tmp/ # exclude all files in directory tmp
head/**/*.tmp # exclude all files *.tmp in subdirectory of head
TRICKS
create orphan repo from another
Create origin to remote server
repo_local="shaarli-snippets"
tmp_branch="dev"
origin="github"
url_origin="git@github.com:aguytech/Shaarli-snippets.git"
upstream="shaarli"
url_upstream="git@github.com:aguytech/Shaarli.git"
upstream_branch="v0.11-snippets" # remote branch to track
mkdir -p "$repo_local"
cd "$repo_local"
git init
# remote
git remote add "$origin" "$url_origin"
git remote add -t "$upstream_branch" "$upstream" "$url_upstream"
git remote -v
git config --get-regexp '^remote'
# upstream
git fetch "$upstream"
git co --orphan="$tmp_branch" "$upstream"/"$upstream_branch"
git st
git ci -m "Initialize branch from $upstream/$upstream_branch $(git log --pretty=format:'%h' -n 1 "$upstream"/"$upstream_branch")"
# origin
git push --set-upstream "$origin" "$tmp_branch"
git co -b master
git push --set-upstream "$origin" master
git br -vv
git br -rlv github/*
# archive
git archive --format tar.gz -9 -o "master.$(date +%s).tar.gz" master
add a submodule
git submodule add $url
git diff --cached $submodule
git diff --cached --submodule
git commit -m "Add $submodule module"
clone with submodules
git clone --recurse-submodules
GIT URLS
In general, URLs contain information about the transport protocol, the address of the remote server, and the path to the repository. Depending on the
transport protocol, some of this information may be absent.
Git supports ssh, git, http, and https protocols (in addition, ftp, and ftps can be used for fetching, but this is inefficient and deprecated; do not use
it).
The native transport (i.e. git:// URL) does no authentication and should be used with caution on unsecured networks.
The following syntaxes may be used with them:
- ssh://[user@]host.xz[:port]/path/to/repo.git/
- git://host.xz[:port]/path/to/repo.git/
- http[s]://host.xz[:port]/path/to/repo.git/
- ftp[s]://host.xz[:port]/path/to/repo.git/
An alternative scp-like syntax may also be used with the ssh protocol:
- [user@]host.xz:path/to/repo.git/
This syntax is only recognized if there are no slashes before the first colon. This helps differentiate a local path that contains a colon. For example the
local path foo:bar could be specified as an absolute path or ./foo:bar to avoid being misinterpreted as an ssh url.
The ssh and git protocols additionally support ~username expansion:
- ssh://[user@]host.xz[:port]/~[user]/path/to/repo.git/
- git://host.xz[:port]/~[user]/path/to/repo.git/
- [user@]host.xz:/~[user]/path/to/repo.git/
For local repositories, also supported by Git natively, the following syntaxes may be used:
- /path/to/repo.git/
- file:///path/to/repo.git/
These two syntaxes are mostly equivalent, except when cloning, when the former implies --local option
git clone, git fetch and git pull, but not git push, will also accept a suitable bundle file
When Git doesn’t know how to handle a certain transport protocol, it attempts to use the remote-<transport> remote helper, if one exists. To explicitly
request a remote helper, the following syntax may be used:
- <transport>::<address>
where <address> may be a path, a server and path, or an arbitrary URL-like string recognized by the specific remote helper being invoked
If there are a large number of similarly-named remote repositories and you want to use a different format for them (such that the URLs you use will be rewritten into URLs that work), you can create a configuration section of the form:
[url "<actual url base>"]
insteadOf = <other url base>
For example, with this:
[url "git://git.host.xz/"]
insteadOf = host.xz:/path/to/
insteadOf = work:
a URL like "work:repo.git" or like "host.xz:/path/to/repo.git" will be rewritten in any context that takes a URL to be "git://git.host.xz/repo.git".
If you want to rewrite URLs for push only, you can create a configuration section of the form:
[url "<actual url base>"]
pushInsteadOf = <other url base>
For example, with this:
[url "ssh://example.org/"]
pushInsteadOf = git://example.org/
a URL like "git://example.org/path/to/repo.git" will be rewritten to "ssh://example.org/path/to/repo.git" for pushes, but pulls will still use the original URL.
REMOTES
The name of one of the following can be used instead of a URL as <repository> argument:
- a remote in the Git configuration file: $GIT_DIR/config,
- a file in the $GIT_DIR/remotes directory, or
- a file in the $GIT_DIR/branches directory.
All of these also allow you to omit the refspec from the command line because they each contain a refspec which git will use by default.
Named remote in configuration file
You can choose to provide the name of a remote which you had previously configured using git-remote(1), git-config(1) or even by a manual edit to the
$GIT_DIR/config file. The URL of this remote will be used to access the repository. The refspec of this remote will be used by default when you do not
provide a refspec on the command line. The entry in the config file would appear like this:
[remote "<name>"]
url = <url>
pushurl = <pushurl>
push = <refspec>
fetch = <refspec>
The <pushurl> is used for pushes only. It is optional and defaults to <url>.
Named file in $GIT_DIR/remotes
You can choose to provide the name of a file in $GIT_DIR/remotes. The URL in this file will be used to access the repository. The refspec in this file will
be used as default when you do not provide a refspec on the command line. This file should have the following format:
URL: one of the above URL format
Push: <refspec>
Pull: <refspec>
Push: lines are used by git push and Pull: lines are used by git pull and git fetch. Multiple Push: and Pull: lines may be specified for additional branch
mappings.
Named file in $GIT_DIR/branches
You can choose to provide the name of a file in $GIT_DIR/branches. The URL in this file will be used to access the repository. This file should have the
following format:
<url>#<head>
<url> is required; #<head> is optional.
Depending on the operation, git will use one of the following refspecs, if you don’t provide one on the command line. <branch> is the name of this file in
$GIT_DIR/branches and <head> defaults to master.
git fetch uses:
refs/heads/<head>:refs/heads/<branch>
git push uses:
HEAD:refs/heads/<head>
https://git-scm.com/book/fr/v2/Utilitaires-Git-Sous-modules
Inspects, updates and manages submodules
git submodule
add [-b <branch>] [-f|--force] [--name <name>] [--reference <repository>] [--depth <depth>] [--] <repository> [<path>] # add the given repository as a submodule at the given path to the changeset to be committed next to the current project: the current project is termed the "superproject"
add [-b <branch>] [-f|--force] [--name <name>] [--reference <repository>] [--depth <depth>] [--] <repository> [<path>] # Add the given repository as a submodule at the given path to the changeset to be committed next to the current project: the current project is termed the "superproject".
status [--cached] [--recursive] [--] [<path>...] # Show the status of the submodules. This will print the SHA-1 of the currently checked out commit for each submodule, along with the submodule path and the output of git describe for the SHA-1.
init [--] [<path>...] # Initialize the submodules recorded in the index (which were added and committed elsewhere) by setting submodule.$name.url in .git/config.
deinit [-f|--force] (--all|[--] <path>...) # Unregister the given submodules, i.e. remove the whole submodule.$name section from .git/config together with their work tree.
update [--init] [--remote] [-N|--no-fetch] [--[no-]recommend-shallow] [-f|--force] [--checkout|--rebase|--merge] [--reference <repository>] [--depth <depth>] [--recursive] [--jobs <n>] # Update the registered submodules to match what the superproject expects by cloning missing submodules, fetching missing commits in submodules and updating the working tree of the submodules.
checkout # the commit recorded in the superproject will be checked out in the submodule on a detached HEAD.
rebase # the current branch of the submodule will be rebased onto the commit recorded in the superproject.
merge # the commit recorded in the superproject will be merged into the current branch in the submodule.
none # the submodule is not updated.
set-branch (-b|--branch) <branch> [--] <path>, set-branch (-d|--default) [--] <path> # Sets the default remote tracking branch for the submodule.
set-url [--] <path> <newurl> # Sets the URL of the specified submodule to <newurl>. Then, it will automatically synchronize the submodule’s new remote URL configuration.
summary [--cached|--files] [(-n|--summary-limit) <n>] [commit] [--] [<path>...] # Show commit summary between the given commit (defaults to HEAD) and working tree/index.
foreach [--recursive] <command> # Evaluates an arbitrary shell command in each checked out submodule.
sync [--recursive] [--] [<path>...] # Synchronizes submodules' remote URL configuration setting to the value specified in .gitmodules.
absorbgitdirs # If a git directory of a submodule is inside the submodule, move the git directory of the submodule into its superproject’s $GIT_DIR/modules path and then connect the git directory and its working directory by setting the core.worktree and adding a .git file pointing to the git directory embedded in the superprojects git directory.
Create, list, delete or verify a tag object signed with GPG
Add a tag reference in refs/tags/, unless -d/-l/-v is given to delete, list or verify tags
Unless -f is given, the named tag must not yet exist
If one of -a, -s, or -u <keyid> is passed, the command creates a tag object, and requires a tag message. Unless -m <msg> or -F <file> is given, an editor is started for the user to type in the tag message
If -m <msg> or -F <file> is given and -a, -s, and -u <keyid> are absent, -a is implied
Otherwise, a tag reference that points directly at the given object (i.e., a lightweight tag) is created
A GnuPG signed tag object will be created when -s or -u <keyid> is used. When -u <keyid> is not used, the committer identity for the current user is used to find the GnuPG key for signing. The configuration variable gpg.program is used to specify custom GnuPG binary
Tag objects (created with -a, -s, or -u) are called "annotated" tags; they contain a creation date, the tagger name and e-mail, a tagging message, and an optional GnuPG signature. Whereas a "lightweight" tag is simply a name for an object (usually a commit object)
Annotated tags are meant for release while lightweight tags are meant for private or temporary object labels. For this reason, some git commands for naming objects (like git describe) will ignore lightweight tags by default
git tag [-a | -s | -u <keyid>] [-f] [-m <msg> | -F <file>] [-e] <tagname> [<commit> | <object>]
git tag -d <tagname>...
git tag [-n[<num>]] -l [--contains <commit>] [--no-contains <commit>] [--points-at <object>] [--column[=<options>] | --no-column] [--create-reflog] [--sort=<key>] [--format=<format>] [--[no-]merged [<commit>]] [<pattern>...]
git tag -v [--format=<format>] <tagname>...
-a, --annotate # Make an unsigned, annotated tag object
-s, --sign # Make a GPG-signed tag, using the default e-mail address’s key
--no-sign # Override the tag.gpgSign configuration variable that forces every tag to be signed
-u <keyid>, --local-user=<keyid> # Make a GPG-signed tag, using the given key
-f, --force # Replace an existing tag with the given name (instead of failing)
-d, --delete # Delete existing tags with the given names
-v, --verify # Verify the GPG signature of the given tag names
-n<num> # <num> specifies how many lines from the annotation, if any, are printed when using -l
-l, --list # List tags
--sort=<key> # Sort based on the key given
--color[=<when>] # Respect any colors specified in the --format option
-i, --ignore-case # Sorting and filtering tags are case insensitive
--column[=<options>], --no-column # Display tag listing in columns
--contains [<commit>] # Only list tags which contain the specified commit (HEAD if not specified)
--no-contains [<commit>] # Only list tags which don’t contain the specified commit (HEAD if not specified)
--merged [<commit>] # Only list tags whose commits are reachable from the specified commit (HEAD if not specified), incompatible with --no-merged
--no-merged [<commit>] # Only list tags whose commits are not reachable from the specified commit (HEAD if not specified), incompatible with --merged
--points-at <object> # Only list tags of the given object (HEAD if not specified)
-m <msg>, --message=<msg> # Use the given tag message (instead of prompting)
-F <file>, --file=<file> # Take the tag message from the given file
-e, --edit # The message taken from file with -F and command line with -m are usually used as the tag message unmodified
--cleanup=<mode> # This option sets how the tag message is cleaned up
--create-reflog # Create a reflog for the tag
--format=<format> # A string that interpolates %(fieldname) from a tag ref being shown and the object it points at
<tagname> # The name of the tag to create, delete, or describe
<commit>, <object> # The object that the new tag will refer to, usually a commit
Fetch from and integrate with another repository or a local branch
Incorporates changes from a remote repository into the current branch. In its default mode, git pull is shorthand for git fetch followed by git merge FETCH_HEAD
More precisely, git pull runs git fetch with the given parameters and calls git merge to merge the retrieved branch heads into the current branch. With
--rebase, it runs git rebase instead of git merge
<repository> should be the name of a remote repository as passed to git-fetch(1). <refspec> can name an arbitrary remote ref (for example, the name of a tag) or even a collection of refs with corresponding remote-tracking branches (e.g., refs/heads/:refs/remotes/origin/), but usually it is the name of a branch in the remote repository
Default values for <repository> and <branch> are read from the "remote" and "merge" configuration for the current branch as set by git-branch(1) --track.
Assume the following history exists and the current branch is "master":
A---B---C master on origin
/
D---E---F---G master
^
origin/master in your repository
Then "git pull" will fetch and replay the changes from the remote master branch since it diverged from the local master (i.e., E) until its current commit (C) on top of master and record the result in a new commit along with the names of the two parent commits and a log message from the user describing the changes.
A---B---C origin/master
/ \
D---E---F---G---H master
See git-merge(1) for details, including how conflicts are presented and handled.
In Git 1.7.0 or later, to cancel a conflicting merge, use git reset --merge. Warning: In older versions of Git, running git pull with uncommitted changes is discouraged: while possible, it leaves you in a state that may be hard to back out of in the case of a conflict.
If any of the remote changes overlap with local uncommitted changes, the merge will be automatically canceled and the work tree untouched. It is generally best to get any local changes in working order before pulling or stash them away with git-stash(1).
git pull [<options>] [<repository> [<refspec>...]]
-q, --quiet # This is passed to both underlying git-fetch to squelch reporting of during transfer, and underlying git-merge to squelch output during merging
-v, --verbose # Pass --verbose to git-fetch and git-merge
--[no-]recurse-submodules[=yes|on-demand|no] # This option controls if new commits of all populated submodules should be fetched and updated, too
Options related to merging
--commit, --no-commit # Perform the merge and commit the result
--edit, -e, --no-edit # Invoke an editor before committing successful mechanical merge to further edit the auto-generated merge message, so that the user can explain and justify the merge
--cleanup=<mode> # This option determines how the merge message will be cleaned up before committing
--ff, --no-ff, --ff-only # Specifies how a merge is handled when the merged-in history is already a descendant of the current history
-S[<keyid>], --gpg-sign[=<keyid>] # GPG-sign the resulting merge commit
--log[=<n>], --no-log # In addition to branch names, populate the log message with one-line descriptions from at most <n> actual commits that are being merged
--signoff, --no-signoff # Add Signed-off-by line by the committer at the end of the commit log message
--stat, -n, --no-stat # Show a diffstat at the end of the merge
--squash, --no-squash # Produce the working tree and index state as if a real merge happened (except for the merge information), but do not actually make a commit, move the HEAD, nor record $GIT_DIR/MERGE_HEAD
--no-verify # This option bypasses the pre-merge and commit-msg hooks
-s <strategy>, --strategy=<strategy> # Use the given merge strategy; can be supplied more than once to specify them in the order they should be tried
-X <option>, --strategy-option=<option> # Pass merge strategy specific option through to the merge strategy
--verify-signatures, --no-verify-signatures # Verify that the tip commit of the side branch being merged is signed with a valid key, i.e. a key that has a valid uid
--summary, --no-summary # Synonyms to --stat and --no-stat; these are deprecated and will be removed in the future
--allow-unrelated-histories # By default, git merge command refuses to merge histories that do not share a common ancestor
-r, --rebase[=false|true|merges|preserve|interactive] # When true, rebase the current branch on top of the upstream branch after fetching
--no-rebase # Override earlier --rebase
--autostash, --no-autostash # Before starting rebase, stash local modifications away (see git-stash(1)) if needed, and apply the stash entry when done
Options related to fetching
--all # Fetch all remotes
-a, --append # Append ref names and object names of fetched refs to the existing contents of .git/FETCH_HEAD
--depth=<depth> # Limit fetching to the specified number of commits from the tip of each remote branch history
--deepen=<depth> # Similar to --depth, except it specifies the number of commits from the current shallow boundary instead of from the tip of each remote branch history
--shallow-since=<date> # Deepen or shorten the history of a shallow repository to include all reachable commits after <date>
--shallow-exclude=<revision> # Deepen or shorten the history of a shallow repository to exclude commits reachable from a specified remote branch or tag
--unshallow # If the source repository is complete, convert a shallow repository to a complete one, removing all the limitations imposed by shallow repositories
--update-shallow # By default when fetching from a shallow repository, git fetch refuses refs that require updating
--negotiation-tip=<commit|glob> # By default, Git will report, to the server, commits reachable from all local refs to find common commits in an attempt to reduce the size of the to-be-received packfile
-f, --force # When git fetch is used with <src>:<dst> refspec it may refuse to update the local branch as discussed in the <refspec> part of the git-fetch(1) documentation
-k, --keep # Keep downloaded pack
--no-tags # By default, tags that point at objects that are downloaded from the remote repository are fetched and stored locally
-u, --update-head-ok # By default git fetch refuses to update the head which corresponds to the current branch
--upload-pack <upload-pack> # When given, and the repository to fetch from is handled by git fetch-pack, --exec=<upload-pack> is passed to the command to specify non-default path for the command run on the other end
--progress # Progress status is reported on the standard error stream by default when it is attached to a terminal, unless -q is specified
-o <option>, --server-option=<option> # Transmit the given string to the server when communicating using protocol version 2
--show-forced-updates # By default, git checks if a branch is force-updated during fetch
--no-show-forced-updates # By default, git checks if a branch is force-updated during fetch
-4, --ipv4 # Use IPv4 addresses only, ignoring IPv6 addresses
-6, --ipv6 # Use IPv6 addresses only, ignoring IPv4 addresses
<repository> # The "remote" repository that is the source of a fetch or pull operation
<refspec> # Specifies which refs to fetch and which local refs to update
examples
Update the remote-tracking branches for the repository you cloned from, then merge one of them into your current branch:
git pull
git pull origin
Normally the branch merged in is the HEAD of the remote repository, but the choice is determined by the branch.<name>.remote and branch.<name>.merge
options; see git-config(1) for details.
Merge into the current branch the remote branch next:
git pull origin next
This leaves a copy of next temporarily in FETCH_HEAD, but does not update any remote-tracking branches. Using remote-tracking branches, the same can be
done by invoking fetch and merge:
git fetch origin
git merge origin/next
If you tried a pull which resulted in complex conflicts and would want to start over, you can recover with git reset
Download objects and refs from another repository
Fetch branches and/or tags (collectively, "refs") from one or more other repositories, along with the objects necessary to complete their histories. Remote-tracking branches are updated (see the description of <refspec> below for ways to control this behavior)
By default, any tag that points into the histories being fetched is also fetched; the effect is to fetch tags that point at branches that you are interested in. This default behavior can be changed by using the --tags or --no-tags options or by configuring remote.<name>.tagOpt. By using a refspec that fetches tags explicitly, you can fetch tags that do not point into branches you are interested in as well
git fetch can fetch from either a single named repository or URL, or from several repositories at once if <group> is given and there is a remotes.<group> entry in the configuration file
When no remote is specified, by default the origin remote will be used, unless there’s an upstream branch configured for the current branch
The names of refs that are fetched, together with the object names they point at, are written to .git/FETCH_HEAD. This information may be used by
scripts or other git commands, such as git-pull(1)
git fetch [<options>] [<repository> [<refspec>...]]
git fetch [<options>] <group>
git fetch --multiple [<options>] [(<repository> | <group>)...]
git fetch --all [<options>]
--all # Fetch all remotes
-a, --append # Append ref names and object names of fetched refs to the existing contents of .git/FETCH_HEAD. Without this option old data in .git/FETCH_HEAD will be overwritten
--depth=<depth> # Limit fetching to the specified number of commits from the tip of each remote branch history. If fetching to a shallow repository created by git clone with --depth=<depth> option, deepen or shorten the history to the specified number of commits. Tags for the deepened commits are not fetched
--deepen=<depth> # Similar to --depth, except it specifies the number of commits from the current shallow boundary instead of from the tip of each remote branch history
--shallow-since=<date> # Deepen or shorten the history of a shallow repository to include all reachable commits after <date>
--shallow-exclude=<revision> # Deepen or shorten the history of a shallow repository to exclude commits reachable from a specified remote branch or tag. This option can be specified multiple times
--unshallow # If the source repository is complete, convert a shallow repository to a complete one, removing all the limitations imposed by shallow repositories
--update-shallow # By default when fetching from a shallow repository, git fetch refuses refs that require updating .git/shallow. This option updates .git/shallow and accepts such refs
--negotiation-tip=<commit|glob> # By default, Git will report, to the server, commits reachable from all local refs to find common commits in an attempt to reduce the size of the to-be-received packfile. If specified, Git will only report commits reachable from the given tips. This is useful to speed up fetches when the user knows which local ref is likely to have commits in common with the upstream ref being fetched
--dry-run # Show what would be done, without making any changes
-f, --force # When git fetch is used with <src>:<dst> refspec it may refuse to update the local branch as discussed in the <refspec> part below. This option overrides that check
-k, --keep # Keep downloaded pack
--multiple # Allow several <repository> and <group> arguments to be specified. No <refspec>s may be specified
--[no-]auto-gc # Run git gc --auto at the end to perform garbage collection if needed. This is enabled by default
--[no-]write-commit-graph # Write a commit-graph after fetching. This overrides the config setting fetch.writeCommitGraph
-p, --prune # Before fetching, remove any remote-tracking references that no longer exist on the remote. Tags are not subject to pruning if they are fetched only because of the default tag auto-following or due to a --tags option. However, if tags are fetched due to an explicit refspec (either on the command line or in the remote configuration, for example if the remote was cloned with the --mirror option), then they are also subject to pruning. Supplying --prune-tags is a shorthand for providing the tag refspec
-P, --prune-tags # Before fetching, remove any local tags that no longer exist on the remote if --prune is enabled. This option should be used more carefully, unlike --prune it will remove any local references (local tags) that have been created. This option is a shorthand for providing the explicit tag refspec along with --prune, see the discussion about that in its documentation
-n, --no-tags # By default, tags that point at objects that are downloaded from the remote repository are fetched and stored locally. This option disables this automatic tag following. The default behavior for a remote may be specified with the remote.<name>.tagOpt setting
--refmap=<refspec> # When fetching refs listed on the command line, use the specified refspec (can be given more than once) to map the refs to remote-tracking branches, instead of the values of remote.*.fetch configuration variables for the remote repository
-t, --tags # Fetch all tags from the remote (i.e., fetch remote tags refs/tags/* into local tags with the same name), in addition to whatever else would otherwise be fetched. Using this option alone does not subject tags to pruning, even if --prune is used (though tags may be pruned anyway if they are also the destination of an explicit refspec; see --prune)
--recurse-submodules[=yes|on-demand|no] # This option controls if and under what conditions new commits of populated submodules should be fetched too. It can be used as a boolean option to completely disable recursion when set to no or to unconditionally recurse into all populated submodules when set to yes, which is the default when this option is used without any value. Use on-demand to only recurse into a populated submodule when the superproject retrieves a commit that updates the submodule’s reference to a commit that isn’t already in the local submodule clone
-j, --jobs=<n> # Number of parallel children to be used for all forms of fetching
--no-recurse-submodules # Disable recursive fetching of submodules (this has the same effect as using the --recurse-submodules=no option)
--set-upstream # If the remote is fetched successfully, add upstream (tracking) reference, used by argument-less git-pull(1) and other commands. For more information, see branch.<name>.merge and branch.<name>.remote in git-config(1)
--submodule-prefix=<path> # Prepend <path> to paths printed in informative messages such as "Fetching submodule foo". This option is used internally when recursing over submodules
--recurse-submodules-default=[yes|on-demand] # This option is used internally to temporarily provide a non-negative default value for the --recurse-submodules option. All other methods of configuring fetch’s submodule recursion (such as settings in gitmodules(5) and git-config(1)) override this option, as does specifying --[no-]recurse-submodules directly
-u, --update-head-ok # By default git fetch refuses to update the head which corresponds to the current branch. This flag disables the check. This is purely for the internal use for git pull to communicate with git fetch, and unless you are implementing your own Porcelain you are not supposed to use it
--upload-pack <upload-pack> # When given, and the repository to fetch from is handled by git fetch-pack, --exec=<upload-pack> is passed to the command to specify non-default path for the command run on the other end
-q, --quiet # Pass --quiet to git-fetch-pack and silence any other internally used git commands. Progress is not reported to the standard error stream
-v, --verbose # Be verbose
--progress # Progress status is reported on the standard error stream by default when it is attached to a terminal, unless -q is specified. This flag forces progress status even if the standard error stream is not directed to a terminal
-o <option>, --server-option=<option> # Transmit the given string to the server when communicating using protocol version 2. The given string must not contain a NUL or LF character. The server’s handling of server options, including unknown ones, is server-specific. When multiple --server-option=<option> are given, they are all sent to the other side in the order listed on the command line
--show-forced-updates # By default, git checks if a branch is force-updated during fetch. This can be disabled through fetch.showForcedUpdates, but the --show-forced-updates option guarantees this check occurs
--no-show-forced-updates # By default, git checks if a branch is force-updated during fetch. Pass --no-show-forced-updates or set fetch.showForcedUpdates to false to skip this check for performance reasons. If used during git-pull the --ff-only option will still check for forced updates before attempting a fast-forward update
-4, --ipv4 # Use IPv4 addresses only, ignoring IPv6 addresses
-6, --ipv6 # Use IPv6 addresses only, ignoring IPv4 addresses
<repository> # The "remote" repository that is the source of a fetch or pull operation. This parameter can be either a URL or the name of a remote
<group> # A name referring to a list of repositories as the value of remotes.<group> in the configuration file
<refspec> # Specifies which refs to fetch and which local refs to update. When no <refspec>s appear on the command line, the refs to fetch are read from remote.<repository>.fetch variables instead
examples
Update the remote-tracking branches
git fetch origin
The above command copies all branches from the remote refs/heads/ namespace and stores them to the local refs/remotes/origin/ namespace,
unless the remote.<repository>.fetch option is used to specify a non-default refspec
Using refspecs explicitly:
git fetch origin +pu:pu maint:tmp
This updates (or creates, as necessary) branches pu and tmp in the local repository by fetching from the branches (respectively) pu and maint
from the remote repository
The pu branch will be updated even if it does not fast-forward, because it is prefixed with a plus sign; tmp will not be
Peek at a remote’s branch, without configuring the remote in your local repository
git fetch git://git.kernel.org/pub/scm/git/git.git maint
git log FETCH_HEAD
The first command fetches the maint branch from the repository at git://git.kernel.org/pub/scm/git/git.git and the second command uses
FETCH_HEAD to examine the branch with git-log(1). The fetched objects will eventually be removed by git’s built-in housekeeping
Update remote refs along with associated objects
Updates remote refs using local refs, while sending objects necessary to complete the given refs
You can make interesting things happen to a repository every time you push into it, by setting up hooks there. See documentation for git-receive-
pack(1)
When the command line does not specify where to push with the <repository> argument, branch.*.remote configuration for the current branch is
consulted to determine where to push. If the configuration is missing, it defaults to origin
When the command line does not specify what to push with <refspec>... arguments or --all, --mirror, --tags options, the command finds the default
<refspec> by consulting remote.*.push configuration, and if it is not found, honors push.default configuration to decide what to push (See git-
config(1) for the meaning of push.default)
When neither the command-line nor the configuration specify what to push, the default behavior is used, which corresponds to the simple value for
push.default: the current branch is pushed to the corresponding upstream branch, but as a safety measure, the push is aborted if the upstream
branch does not have the same name as the local one
git push [--all | --mirror | --tags] [--follow-tags] [--atomic] [-n | --dry-run] [--receive-pack=<git-receive-pack>] [--repo=<repository>] [-f | --force] [-d | --delete] [--prune] [-v | --verbose] [-u | --set-upstream] [-o <string> | --push-option=<string>] [--[no-]signed|--signed=(true|false|if-asked)] [--force-with-lease[=<refname>[:<expect>]]] [--no-verify] [<repository> [<refspec>...]]
<repository> # The "remote" repository that is destination of a push operation. This parameter can be either a URL (see the section GIT URLS below) or the name of a remote (see the section REMOTES below)
<refspec>... # Specify what destination ref to update with what source object. The format of a <refspec> parameter is an optional plus +, followed by the source object <src>, followed by a colon :, followed by the destination ref <dst>
--all # Push all branches (i.e. refs under refs/heads/); cannot be used with other <refspec>
--prune # Remove remote branches that don’t have a local counterpart. For example a remote branch tmp will be removed if a local branch with the same name doesn’t exist any more. This also respects refspecs, e.g. git push --prune remote refs/heads/*:refs/tmp/* would make sure that remote refs/tmp/foo will be removed if refs/heads/foo doesn’t exist
--mirror # Instead of naming each ref to push, specifies that all refs under refs/ (which includes but is not limited to refs/heads/, refs/remotes/, and refs/tags/) be mirrored to the remote repository. Newly created local refs will be pushed to the remote end, locally updated refs will be force updated on the remote end, and deleted refs will be removed from the remote end. This is the default if the configuration option remote.<remote>.mirror is set
-n, --dry-run # Do everything except actually send the updates
--porcelain # Produce machine-readable output. The output status line for each ref will be tab-separated and sent to stdout instead of stderr. The full symbolic names of the refs will be given
-d, --delete # All listed refs are deleted from the remote repository. This is the same as prefixing all refs with a colon
--tags # All refs under refs/tags are pushed, in addition to refspecs explicitly listed on the command line
--follow-tags # Push all the refs that would be pushed without this option, and also push annotated tags in refs/tags that are missing from the remote but are pointing at commit-ish that are reachable from the refs being pushed. This can also be specified with configuration variable push.followTags. For more information, see push.followTags in git-config(1)
--[no-]signed, --signed=(true|false|if-asked) # GPG-sign the push request to update refs on the receiving side, to allow it to be checked by the hooks and/or be logged. If false or --no-signed, no signing will be attempted. If true or --signed, the push will fail if the server does not support signed pushes. If set to if-asked, sign if and only if the server supports signed pushes. The push will also fail if the actual call to gpg --sign fails. See git-receive-pack(1) for the details on the receiving end
--[no-]atomic # Use an atomic transaction on the remote side if available. Either all refs are updated, or on error, no refs are updated. If the server does not support atomic pushes the push will fail
-o <option>, --push-option=<option> # Transmit the given string to the server, which passes them to the pre-receive as well as the post-receive hook. The given string must not contain a NUL or LF character. When multiple --push-option=<option> are given, they are all sent to the other side in the order listed on the command line. When no --push-option=<option> is given from the command line, the values of configuration variable push.pushOption are used instead
--receive-pack=<git-receive-pack>, --exec=<git-receive-pack> # Path to the git-receive-pack program on the remote end. Sometimes useful when pushing to a remote repository over ssh, and you do not have the program in a directory on the default $PATH
--[no-]force-with-lease, --force-with-lease=<refname>, --force-with-lease=<refname>:<expect> # Usually, "git push" refuses to update a remote ref that is not an ancestor of the local ref used to overwrite it. This option overrides this restriction if the current value of the remote ref is the expected value. "git push" fails otherwise
-f, --force # Usually, the command refuses to update a remote ref that is not an ancestor of the local ref used to overwrite it. Also, when --force-with-lease option is used, the command refuses to update a remote ref whose current value does not match what is expected
--repo=<repository> # This option is equivalent to the <repository> argument. If both are specified, the command-line argument takes precedence
-u, --set-upstream # For every branch that is up to date or successfully pushed, add upstream (tracking) reference, used by argument-less git-pull(1) and other commands. For more information, see branch.<name>.merge in git-config(1)
--[no-]thin # These options are passed to git-send-pack(1). A thin transfer significantly reduces the amount of sent data when the sender and receiver share many of the same objects in common. The default is --thin
-q, --quiet # Suppress all output, including the listing of updated refs, unless an error occurs. Progress is not reported to the standard error stream
-v, --verbose # Run verbosely
--progress # Progress status is reported on the standard error stream by default when it is attached to a terminal, unless -q is specified. This flag forces progress status even if the standard error stream is not directed to a terminal
--no-recurse-submodules, --recurse-submodules=check|on-demand|only|no # May be used to make sure all submodule commits used by the revisions to be pushed are available on a remote-tracking branch. If check is used Git will verify that all submodule commits that changed in the revisions to be pushed are available on at least one remote of the submodule. If any commits are missing the push will be aborted and exit with non-zero status. If on-demand is used all submodules that changed in the revisions to be pushed will be pushed. If on-demand was not able to push all necessary revisions it will also be aborted and exit with non-zero status. If only is used all submodules will be recursively pushed while the superproject is left unpushed. A value of no or using --no-recurse-submodules can be used to override the push.recurseSubmodules configuration variable when no submodule recursion is required
--[no-]verify # Toggle the pre-push hook (see githooks(5)). The default is --verify, giving the hook a chance to prevent the push. With --no-verify, the hook is bypassed completely
-4, --ipv4 # Use IPv4 addresses only, ignoring IPv6 addresses
-6, --ipv6 # Use IPv6 addresses only, ignoring IPv4 addresses
OUTPUT
The output of "git push" depends on the transport method used; this section describes the output when pushing over the Git protocol (either
locally or via ssh)
The status of the push is output in tabular form, with each line representing the status of a single ref. Each line is of the form:
- <flag> <summary> <from> -> <to> (<reason>)
If --porcelain is used, then each line of the output is of the form:
- <flag> \t <from>:<to> \t <summary> (<reason>)
The status of up-to-date refs is shown only if --porcelain or --verbose option is used
flag
(space) # for a successfully pushed fast-forward;
+ # for a successful forced update;
- # for a successfully deleted ref;
* # for a successfully pushed new ref;
! # for a ref that was rejected or failed to push; and
= # for a ref that was up to date and did not need pushing
summary
For a successfully pushed ref
the summary shows the old and new values of the ref in a form suitable for using as an argument to git log (this is <old>..<new> in most cases, and <old>...<new> for forced non-fast-forward updates)
For a failed update, more details are given:
rejected # Git did not try to send the ref at all, typically because it is not a fast-forward and you did not force the update
remote rejected # The remote end refused the update. Usually caused by a hook on the remote side, or because the remote repository has one of the following safety options in effect: receive.denyCurrentBranch (for pushes to the checked out branch), receive.denyNonFastForwards (for forced non-fast-forward updates), receive.denyDeletes or receive.denyDeleteCurrent
remote failure # The remote end did not report the successful update of the ref, perhaps because of a temporary error on the remote side, a break in the network connection, or other transient error
from # The name of the local ref being pushed, minus its refs/<type>/ prefix. In the case of deletion, the name of the local ref is omitted
to # The name of the remote ref being updated, minus its refs/<type>/ prefix
reason # A human-readable explanation. In the case of successfully pushed refs, no explanation is needed. For a failed ref, the reason for failure is
described
examples
git push
# Works like git push <remote>, where <remote> is the current branch’s remote (or origin, if no remote is configured for the current branch)
git push origin
# Without additional configuration, pushes the current branch to the configured upstream (branch.<name>.merge configuration variable) if it has the same name as the current branch, and errors out without pushing otherwise
# The default behavior of this command when no <refspec> is given can be configured by setting the push option of the remote, or the push.default configuration variable
# For example, to default to pushing only the current branch to origin use git config remote.origin.push HEAD. Any valid <refspec> (like the ones in the examples below) can be configured as the default for git push origin
git push origin :
# Push "matching" branches to origin. See <refspec> in the OPTIONS section above for a description of "matching" branches
git push origin master
# Find a ref that matches master in the source repository (most likely, it would find refs/heads/master), and update the same ref (e.g. refs/heads/master) in origin repository with it. If master did not exist remotely, it would be created
git push origin HEAD
# A handy way to push the current branch to the same name on the remote
git push mothership master:satellite/master dev:satellite/dev
# Use the source ref that matches master (e.g. refs/heads/master) to update the ref that matches satellite/master (most probably refs/remotes/satellite/master) in the mothership repository; do the same for dev and satellite/dev
# See the section describing <refspec>... above for a discussion of the matching semantics
# This is to emulate git fetch run on the mothership using git push that is run in the opposite direction in order to integrate the work done on satellite, and is often necessary when you can only make connection in one way (i.e. satellite can ssh into mothership but mothership cannot initiate connection to satellite because the latter is behind a firewall or does not run sshd)
# After running this git push on the satellite machine, you would ssh into the mothership and run git merge there to complete the emulation of git pull that were run on mothership to pull changes made on satellite
git push origin HEAD:master
# Push the current branch to the remote ref matching master in the origin repository. This form is convenient to push the current branch without thinking about its local name
git push origin master:refs/heads/experimental
# Create the branch experimental in the origin repository by copying the current master branch. This form is only needed to create a new branch or tag in the remote repository when the local name and the remote name are different; otherwise, the ref name on its own will work
git push origin :experimental
# Find a ref that matches experimental in the origin repository (e.g. refs/heads/experimental), and delete it
git push origin +dev:master
# Update the origin repository’s master branch with the dev branch, allowing non-fast-forward updates. This can leave unreferenced commits dangling in the origin repository. Consider the following situation, where a fast-forward is not possible:
o---o---o---A---B origin/master
\
X---Y---Z dev
The above command would change the origin repository to
A---B (unnamed branch)
/
o---o---o---X---Y---Z master
# Commits A and B would no longer belong to a branch with a symbolic name, and so would be unreachable. As such, these commits would be removed by a git gc command on the origin repository
List references in a remote repository
Displays references available in a remote repository along with the associated commit IDs
git ls-remote [--heads] [--tags] [--refs] [--upload-pack=<exec>] [-q | --quiet] [--exit-code] [--get-url] [--sort=<key>] [--symref] [<repository> [<refs>...]]
-h, --heads, -t, --tags # Limit to only refs/heads and refs/tags, respectively. These options are not mutually exclusive; when given both, references stored in refs/heads and refs/tags are displayed
--refs # Do not show peeled tags or pseudorefs like HEAD in the output
-q, --quiet # Do not print remote URL to stderr
--upload-pack=<exec> # Specify the full path of git-upload-pack on the remote host. This allows listing references from repositories accessed via SSH and where the SSH daemon does not use the PATH configured by the user
--exit-code # Exit with status "2" when no matching refs are found in the remote repository. Usually the command exits with status "0" to indicate it successfully talked with the remote repository, whether it found any matching refs
--get-url # Expand the URL of the given remote repository taking into account any "url.<base>.insteadOf" config setting (See git-config(1)) and exit without talking to the remote
--symref # In addition to the object pointed by it, show the underlying ref pointed by it when showing a symbolic ref. Currently, upload-pack only shows the symref HEAD, so it will be the only one shown by ls-remote
--sort=<key> # Sort based on the key given. Prefix - to sort in descending order of the value. Supports "version:refname" or "v:refname" (tag names are treated as versions). The "version:refname" sort order can also be affected by the "versionsort.suffix" configuration variable. See git-for- each-ref(1) for more sort options, but be aware keys like committerdate that require access to the objects themselves will not work for refs whose objects have not yet been fetched from the remote, and will give a missing object error
-o <option>, --server-option=<option> # Transmit the given string to the server when communicating using protocol version 2. The given string must not contain a NUL or LF character. When multiple --server-option=<option> are given, they are all sent to the other side in the order listed on the command line
<repository> # The "remote" repository to query. This parameter can be either a URL or the name of a remote (see the GIT URLS and REMOTES sections of git- fetch(1))
<refs>... # When unspecified, all references, after filtering done with --heads and --tags, are shown. When <refs>... are specified, only references matching the given patterns are displayed
examples
git ls-remote --tags ./
git ls-remote http://www.kernel.org/pub/scm/git/git.git master pu rc
git remote add korg http://www.kernel.org/pub/scm/git/git.git
git ls-remote --tags korg v\*
Manage set of tracked repositories
Manage the set of repositories ("remotes") whose branches you track
SUBCOMMANDS
subcommand | Designation |
---|---|
ADD | Adds a remote named <name> for the repository at <url> |
RENAME | Rename the remote named <old> to <new> |
RM | Remove the remote named <name> |
SET-HEAD | Sets or deletes the default branch for the named remote |
SET-BRANCHES | Changes the list of branches tracked by the named remote |
GET-URL | Retrieves the URLs for a remote. Configurations for insteadOf and pushInsteadOf are expanded here |
SET-URL | Changes URLs for the remote. Sets first URL for remote <name> to <newurl> |
SHOW | Gives some information about the remote <name> |
PRUNE | Deletes stale references associated with <name> |
UPDATE | Fetch updates for remotes or remote groups in the repository as defined by remotes.<group> |
EXAMPLES | Usage examples for the git remote subcommands |
ADD
Adds a remote named <name> for the repository at <url>. The command git fetch <name> can then be used to create and update remote-tracking branches <name>/<branch>
git remote add [-t <branch>] [-m <master>] [-f] [--[no-]tags] [--mirror=<fetch|push>] <name> <url>
-f # git fetch <name> is run immediately after the remote information is set up
--tags # git fetch <name> imports every tag from the remote repository
--no-tags # git fetch <name> does not import tags from the remote repository. By default, only tags on fetched branches are imported
-t <branch> # instead of the default glob refspec for the remote to track all branches under the refs/remotes/<name>/ namespace, a refspec to track only <branch> is created. You can give more than one -t <branch> to track
-m <master> # a symbolic-ref refs/remotes/<name>/HEAD is set up to point at remote’s <master> branch
RENAME
Rename the remote named <old> to <new>. All remote-tracking branches and configuration settings for the remote are updated
In case <old> and <new> are the same, and <old> is a file under $GIT_DIR/remotes or $GIT_DIR/branches, the remote is converted to the configuration file format
git remote rename <old> <new>
RM
Remove the remote named <name>. All remote-tracking branches and configuration settings for the remote are removed
git remote remove <name>
SET-HEAD
Sets or deletes the default branch (i.e. the target of the symbolic-ref refs/remotes/<name>/HEAD) for the named remote. Having a default branch for a remote is not required, but allows the name of the remote to be specified in lieu of a specific branch. For example, if the default branch for origin is set to master, then origin may be specified wherever you would normally specify origin/master
git remote set-head <name> (-a | --auto | -d | --delete | <branch>)
-d, --delete # the symbolic ref refs/remotes/<name>/HEAD is deleted
-a, --auto # the remote is queried to determine its HEAD, then the symbolic-ref refs/remotes/<name>/HEAD is set to the same branch. e.g., if the remote HEAD is pointed at next, "git remote set-head origin -a" will set the
symbolic-ref refs/remotes/origin/HEAD to refs/remotes/origin/next. This will only work if refs/remotes/origin/next already exists; if not it must be fetched first
Use <branch> to set the symbolic-ref refs/remotes/<name>/HEAD explicitly. e.g., "git remote set-head origin master" will set the symbolic-ref refs/remotes/origin/HEAD to refs/remotes/origin/master. This will only work if
refs/remotes/origin/master already exists; if not it must be fetched first
SET-BRANCHES
Changes the list of branches tracked by the named remote. This can be used to track a subset of the available remote branches after the initial setup for a remote
The named branches will be interpreted as if specified with the -t option on the git remote add command line
git remote set-branches [--add] <name> <branch>...
--add # instead of replacing the list of currently tracked branches, adds to that list
GET-URL
Retrieves the URLs for a remote. Configurations for insteadOf and pushInsteadOf are expanded here. By default, only the first URL is listed
git remote get-url [--push] [--all] <name>
--push # push URLs are queried rather than fetch URLs
--all # all URLs for the remote will be listed
SET-URL
Changes URLs for the remote. Sets first URL for remote <name> that matches regex <oldurl> (first URL if no <oldurl> is given) to <newurl>. If <oldurl> doesn’t match any URL, an error occurs and nothing is changed
Note that the push URL and the fetch URL, even though they can be set differently, must still refer to the same place. What you pushed to the push URL should be what you would see if you immediately fetched from the fetch URL. If
you are trying to fetch from one place (e.g. your upstream) and push to another (e.g. your publishing repository), use two separate remotes
git remote set-url [--push] <name> <newurl> [<oldurl>]
git remote set-url --add [--push] <name> <newurl>
git remote set-url --delete [--push] <name> <url>
--push # push URLs are manipulated instead of fetch URLs
--add # instead of changing existing URLs, new URL is added
--delete # instead of changing existing URLs, all URLs matching regex <url> are deleted for remote <name>. Trying to delete all non-push URLs is an error
SHOW
Gives some information about the remote <name>
git remote [-v | --verbose] show [-n] <name>...
-n # the remote heads are not queried first with git ls-remote <name>; cached information is used instead
PRUNE
Deletes stale references associated with <name>. By default, stale remote-tracking branches under <name> are deleted, but depending on global configuration and the configuration of the remote we might even prune local tags that
haven’t been pushed there. Equivalent to git fetch --prune <name>, except that no new references will be fetched
git remote prune [-n | --dry-run] <name>...
--dry-run # report what branches will be pruned, but do not actually prune them
UPDATE
Fetch updates for remotes or remote groups in the repository as defined by remotes.<group>. If neither group nor remote is specified on the command line, the configuration parameter remotes.default will be used; if remotes.default is not defined, all remotes which do not have the configuration parameter remote.<name>.skipDefaultUpdate set to true will be updated
git remote [-v | --verbose] update [-p | --prune] [(<group> | <remote>)...]
--prune # run pruning against all the remotes that are updated
EXAMPLES
Add a new remote, fetch, and check out a branch from it
$ git remote
origin
$ git branch -r
origin/HEAD -> origin/master
origin/master
$ git remote add staging git://git.kernel.org/.../gregkh/staging.git
$ git remote
origin
staging
$ git fetch staging
...
From git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging
* [new branch] master -> staging/master
* [new branch] staging-linus -> staging/staging-linus
* [new branch] staging-next -> staging/staging-next
$ git branch -r
origin/HEAD -> origin/master
origin/master
staging/master
staging/staging-linus
staging/staging-next
$ git switch -c staging staging/master
...
Imitate git clone but track only selected branches
$ mkdir project.git
$ cd project.git
$ git init
$ git remote add -f -t master -m master origin git://example.com/git.git/
$ git merge origin
Shows the commit logs
The command takes options applicable to the git rev-list command to control what is shown and how, and options applicable to the git diff-* commands to control how the changes each commit introduces are shown
OPTIONS
git log [<options>] [<revision range>] [[--] <path>...]
--follow # Continue listing the history of a file beyond renames (works only for a single file)
--no-decorate, --decorate[=short|full|auto|no] # Print out the ref names of any commits that are shown
--decorate-refs=<pattern>, --decorate-refs-exclude=<pattern> # If no --decorate-refs is given, pretend as if all refs were included
--source # Print out the ref name given on the command line by which each commit was reached
--[no-]use-mailmap # Use mailmap file to map author and committer names and email addresses to canonical real names and email addresses
--full-diff # Without this flag, git log -p <path> shows commits that touch the specified paths, and diffs about the same specified paths. With this, the full diff is shown for commits that touch the specified paths
Note that this affects all diff-based output types, e.g. those produced by --stat, etc.
--log-size # Include a line “log size <number>” in the output for each commit, where <number> is the length of that commit’s message in bytes
-L <start>,<end>:<file>, -L :<funcname>:<file> # Trace the evolution of the line range given by "<start>,<end>" (or the function name regex <funcname>) within the <file>
<start> and <end> can take one of these forms:
- number # If <start> or <end> is a number, it specifies an absolute line number (lines count from 1)
- /regex/ # This form will use the first line matching the given POSIX regex
- +offset or -offset # This is only valid for <end> and will specify a number of lines before or after the line given by <start>
<revision range> # Show only commits in the specified revision range
[--] <path>... # Show only commits that are enough to explain how the files that match the specified paths came to be. See History Simplification below for details and other simplification modes
-<number>, -n <number>, --max-count=<number> # Limit the number of commits to output
--skip=<number> # Skip number commits before starting to show the commit output
--since=<date>, --after=<date> # Show commits more recent than a specific date
--until=<date>, --before=<date> # Show commits older than a specific date
--author=<pattern>, --committer=<pattern> # Limit the commits output to ones with author/committer header lines that match the specified pattern (regular expression)
--grep-reflog=<pattern> # Limit the commits output to ones with reflog entries that match the specified pattern (regular expression)
--grep=<pattern> # Limit the commits output to ones with log message that matches the specified pattern (regular expression)
--all-match # Limit the commits output to ones that match all given --grep, instead of ones that match at least one
--invert-grep # Limit the commits output to ones with log message that do not match the pattern specified with --grep=<pattern>
-i, --regexp-ignore-case # Match the regular expression limiting patterns without regard to letter case
--basic-regexp # Consider the limiting patterns to be basic regular expressions; this is the default
-E, --extended-regexp # Consider the limiting patterns to be extended regular expressions instead of the default basic regular expressions
-F, --fixed-strings # Consider the limiting patterns to be fixed strings (don’t interpret pattern as a regular expression)
-P, --perl-regexp # Consider the limiting patterns to be Perl-compatible regular expressions
--remove-empty # Stop when a given path disappears from the tree
--merges # Print only merge commits
--no-merges # Do not print commits with more than one parent
--min-parents=<number>, --max-parents=<number>, --no-min-parents, --no-max-parents # Show only commits which have at least (or at most) that many parent commits
--first-parent # Follow only the first parent commit upon seeing a merge commit
--not # Reverses the meaning of the ^ prefix (or lack thereof) for all following revision specifiers, up to the next --not
--all # Pretend as if all the refs in refs/, along with HEAD, are listed on the command line as <commit>
--branches[=<pattern>] # Pretend as if all the refs in refs/heads are listed on the command line as <commit>
--tags[=<pattern>] # Pretend as if all the refs in refs/tags are listed on the command line as <commit>
--remotes[=<pattern>] # Pretend as if all the refs in refs/remotes are listed on the command line as <commit>
--glob=<glob-pattern> # Pretend as if all the refs matching shell glob <glob-pattern> are listed on the command line as <commit>
--exclude=<glob-pattern> # Do not include refs matching <glob-pattern> that the next --all, --branches, --tags, --remotes, or --glob would otherwise consider
--reflog # Pretend as if all objects mentioned by reflogs are listed on the command line as <commit>
--alternate-refs # Pretend as if all objects mentioned as ref tips of alternate repositories were listed on the command line
--single-worktree # By default, all working trees will be examined by the following options when there are more than one (see git-worktree(1)): --all, --reflog and --indexed-objects
--ignore-missing # Upon seeing an invalid object name in the input, pretend as if the bad input was not given
--bisect # Pretend as if the bad bisection ref refs/bisect/bad was listed and as if it was followed by --not and the good bisection refs refs/bisect/good-* on the command line
--stdin # In addition to the <commit> listed on the command line, read them from the standard input
--cherry-mark # Like --cherry-pick (see below) but mark equivalent commits with = rather than omitting them, and inequivalent ones with +
--cherry-pick # Omit any commit that introduces the same change as another commit on the “other side” when the set of commits are limited with symmetric difference
--left-only, --right-only # List only commits on the respective side of a symmetric difference, i.e. only those which would be marked < resp. > by --left-right
--cherry # A synonym for --right-only --cherry-mark --no-merges; useful to limit the output to the commits on our side and mark those that have been applied to the other side of a forked history with git log --cherry upstream
-g, --walk-reflogs # Instead of walking the commit ancestry chain, walk reflog entries from the most recent one to older ones
--merge # After a failed merge, show refs that touch files having a conflict and don’t exist on all heads to merge
--boundary # Output excluded boundary commits
<paths> # Commits modifying the given <paths> are selected
--simplify-by-decoration # Commits that are referred by some branch or tag are selected
--full-history # Same as the default mode, but does not prune some history
--dense # Only the selected commits are shown, plus some to have a meaningful history
--sparse # All commits in the simplified history are shown
--simplify-merges # Additional option to --full-history to remove some needless merges from the resulting history, as there are no selected commits contributing to this merge
--ancestry-path # When given a range of commits to display (e.g. commit1..commit2 or commit2 ^commit1), only display commits that exist directly on the ancestry chain between commit1 and commit2
--full-history without parent rewriting # This mode differs from the default in one point: always follow all parents of a merge, even if it is TREESAME to one of them
--full-history with parent rewriting # Ordinary commits are only included if they are !TREESAME (though this can be changed, see --sparse below)
--dense # Commits that are walked are included if they are not TREESAME to any parent
--sparse # All commits that are walked are included
--simplify-merges # First, build a history graph in the same way that --full-history with parent rewriting does (see above)
--ancestry-path # Limit the displayed commits to those directly on the ancestry chain between the “from” and “to” commits in the given commit range
--date-order # Show no parents before all of its children are shown, but otherwise show commits in the commit timestamp order
--author-date-order # Show no parents before all of its children are shown, but otherwise show commits in the author timestamp order
--topo-order # Show no parents before all of its children are shown, and avoid showing commits on multiple lines of history intermixed
--reverse # Output the commits chosen to be shown (see Commit Limiting section above) in reverse order
--no-walk[=(sorted|unsorted)] # Only show the given commits, but do not traverse their ancestors
--do-walk # Overrides a previous --no-walk
Commit Formatting
--pretty[=<format>], --format=<format> # Pretty-print the contents of the commit logs in a given format, where <format> can be one of oneline, short, medium, full, fuller, reference, email, raw, format:<string> and tformat:<string>
--abbrev-commit # Instead of showing the full 40-byte hexadecimal commit object name, show only a partial prefix
--no-abbrev-commit # Show the full 40-byte hexadecimal commit object name
--oneline # This is a shorthand for "--pretty=oneline --abbrev-commit" used together
--encoding=<encoding> # The commit objects record the encoding used for the log message in their encoding header; this option can be used to tell the command to re-code the commit log message in the encoding preferred by the user
--expand-tabs=<n>, --expand-tabs, --no-expand-tabs # Perform a tab expansion (replace each tab with enough spaces to fill to the next display column that is multiple of <n>) in the log message before showing it in the output
--notes[=<ref>] # Show the notes (see git-notes(1)) that annotate the commit, when showing the commit log message
--no-notes # Do not show notes
--show-notes[=<ref>], --[no-]standard-notes # These options are deprecated
--show-signature # Check the validity of a signed commit object by passing the signature to gpg --verify and show the output
--relative-date # Synonym for --date=relative
--date=<format> # Only takes effect for dates shown in human-readable format, such as when using --pretty
--parents # Print also the parents of the commit (in the form "commit parent...")
--children # Print also the children of the commit (in the form "commit child...")
--left-right # Mark which side of a symmetric difference a commit is reachable from
--graph # Draw a text-based graphical representation of the commit history on the left hand side of the output
--show-linear-break[=<barrier>] # When --graph is not used, all history branches are flattened which can make it hard to see that the two consecutive commits do not belong to a linear branch
Diff Formatting
Listed below are options that control the formatting of diff output
-c # With this option, diff output for a merge commit shows the differences from each of the parents to the merge result simultaneously instead of showing pairwise diff between a parent and the result one at a time
--cc # This flag implies the -c option and further compresses the patch output by omitting uninteresting hunks whose contents in the parents have only two variants and the merge result picks one of them without modification
--combined-all-paths # This flag causes combined diffs (used for merge commits) to list the name of the file from all parents
-m # This flag makes the merge commits show the full diff like regular commits; for each merge parent, a separate log entry and diff is generated
-r # Show recursive diffs
-t # Show the tree objects in the diff output
PRETTY FORMATS
If the commit is a merge, and if the pretty-format is not oneline, email or raw, an additional line is inserted before the Author: line. This line begins with "Merge: " and the hashes of ancestral commits are printed, separated by spaces. Note that the listed commits may not necessarily be the list of the direct parent commits if you have limited your view of history: for example, if you are only interested in changes related to a certain directory or file
There are several built-in formats, and you can define additional formats by setting a pretty.<name> config option to either another format name, or a format: string, as described below (see git-config(1))
Here are the details of the built-in formats:
- oneline
- short
- medium
- full
- fuller
- reference
- raw
- format:<string> # see man for details
- tformat:<string> # see man for details
COMMON DIFF OPTIONS
-p, -u, --patch # Generate patch (see section on generating patches)
-s, --no-patch # Suppress diff output
-U<n>, --unified=<n> # Generate diffs with <n> lines of context instead of the usual three
--output=<file> # Output to a specific file instead of stdout
--output-indicator-new=<char>, --output-indicator-old=<char>, --output-indicator-context=<char> # Specify the character used to indicate new, old or context lines in the generated patch
--raw # For each commit, show a summary of changes using the raw diff format
--patch-with-raw # Synonym for -p --raw
--indent-heuristic # Enable the heuristic that shifts diff hunk boundaries to make patches easier to read
--no-indent-heuristic # Disable the indent heuristic
--minimal # Spend extra time to make sure the smallest possible diff is produced
--patience # Generate a diff using the "patience diff" algorithm
--histogram # Generate a diff using the "histogram diff" algorithm
--anchored=<text> # Generate a diff using the "anchored diff" algorithm
--diff-algorithm={patience|minimal|histogram|myers} # Choose a diff algorithm
The variants are as follows:
default, myers # The basic greedy diff algorithm
minimal # Spend extra time to make sure the smallest possible diff is produced
patience # Use "patience diff" algorithm when generating patches
histogram # This algorithm extends the patience algorithm to "support low-occurrence common elements"
--stat[=<width>[,<name-width>[,<count>]]] # Generate a diffstat
--compact-summary # Output a condensed summary of extended header information such as file creations or deletions ("new" or "gone", optionally "+l" if it’s a symlink) and mode changes ("+x" or "-x" for adding or removing executable bit respectively) in diffstat
--numstat # Similar to --stat, but shows number of added and deleted lines in decimal notation and pathname without abbreviation, to make it more machine friendly
--shortstat # Output only the last line of the --stat format containing total number of modified files, as well as number of added and deleted lines
-X[<param1,param2,...>], --dirstat[=<param1,param2,...>] # Output the distribution of relative amount of changes for each sub-directory
The following parameters are available:
changes # Compute the dirstat numbers by counting the lines that have been removed from the source, or added to the destination
lines # Compute the dirstat numbers by doing the regular line-based diff analysis, and summing the removed/added line counts
files # Compute the dirstat numbers by counting the number of files changed
cumulative # Count changes in a child directory for the parent directory as well
<limit> # An integer parameter specifies a cut-off percent (3% by default)
Example: The following will count changed files, while ignoring directories with less than 10% of the total amount of changed files, and accumulating child directory counts in the parent directories: --dirstat=files,10,cumulative
--cumulative # Synonym for --dirstat=cumulative
--summary # Output a condensed summary of extended header information such as creations, renames and mode changes
--patch-with-stat # Synonym for -p --stat
-z # Separate the commits with NULs instead of with newlines
--name-only # Show only names of changed files
--name-status # Show only names and status of changed files
--submodule[=<format>] # Specify how differences in submodules are shown
--color[=<when>] # Show colored diff
--no-color # Turn off colored diff
--color-moved[=<mode>] # Moved lines of code are colored differently
The mode must be one of:
no # Moved lines are not highlighted
default # Is a synonym for zebra
plain # Any line that is added in one location and was removed in another location will be colored with color
blocks # Blocks of moved text of at least 20 alphanumeric characters are detected greedily
zebra # Blocks of moved text are detected as in blocks mode
dimmed-zebra # Similar to zebra, but additional dimming of uninteresting parts of moved code is performed
--no-color-moved # Turn off move detection
--color-moved-ws=<modes> # This configures how whitespace is ignored when performing the move detection for --color-moved
These modes can be given as a comma separated list:
no # Do not ignore whitespace when performing move detection
ignore-space-at-eol # Ignore changes in whitespace at EOL
ignore-space-change # Ignore changes in amount of whitespace
ignore-all-space # Ignore whitespace when comparing lines
allow-indentation-change # Initially ignore any whitespace in the move detection, then group the moved code blocks only into a block if the change in whitespace is the same per line
--no-color-moved-ws # Do not ignore whitespace when performing move detection
--word-diff[=<mode>] # Show a word diff, using the <mode> to delimit changed words
The <mode> defaults to plain, and must be one of:
color # Highlight changed words using only colors
plain # Show words as [-removed-] and {+added+}
porcelain # Use a special line-based format intended for script consumption
none # Disable word diff again
--word-diff-regex=<regex> # Use <regex> to decide what a word is, instead of considering runs of non-whitespace to be a word
--color-words[=<regex>] # Equivalent to --word-diff=color plus (if a regex was specified) --word-diff-regex=<regex>
--no-renames # Turn off rename detection, even when the configuration file gives the default to do so
--[no-]rename-empty # Whether to use empty blobs as rename source
--check # Warn if changes introduce conflict markers or whitespace errors
--ws-error-highlight=<kind> # Highlight whitespace errors in the context, old or new lines of the diff
--full-index # Instead of the first handful of characters, show the full pre- and post-image blob object names on the "index" line when generating patch format output
--binary # In addition to --full-index, output a binary diff that can be applied with git-apply
--abbrev[=<n>] # Instead of showing the full 40-byte hexadecimal object name in diff-raw format output and diff-tree header lines, show only a partial prefix
-B[<n>][/<m>], --break-rewrites[=[<n>][/<m>]] # Break complete rewrite changes into pairs of delete and create
-M[<n>], --find-renames[=<n>] # If generating diffs, detect and report renames for each commit
-C[<n>], --find-copies[=<n>] # Detect copies as well as renames
--find-copies-harder # For performance reasons, by default, -C option finds copies only if the original file of the copy was modified in the same changeset
-D, --irreversible-delete # Omit the preimage for deletes, i.e. print only the header but not the diff between the preimage and /dev/null
-l<num> # The -M and -C options require O(n^2) processing time where n is the number of potential rename/copy targets
--diff-filter=[(A|C|D|M|R|T|U|X|B)...[*]] # Select only files that are Added (A), Copied (C), Deleted (D), Modified (M), Renamed (R), have their type (i.e. regular file, symlink, submodule, ...) changed (T), are Unmerged (U), are Unknown (X), or have had their pairing Broken (B)
-S<string> # Look for differences that change the number of occurrences of the specified string (i.e. addition/deletion) in a file. Intended for the scripter’s use
-G<regex> # Look for differences whose patch text contains added/removed lines that match <regex>
--find-object=<object-id> # Look for differences that change the number of occurrences of the specified object
--pickaxe-all # When -S or -G finds a change, show all the changes in that changeset, not just the files that contain the change in <string>
--pickaxe-regex # Treat the <string> given to -S as an extended POSIX regular expression to match
-O<orderfile> # Control the order in which files appear in the output
<orderfile> is parsed as follows:
- Blank lines are ignored, so they can be used as separators for readability
- Lines starting with a hash ("#") are ignored, so they can be used for comments. Add a backslash ("\\") to the beginning of the pattern if it starts with a hash
- Each other line contains a single pattern
-R # Swap two inputs; that is, show differences from index or on-disk file to tree contents
--relative[=<path>] # When run from a subdirectory of the project, it can be told to exclude changes outside the directory and show pathnames relative to it with this option
-a, --text # Treat all files as text
--ignore-cr-at-eol # Ignore carriage-return at the end of line when doing a comparison
--ignore-space-at-eol # Ignore changes in whitespace at EOL
-b, --ignore-space-change # Ignore changes in amount of whitespace
-w, --ignore-all-space # Ignore whitespace when comparing lines
--ignore-blank-lines # Ignore changes whose lines are all blank
--inter-hunk-context=<lines> # Show the context between diff hunks, up to the specified number of lines, thereby fusing hunks that are close to each other
-W, --function-context # Show whole surrounding functions of changes
--ext-diff # Allow an external diff helper to be executed
--no-ext-diff # Disallow external diff drivers
--textconv, --no-textconv # Allow (or disallow) external text conversion filters to be run when comparing binary files
--ignore-submodules[=<when>] # Ignore changes to submodules in the diff generation
--src-prefix=<prefix> # Show the given source prefix instead of "a/"
--dst-prefix=<prefix> # Show the given destination prefix instead of "b/"
--no-prefix # Do not show any source or destination prefix
--line-prefix=<prefix> # Prepend an additional prefix to every line of output
--ita-invisible-in-index # By default entries added by "git add -N" appear as an existing empty file in "git diff" and a new file in "git diff --cached"
EXAMPLES
git log --no-merges
Show the whole commit history, but skip any merges
git log v2.6.12.. include/scsi drivers/scsi
Show all commits since version v2.6.12 that changed any file in the include/scsi or drivers/scsi subdirectories
git log --since="2 weeks ago" -- gitk
Show the changes during the last two weeks to the file gitk. The -- is necessary to avoid confusion with the branch named gitk
git log --name-status release..test
Show the commits that are in the "test" branch but not yet in the "release" branch, along with the list of paths each commit modifies
git log --follow builtin/rev-list.c
Shows the commits that changed builtin/rev-list.c, including those commits that occurred before the file was given its present name
git log --branches --not --remotes=origin
Shows all commits that are in any of local branches but not in any of remote-tracking branches for origin (what you have that origin doesn’t)
git log master --not --remotes=*/master
Shows all commits that are in local master but not in any remote repository master branches
git log -p -m --first-parent
Shows the history including change diffs, but only from the “main branch” perspective, skipping commits that come from merged branches, and showing full diffs of changes introduced by the merges. This makes sense only when following
a strict policy of merging all topic branches when staying on a single integration branch
git log -L '/int main/',/^}/:main.c
Shows how the function main() in the file main.c evolved over time
git log -3
Limits the number of commits to show to 3
TRICK
# show all logs
git log
-p -2 # show diff between two last commit
-U1 --word-diff # show diff in line
--stat # show statistics
--pretty=oneline
--pretty=short
--pretty=full
--pretty=fuller
--pretty=format:"%h - %an, %ar : %s"
https://git-scm.com/book/en/v2/Git-Branching-Basic-Branching-and-Merging
Join two or more development histories together
Incorporates changes from the named commits (since the time their histories diverged from the current branch) into the current branch. This command is used by git pull to incorporate changes from another repository and can be used by hand to merge changes from one branch into another
git merge [-n] [--stat] [--no-commit] [--squash] [--[no-]edit] [--no-verify] [-s <strategy>] [-X <strategy-option>] [-S[<keyid>]] [--[no-]allow-unrelated-histories] [--[no-]rerere-autoupdate] [-m <msg>] [-F <file>] [<commit>...]
git merge (--continue | --abort | --quit)
--commit # Perform the merge and commit the result
--no-commit # perform the merge and stop just before creating a merge commit, to give the user a chance to inspect and further tweak the merge result before committing
--edit, -e # Invoke an editor before committing successful mechanical merge to further edit the auto-generated merge message, so that the user can explain and justify the merge
--no-edit # Accept the auto-generated message (this is generally discouraged)
--cleanup=<mode> # Determines how the merge message will be cleaned up before committing
--ff, --no-ff, --ff-only # Specifies how a merge is handled when the merged-in history is already a descendant of the current history
- --ff # is the default unless merging an annotated (and possibly signed) tag that is not stored in its natural place in the refs/tags/ hierarchy, in which case --no-ff is assumed
--no-ff # create a merge commit in all cases, even when the merge could instead be resolved as a fast-forward
--ff-only # resolve the merge as a fast-forward when possible. When not possible, refuse to merge and exit with a non-zero status
-S[<keyid>], --gpg-sign[=<keyid>] # GPG-sign the resulting merge commit. The keyid argument is optional and defaults to the committer identity
--log[=<n>], --no-log # In addition to branch names, populate the log message with one-line descriptions from at most <n> actual commits that are being merged
--no-log # do not list one-line descriptions from the actual commits being merged
--signoff, --no-signoff # Add Signed-off-by line by the committer at the end of the commit log message
--stat # Show a diffstat at the end of the merge
-n, --no-stat # do not show a diffstat at the end of the merge
--squash # produce the working tree and index state as if a real merge happened
--no-squash # perform the merge and commit the result
--verify-signatures, --no-verify-signatures # verify that the tip commit of the side branch being merged is signed with a valid key
-q, --quiet # Operate quietly. Implies --no-progress
-v, --verbose # be verbose
--progress, --no-progress # Turn progress on/off explicitly
--allow-unrelated-histories # by default, git merge command refuses to merge histories that do not share a common ancestor
-m <msg> # set the commit message to be used for the merge commit (in case one is created)
--continue # after a git merge stops due to conflicts you can conclude the merge by running git merge --continue
-s <strategy>, --strategy=<strategy> # use the given merge strategy
- resolve # this can only resolve two heads
- recursive # this can only resolve two heads using a 3-way merge algorithm
- options:
ours # forces conflicting hunks to be auto-resolved cleanly by favoring our version
theirs # opposite of ours; note that, unlike ours, there is no theirs merge strategy to confuse this merge option with
patience # merge-recursive spends a little extra time to avoid mismerges that sometimes occur due to unimportant matching lines
diff-algorithm=[patience|minimal|histogram|myers] # use a different diff algorithm, which can help avoid mismerges that occur due to unimportant matching lines
ignore-space-change # treats lines with the indicated type of whitespace change as unchanged for the sake of a three-way merge (also applies to the three options below)
ignore-all-space
ignore-space-at-eol
ignore-cr-at-eol
renormalize # runs a virtual check-out and check-in of all three stages of a file when resolving a three-way merge
no-renormalize # disables the renormalize option
no-renames # turn off rename detection
find-renames[=<n>] # turn on rename detection, optionally setting the similarity threshold
subtree[=<path>] # is a more advanced form of subtree strategy, where the strategy makes a guess on how two trees must be shifted to match with each other when merging
octopus # resolves cases with more than two heads, but refuses to do a complex merge that needs manual resolution
ours # resolves any number of heads, but the resulting tree of the merge is always that of the current branch head, effectively ignoring all changes from all other branches
subtree # This is a modified recursive strategy. When merging trees A and B, if B corresponds to a subtree of A, B is first adjusted to match the tree structure of A, instead of reading the trees at the same level
Switch branches or restore working tree files
# To prepare for working on <branch>, switch to it by updating the index and the files in the working tree, and by pointing HEAD at the branch
git checkout [<branch>]
# Specifying -b causes a new branch to be created as if git-branch(1) were called and then checked out
git checkout -b|-B <new_branch> [<start_point>]
# Prepare to work on top of <commit>, by detaching HEAD at it (see "DETACHED HEAD" section), and updating the index and the files in the working tree
git checkout --detach [<branch>], git checkout [--detach] <commit>
# Overwrite the contents of the files that match the pathspec. When the <tree-ish> (most often a commit) is not given, overwrite working tree with the contents in the index
git checkout [-f|--ours|--theirs|-m|--conflict=<style>] [<tree-ish>] [--] <pathspec>..., git checkout [-f|--ours|--theirs|-m|--conflict=<style>] [<tree-ish>] --pathspec-from-file=<file> [--pathspec-file-nul]
# similar to the previous mode, but lets you use the interactive interface to show the "diff" output and choose which hunks to use in the result
git checkout (-p|--patch) [<tree-ish>] [--] [<pathspec>...]
-q, --quiet # Quiet, suppress feedback messages
--progress, --no-progress # Progress status is reported on the standard error stream by default when it is attached to a terminal, unless --quiet is specified
-f, --force # When switching branches, proceed even if the index or the working tree differs from HEAD. This is used to throw away local changes
--ours, --theirs # When checking out paths from the index, check out stage #2 (ours) or #3 (theirs) for unmerged paths
-b <new_branch> # Create a new branch named <new_branch> and start it at <start_point>
-B <new_branch> # Creates the branch <new_branch> and start it at <start_point>; if it already exists, then reset it to <start_point>. This is equivalent to running "git branch" with "-f"
-t, --track # When creating a new branch, set up "upstream" configuration. See "--track" in git-branch(1) for details. If no -b option is given, the name of the new branch will be derived from the remote-tracking branch, by looking at the local part of the refspec configured for the corresponding remote, and then stripping the initial part up to the "*"
--no-track # Do not set up "upstream" configuration, even if the branch.autoSetupMerge configuration variable is true
--guess, --no-guess # If <branch> is not found but there does exist a tracking branch in exactly one remote (call it <remote>) with a matching name, treat as equivalent to $ git checkout -b <branch> --track <remote>/<branch>
-l # Create the new branch’s reflog
--detach # Rather than checking out a branch to work on it, check out a commit for inspection and discardable experiments
--orphan <new_branch> # Create a new orphan branch, named <new_branch>, started from <start_point> and switch to it
--ignore-skip-worktree-bits # In sparse checkout mode, git checkout -- <paths> would update only entries matched by <paths> and sparse patterns in $GIT_DIR/info/sparse-checkout
-m, --merge # a three-way merge between the current branch, your working tree contents, and the new branch is done, and you will be on the new branch
--conflict=<style> # The same as --merge option above, but changes the way the conflicting hunks are presented, overriding the merge.conflictStyle configuration variable
-p, --patch # Interactively select hunks in the difference between the <tree-ish> (or the index, if unspecified) and the working tree
--ignore-other-worktrees # git checkout refuses when the wanted ref is already checked out by another worktree. This option makes it check the ref out anyway. In other words, the ref can be held by more than one worktree
--overwrite-ignore, --no-overwrite-ignore # Silently overwrite ignored files when switching branches. This is the default behavior. Use --no-overwrite-ignore to abort the operation when the new branch contains ignored files
--recurse-submodules, --no-recurse-submodules # Using --recurse-submodules will update the content of all initialized submodules according to the commit recorded in the superproject
--overlay, --no-overlay # In the default overlay mode, git checkout never removes files from the index or the working tree
--pathspec-from-file=<file> # Pathspec is passed in <file> instead of commandline args
--pathspec-file-nul # Only meaningful with --pathspec-from-file. Pathspec elements are separated with NUL character and all other characters are taken literally (including newlines and quotes)
<branch> # Branch to checkout; if it refers to a branch (i.e., a name that, when prepended with "refs/heads/", is a valid ref), then that branch is checked out. Otherwise, if it refers to a valid commit, your HEAD becomes "detached" and you are no longer on any branch
<new_branch> # Name for the new branch
<start_point> # The name of a commit at which to start the new branch. Defaults to HEAD
<tree-ish> # Tree to checkout from (when paths are given). If not specified, the index will be used
-- # Do not interpret any more arguments as options
<pathspec>... # Limits the paths affected by the operation
USED
checkout a remote branch
In order to checkout a remote branch you have to first fetch the contents of the branch
git fetch --all
# or
# git fetch <repo> <branch>
Checkout the remote branch
# In modern versions of Git, you can then checkout the remote branch like a local branch
git checkout <remotebranch>
# Older versions of Git require the creation of a new branch based on the remote
git checkout -b <remotebranch> <repo>/<remotebranch>
Checkout the local branch
# Additionally you can checkout a new local branch and reset it to the remote branches last commit
git checkout -b <branch> && git reset --hard origin/<branch>
List, create, or delete branches
If --list is given, or if there are no non-option arguments, existing branches are listed; the current branch will be highlighted in green and marked with an asterisk. Any branches checked out in linked worktrees will be highlighted in cyan and marked with a plus sign. Option -r causes the remote-tracking branches to be listed, and option -a shows both local and remote branches
If a <pattern> is given, it is used as a shell wildcard to restrict the output to matching branches. If multiple patterns are given, a branch is shown if it matches any of the patterns
git branch [--color[=<when>] | --no-color] [--show-current] [-v [--abbrev=<length> | --no-abbrev]] [--column[=<options>] | --no-column] [--sort=<key>] [(--merged | --no-merged) [<commit>]] [--contains [<commit>]] [--no-contains [<commit>]] [--points-at <object>] [--format=<format>] [(-r | --remotes) | (-a | --all)] [--list] [<pattern>...]
git branch [--track | --no-track] [-f] <branchname> [<start-point>]
git branch (--set-upstream-to=<upstream> | -u <upstream>) [<branchname>]
git branch --unset-upstream [<branchname>]
git branch (-m | -M) [<oldbranch>] <newbranch>
git branch (-c | -C) [<oldbranch>] <newbranch>
git branch (-d | -D) [-r] <branchname>...
git branch --edit-description [<branchname>]
-d, --delete # Delete a branch. The branch must be fully merged in its upstream branch, or in HEAD if no upstream was set with --track or --set-upstream-to
-D # Shortcut for --delete --force
--create-reflog # Create the branch’s reflog. This activates recording of all changes made to the branch ref, enabling use of date based sha1 expressions such as "<branchname>@{yesterday}"
-f, --force # Reset <branchname> to <startpoint>, even if <branchname> exists already. Without -f, git branch refuses to change an existing branch. In combination with -d (or --delete), allow deleting the branch irrespective of its merged status. In combination with -m (or --move), allow renaming the branch even if the new branch name already exists, the same applies for -c (or --copy)
-m, --move # Move/rename a branch and the corresponding reflog
-M # Shortcut for --move --force
-c, --copy # Copy a branch and the corresponding reflog
-C # Shortcut for --copy --force
--color[=<when>] # Color branches to highlight current, local, and remote-tracking branches. The value must be always (the default), never, or auto
--no-color # Turn off branch colors, even when the configuration file gives the default to color output. Same as --color=never
-i, --ignore-case # Sorting and filtering branches are case insensitive
--column[=<options>], --no-column # Display branch listing in columns. See configuration variable column.branch for option syntax
-r, --remotes # List or delete (if used with -d) the remote-tracking branches. Combine with --list to match the optional pattern(s)
-a, --all # List both remote-tracking branches and local branches. Combine with --list to match optional pattern(s)
-l, --list # List branches. With optional <pattern>..., e.g. git branch --list 'maint-*', list only the branches that match the pattern(s)
--show-current # Print the name of the current branch. In detached HEAD state, nothing is printed
-v, -vv, --verbose # When in list mode, show sha1 and commit subject line for each head, along with relationship to upstream branch (if any)
-q, --quiet # Be more quiet when creating or deleting a branch, suppressing non-error messages
--abbrev=<length> # Alter the sha1’s minimum display length in the output listing. The default value is 7 and can be overridden by the core.abbrev config option
--no-abbrev # Display the full sha1s in the output listing rather than abbreviating them
-t, --track # When creating a new branch, set up branch.<name>.remote and branch.<name>.merge configuration entries to mark the start-point branch as "upstream" from the new branch
--no-track # Do not set up "upstream" configuration, even if the branch.autoSetupMerge configuration variable is true
-u <upstream>, --set-upstream-to=<upstream> # Set up <branchname>'s tracking information so <upstream> is considered <branchname>'s upstream branch. If no <branchname> is specified, then it defaults to the current branch
--unset-upstream # Remove the upstream information for <branchname>. If no branch is specified it defaults to the current branch
--edit-description # Open an editor and edit the text to explain what the branch is for, to be used by various other commands (e.g. format-patch, request-pull, and merge (if enabled)). Multi-line explanations may be used
--contains [<commit>] # Only list branches which contain the specified commit (HEAD if not specified). Implies --list
--no-contains [<commit>] # Only list branches which don’t contain the specified commit (HEAD if not specified). Implies --list
--merged [<commit>] # Only list branches whose tips are reachable from the specified commit (HEAD if not specified). Implies --list, incompatible with --no-merged
--no-merged [<commit>] # Only list branches whose tips are not reachable from the specified commit (HEAD if not specified). Implies --list, incompatible with --merged
<branchname> # The name of the branch to create or delete. The new branch name must pass all checks defined by git-check-ref-format(1). Some of these checks may restrict the characters allowed in a branch name
<start-point> # The new branch head will point to this commit. It may be given as a branch name, a commit-id, or a tag. If this option is omitted, the current HEAD will be used instead
<oldbranch> # The name of an existing branch to rename
<newbranch> # The new name for an existing branch. The same restrictions as for <branchname> apply
--sort=<key> # Sort based on the key given. Prefix - to sort in descending order of the value. You may use the --sort=<key> option multiple times, in which case the last key becomes the primary key
--points-at <object> # Only list branches of the given object
--format <format> # A string that interpolates %(fieldname) from a branch ref being shown and the object it points at. The format is the same as that of git-for-each-ref(1)
Show changes between commits, commit and working tree, etc
Show changes between the working tree and the index or a tree, changes between the index and a tree, changes between two trees, changes between two blob objects, or changes between two files on disk
# view the changes you made relative to the index (staging area for the next commit)
git diff [<options>] [--] [<path>...]
# compare the given two paths on the filesystem
git diff [<options>] --no-index [--] <path> <path>
# view the changes you staged for the next commit relative to the named <commit>
git diff [<options>] --cached [<commit>] [--] [<path>...]
# view the changes you have in your working tree relative to the named <commit>
git diff [<options>] <commit> [--] [<path>...]
# view the changes between two arbitrary <commit>
git diff [<options>] <commit> <commit> [--] [<path>...]
git diff [<options>] <commit>..<commit> [--] [<path>...]
# view the changes on the branch containing and up to the second <commit>, starting at a common ancestor of both <commit>
git diff [<options>] <commit>...<commit> [--] [<path>...]
-p, -u, --patch # Generate patch (see section on generating patches). This is the default
-s, --no-patch # Suppress diff output. Useful for commands like git show that show the patch by default, or to cancel the effect of --patch
-U<n>, --unified=<n> # Generate diffs with <n> lines of context instead of the usual three. Implies --patch
--output=<file> # Output to a specific file instead of stdout
--output-indicator-new=<char>, --output-indicator-old=<char>, --output-indicator-context=<char> # Specify the character used to indicate new, old or context lines in the generated patch. Normally they are +, - and ' ' respectively
--raw # Generate the diff in raw format
--patch-with-raw # Synonym for -p --raw
--indent-heuristic # Enable the heuristic that shifts diff hunk boundaries to make patches easier to read. This is the default
--no-indent-heuristic # Disable the indent heuristic
--minimal # Spend extra time to make sure the smallest possible diff is produced
--patience # Generate a diff using the "patience diff" algorithm
--histogram # Generate a diff using the "histogram diff" algorithm
--anchored=<text> # Generate a diff using the "anchored diff" algorithm
--diff-algorithm={patience|minimal|histogram|myers} # Choose a diff algorithm. The variants are as follows:
default, myers # The basic greedy diff algorithm. Currently, this is the default
minimal # Spend extra time to make sure the smallest possible diff is produced
patience # Use "patience diff" algorithm when generating patches
histogram # This algorithm extends the patience algorithm to "support low-occurrence common elements"
--stat[=<width>[,<name-width>[,<count>]]] # Generate a diffstat
--compact-summary # Output a condensed summary of extended header information such as file creations or deletions ("new" or "gone", optionally "+l" if it’s a symlink) and mode changes ("+x" or "-x" for adding or removing executable bit respectively) in diffstat
--numstat # Similar to --stat, but shows number of added and deleted lines in decimal notation and pathname without abbreviation, to make it more machine friendly
--shortstat # Output only the last line of the --stat format containing total number of modified files, as well as number of added and deleted lines
-X[<param1,param2,...>], --dirstat[=<param1,param2,...>] # Output the distribution of relative amount of changes for each sub-directory. The behavior of --dirstat can be customized by passing it a comma separated list of parameters. The following parameters are available:
changes # Compute the dirstat numbers by counting the lines that have been removed from the source, or added to the destination
lines # Compute the dirstat numbers by doing the regular line-based diff analysis, and summing the removed/added line counts
files # Compute the dirstat numbers by counting the number of files changed. Each changed file counts equally in the dirstat analysis
cumulative # Count changes in a child directory for the parent directory as well
<limit> # An integer parameter specifies a cut-off percent (3% by default)
--dirstat-by-file[=<param1,param2>...] # Synonym for --dirstat=files,param1,param2...
--summary # Output a condensed summary of extended header information such as creations, renames and mode changes
--patch-with-stat # Synonym for -p --stat
-z # When --raw, --numstat, --name-only or --name-status has been given, do not munge pathnames and use NULs as output field terminators
--name-only # Show only names of changed files
--name-status # Show only names and status of changed files. See the description of the --diff-filter option on what the status letters mean
--submodule[=<format>] # Specify how differences in submodules are shown. When specifying --submodule=short the short format is used
--color[=<when>] # Show colored diff. --color (i.e. without =<when>) is the same as --color=always
--no-color # Turn off colored diff. This can be used to override configuration settings. It is the same as --color=never
--color-moved[=<mode>] # Moved lines of code are colored differently. <mode> defaults to no. The mode must be one of:
no # Moved lines are not highlighted
default # Is a synonym for zebra. This may change to a more sensible mode in the future
plain # Any line that is added in one location and was removed in another location will be colored with color.diff.newMoved
blocks # Blocks of moved text of at least 20 alphanumeric characters are detected greedily
zebra # Blocks of moved text are detected as in blocks mode
dimmed-zebra # Similar to zebra, but additional dimming of uninteresting parts of moved code is performed
--no-color-moved-ws # Do not ignore whitespace when performing move detection. This can be used to override configuration settings
--word-diff[=<mode>] # Show a word diff, using the <mode> to delimit changed words. The <mode> defaults to plain, and must be one of:
color # Highlight changed words using only colors. Implies --color
plain # Show words as [-removed-] and {+added+}
porcelain # Use a special line-based format intended for script consumption
none # Disable word diff again
--word-diff-regex=<regex> # Use <regex> to decide what a word is, instead of considering runs of non-whitespace to be a word
--color-words[=<regex>] # Equivalent to --word-diff=color plus (if a regex was specified) --word-diff-regex=<regex>
--no-renames # Turn off rename detection, even when the configuration file gives the default to do so
--[no-]rename-empty # Whether to use empty blobs as rename source
--check # Warn if changes introduce conflict markers or whitespace errors. What are considered whitespace errors is controlled by core
--ws-error-highlight=<kind> # Highlight whitespace errors in the context, old or new lines of the diff
--full-index # Instead of the first handful of characters, show the full pre- and post-image blob object names on the "index" line when generating patch format output
--binary # In addition to --full-index, output a binary diff that can be applied with git-apply. Implies --patch
--abbrev[=<n>] # Instead of showing the full 40-byte hexadecimal object name in diff-raw format output and diff-tree header lines, show only a partial prefix
-B[<n>][/<m>], --break-rewrites[=[<n>][/<m>]] # Break complete rewrite changes into pairs of delete and create
-M[<n>], --find-renames[=<n>] # Detect renames. If n is specified, it is a threshold on the similarity index (i.e. amount of addition/deletions compared to the file’s size)
-C[<n>], --find-copies[=<n>] # Detect copies as well as renames
--find-copies-harder # For performance reasons, by default, -C option finds copies only if the original file of the copy was modified in the same changeset
-D, --irreversible-delete # Omit the preimage for deletes, i.e. print only the header but not the diff between the preimage and /dev/null
-l<num> # The -M and -C options require O(n^2) processing time where n is the number of potential rename/copy targets
--diff-filter=[(A|C|D|M|R|T|U|X|B)...[*]] # Select only files that are Added (A), Copied (C), Deleted (D), Modified (M), Renamed (R), have their type (i.e. regular file, symlink, submodule, ...) changed (T), are Unmerged (U), are Unknown (X), or have had their pairing Broken (B)
-S<string> # Look for differences that change the number of occurrences of the specified string (i.e. addition/deletion) in a file
-G<regex> # Look for differences whose patch text contains added/removed lines that match <regex>
--find-object=<object-id> # Look for differences that change the number of occurrences of the specified object
--pickaxe-all # When -S or -G finds a change, show all the changes in that changeset, not just the files that contain the change in <string>
--pickaxe-regex # Treat the <string> given to -S as an extended POSIX regular expression to match
-O<orderfile> # Control the order in which files appear in the output. <orderfile> is parsed as follows:
- Blank lines are ignored, so they can be used as separators for readability
- Lines starting with a hash ("#") are ignored, so they can be used for comments. Add a backslash ("\\") to the beginning of the pattern if it starts with a hash
- Each other line contains a single pattern
-R # Swap two inputs; that is, show differences from index or on-disk file to tree contents
--relative[=<path>] # When run from a subdirectory of the project, it can be told to exclude changes outside the directory and show pathnames relative to it with this option
-a, --text # Treat all files as text
--ignore-cr-at-eol # Ignore carriage-return at the end of line when doing a comparison
--ignore-space-at-eol # Ignore changes in whitespace at EOL
-b, --ignore-space-change # Ignore changes in amount of whitespace
-w, --ignore-all-space # Ignore whitespace when comparing lines
--ignore-blank-lines # Ignore changes whose lines are all blank
--inter-hunk-context=<lines> # Show the context between diff hunks, up to the specified number of lines, thereby fusing hunks that are close to each other
-W, --function-context # Show whole surrounding functions of changes
--exit-code # Make the program exit with codes similar to diff(1). That is, it exits with 1 if there were differences and 0 means no differences
--quiet # Disable all output of the program. Implies --exit-code
--ext-diff # Allow an external diff helper to be executed. If you set an external diff driver with gitattributes(5), you need to use this option with git-log(1) and friends
--no-ext-diff # Disallow external diff drivers
--textconv, --no-textconv # Allow (or disallow) external text conversion filters to be run when comparing binary files
--ignore-submodules[=<when>] # Ignore changes to submodules in the diff generation
--src-prefix=<prefix> # Show the given source prefix instead of "a/"
--dst-prefix=<prefix> # Show the given destination prefix instead of "b/"
--no-prefix # Do not show any source or destination prefix
--line-prefix=<prefix> # Prepend an additional prefix to every line of output
--ita-invisible-in-index # By default entries added by "git add -N" appear as an existing empty file in "git diff" and a new file in "git diff --cached"
-1 --base, -2 --ours, -3 --theirs # Compare the working tree with the "base" version (stage #1), "our branch" (stage #2) or "their branch" (stage #3)
-0 # Omit diff output for unmerged entries and just show "Unmerged". Can be used only when comparing the working tree with the index
<path>... # The <paths> parameters, when given, are used to limit the diff to the named paths (you can give directory names and get diff for all files under them)
USED
# show diff staged / repo
git diff --staged
Move or rename a file, a directory, or a symlink
# renames <source>
git mv [-v] [-f] [-n] [-k] <source> <destination>
# the last argument has to be an existing directory; the given sources will be moved into this directory
git mv [-v] [-f] [-n] [-k] <source> ... <destination directory>
-f, --force # Force renaming or moving of a file even if the target exists
-k # Skip move or rename actions which would lead to an error condition. An error happens when a source is neither existing nor controlled by Git, or when it would overwrite an existing file unless -f is given.
-n, --dry-run # Do nothing; only show what would happen
-v, --verbose # Report the names of files as they are moved.
Remove files from the working tree and from the index
Remove files from the index, or from the working tree and the index. git rm will not remove a file from just your working directory. (There is no option to remove a file only from the working tree and yet keep it in the index; use /bin/rm if you want to do that.) The files being removed have to be identical to the tip of the branch, and no updates to their contents can be staged in the index, though that default behavior can be overridden with the -f option. When --cached is given, the staged content has to match either the tip of the branch or the file on disk, allowing the file to be removed from just the index
git rm [-f | --force] [-n] [-r] [--cached] [--ignore-unmatch] [--quiet] [--] <file>...
<file>... # Files to remove. Fileglobs (e.g. *.c) can be given to remove all matching files. If you want Git to expand file glob characters, you may need to shell-escape them
-f, --force # Override the up-to-date check
-n, --dry-run # Don’t actually remove any file(s). Instead, just show if they exist in the index and would otherwise be removed by the command
-r # Allow recursive removal when a leading directory name is given
-- # This option can be used to separate command-line options from the list of files, (useful when filenames might be mistaken for command-line options)
--cached # Use this option to unstage and remove paths only from the index. Working tree files, whether modified or not, will be left alone
--ignore-unmatch # Exit with a zero status even if no files matched
-q, --quiet # git rm normally outputs one line (in the form of an rm command) for each file removed. This option suppresses that output
Reset current HEAD to the specified state
In the first three forms, copy entries from <tree-ish> to the index. In the last form, set the current branch head (HEAD) to <commit>, optionally modifying index and working tree to match. The <tree-ish>/<commit> defaults to HEAD in all forms
# These forms reset the index entries for all paths that match the <pathspec> to their state at <tree-ish>
git reset [-q] [<tree-ish>] [--] <pathspec>..., git reset [-q] [--pathspec-from-file=<file> [--pathspec-file-nul]] [<tree-ish>]
# Interactively select hunks in the difference between the index and <tree-ish> (defaults to HEAD). The chosen hunks are applied in reverse to the index. This means that git reset -p is the opposite of git add -p
git reset (--patch | -p) [<tree-ish>] [--] [<pathspec>...]
# This form resets the current branch head to <commit> and possibly updates the index (resetting it to the tree of <commit>) and the working tree depending on <mode>
git reset [<mode>] [<commit>]
--soft # Does not touch the index file or the working tree at all (but resets the head to <commit>, just like all modes do)
--mixed # Resets the index but not the working tree (i.e., the changed files are preserved but not marked for commit) and reports what has not been updated
--hard # Resets the index and working tree. Any changes to tracked files in the working tree since <commit> are discarded
--merge # Resets the index and updates the files in the working tree that are different between <commit> and HEAD, but keeps those which are different between the index and working tree (i.e. which have changes which have not been added)
--keep # Resets index entries and updates files in the working tree that are different between <commit> and HEAD
-q, --quiet, --no-quiet # Be quiet, only report errors. The default behavior is set by the reset.quiet config option. --quiet and --no-quiet will override the default behavior
--pathspec-from-file=<file> # Pathspec is passed in <file> instead of commandline args. If <file> is exactly - then standard input is used
--pathspec-file-nul # Only meaningful with --pathspec-from-file. Pathspec elements are separated with NUL character and all other characters are taken literally (including newlines and quotes)
-- # Do not interpret any more arguments as options
<pathspec>... # Limits the paths affected by the operation
Add file contents to the index
This command updates the index using the current content found in the working tree, to prepare the content staged for the next commit. It typically adds the current content of existing paths as a whole, but with some options it can also be used to add content with only part of the changes made to the working tree files applied, or remove paths that do not exist in the working tree anymore
git add [--verbose | -v] [--dry-run | -n] [--force | -f] [--interactive | -i] [--patch | -p] [--edit | -e] [--[no-]all | --[no-]ignore-removal | [--update | -u]] [--intent-to-add | -N] [--refresh] [--ignore-errors] [--ignore-missing] [--renormalize] [--chmod=(+|-)x] [--pathspec-from-file=<file> [--pathspec-file-nul]] [--] [<pathspec>...]
<pathspec>... # Files to add content from. Fileglobs (e.g. *.c) can be given to add all matching files
-n, --dry-run # Don’t actually add the file(s), just show if they exist and/or will be ignored
-v, --verbose # Be verbose
-f, --force # Allow adding otherwise ignored files
-i, --interactive # Add modified contents in the working tree interactively to the index. Optional path arguments may be supplied to limit operation to a subset of the working tree
-p, --patch # Interactively choose hunks of patch between the index and the work tree and add them to the index
-e, --edit # Open the diff vs. the index in an editor and let the user edit it
-u, --update # Update the index just where it already has an entry matching <pathspec>
-A, --all, --no-ignore-removal # Update the index not only where the working tree has a file matching <pathspec> but also where the index already has an entry
--no-all, --ignore-removal # Update the index by adding new files that are unknown to the index and files modified in the working tree, but ignore files that have been removed from the working tree
-N, --intent-to-add # Record only the fact that the path will be added later. An entry for the path is placed in the index with no content
--refresh # Don’t add the file(s), but only refresh their stat() information in the index
--ignore-errors # If some files could not be added because of errors indexing them, do not abort the operation, but continue adding the others
--ignore-missing # This option can only be used together with --dry-run
--no-warn-embedded-repo # By default, git add will warn when adding an embedded repository to the index without using git submodule add to create an entry in .gitmodules
--renormalize # Apply the "clean" process freshly to all tracked files to forcibly add them again to the index
--chmod=(+|-)x # Override the executable bit of the added files
--pathspec-from-file=<file> # Pathspec is passed in <file> instead of commandline args
--pathspec-file-nul # Only meaningful with --pathspec-from-file
-- # This option can be used to separate command-line options from the list of files, (useful when filenames might be mistaken for command-line options)
https://chris.beams.io/posts/git-commit/
Record changes to the repository
Create a new commit containing the current contents of the index and the given log message describing the changes. The new commit is a direct child of HEAD, usually the tip of the current branch, and the branch is updated to point to it (unless no branch is associated with the working tree, in which case HEAD is "detached" as described in git-checkout(1))
git commit [-a | --interactive | --patch] [-s] [-v] [-u<mode>] [--amend] [--dry-run] [(-c | -C | --fixup | --squash) <commit>] [-F <file> | -m <msg>] [--reset-author] [--allow-empty] [--allow-empty-message] [--no-verify] [-e] [--author=<author>] [--date=<date>] [--cleanup=<mode>] [--[no-]status] [-i | -o] [--pathspec-from-file=<file> [--pathspec-file-nul]] [-S[<keyid>]] [--] [<pathspec>...]
-a, --all # Tell the command to automatically stage files that have been modified and deleted, but new files you have not told Git about are not affected
-p, --patch # Use the interactive patch selection interface to choose which changes to commit. See git-add(1) for details
-C <commit>, --reuse-message=<commit> # Take an existing commit object, and reuse the log message and the authorship information (including the timestamp) when creating the commit
-c <commit>, --reedit-message=<commit> # Like -C, but with -c the editor is invoked, so that the user can further edit the commit message
--fixup=<commit> # Construct a commit message for use with rebase --autosquash
--squash=<commit> # Construct a commit message for use with rebase --autosquash
--reset-author # When used with -C/-c/--amend options, or when committing after a conflicting cherry-pick, declare that the authorship of the resulting commit now belongs to the committer
--short # When doing a dry-run, give the output in the short-format. See git-status(1) for details. Implies --dry-run
--branch # Show the branch and tracking info even in short-format
--porcelain # When doing a dry-run, give the output in a porcelain-ready format. See git-status(1) for details. Implies --dry-run
--long # When doing a dry-run, give the output in the long-format. Implies --dry-run
-z, --null # When showing short or porcelain status output, print the filename verbatim and terminate the entries with NUL, instead of LF
-F <file>, --file=<file> # Take the commit message from the given file. Use - to read the message from the standard input
--author=<author> # Override the commit author
--date=<date> # Override the author date used in the commit
-m <msg>, --message=<msg> # Use the given <msg> as the commit message
-t <file>, --template=<file> # When editing the commit message, start the editor with the contents in the given file
-s, --signoff # Add Signed-off-by line by the committer at the end of the commit log message
-n, --no-verify # This option bypasses the pre-commit and commit-msg hooks
--allow-empty # Usually recording a commit that has the exact same tree as its sole parent commit is a mistake, and the command prevents you from making such a commit
--allow-empty-message # Like --allow-empty this command is primarily for use by foreign SCM interface scripts
--cleanup=<mode> # This option determines how the supplied commit message should be cleaned up before committing. The <mode> can be strip, whitespace, verbatim, scissors or default
strip # Strip leading and trailing empty lines, trailing whitespace, commentary and collapse consecutive empty lines
whitespace # Same as strip except #commentary is not removed
verbatim # Do not change the message at all
scissors # Same as whitespace except that everything from (and including) the scissors line "# ------------------------ >8 ------------------------" is truncated, if the message is to be edited. "#" can be customized with core.commentChar
default # Same as strip if the message is to be edited. Otherwise whitespace
-e, --edit # The message taken from file with -F, command line with -m, and from commit object with -C are usually used as the commit log message unmodified
--no-edit # Use the selected commit message without launching an editor
--amend # Replace the tip of the current branch by creating a new commit
--no-post-rewrite # Bypass the post-rewrite hook
-i, --include # Before making a commit out of staged contents so far, stage the contents of paths given on the command line as well
-o, --only # Make a commit by taking the updated working tree contents of the paths specified on the command line, disregarding any contents that have been staged for other paths
--pathspec-from-file=<file> # Pathspec is passed in <file> instead of commandline args
--pathspec-file-nul # Only meaningful with --pathspec-from-file
-u[<mode>], --untracked-files[=<mode>] # Show untracked files. The mode parameter is optional (defaults to all). The possible options are:
no # Show no untracked files
normal # Shows untracked files and directories
all # Also shows individual files in untracked directories
-v, --verbose # Show unified diff between the HEAD commit and what would be committed at the bottom of the commit message template to help the user describe the commit by reminding what changes the commit has
-q, --quiet # Suppress commit summary message
--dry-run # Do not create a commit, but show a list of paths that are to be committed, paths with local changes that will be left uncommitted and paths that are untracked
--status # Include the output of git-status(1) in the commit message template when using an editor to prepare the commit message
--no-status # Do not include the output of git-status(1) in the commit message template when using an editor to prepare the default commit message
-S[<keyid>], --gpg-sign[=<keyid>] # GPG-sign commits. The keyid argument is optional and defaults to the committer identity; if specified, it must be stuck to the option without a space
--no-gpg-sign # Countermand commit.gpgSign configuration variable that is set to force each and every commit to be signed
-- # Do not interpret any more arguments as options
<pathspec>... # When pathspec is given on the command line, commit the contents of the files that match the pathspec without recording the changes already added to the index
USED
git commit -m "initial version - $(date +%Y-%m-%d)" # commit with message
git commit -a -m "$message" # commit unstaged files in working copy to the repo
SHOW
Show a reference to an object or a list of objects may be passed to examine those specific objects
Shows one or more objects (blobs, trees, tags and commits)
- For commits it shows the log message and textual diff. It also presents the merge commit in a special format as produced by git diff-tree --cc
- For tags, it shows the tag message and the referenced objects
- For trees, it shows the names (equivalent to git ls-tree with --name-only)
- For plain blobs, it shows the plain contents
git show [<options>] [<object>...]
--pretty[=<format>] # displays more or less information depending on the format chosen: oneline, short, medium, full, fuller, email, raw, format:<string>. default is medium
--abbrev-commit # shortens the length of output commit IDs. With --pretty=oneline can produce a highly succinct git log output
--no-abbrev-commit # Show the full 40 character commit ID
--oneline # uses the expanded command --pretty=oneline --abbrev-commit
--encoding[=<encoding>] # Character encoding on Git log messages defaults to UTF-8
--expand-tabs=<n> # replace tab characters with <n> spaces in the log message output
--expand-tabs # replace tab characters with 8 default spaces
--no-expand-tabs # replace tab characters with 0 space
--notes=<ref> # filters notes with <ref>
--no-notes # hide notes in output
--show-signature # show signature if commit are signed with gpg key
SHOW-BRANCH
Show branches and their commits
Shows the commit ancestry graph starting from the commits named with <rev>s or <glob>s (or all refs under refs/heads and/or refs/tags) semi-visually
git show-branch [-a|--all] [-r|--remotes] [--topo-order | --date-order] [--current] [--color[=<when>] | --no-color] [--sparse] [--more=<n> | --list | --independent | --merge-base] [--no-name | --sha1-name] [--topics] [(<rev> | <glob>)...]
git show-branch (-g|--reflog)[=<n>[,<base>]] [--list] [<ref>]
<rev> # Arbitrary extended SHA-1 expression (see gitrevisions(7)) that typically names a branch head or a tag
<glob> # A glob pattern that matches branch or tag names under refs/. For example, if you have many topic branches under refs/heads/topic, giving topic/* would show all of them
-r, --remotes # Show the remote-tracking branches
-a, --all # Show both remote-tracking branches and local branches
--current # With this option, the command includes the current branch to the list of revs to be shown when it is not given on the command line
--topo-order # By default, the branches and their commits are shown in reverse chronological order. This option makes them appear in topological order (i.e., descendant commits are shown before their parents)
--date-order # This option is similar to --topo-order in the sense that no parent comes before all of its children, but otherwise commits are ordered according to their commit date
--sparse # By default, the output omits merges that are reachable from only one tip being shown. This option makes them visible
--more=<n> # Usually the command stops output upon showing the commit that is the common ancestor of all the branches. This flag tells the command to go <n> more common commits beyond that
--list # Synonym to --more=-1
--merge-base # Instead of showing the commit list, determine possible merge bases for the specified commits
--independent # Among the <reference>s given, display only the ones that cannot be reached from any other <reference>
--no-name # Do not show naming strings for each commit
--sha1-name # Instead of naming the commits using the path to reach them from heads (e.g. "master~2" to mean the grandparent of "master"), name them with the unique prefix of their object names
--topics # Shows only commits that are NOT on the first branch given. This helps track topic branches by hiding any commit that is already in the main line of development
-g, --reflog[=<n>[,<base>]] [<ref>] # Shows <n> most recent ref-log entries for the given ref. If <base> is given, <n> entries going back from that entry. <base> can be specified as count or date. When no explicit <ref> parameter is given, it defaults to the current branch (or HEAD if it is detached)
--color[=<when>] # Color the status sign (one of these: * ! + -) of each commit corresponding to the branch it’s in
--no-color # Turn off colored output, even when the configuration file gives the default to color output
Show the working tree status
Displays paths that have differences between the index file and the current HEAD commit, paths that have differences between the working tree and the index file, and paths in the working tree that are not tracked by Git
git status [<options>...] [--] [<pathspec>...]
-s, --short # Give the output in the short-format
-b, --branch # Show the branch and tracking info even in short-format
--show-stash # Show the number of entries currently stashed away
--porcelain[=<version>] # Give the output in an easy-to-parse format for scripts. This is similar to the short output, but will remain stable across Git versions and regardless of user configuration
--long # Give the output in the long-format. This is the default
-v, --verbose # In addition to the names of files that have been changed, also show the textual changes that are staged to be committed
-u[<mode>], --untracked-files[=<mode>] # Show untracked files. The possible options are:
no # Show no untracked files
normal # Shows untracked files and directories
all # Also shows individual files in untracked directories
--ignore-submodules[=<when>] # Ignore changes to submodules when looking for changes. <when> can be either "none", "untracked", "dirty" or "all", which is the default
--ignored[=<mode>] # Show ignored files as well. defaults to traditional. The possible options are:
traditional # Shows ignored files and directories, unless --untracked-files=all is specified, in which case individual files in ignored directories are displayed
no # Show no ignored files
matching # Shows ignored files and directories matching an ignore pattern
-z # Terminate entries with NUL, instead of LF. This implies the --porcelain=v1 output format if no other format is given
--column[=<options>], --no-column # Display untracked files in columns. See configuration variable column.status for option syntax. --column and --no-column without options are equivalent to always and never respectively
--ahead-behind, --no-ahead-behind # Display or do not display detailed ahead/behind counts for the branch relative to its upstream branch. Defaults to true
--renames, --no-renames # Turn on/off rename detection regardless of user configuration. See also git-diff(1) --no-renames
--find-renames[=<n>] # Turn on rename detection, optionally setting the similarity threshold. See also git-diff(1) --find-renames
<pathspec>... # See the pathspec entry in gitglossary(7)
Clone a repository into a new directory
Clones a repository into a newly created directory, creates remote-tracking branches for each branch in the cloned repository (visible using git branch --remotes), and creates and checks out an initial branch that is forked from the cloned repository’s currently active branch
The following syntaxes may be used with them:
- ssh://[user@]host.xz[:port]/path/to/repo.git/
- git://host.xz[:port]/path/to/repo.git/
- http[s]://host.xz[:port]/path/to/repo.git/
- ftp[s]://host.xz[:port]/path/to/repo.git/
The ssh and git protocols additionally support ~username expansion:
- ssh://[user@]host.xz[:port]/~[user]/path/to/repo.git/
- git://host.xz[:port]/~[user]/path/to/repo.git/
- [user@]host.xz:/~[user]/path/to/repo.git/
For local repositories, the following syntaxes may be used:
- /path/to/repo.git/
- file:///path/to/repo.git/
git clone [--template=<template_directory>] [-l] [-s] [--no-hardlinks] [-q] [-n] [--bare] [--mirror] [-o <name>] [-b <name>] [-u <upload-pack>] [--reference <repository>] [--dissociate] [--separate-git-dir <git dir>] [--depth <depth>] [--[no-]single-branch] [--no-tags] [--recurse-submodules[=<pathspec>]] [--[no-]shallow-submodules] [--[no-]remote-submodules] [--jobs <n>] [--sparse] [--] <repository> [<directory>]
-l, --local # When the repository to clone from is on a local machine, this flag bypasses the normal "Git aware" transport mechanism and clones the repository by making a copy of HEAD and everything under objects and refs directories
--no-hardlinks # Force the cloning process from a repository on a local filesystem to copy the files under the .git/objects directory instead of using hardlinks. This may be desirable if you are trying to make a back-up of your repository
-s, --shared # When the repository to clone is on the local machine, instead of using hard links, automatically setup .git/objects/info/alternates to share the objects with the source repository
--reference[-if-able] <repository> # If the reference repository is on the local machine, automatically setup .git/objects/info/alternates to obtain objects from the reference repository
--dissociate # Borrow the objects from reference repositories specified with the --reference options only to reduce network transfer, and stop borrowing from them after a clone is made by making necessary local copies of borrowed objects
-q, --quiet # Operate quietly. Progress is not reported to the standard error stream
-v, --verbose # Run verbosely. Does not affect the reporting of progress status to the standard error stream
--progress # Progress status is reported on the standard error stream by default when it is attached to a terminal, unless --quiet is specified. This flag forces progress status even if the standard error stream is not directed to a terminal
--server-option=<option> # Transmit the given string to the server when communicating using protocol version 2. The given string must not contain a NUL or LF character
-n, --no-checkout # No checkout of HEAD is performed after the clone is complete
--bare # Make a bare Git repository. That is, instead of creating <directory> and placing the administrative files in <directory>/.git, make the <directory> itself the $GIT_DIR
--sparse # Initialize the sparse-checkout file so the working directory starts with only the files in the root of the repository. The sparse-checkout file can be modified to grow the working directory as needed
--mirror # Set up a mirror of the source repository
-o <name>, --origin <name> # Instead of using the remote name origin to keep track of the upstream repository, use <name>
-b <name>, --branch <name> # Instead of pointing the newly created HEAD to the branch pointed to by the cloned repository’s HEAD, point to <name> branch instead
-u <upload-pack>, --upload-pack <upload-pack> # When given, and the repository to clone from is accessed via ssh, this specifies a non-default path for the command run on the other end
--template=<template_directory> # Specify the directory from which templates will be used; (See the "TEMPLATE DIRECTORY" section of git-init(1).)
-c <key>=<value>, --config <key>=<value> # Set a configuration variable in the newly-created repository; this takes effect immediately after the repository is initialized, but before the remote history is fetched or any files checked out
--depth <depth> # Create a shallow clone with a history truncated to the specified number of commits
--shallow-since=<date> # Create a shallow clone with a history after the specified time
--shallow-exclude=<revision> # Create a shallow clone with a history, excluding commits reachable from a specified remote branch or tag. This option can be specified multiple times
--[no-]single-branch # Clone only the history leading to the tip of a single branch, either specified by the --branch option or the primary branch remote’s HEAD points at. Further fetches into the resulting repository will only update the remote-tracking branch for the branch this option was used for the initial cloning. If the HEAD at the remote did not point at any branch when --single-branch clone was made, no remote-tracking branch is created
--no-tags # Don’t clone any tags, and set remote.<remote>.tagOpt=--no-tags in the config, ensuring that future git pull and git fetch operations won’t follow any tags
--recurse-submodules[=<pathspec>] # After the clone is created, initialize and clone submodules within based on the provided pathspec
--[no-]shallow-submodules # All submodules which are cloned will be shallow with a depth of 1
--[no-]remote-submodules # All submodules which are cloned will use the status of the submodule’s remote-tracking branch to update the submodule, rather than the superproject’s recorded SHA-1. Equivalent to passing --remote to git submodule update
--separate-git-dir=<git dir> # Instead of placing the cloned repository where it is supposed to be, place the cloned repository at the specified directory, then make a filesystem-agnostic Git symbolic link to there
-j <n>, --jobs <n> # The number of submodules fetched at the same time. Defaults to the submodule.fetchJobs option
<repository> # The (possibly remote) repository to clone from. See the GIT URLS section below for more information on specifying repositories
<directory> # The name of a new directory to clone into. The "humanish" part of the source repository is used if no directory is explicitly given (repo for /path/to/repo.git and foo for host.xz:foo/.git)
USED
git clone git://$urlrepo # clone a repository
git clone git://$urlrepo <directory> # clone a repository into the given directory
Create an empty Git repository or reinitialize an existing one
This command creates an empty Git repository - basically a .git directory with subdirectories for objects, refs/heads, refs/tags, and template files. An initial HEAD file that references the HEAD of the master branch is also created
git init [-q | --quiet] [--bare] [--template=<template_directory>] [--separate-git-dir <git dir>] [--shared[=<permissions>]] [directory]
-q, --quiet # Only print error and warning messages; all other output will be suppressed
--bare # Create a bare repository. If GIT_DIR environment is not set, it is set to the current working directory
--template=<template_directory> # Specify the directory from which templates will be used
--separate-git-dir=<git dir> # Instead of initializing the repository as a directory to either $GIT_DIR or ./.git/, create a text file there containing the path to the actual repository
--shared[=(false|true|umask|group|all|world|everybody|0xxx)] # Specify that the Git repository is to be shared amongst several users
Get and set repository or global options
You can query/set/replace/unset options with this command. The name is actually the section and the key separated by a dot, and the value will be escaped
git config [<file-option>] [--type=<type>] [--show-origin] [-z|--null] name [value [value_regex]]
git config [<file-option>] [--type=<type>] --add name value
git config [<file-option>] [--type=<type>] --replace-all name value [value_regex]
git config [<file-option>] [--type=<type>] [--show-origin] [-z|--null] --get name [value_regex]
git config [<file-option>] [--type=<type>] [--show-origin] [-z|--null] --get-all name [value_regex]
git config [<file-option>] [--type=<type>] [--show-origin] [-z|--null] [--name-only] --get-regexp name_regex [value_regex]
git config [<file-option>] [--type=<type>] [-z|--null] --get-urlmatch name URL
git config [<file-option>] --unset name [value_regex]
git config [<file-option>] --unset-all name [value_regex]
git config [<file-option>] --rename-section old_name new_name
git config [<file-option>] --remove-section name
git config [<file-option>] [--show-origin] [-z|--null] [--name-only] -l | --list
git config [<file-option>] --get-color name [default]
git config [<file-option>] --get-colorbool name [stdout-is-tty]
git config [<file-option>] -e | --edit
--local # write to the repository .git/config file
--global # write to global ~/.gitconfig
--system # write to system-wide $(prefix)/etc/gitconfig
-e, --edit # opens an editor to modify the specified config file
--replace-all # Default behavior is to replace at most one line. This replaces all lines matching the key
--add # adds a new line to the option without altering any existing values
--get key [value-regex] # print the value of the key where the key corresponds exactly and the value if necessary with the pattern
--get-all key [value-regex] # same as --get but return all values
--get-regexp key-regex [value-regex] # print all "key value" pairs whose key/value pair corresponds to its pattern
--get-urlmatch section[.var] URL # when given a two-part name section.key, the value for section.<url>.key whose <url> part matches the best to the given URL is returned (if no such key exists, the value for section.key is used as a fallback)
--worktree # similar to --local except that .git/config.worktree is read from or written to if extensions.worktreeConfig is present
-f config-file, --file config-file # use the given config file instead of the one specified by GIT_CONFIG
--blob blob # similar to --file but use the given blob instead of a file
--remove-section # remove the given section from the configuration file
--rename-section # rename the given section to a new name
--unset # remove the line matching the key from config file
--unset-all # remove all lines matching the key from config file
-l, --list # list all variables set in config file, along with their values
--type <type> # ensure that any input or output is valid under the given type constraint(s)
bool / int / bool-or-int / path / expiry-date / color
-z, --null # output values and/or keys, always end values with the null character (instead of a newline)
--name-only # output only the names of config variables for --list or --get-regexp
--show-origin # Augment the output of all queried config options with the origin type and the actual origin
USED
git config
--system core.editor vim # set the default editor
--system merge.tool meld # set the default editor/viewer for diff
--global user.name "aguy tech" # set the name of user
--global user.email "aguytech@free.fr" # set the email of user
-l # print all configurations
-l --system # print system configurations
user.email # print the value of the configuration for a name
--get-all core.editor # print all defined values (local, global, system) for the key 'core.editor'
PACKAGE
package states
not-installed # The package is not installed on your system
config-files # Only the configuration files of the package exist on the system
half-installed # The installation of the package has been started, but not completed for some reason
unpacked # The package is unpacked, but not configured
half-configured # The package is unpacked and configuration has been started, but not yet completed for some reason
triggers-awaited # The package awaits trigger processing by another package
triggers-pending # The package has been triggered
installed # The package is correctly unpacked and configured
package selection states
install # The package is selected for installation
hold # A package marked to be on hold is not handled by dpkg, unless forced to do that with option --force-hold
deinstall # The package is selected for deinstallation (i.e. we want to remove all files, except configuration files)
purge # The package is selected to be purged (i.e. we want to remove everything from system directories, even configuration files)
package flags
ok # A package marked ok is in a known state, but might need further processing
reinstreq # A package marked reinstreq is broken and requires reinstallation. These packages cannot be removed, unless forced with option --force-remove-reinstreq
ACTIONS
-i, --install package-file... # Install the package. If --recursive or -R option is specified, package-file must refer to a directory instead
--unpack package-file... # Unpack the package, but don't configure it. If --recursive or -R option is specified, package-file must refer to a directory instead
--configure package...|-a|--pending # Configure a package which has been unpacked but not yet configured. If -a or --pending is given instead of package, all unpacked but unconfigured packages are configured
--triggers-only package...|-a|--pending # Processes only triggers
-r, --remove package...|-a|--pending # Remove an installed package
-V, --verify [package-name...] # Verifies the integrity of package-name or all packages if omitted, by comparing information from the files installed by a package with the files metadata information stored in the dpkg database
-C, --audit [package-name...] # Performs database sanity and consistency checks for package-name or all packages if omitted (per-package checks)
--update-avail [Packages-file] # Update dpkg's and dselect's idea of which packages are available; old information is replaced with the information in the Packages-file
--merge-avail [Packages-file] # Update dpkg's and dselect's idea of which packages are available; old information is combined with information from Packages-file
-A, --record-avail package-file... # Update dpkg and dselect's idea of which packages are available with information from the package package-file
--clear-avail # Erase the existing information about what packages are available
--get-selections [package-name-pattern...] # Get list of package selections, and write it to stdout
--set-selections # Set package selections using file read from stdin
--clear-selections # Set the requested state of every non-essential package to deinstall
--yet-to-unpack # Searches for packages selected for installation, but which for some reason still haven't been installed
--predep-package # Print a single package which is the target of one or more relevant pre-dependencies and has itself no unsatisfied pre-dependencies
--add-architecture architecture # Add architecture to the list of architectures for which packages can be installed without using --force-architecture
--remove-architecture architecture # Remove architecture from the list of architectures for which packages can be installed without using --force-architecture
--print-architecture # Print architecture of packages dpkg installs
--print-foreign-architectures # Print a newline-separated list of the extra architectures dpkg is configured to allow packages to be installed for
--assert-feature # Asserts that dpkg supports the requested feature. assertable features are:
support-predepends # Supports the Pre-Depends field
working-epoch # Supports epochs in version strings
long-filenames # Supports long filenames in deb(5) archives
multi-conrep # Supports multiple Conflicts and Replaces
multi-arch # Supports multi-arch fields and semantics
versioned-provides # Supports versioned Provides
--validate-thing string # Validate that the thing string has a correct syntax. validatable things are:
pkgname # Validates the given package name
trigname # Validates the given trigger name
archname # Validates the given architecture name
version # Validates the given version
--compare-versions ver1 op ver2 # Compare version numbers, where op is a binary operator. dpkg returns true (0) if the specified condition is satisfied, and false (1) otherwise
-?, --help # Display a brief help message
--force-help # Give help about the --force-thing options
-Dh, --debug=help # Give help about debugging options
--version # Display dpkg version information
dpkg-deb actions # See dpkg-deb(1) for more information about the following actions
-b, --build directory [archive|directory] # Build a deb package
-c, --contents archive # List contents of a deb package
-e, --control archive [directory] # Extract control-information from a package
-x, --extract archive directory # Extract the files contained by package
-X, --vextract archive directory # Extract and display the filenames contained by a package
-f, --field archive [control-field...] # Display control field(s) of a package
--ctrl-tarfile archive # Output the control tar-file contained in a Debian package
--fsys-tarfile archive # Output the filesystem tar-file contained by a Debian package
-I, --info archive [control-file...] # Show information about a package
dpkg-query actions # See dpkg-query(1) for more information about the following actions
-l, --list package-name-pattern... # List packages matching given pattern
-s, --status package-name... # Report status of specified package
-L, --listfiles package-name... # List files installed to your system from package-name
-S, --search filename-search-pattern... # Search for a filename from installed packages
-p, --print-avail package-name... # Display details about package-name, as found in /var/lib/dpkg/available. Users of APT-based frontends should use apt-cache show package-name instead
OPTIONS
--abort-after=number # Change after how many errors dpkg will abort. The default is 50
-B, --auto-deconfigure # When a package is removed, there is a possibility that another installed package depended on the removed package
-Doctal, --debug=octal # Switch debugging on
--force-things
--no-force-things, --refuse-things # Force or refuse (no-force and refuse mean the same thing) to do some things
--ignore-depends=package,... # Ignore dependency-checking for specified packages
--no-act, --dry-run, --simulate # Do everything which is supposed to be done, but don't write any changes
-R, --recursive # Recursively handle all regular files matching pattern *.deb found at specified directories and all of its subdirectories
-G # Don't install a package if a newer version of the same package is already installed. This is an alias of --refuse-downgrade
--admindir=dir # Change default administrative directory, which contains many files that give information about status of installed or uninstalled packages, etc
--instdir=dir # Change default installation directory which refers to the directory where packages are to be installed
--root=dir # Changing root changes instdir to «dir» and admindir to «dir/var/lib/dpkg»
-O, --selected-only # Only process the packages that are selected for installation
-E, --skip-same-version # Don't install the package if the same version of the package is already installed
--pre-invoke=command
--post-invoke=command # Set an invoke hook command to be run via “sh -c” before or after the dpkg run for the unpack, configure, install, triggers-only, remove, purge, add-architecture and remove-architecture dpkg actions
--path-exclude=glob-pattern
--path-include=glob-pattern # Set glob-pattern as a path filter, either by excluding or re-including previously excluded paths matching the specified patterns during install
--verify-format format-name # Sets the output format for the --verify command
--status-fd n # Send machine-readable package status and progress information to file descriptor n
--status-logger=command # Send machine-readable package status and progress information to the shell command's standard input, to be run via “sh -c”
--log=filename # Log status change updates and actions to filename, instead of the default /var/log/dpkg.log
--no-debsig # Do not try to verify package signatures
--no-triggers # Do not run any triggers in this run
--triggers # Cancels a previous --no-triggers
MAN
synopsis
sed [OPTION]... {script-only-if-no-other-script} [input-file]...
sed [-n] [-e script] [-f script-file] [-i[.extension]] [-l wrap-length] [-rsu] [input-file]...
[adresse[,adresse]][!]commande[arguments]
[adresse[,adresse]]{
commande1
commande2;commande3
}
options
-n, --quiet, --silent # suppress automatic printing of pattern space
-e script, --expression=script # add the script to the commands to be executed
-f script-file, --file=script-file # add the contents of script-file to the commands to be executed
--follow-symlinks # follow symlinks when processing in place
-i[SUFFIX], --in-place[=SUFFIX] # edit files in place (makes backup if SUFFIX supplied)
-l N, --line-length=N # specify the desired line-wrap length for the `l' command
--posix # disable all GNU extensions
-E, -r, --regexp-extended # use extended regular expressions in the script (for portability use POSIX -E)
-s, --separate # consider files as separate rather than as a single, continuous long stream
--sandbox # operate in sandbox mode
-u, --unbuffered # load minimal amounts of data from the input files and flush the output buffers more often
-z, --null-data # separate lines by NUL characters
--help # display this help and exit
--version # output version information and exit
Zero-address ``commands''
: label # Label for b and t commands
'#'comment # The comment extends until the next newline (or the end of a -e script fragment)
} # The closing bracket of a { } block
Zero- or One- address commands
= # Print the current line number
a \text # Append text, which has each embedded newline preceded by a backslash
i \text # Insert text, which has each embedded newline preceded by a backslash
c \text # Replace text, which has each embedded newline preceded by a backslash
q [exit-code] # Immediately quit the sed script without processing any more input, except that if auto-print is not disabled the current pattern space will be printed
Q [exit-code] # Immediately quit the sed script without processing any more input
r filename # Append text read from filename
R filename # Append a line read from filename. Each invocation of the command reads a line from the file. This is a GNU extension
Commands which accept address ranges
{ # Begin a block of commands (end with a })
b label # Branch to label; if label is omitted, branch to end of script
c \text # Replace the selected lines with text, which has each embedded newline preceded by a backslash
d # Delete pattern space. Start next cycle
D # If pattern space contains no newline, start a normal new cycle as if the d command was issued. Otherwise, delete text in the pattern space up to the first newline, and restart cycle with the resultant pattern space, without reading a new line of input
h H # Copy/append pattern space to hold space
g G # Copy/append hold space to pattern space
l # List out the current line in a ``visually unambiguous'' form
l width # List out the current line in a ``visually unambiguous'' form, breaking it at width characters. This is a GNU extension
n N # Read/append the next line of input into the pattern space
p # Print the current pattern space
P # Print up to the first embedded newline of the current pattern space
s/regexp/replacement/ # Attempt to match regexp against the pattern space. If successful, replace that portion matched with replacement. The replacement may contain the special character & to refer to that portion of the pattern space which matched, and the special escapes \1 through \9 to refer to the corresponding matching sub-expressions in the regexp
t label # If a s/// has done a successful substitution since the last input line was read and since the last t or T command, then branch to label; if label is omitted, branch to end of script
T label # If no s/// has done a successful substitution since the last input line was read and since the last t or T command, then branch to label; if label is omitted, branch to end of script. This is a GNU extension
w filename # Write the current pattern space to filename
W filename # Write the first line of the current pattern space to filename. This is a GNU extension
x # Exchange the contents of the hold and pattern spaces
y/source/dest/ # Transliterate the characters in the pattern space which appear in source to the corresponding character in dest
Addresses
Sed commands can be given with no addresses, in which case the command will be executed for all input lines; with one address, in which case the command will only be executed for input lines which match that address; or with two addresses, in which case the command will be executed for all input lines which match the inclusive range of lines starting from the first address and continuing to the second address. Three things to note about address ranges: the syntax is addr1,addr2 (i.e., the addresses are separated by a comma); the line which addr1 matched will always be accepted, even if addr2 selects an earlier line; and if addr2 is a regexp, it will not be tested against the line that addr1 matched
After the address (or address-range), and before the command, a ! may be inserted, which specifies that the command shall only be executed if the address (or address-range) does not match
The following address types are supported:
number # Match only the specified line number (which increments cumulatively across files, unless the -s option is specified on the command line)
first~step # Match every step'th line starting with line first. For example, ``sed -n 1~2p'' will print all the odd-numbered lines in the input stream, and the address 2~5 will match every fifth line, starting with the second. first can be zero; in this case, sed operates as if it were equal to step. (This is an extension.)
$ # Match the last line
/regexp/ # Match lines matching the regular expression regexp
\cregexpc # Match lines matching the regular expression regexp. The c may be any character
GNU sed also supports some special 2-address forms:
0,addr2 # Start out in "matched first address" state, until addr2 is found. This is similar to 1,addr2, except that if addr2 matches the very first line of input the 0,addr2 form will be at the end of its range, whereas the 1,addr2 form will still be at the beginning of its range. This works only when addr2 is a regular expression
addr1,+N # Will match addr1 and the N lines following addr1
addr1,~N # Will match addr1 and the lines following addr1 until the next line whose input line number is a multiple of N
base
italic italic
bold bold
Titre1
Titre2
Titre6
- listA
- listB
- listB
- listA
- list1
- list2
- list1
Une citation
Une réponse à la citation
- Puce
- Autre puce
code1
code2
separator line
Name | Lunch order | Spicy | Owes |
---|---|---|---|
Joan | saag paneer | medium | $11 |
Sally | vindaloo | mild | $14 |
Erin | lamb madras | HOT | $5 |
There are multiple syntax highlighting themes to choose from. Here's one of them:
// All the code you will ever need
var hw = "Hello World!"
alert(hw);
--- comment
SELECT tat FROM mybase WHERE id.tat LIKE '%TATA%';
DROP DATABASE test;
My math is so rusty that I barely remember the quadratic equation:
$-b \pm \sqrt{b^2 - 4ac} \over 2a$
comment
To improve platform compatibility
https://doc.ubuntu-fr.org/gnupg
CONF
~/.gnupg/gpg.conf
# default key to use
default-key <key-uid>
LIST
--list-keys # list all keys
--list-keys --keyid-format {none|short|0xshort|long|0xlong} # list keys with a specified format
--list-secret-keys # list only secret keys
--list-public-keys # list only public keys
--search-keys <id>|<identifier>|<email> # search a key with id, identifier, email ...
KEY
--full-gen-key # generate a key (rsa, 4096, 2y)
--delete-keys <id> # delete keys with id
--gen-revoke <email> # generate a certificate of revocation. keep it safely !
--fingerprint <id> # verify the fingerprint of key
--sign-key <id> # sign a trusted public key
SERVER
--send-key <id> [--keyserver <serveur>] # send key to specified server or the default one
--recv-keys <id> [--keyserver <serveur>] # receive key with id from server
FILE
--verify <file.sign> <file> # verify the file are the same hash than the signed reference
--clearsign <file> # sign a file
# the file must be encrypted with the recipient's public key
# --sign & --encrypt can be used together
gpg --encrypt <file> # encrypt file but decrypted stay in binary mode
gpg --armor --output "file.gpg" --encrypt "file" # encrypt file and decrypted are available for reading
gpg --output <file> --decrypt <file.gpg> # decrypt file
TOC
chapter |
---|
CONFIG |
COPY |
DNSMASQ |
LIST |
IMAGE |
INIT |
NETWORK |
PROFILE |
STORAGE |
EXEC |
PUBLISH |
TRICK |
CONFIG
set network configuration
echo -e "lxc.network.0.ipv4 = 10.100.0.10/24\nlxc.network.0.ipv4.gateway = 10.100.0.1\n" | lxc config set my-container raw.lxc -
COPY
Copy container with changing device key/value
lxc copy $ct $ctnew --device $devicename,$key=$value
# lxc copy srv-mail srv-mail-maria --device eth0,ipv4.address=10.0.0.10
DNSMASQ
# /path/to/host-to-ip-file.conf with following dnsmasq syntax
c1,10.100.0.10
c2,10.100.0.20
&
lxc network set lxdbr0 raw.dnsmasq hostsfile=/path/to/host-to-ip-file.conf
lxc restart c1 c2
LIST
lxc list $str # be careful: lists containers whose name contains $str
lxc list $regexp # filters containers list with name matches regexp
lxc list $property_name=$value # filter container list with properties matches values (use list --format json to see real properties name)
lxc list -c 46abcdlnNpPsSt L # show all container information
lxc list -c n4s # outputs minimal container information
lxc list image.os=Alpine image.architecture=amd64 # outputs information for containers matching the properties: image.os=Alpine image.architecture=amd64
# list names of all containers
lxc list -f csv -c n
lxc list --format=json | jq -r '.[].name'
# list names of running containers
lxc list status=Running -f csv -c n
lxc list --format=json | jq -r '.[] | select(.status == "Running").name'
# list names of stopped containers
lxc list status=Stopped -f csv -c n
lxc list --format=json | jq -r '.[] | select(.status == "Stopped").name'
# list image with finding any of aliases
lxc image list --format json | jq -r '.[].aliases[] | select(.name == "debian10").name'
IMAGE
lxc image list $repo:$os/$release
lxc image list $repo: architecture=$regexp description=$regexp os=$regexp release=$regexp serial=$regexp
lxc image list ubuntu: # list available images from repository ubuntu
lxc image list images:debian # list available images from repository images starting with debian
lxc image list images:alpine/3.11/amd64 # list available images from repository images matched with os:Alpine release:3.11 architecture:amd64
lxc image list images: architecture=amd64 os=Debian release=buster
lxc image copy images:debian/10/amd64 local: --alias debian10 --alias debian-10 --alias buster --auto-update
lxc image copy images:alpine/3.12 local: --alias alpine312 --alias alpine-312 --auto-update
lxc image copy ubuntu:18.04/amd64 local: --alias bionic --alias ubuntu1804 --alias ubuntu-1804 --auto-update
lxc image copy ubuntu:20.04/amd64 local: --alias focal --alias ubuntu2004 --alias ubuntu-2004 --auto-update
lxc image copy images:centos/8/amd64 local: --alias centos8 --alias centos-8 --auto-update
INIT
lxc init alpine311 alpine311 # Initialize image with name
NETWORK
lxc network set $inet_name ipv4.address $address_cidr # set network address for interface $inet
lxc network create $inet_name ipv4.address=10.0.1.1/24 # add network interface
lxc network attach-profile $inet_name stock eth0
PROFILE
lxc profile device add prod root disk pool=prod path=/ # add root path to profile from a pool 'prod'
lxc profile device add $profile_name $device_name disk pool=$pool_name path=$path_ct # add $device_name to $profile_name for $path_ct
lxc profile device add $profile_name $device_name disk source=$path_host path=$path_ct
STORAGE
lxc storage create default zfs size=50GB # create a pool using a loop device named 'default' with zfs driver & a size of 50G
lxc storage create stock zfs source=stock # create a pool 'stock' using a zfs pool named 'stock'
EXEC
echo -e "auth-zone=lxd\ndns-loop-detect" | lxc network set lxdbr0 raw.dnsmasq -
PUBLISH
lxc publish $CTNAME --alias $CTNAME-$HOSTNAME # default compression are gzip
lxc publish $CTNAME --alias $CTNAME-$HOSTNAME --compression xz # good compression but long
TRICK
pretty print
json
lxc list --format=json $ctname$ # '$' is to limit to exact name & not only started with ctname
# pretty print
lxc list --format=json $ctname$ | jq
lxc list --format=json $ctname$ | python -m json.tool
yaml
lxc list --format=yaml $ctname$ # '$' is to limit to exact name & not only started with ctname
# pretty print
lxc list --format=yaml $ctname$ |yq r - -C
profile
yaml
Print name of host interfaces attached to the profile $profile
lxc profile show $profile | yq r - "devices.(type==nic).parent"
Print name of containers that use the profile $profile
lxc profile show $profile | yq r - 'used_by' | sed 's|^.*/||'
LXC
# permit to a container to run in lxc container without 'lxc.apparmor.profile: unconfined' or
security.nesting = true
TOC
chapter |
---|
OPTIONS |
ADD |
DEL |
System maintenance |
FIX |
UPDATE |
UPGRADE |
CACHE |
Querying information about packages |
INFO |
LIST |
SEARCH |
STATS |
DOT |
POLICY |
Repository maintenance |
INDEX |
FETCH |
VERIFY |
MANIFEST |
RC |
RC-STATUS |
RC-UPDATE |
RC-SERVICE |
OPTIONS
Global options
-h, --help # Show generic help or applet specific help
-p, --root DIR # Install packages to DIR
-X, --repository REPO # Use packages from REPO
-q, --quiet # Print less information
-v, --verbose # Print more information (can be doubled)
-i, --interactive # Ask confirmation for certain operations
-V, --version # Print program version and exit
-f, --force # Enable selected --force-* (deprecated)
--force-binary-stdout # Continue even if binary data is to be output
--force-broken-world # Continue even if 'world' cannot be satisfied
--force-non-repository # Continue even if packages may be lost on reboot
--force-old-apk # Continue even if packages use unsupported features
--force-overwrite # Overwrite files in other packages
--force-refresh # Do not use cached files (local or from proxy)
-U, --update-cache # Alias for --cache-max-age 1
--progress # Show a progress bar
--progress-fd FD # Write progress to fd
--no-progress # Disable progress bar even for TTYs
--purge # Delete also modified configuration files (pkg removal) and uninstalled packages from cache (cache clean)
--allow-untrusted # Install packages with untrusted signature or no signature
--wait TIME # Wait for TIME seconds to get an exclusive repository lock before failing
--keys-dir KEYSDIR # Override directory of trusted keys
--repositories-file # REPOFILE Override repositories file
--no-network # Do not use network (cache is still used)
--no-cache # Do not use any local cache path
--cache-dir CACHEDIR # Override cache directory
--cache-max-age AGE # Maximum AGE (in minutes) for index in cache before refresh
--arch ARCH # Use architecture with --root
--print-arch # Print default arch and exit
Commit options
-s, --simulate # Show what would be done without actually doing it
--clean-protected # Do not create .apk-new files in configuration dirs
--overlay-from-stdin # Read list of overlay files from stdin
--no-scripts # Do not execute any scripts
--no-commit-hooks # Skip pre/post hook scripts (but not other scripts)
--initramfs-diskless-boot # Enables options for diskless initramfs boot
ADD
Add PACKAGEs to 'world' and install (or upgrade) them, while ensuring that all dependencies are met
apk add [OPTIONS...] PACKAGE...
--initdb # Initialize database
-u, --upgrade # Prefer to upgrade package
-l, --latest # Select latest version of package (if it is not pinned), and print error if it cannot be installed due to other dependencies
-t, --virtual NAME # Instead of adding all the packages to 'world', create a new virtual package with the listed dependencies and add that to 'world'; the actions of the command are easily reverted by deleting the virtual package
examples
apk add $pkg=$version # install and fix package $pkg in version $version. ex: apk add bash=5.0.0-r0
apk add $pkg=~$major_version # install and fix package $pkg in major version $major_version. ex: apk add bash=~5.0
apk add "$pkg>$version" # install a version of $pkg newer than $version & remove version holding (quote to avoid shell redirection). ex: apk add "bash>5.0.0-r0"
apk add --allow-untrusted $path/$pkg.apk # install untrusted package from file
DEL
Remove PACKAGEs from 'world' and uninstall them
apk del [OPTIONS...] PACKAGE...
-r, --rdepends # Recursively delete all top-level reverse dependencies too
FIX
Repair package or upgrade it without modifying main dependencies
apk fix [OPTIONS...] PACKAGE...
-d, --depends # Fix all dependencies too
-r, --reinstall # Reinstall the package (default)
-u, --upgrade # Prefer to upgrade package
-x, --xattr # Fix packages with broken xattrs
--directory-permissions # Reset all directory permissions
UPDATE
Update repository indexes from all remote repositories
apk update
UPGRADE
Upgrade currently installed packages to match repositories
apk upgrade [OPTIONS...]
-a, --available # Resets versioned world dependencies, and changes to prefer replacing or downgrading packages (instead of holding them) if the currently installed package is no longer available from any repository
-l, --latest # Select latest version of package (if it is not pinned), and print error if it cannot be installed due to other dependencies
--no-self-upgrade # Do not do early upgrade of 'apk-tools' package
--self-upgrade-only # Only do self-upgrade
CACHE
Download missing PACKAGEs to cache and/or delete unneeded files from cache
apk cache [OPTIONS...] sync | clean | download
-u, --upgrade # Prefer to upgrade package
-l, --latest # Select latest version of package (if it is not pinned), and print error if it cannot be installed due to other dependencies
examples
apk cache -v sync # clean cache & download missing packages
INFO
Give detailed information about PACKAGEs or repositories
apk info [OPTIONS...] PACKAGE...
-L, --contents # List contents of the PACKAGE
-e, --installed # Check if PACKAGE is installed
-W, --who-owns # Print the package owning the specified file
-R, --depends # List packages that the PACKAGE depends on
-P, --provides # List virtual packages provided by PACKAGE
-r, --rdepends # List all packages depending on PACKAGE
--replaces # List packages whom files PACKAGE might replace
-i, --install-if # List the PACKAGE's install_if rule
-I, --rinstall-if # List all packages having install_if referencing PACKAGE
-w, --webpage # Show URL for more information about PACKAGE
-s, --size # Show installed size of PACKAGE
-d, --description # Print description for PACKAGE
--license # Print license for PACKAGE
-t, --triggers # Print active triggers of PACKAGE
-a, --all # Print all information about PACKAGE
LIST
List packages by PATTERN and other criteria
apk list [OPTIONS...] PATTERN
-I, --installed # List installed packages only
-O, --orphaned # List orphaned packages only
-a, --available # List available packages only
-u, --upgradable # List upgradable packages only
-o, --origin # List packages by origin
-d, --depends # List packages by dependency
-P, --providers # List packages by provider
SEARCH
Search package by PATTERNs or by indexed dependencies
apk search [OPTIONS...] PATTERN
-a, --all # Show all package versions (instead of latest only)
-d, --description # Search package descriptions (implies -a)
-x, --exact # Require exact match (instead of substring match)
-e # Synonym for -x (deprecated)
-o, --origin # Print origin package name instead of the subpackage
-r, --rdepends # Print reverse dependencies of package
--has-origin # List packages that have the given origin
STATS
Show statistics about repositories and installations
apk stats
examples
apk stats # Show statistics about repositories and installations
DOT
Generate graphviz graphs
apk dot [OPTIONS...] PKGMASK...
--errors # Output only parts of the graph which are considered erroneous: e.g. cycles and missing packages
--installed # Consider only installed packages
POLICY
Show repository policy for packages
apk policy
INDEX
Create repository index file from FILEs
apk index [OPTIONS...] FILE...
-o, --output FILE # Write the generated index to FILE
-x, --index INDEX # Read INDEX to speed up new index creation by reusing the information from an old index
-d, --description TEXT # Embed TEXT as description and version information of the repository index
--rewrite-arch ARCH # Use ARCH as architecture for all packages
FETCH
Download PACKAGEs from global repositories to a local directory
apk fetch [OPTIONS...] PACKAGE...
-L, --link # Create hard links if possible
-R, --recursive # Fetch the PACKAGE and all its dependencies
--simulate # Show what would be done without actually doing it
-s, --stdout # Dump the .apk to stdout (incompatible with -o, -R, --progress)
-o, --output DIR # Directory to place the PACKAGEs to
VERIFY
Verify package integrity and signature
apk verify FILE...
MANIFEST
Show checksums of package contents
apk manifest PACKAGE...
RC-STATUS
run-level
- boot – Generally the only services you should add to the boot runlevel are those which deal with the mounting of filesystems, set the initial state of attached peripherals and logging. Hotplugged services are added to the boot runlevel by the system. All services in the boot and sysinit runlevels are automatically included in all other runlevels except for those listed here
- single – Stops all services except for those in the sysinit runlevel
- reboot – Changes to the shutdown runlevel and then reboots the host
- shutdown – Changes to the shutdown runlevel and then halts the host
- default – Used if no runlevel is specified. (This is generally the runlevel you want to add services to.)
rc-status
rc-status # show services attached to actual run-level
rc-status boot # show services attached to run-level 'boot'
-l --list # Show list of run levels
-a --all # Show services from all run levels
-s --servicelist # Show service list
-m, --manual # show manually started services
-u, --unused # show services not assigned to any runlevel
-c, --crashed # show crashed services
-S, --supervised # show supervised services
RC-UPDATE
rc-update
rc-update show {run-level} # show services attached to a run-level
rc-update add {service} {run-level} # attach service to a run-level
rc-update del {service} {run-level} # detach service from a run-level
rc
rc $runlevel # change run-level to $runlevel
RC-SERVICE
rc-service
rc-service {service} start # start service
rc-service {service} stop # stop service
rc-service {service} restart # restart service
-e, --exists <arg> # tests if the service exists or not
-c, --ifcrashed # if the service is crashed run the command
-i, --ifexists # if the service exists run the command
-I, --ifinactive # if the service is inactive run the command
-N, --ifnotstarted # if the service is not started run the command
-s, --ifstarted # if the service is started run the command
-S, --ifstopped # if the service is stopped run the command
-l, --list # list all available services
-r, --resolve <arg> # resolve the service name to an init script
-Z, --dry-run # dry run (show what would happen)
-q, --quiet # run quietly (repeat to suppress errors)
yt-dlp $link # download $link with the best video & audio format
-F # list all formats available for $link
-f $format # download $link with the specified formats (audio or video, separate files)
-f $videoformat+$audioformat --merge-output-format $container # download $link with the specified formats & merge them into one file. $container is one of mkv, mp4, ogg, webm, flv
-o $file # specify the output file name
--audio-format $format # specify audio format: "best", "aac", "flac", "mp3", "m4a", "opus", "vorbis", or "wav"; "best" by default; No effect without -x
FORMAT
(mp4,webm)[height<480] # use first mp4, second webm with a maximum height of 480
examples
yt-dlp $link -f 'webm[height<800]+bestaudio' --merge-output-format webm # merge specified video + audio formats
yt-dlp $link -f '243+251' --merge-output-format webm # merge specified video + audio formats
manual rip the videos list
After copying right div with inspector from youtube.com in file videos_list.html
videos_file="videos_list.html"
videos_format="244+250"
videos_id=`sed -n "s|.*watch?v=\([^&]\+\)&.*|\1|p" "${videos_file}" | uniq | xargs`
for id in ${videos_id}; do echo yt-dlp "https://www.youtube.com/watch?v=${id}" -f "${videos_format}"; done
create batch from youtube list
grep 'watch?v=' ~/Downloads/yt_ls |sed "s|.*watch?v=\([^&]\+\)&.*|\1|"| sort -u > yt_id
format="247+251"
yt-dlp $(head -n1 yt_id) -F
is=$(wc -l < yt_id); i=0; while read id; do i=$((i+1)); echo "----- $i / $is"; yt-dlp $id -f ${format} || echo $id >> yt_err; done < yt_id
https://github.com/junegunn/vim-plug
TOC
chapter | designation |
---|---|
NORMAL MODE | For navigation and manipulation of text. This is the mode that vim will usually start in |
COMMAND MODE | For executing extra commands like the help, shell, ... |
INSERT/EDIT MODE | Press i to enter insert/edit mode & <esc> to quit |
3 main modes
Normal mode
For navigation and manipulation of text. This is the mode that vim will usually start in
Command mode
For executing extra commands like the help, shell, ...
Insert (edit) mode
For inserting new text, where you type into your file like other editors.
NORMAL MODE
MOTIONS
move around the text (file) by
gg
ctrl+b
ctrl+u
-
k
^
0 / ^ / B / b / h < > l / e / E / w / W / $
v
+
j
ctrl+d
ctrl+f
G
arrow keys
k
^
h < >l
j
word
w # next word
W # next WORD
b # previous word
B # previous WORD
e # end of word
E # end of WORD
w / W # word / WORD
[, ] # block
(, ) # block
<, > # block
", ' #" in double quote or quote
t # XML/HTML tag
s # sentence
line
0 # begin of line (column 0)
^ # begin of line (non-blank)
$ # end of line
\- = k^ # start of previous line
\+ = j^ # start of next line
file
gg / G # go to begin / end of file
[num]gg / [num]G / :num<CR> # go to line num
gd # go to definition of current word
gf # go to the file (under the cursor)
EDITING
syntax
set ft=prolog # set the file type to prolog
copy/paste:
yy # yank/copy current line
p # paste to the next line
P # paste above current line
commands
. # repeat last command
~ # swap case
x # delete the char under the cursor
r # replace char under the cursor
J # merge with the next line
dd # delete current line
D / C # delete / change from cursor to the EOL
u # undo
ctrl+r # redo
visual mode
v # into visual/select mode
V # into visual/select mode by line
ctrl+v # into visual/select mode by block
alignment
== # auto indent
>> # shift right, increase indent
<< # shift left, decrease indent
examples
di) # delete the text inside the current parentheses
ci" #" change the text inside ""
gUiw # make the word under the cursor to upper case
registers
"[char][y|d] #" yank/delete into register
"[char][p|P] #" paste from register
:echo @[char] # shows register content
:reg[isters] # shows all registers
macro
q[char] # start recording into register char
q # stop recording macro
@[char] # play macro in register char
@@ # repeat last playback
code folding
zi # toggles folding on or off
za # toggles current fold open or close
zc # close current fold
zC # close current fold recursively
zo # open current fold
zO # open current fold recursively
zR # open all folds
zM # close all folds
zv # expand folds to reveal the cursor
zk / zj # move to previous / next fold
WINDOW
move inside
H # top of window
M # middle of window
L # low (bottom) of window
zt # scroll to top
zz # scroll to middle
zb # scroll to bottom
ctrl+b / ctrl+f # previous / next page
ctrl+u / ctrl+d # previous / next half page
split
ctrl+w s = :sp[lit] # split current window horizontally
ctrl+w v = :vs[plit] # split current window vertically
ctrl+w c = :cl[ose] # close current window
ctrl+w o = :on[ly] # close all windows except current one
ctrl+w ctrl+w # switch to next split window
ctrl+w ctrl+p # switch to previous split window
ctrl+w hjkl # switch (move cursor) to left, below, above or right split
ctrl+w HJKL # move current window to left, below, above or right
ctrl+w r # rotate window clockwise
ctrl+w = # make all windows equal in size
[num]ctrl+w +- # increase/decrease current window height
[num]ctrl+w <> # increase/decrease current window width
ctrl+w _ # maximize current window height (minimizes the others)
ctrl+w T # move current window to new tab
JUMPS & MARKS
ctrl+o # jump/switch back in the buffer history
ctrl+i # jump/switch forward in the buffer history
ctrl+6 # jump/switch to the buffer you just left
ctrl+] # jump/switch to tag under cursor (if ./tags is available)
' ' # jump/switch back to last jump
'. #' jump/switch to last edited line
} # next paragraph
{ # previous paragraph
% # switch matching (), {} or []
m[char] / '[char] # mark by / jump to [char]
m[CHAR] / '[CHAR] # mark by / jump to [CHAR] across the files.
SPELL CHECKING
]s # jump to next spelling error
[s # jump to previous spelling error
z= # suggest corrections for current word
zg # add current word to the dictionary
zw # remove current word from dictionary
SEARCHING
word
* # find next current word
'#' # find previous current word
/[pattern] # search forward by matching pattern
?[pattern] # search backward by matching pattern
n # next result
N # previous result
[I # show lines with matching word under cursor
character
f[char] # find next exact character in the line
F[char] # find previous exact character in the line
t[char] # till next character: move to just before it in the line
T[char] # till previous character: move to just after it in the line
; # repeat last f/t/F/T action forward
, # repeat last f/t/F/T action backward
COMMAND MODE
editing the text without transition to Insert Mode:
@: # repeat last command-line change (command invoked with ":", for example :s/old/new/).
windows and splits
:sp[lit] = ctrl+w s # split current window horizontally
:vs[plit] = ctrl+w v # split current window vertically
:cl[ose] = ctrl+w c # close current window
:on[ly] = ctrl+w o # close all windows except current one
lists
:jumps # shows the jump list
:changes # shows the change list
:reg[isters] # shows the registers
:marks # shows the marks
:delm[arks] {marks} # delete specified mark(s)
delm a b 1 \" # deletes a, b, 1 and "
delm a-h # deletes all marks from a to h
:delm[arks]! # deletes all lowercase marks
file and buffers
:w[rite] # write current file
:q # close/quit current file, split or tab
:wq = ZZ # write current file and quit
:q! = ZQ # quit without writing the changes
:qa # quit all splits
:ls # list all open files/buffers
:f[ile] = ctrl+g # shows current file path
:e[dit] # open a file for editing
:ene[w] # open a blank new file for editing
:b<n> # jump to buffer n returned from :ls
:b<file> # jump to buffer file, Tab to scroll through available options
:bn[ext] # jump to next buffer
:bp[rev] # jump to previous buffer
:bd[elete] # remove file from buffer list
shell
:mak[e] # run make in current directory
:cw # toggle mini window for errors
:! # executes external shell command
:r[ead] # read external program output into current file
tabs
ctrl+w gf # open file under the cursor into new tab
:tabs # list current tabs and windows
:tabn = <ctrl+PageDown> # next tab
:tabn <n> # goto tab n
:tabp = tabN = <ctrl+PageUp> # previous tab
:tabe [file] # create a new blank tab or opens file in that tab
OPERATORS
operator are generally constructed as:
[operator][count][motion]
[operator]i[motion]
operators:
c # change command ...
d # delete ...
y # yank (copy) ...
g~ # swap case ...
gu # to lower case ...
gU # to upper case ...
HELP
:h cmd # normal mode command help
:h :cmd # command line help for cmd
:h i_cmd # insert mode command help
:h v_cmd # visual mode command help
:h c_cmd # command line editing cmd help
:h 'option' # help of option
:helpg[rep] # search through help docs!
special help sections
:h motions
:h word-motions
:h jump-motions
:h mark-motions
:h operators
:h buffers
:h windows
:h tabs
:h registers
:h pattern-searches
OPTIONS
:set <opt>? # shows current option value
:set no<opt> # turn off flag opt
:set opt # turn on flag opt
:set opt=val # override value of opt
:set opt+=val # append val to opt
:echo &opt # shows value of opt
essential options
hidden or hid # when off, a buffer is unloaded when it's abandoned.
laststatus or ls # shows status line # 0 (never), 1 (only if at least two windows), 2 (always)
hlsearch or hls # highlight search matches
number or nu # shows line number
showcmd or sc # shows command as you type them (may not be available on your compilation)
ruler or ru # shows line and column number of the cursor
wrap # controls line wrapping
ignorecase or ic # ignores case for search patterns
smartindent or si # flag for smart indenting
foldmethod or fdm # fold method
spell / nospell # turn spell checking enable or disable.
SUBSTITUTE
:s/search/replace/ # basic substitution on a line
:%s/search/replace/ # run substitution on every line
:%s/search/replace/g # g flag means apply to every match
:%s/search/replace/c # c flag means ask for confirmation
tags / ctags
by executing $> ctags -R under project tree:
:tag <name>TAB # goes to tag name
ctrl+] # goes to the tag under cursor
INSERT/EDIT MODE
insert
i # insert at left of cursor
a # insert at right of cursor
I # insert at the line beginning (non-blank)
A # insert at end of line
o # insert by adding new line below the cursor
O # insert by insert new line above the cursor
s # substitute at cursor and enter insert mode
S = ^DA = ddO # delete current line and enter insert mode
C = c$ # change line from cursor to EOL
mode change
Esc = ctrl+c = ctrl+[ # exit insert mode
auto complete
ctrl+p # auto-complete / previous item
ctrl+n # auto-complete / next item
ctrl+xctrl+l # auto complete line mode
cool editing stuff
ctrl+w # delete word before cursor
ctrl+u # delete line before cursor
ctrl+r[char] # insert content of register [char]
ctrl+t # increase line indent
ctrl+d # decrease line indent
https://gist.github.com/azadkuh/5d223d46a8c269dadfe4
OPTIONS
:syntax on # enable syntax highlighting
:syntax off # disable syntax highlighting
:set nu / :set number # show line numbers
:set nonu / :set nonumber / set nu! # hide line numbers
VIMDIFF
ctrl+(w+w) # toggle buffer
do # get changes from other window into the current window
dp # put the changes from current window into the other window
]c # jump to the next change
[c # jump to the previous change
zo # open fold
zc # close fold
zr # reducing folding level
zm # one more folding level, please
:diffupdate, :diffu # recalculate the diff
:diffg RE # get from REMOTE
:diffg BA # get from BASE
:diffg LO # get from LOCAL
VIM to VIMDIFF
:vs file # vertical split with file
:split file # horizontal split file
ctrl+w ctrl+w # switch cursors to different split screen
:diffthis # invoke "diff mode" in file
:diffthis # switch to other file and invoke "diff mode"
:diffoff # turn off "diff mode"
SSH
change identity of key in 'authorized_keys'
file="/root/.ssh/authorized_keys"
sudo sed -i '/manjaro@970g/ s|^.* \(ssh-.*\)$|\1|' $file # -i edits the file in place; without it sed only prints
sudo systemctl restart sshd.service
APT SOURCES
add contrib to main backports
file="/etc/apt/sources.list"
sed -i '/backports/ s| main| main contrib|' $file
apt update
ZFS
install zfs
apt install zfs-dkms zfsutils-linux # install zfs tools
echo -e "# zfs utils\nzfs" >> /etc/modules
modprobe zfs # or reboot
lsmod | grep zfs # verify zfs module is correctly loaded
systemctl status zfs-* # verify all zfs services are correctly started
format
fdisk $device # use 36 for FreeBSD type
https://medium.com/@cq94/zfs-vous-connaissez-vous-devriez-1d2611e7dad6
The zfs command configures ZFS datasets within a ZFS storage pool. A dataset is identified by a unique path within the ZFS namespace
A dataset can be one of the following:
- File system
A ZFS dataset of type filesystem can be mounted within the standard system namespace and behaves like other file systems - Volume
A logical volume exported as a raw or block device - Snapshot
A read-only version of a file system or volume at a given point in time. It is specified as filesystem@name or volume@name. - Bookmark
Much like a snapshot, but without the hold on on-disk data. It can be used as the source of a send (but not for a receive). It is specified as filesystem#name or volume#name.
ZFS File System Hierarchy
A ZFS storage pool is a logical collection of devices that provide space for datasets. A storage pool is also the root of the ZFS file system hierarchy
- Snapshots
A snapshot is a read-only copy of a file system or volume - Bookmarks
A bookmark is like a snapshot, a read-only copy of a file system or volume. Bookmarks can be created extremely quickly, compared to snapshots, and they consume no additional space within the pool. Unlike snapshots, bookmarks can not be accessed through the filesystem in any way - Clones
A clone is a writable volume or file system whose initial contents are the same as another dataset. Clones can only be created from a snapshot. As with snapshots, creating a clone is nearly instantaneous, and initially consumes no additional space - Mount Points
Creating a ZFS file system is a simple operation, so the number of file systems per system is likely to be numerous - Deduplication
Deduplication is the process for removing redundant data at the block level, reducing the total amount of data stored
SUBCOMMANDS
subcommand | Designation |
---|---|
CREATE | Creates a new ZFS file system |
DESTROY | Destroys the given dataset |
SNAPSHOT | Creates snapshots with the given names |
ROLLBACK | Roll back the given dataset to a previous snapshot |
CLONE | Creates a clone of the given snapshot |
PROMOTE | Promotes a clone file system to no longer be dependent on its "origin" snapshot |
RENAME | Renames dataset |
LIST | Lists the property information for the given datasets in tabular form |
SET | Sets the property or list of properties to the given value(s) for each dataset |
GET | Displays properties for the given datasets |
INHERIT | Clears the specified property, causing it to be inherited from an ancestor |
USERSPACE | Displays space consumed by, and quotas on, each user in the specified filesystem or snapshot |
GROUPSPACE | Displays space consumed by, and quotas on, each group in the specified filesystem or snapshot |
MOUNT | Displays all ZFS file systems currently mounted or mounts it |
UNMOUNT | Unmounts currently mounted ZFS file systems |
SHARE | Shares available ZFS file systems |
UNSHARE | Unshares currently shared ZFS file systems |
BOOKMARK | Creates a bookmark of the given snapshot |
SEND | Creates a stream representation of the second snapshot |
RECEIVE | Creates a snapshot whose contents are as specified in the stream provided |
ALLOW | Displays permissions or Delegates ZFS administration permission for the file systems to non-privileged users |
UNALLOW | Removes permissions that were granted with the zfs allow command |
HOLD | Adds a single reference, named with the tag argument, to the specified snapshot or snapshots |
HOLDS | Lists all existing user references for the given snapshot or snapshots |
RELEASE | Removes a single reference, named with the tag argument, from the specified snapshot or snapshots |
DIFF | Display the difference between a snapshot of a given filesystem and another snapshot of that filesystem from a later time or the current contents of the filesystem |
PROPERTIES |
CREATE
Creates a new ZFS file system
zfs create [-p] [-o property=value]... filesystem # Creates a new ZFS file system. The file system is automatically mounted according to the mountpoint property inherited from the parent.
-o property=value # Sets the specified property as if the command zfs set property=value was invoked at the same time the dataset was created
-p # Creates all the non-existing parent datasets. Datasets created in this manner are automatically mounted according to the mountpoint property inherited from their parent.
Creates a volume of the given size
zfs create [-ps] [-b blocksize] [-o property=value]... -V size volume # Creates a volume of the given size. The volume is exported as a block device in /dev/zvol/path, where path is the name of the volume in the ZFS namespace
-b blocksize # Equivalent to -o volblocksize=blocksize
-o property=value # Sets the specified property as if the zfs set property=value command was invoked at the same time the dataset was created
-p # Creates all the non-existing parent datasets
-s # Creates a sparse volume with no reservation
examples
zfs create -o mountpoint=/var -o compression=lz4 $fs # create a filesytem with a mountpoint & compression options
zfs set quota=10G $fs # set a 10G quota on the filesystem (use userquota@user for per-user quotas)
zfs set compression=lz4 $fs # set lz4 compression for a fs
zfs set mountpoint=/var $fs # set mountpoint for a filesystem
DESTROY
Destroys the given dataset
zfs destroy [-Rfnprv] filesystem|volume
-R # Recursively destroy all dependents, including cloned file systems outside the target hierarchy.
-f # Force an unmount of any file systems using the unmount -f command
-n # Do a dry-run ("No-op") deletion. No data will be deleted
-p # Print machine-parsable verbose information about the deleted data
-r # Recursively destroy all children
-v # Print verbose information about the deleted data
Destroys the given snapshot
The given snapshots are destroyed immediately if and only if the zfs destroy command without the -d option would have destroyed it
zfs destroy [-Rdnprv] filesystem|volume@snap[%snap[,snap[%snap]]]...
-R # Recursively destroy all clones of these snapshots, including the clones, snapshots, and children. -d flag will have no effect
-d # Defer snapshot deletion.
-n # Do a dry-run ("No-op") deletion. No data will be deleted
-p # Print machine-parsable verbose information about the deleted data
-r # Destroy (or mark for deferred deletion) all snapshots with this name in descendent file systems
-v # Print verbose information about the deleted data
Destroys the given bookmark
zfs destroy filesystem|volume#bookmark
SNAPSHOT
Creates snapshots with the given names
zfs snapshot [-r] [-o property=value]... filesystem@snapname|volume@snapname...
-o property=value # Sets the specified property; see zfs create for details
-r # Recursively create snapshots of all descendent datasets
ROLLBACK
Roll back the given dataset to a previous snapshot
Roll back the given dataset to a previous snapshot. The -rR options do not recursively destroy the child snapshots of a recursive snapshot. Only direct snapshots of the specified filesystem are destroyed by either of these options. To completely roll back a recursive snapshot, you must rollback the individual child snapshots.
zfs rollback [-Rfr] snapshot
-R # Destroy any more recent snapshots and bookmarks, as well as any clones of those snapshots
-f # Used with the -R option to force an unmount of any clone file systems that are to be destroyed
-r # Destroy any snapshots and bookmarks more recent than the one specified
CLONE
Creates a clone of the given snapshot
zfs clone [-p] [-o property=value]... snapshot filesystem|volume
-o property=value # Sets the specified property
-p # Creates all the non-existing parent datasets. Datasets created in this manner are automatically mounted according to the mountpoint property inherited from their parent
PROMOTE
Promotes a clone file system to no longer be dependent on its "origin" snapshot
zfs promote clone-filesystem
RENAME
Renames dataset
Renames the given dataset. The new target can be located anywhere in the ZFS hierarchy, with the exception of snapshots. Snapshots can only be renamed within the parent file system or volume
zfs rename [-fp] filesystem|volume filesystem|volume
-f # Force unmount any filesystems that need to be unmounted in the process
-p # Creates all the nonexistent parent datasets. Datasets created in this manner are automatically mounted according to the mountpoint property inherited from their parent
Renames snapshot
Recursively rename the snapshots of all descendent datasets. Snapshots are the only dataset that can be renamed recursively
zfs rename -r snapshot snapshot
LIST
Lists the property information for the given datasets in tabular form
zfs list [-r|-d depth] [-Hp] [-o property[,property]...] [-s property]... [-S property]... [-t type[,type]...] [filesystem|volume|snapshot]...
-H # Used for scripting mode. Do not print headers and separate fields by a single tab instead of arbitrary white space
-S property # Same as the -s option, but sorts by property in descending order
-d depth # Recursively display any children of the dataset, limiting the recursion to depth
-o property # A comma-separated list of properties to display
-p # Display numbers in parsable (exact) values
-r # Recursively display any children of the dataset on the command line
-s property # A property for sorting the output by column in ascending order based on the value of the property
-t type # A comma-separated list of types to display, where type is one of filesystem, snapshot, volume, bookmark, or all
examples
zfs list -o name,used,available,readonly,exec,referenced,mountpoint,mounted,quota,clones
zfs list -t all -r $pool # print recursively space used by volumes
SET
Sets the property or list of properties to the given value(s) for each dataset. Only some properties can be edited
zfs set property=value [property=value]... filesystem|volume|snapshot...
GET
Displays properties for the given datasets
zfs get [-r|-d depth] [-Hp] [-o field[,field]...] [-s source[,source]...] [-t type[,type]...] all | property[,property]... filesystem|volume|snapshot|bookmark...
name Dataset name
property Property name
value Property value
source Property source. Can either be local, default, temporary, inherited, or none (-)
-H # Any headers are omitted, and fields are explicitly separated by a single tab instead of an arbitrary amount of space
-d depth # Recursively display any children of the dataset, limiting the recursion to depth
-o field # A comma-separated list of columns to display. name,property,value,source is the default value
-p # Display numbers in parsable (exact) values
-r # Recursively display properties for any children
-s source # A comma-separated list of sources to display
-t type # A comma-separated list of types to display, where type is one of filesystem, snapshot, volume, bookmark, or all
INHERIT
Clears the specified property, causing it to be inherited from an ancestor, restored to default if no ancestor has the property set, or with the -S option reverted to the received value if one exists
zfs inherit [-rS] property filesystem|volume|snapshot...
-r # Recursively inherit the given property for all children
-S # Revert the property to the received value if one exists, otherwise operate as if the -S option was not specified
USERSPACE
Displays space consumed by, and quotas on, each user in the specified filesystem or snapshot. This corresponds to the userused@user, userobjused@user, userquota@user, and userobjquota@user properties.
zfs userspace [-Hinp] [-o field[,field]...] [-s field]... [-S field]... [-t type[,type]...] filesystem|snapshot
-H # Do not print headers, use tab-delimited output
-S field # Sort by this field in reverse order
-i # Translate SID to POSIX ID. The POSIX ID may be ephemeral if no mapping exists
-n # Print numeric ID instead of user/group name
-o field[,field]... # Display only the specified fields from the following set: type, name, used, quota. The default is to display all fields.
-p # Use exact (parsable) numeric output
-s field # Sort output by this field. The -s and -S flags may be specified multiple times to sort first by one field, then by another. The default is -s type -s name
-t type[,type]... # Print only the specified types from the following set: all, posixuser, smbuser, posixgroup, smbgroup. The default is -t posixuser,smbuser. The default can be changed to include group types.
GROUPSPACE
Displays space consumed by, and quotas on, each group in the specified filesystem or snapshot
zfs groupspace [-Hinp] [-o field[,field]...] [-s field]... [-S field]... [-t type[,type]...] filesystem|snapshot
MOUNT
display
Displays all ZFS file systems currently mounted
zfs mount
mount
Mounts ZFS file systems
zfs mount [-Ov] [-o options] -a | filesystem
-O # Perform an overlay mount
-a # Mount all available ZFS file systems. Invoked automatically as part of the boot process
filesystem # Mount the specified filesystem
-o options # An optional, comma-separated list of mount options to use temporarily for the duration of the mount
-v # Report mount progress
UNMOUNT
Unmounts currently mounted ZFS file systems
zfs unmount [-f] -a | filesystem|mountpoint
-a # Unmount all available ZFS file systems
filesystem|mountpoint # Unmount the specified filesystem
-f # Forcefully unmount the file system, even if it is currently in use
SHARE
Shares available ZFS file systems
zfs share -a | filesystem
-a # Share all available ZFS file systems
filesystem # Share the specified filesystem according to the sharenfs and sharesmb properties
UNSHARE
Unshares currently shared ZFS file systems
zfs unshare -a | filesystem|mountpoint
-a # Unshare all available ZFS file systems
filesystem|mountpoint # Unshare the specified filesystem
BOOKMARK
Creates a bookmark of the given snapshot. Bookmarks mark the point in time when the snapshot was created, and can be used as the incremental source for a zfs send command
zfs bookmark snapshot bookmark
SEND
Creates a stream
Creates a stream representation of the second snapshot, which is written to standard output
zfs send [-DLPRcenpv] [[-I|-i] snapshot] snapshot
-D, --dedup # Generate a deduplicated stream. Blocks which would have been sent multiple times in the send stream will only be sent once. The receiving system must also support this feature to receive a deduplicated stream
-I snapshot # Generate a stream package that sends all intermediary snapshots from the first snapshot to the second snapshot
-L, --large-block # Generate a stream which may contain blocks larger than 128KB
-P, --parsable # Print machine-parsable verbose information about the stream package generated
-R, --replicate # Generate a replication stream package, which will replicate the specified file system, and all descendent file systems, up to the named snapshot
-e, --embed # Generate a more compact stream by using WRITE_EMBEDDED records for blocks which are stored more compactly on disk by the embedded_data pool feature
-c, --compressed # Generate a more compact stream by using compressed WRITE records for blocks which are compressed on disk and in memory
-i snapshot # Generate an incremental stream from the first snapshot (the incremental source) to the second snapshot (the incremental target)
-n, --dryrun # Do a dry-run ("No-op") send
-p, --props # Include the dataset's properties in the stream
-v, --verbose # Print verbose information about the stream package generated
Generate a send stream
Generate a send stream, which may be of a filesystem, and may be incremental from a bookmark
zfs send [-Lce] [-i snapshot|bookmark] filesystem|volume|snapshot
-L, --large-block # Generate a stream which may contain blocks larger than 128KB
-c, --compressed # Generate a more compact stream by using compressed WRITE records for blocks which are compressed on disk and in memory
-e, --embed # Generate a more compact stream by using WRITE_EMBEDDED records for blocks which are stored more compactly on disk by the embedded_data pool feature
-i snapshot|bookmark # Generate an incremental send stream
Generate a send stream
Creates a send stream which resumes an interrupted receive
zfs send [-Penv] -t receive_resume_token
RECEIVE
Creates a snapshot
Creates a snapshot whose contents are as specified in the stream provided on standard input
zfs receive [-Fnsuv] [-d|-e] [-o origin=snapshot] [-o property=value] [-x property] filesystem
-F # Force a rollback of the file system to the most recent snapshot before performing the receive operation
-d # Discard the first element of the sent snapshot's file system name
-e # Discard all but the last element of the sent snapshot's file system name
-n # Do not actually receive the stream
-o origin=snapshot # Forces the stream to be received as a clone of the given snapshot
-o property=value # Sets the specified property as if the command zfs set property=value was invoked immediately before the receive
-s # If the receive is interrupted, save the partially received state, rather than deleting it
-u # File system that is associated with the received stream is not mounted
-v # Print verbose information about the stream and the time required to perform the receive operation
-x property # Ensures that the effective value of the specified property after the receive is unaffected by the value of that property in the send stream (if any), as if the property had been excluded from the send stream
Abort an interrupted receive
Abort an interrupted zfs receive -s, deleting its saved partially received state
zfs receive -A filesystem|volume
ALLOW
Display
Displays permissions that have been delegated on the specified filesystem or volume
zfs allow filesystem|volume
Delegates permission
Delegates ZFS administration permission for the file systems to non-privileged users
zfs allow [-dglu] user|group[,user|group]... perm|@setname[,perm|@setname]... filesystem|volume zfs allow [-dl] -e|everyone perm|@setname[,perm|@setname]... filesystem|volume
-d # Allow only for the descendent file systems
-e|everyone # Specifies that the permissions be delegated to everyone
-g group[,group]... # Explicitly specify that permissions are delegated to the group
-l # Allow "locally" only for the specified file system
-u user[,user]... # Explicitly specify that permissions are delegated to the user
user|group[,user|group]... # Specifies to whom the permissions are delegated. Multiple entities can be specified as a comma-separated list
perm|@setname[,perm|@setname]... # The permissions to delegate. Multiple permissions may be specified as a comma-separated list. Permissions are generally the ability to use a ZFS subcommand or change a ZFS property
Available permissions
NAME TYPE NOTES
allow subcommand Must also have the permission that is being allowed
clone subcommand Must also have the 'create' ability and 'mount' ability in the origin file system
create subcommand Must also have the 'mount' ability
destroy subcommand Must also have the 'mount' ability
diff subcommand Allows lookup of paths within a dataset given an object number, and the ability to create snapshots necessary to 'zfs diff'
mount subcommand Allows mount/umount of ZFS datasets
promote subcommand Must also have the 'mount' and 'promote' ability in the origin file system
receive subcommand Must also have the 'mount' and 'create' ability
rename subcommand Must also have the 'mount' and 'create' ability in the new parent
rollback subcommand Must also have the 'mount' ability
send subcommand
share subcommand Allows sharing file systems over NFS or SMB protocols
snapshot subcommand Must also have the 'mount' ability
groupquota other Allows accessing any groupquota@... property
groupused other Allows reading any groupused@... property
userprop other Allows changing any user property
userquota other Allows accessing any userquota@... property
userused other Allows reading any userused@... property
aclinherit property
acltype property
atime property
canmount property
casesensitivity property
checksum property
compression property
copies property
devices property
exec property
filesystem_limit property
mountpoint property
nbmand property
normalization property
primarycache property
quota property
readonly property
recordsize property
refquota property
refreservation property
reservation property
secondarycache property
setuid property
sharenfs property
sharesmb property
snapdir property
snapshot_limit property
utf8only property
version property
volblocksize property
volsize property
vscan property
xattr property
zoned property
time permission
Sets "create time" permissions
zfs allow -c perm|@setname[,perm|@setname]... filesystem|volume
to a set
Defines or adds permissions to a permission set
zfs allow -s @setname perm|@setname[,perm|@setname]... filesystem|volume
UNALLOW
Removes permissions
Removes permissions that were granted with the zfs allow command. No permissions are explicitly denied, so other permissions granted are still in effect
zfs unallow [-dglru] user|group[,user|group]... [perm|@setname[,perm|@setname]...] filesystem|volume zfs unallow [-dlr] -e|everyone [perm|@setname[,perm|@setname]...] filesystem|volume zfs unallow [-r] -c
[perm|@setname[,perm|@setname]...] filesystem|volume
-r # Recursively remove the permissions from this file system and all descendents
from a set
Removes permissions from a permission set
zfs unallow [-r] -s @setname [perm|@setname[,perm|@setname]...] filesystem|volume
HOLD
Adds a single reference, named with the tag argument, to the specified snapshot or snapshots
zfs hold [-r] tag snapshot...
-r # Specifies that a hold with the given tag is applied recursively to the snapshots of all descendent file systems
HOLDS
Lists all existing user references for the given snapshot or snapshots
zfs holds [-r] snapshot...
-r # Lists the holds that are set on the named descendent snapshots, in addition to listing the holds on the named snapshot
RELEASE
Removes a single reference, named with the tag argument, from the specified snapshot or snapshots
zfs release [-r] tag snapshot...
-r # Recursively releases a hold with the given tag on the snapshots of all descendent file systems
DIFF
Display the difference between a snapshot of a given filesystem and another snapshot of that filesystem from a later time or the current contents of the filesystem
zfs diff [-FHt] snapshot snapshot|filesystem
-F # Display an indication of the type of file, in a manner similar to the -F option of ls(1).
-F B Block device
-F C Character device
-F / Directory
-F > Door
-F | Named pipe
-F @ Symbolic link
-F P Event port
-F = Socket
-F F Regular file
-H # Give more parsable tab-separated output, without header lines and without arrows.
-t # Display the path's inode change time as the first column of output.
The types of change are:
- The path has been removed
+ The path has been created
M The path has been modified
R The path has been renamed
PROPERTIES
Properties are divided into two types, native and user properties. Native properties either export internal statistics or control ZFS behavior. In addition, native properties are either editable or read-only
available # The amount of space available to the dataset and all its children
compressratio # For non-snapshots, the compression ratio achieved for the used space of this dataset, expressed as a multiplier
createtxg # The transaction group (txg) in which the dataset was created
creation # The time this dataset was created
clones # For snapshots, this property is a comma-separated list of filesystems or volumes which are clones of this snapshot
defer_destroy # This property is on if the snapshot has been marked for deferred destroy by using the zfs destroy -d command
filesystem_count # The total number of filesystems and volumes that exist under this location in the dataset tree
guid # The 64 bit GUID of this dataset or bookmark which does not change over its entire lifetime
logicalreferenced # The amount of space that is "logically" accessible by this dataset
logicalused # The amount of space that is "logically" consumed by this dataset and all its descendents
mounted # For file systems, indicates whether the file system is currently mounted
origin # For cloned file systems or volumes, the snapshot from which the clone was created
receive_resume_token For filesystems or volumes which have saved partially-completed state from zfs receive -s, this opaque token can be provided to zfs send -t to resume and complete the zfs receive
referenced # The amount of data that is accessible by this dataset, which may or may not be shared with other datasets in the pool
refcompressratio # The compression ratio achieved for the referenced space of this dataset, expressed as a multiplier
snapshot_count # The total number of snapshots that exist under this location in the dataset tree
type # The type of dataset: filesystem, volume, or snapshot
used # The amount of space consumed by this dataset and all its descendents
usedby* # The usedby* properties decompose the used properties into the various reasons that space is used
usedbychildren # The amount of space used by children of this dataset, which would be freed if all the dataset's children were destroyed
usedbydataset # The amount of space used by this dataset itself
usedbyrefreservation # The amount of space used by a refreservation set on this dataset
usedbysnapshots # The amount of space consumed by snapshots of this dataset
userused@user # The amount of space consumed by the specified user in this dataset
userobjused@user # The userobjused property is similar to userused but instead it counts the number of objects consumed by a user
userrefs # This property is set to the number of user holds on this snapshot
groupused@group # The amount of space consumed by the specified group in this dataset
groupobjused@group # The number of objects consumed by the specified group in this dataset
volblocksize # For volumes, specifies the block size of the volume
written # The amount of space referenced by this dataset that was written since the previous snapshot (i.e. not referenced by the previous snapshot)
written@snapshot # The amount of referenced space written to this dataset since the specified snapshot
The following native properties can be used to change the behavior of a ZFS dataset
aclinherit=discard|noallow|restricted|passthrough|passthrough-x # Controls how ACEs are inherited when files and directories are created
acltype=off|noacl|posixacl # Controls whether ACLs are enabled and if so what type of ACL to use
atime=on|off # Controls whether the access time for files is updated when they are read
canmount=on|off|noauto # If this property is set to off, the file system cannot be mounted, and is ignored by zfs mount -a
checksum=on|off|fletcher2|fletcher4|sha256|noparity|sha512|skein|edonr # Controls the checksum used to verify data integrity
compression=on|off|gzip|gzip-N|lz4|lzjb|zle # Controls the compression algorithm used for this dataset
context=none|SELinux_User:SElinux_Role:Selinux_Type:Sensitivity_Level # This flag sets the SELinux context for all files in the file system under a mount point for that file system
fscontext=none|SELinux_User:SElinux_Role:Selinux_Type:Sensitivity_Level # This flag sets the SELinux context for the file system being mounted
defcontext=none|SELinux_User:SElinux_Role:Selinux_Type:Sensitivity_Level # This flag sets the SELinux default context for unlabeled files
rootcontext=none|SELinux_User:SElinux_Role:Selinux_Type:Sensitivity_Level # This flag sets the SELinux context for the root inode of the file system
copies=1|2|3 # Controls the number of copies of data stored for this dataset
devices=on|off # Controls whether device nodes can be opened on this file system
dnodesize=legacy|auto|1k|2k|4k|8k|16k # Specifies a compatibility mode or literal value for the size of dnodes in the file system.
exec=on|off # Controls whether processes can be executed from within this file system
filesystem_limit=count|none # Limits the number of filesystems and volumes that can exist under this point in the dataset tree
mountpoint=path|none|legacy # Controls the mount point used for this file system
nbmand=on|off # Controls whether the file system should be mounted with nbmand (Non Blocking mandatory locks)
overlay=off|on # Allow mounting on a busy directory or a directory which already contains files or directories
primarycache=all|none|metadata # Controls what is cached in the primary cache (ARC)
quota=size|none # Limits the amount of space a dataset and its descendents can consume
snapshot_limit=count|none # Limits the number of snapshots that can be created on a dataset and its descendents
userquota@user=size|none # Limits the amount of space consumed by the specified user
userobjquota@user=size|none # The userobjquota is similar to userquota but it limits the number of objects a user can create
groupquota@group=size|none # Limits the amount of space consumed by the specified group
groupobjquota@group=size|none # The groupobjquota is similar to groupquota but it limits number of objects a group can consume
readonly=on|off # Controls whether this dataset can be modified
recordsize=size # Specifies a suggested block size for files in the file system
redundant_metadata=all|most # Controls what types of metadata are stored redundantly
refquota=size|none # Limits the amount of space a dataset can consume
refreservation=size|none # The minimum amount of space guaranteed to a dataset, not including its descendents
relatime=on|off # Controls the manner in which the access time is updated when atime=on is set
reservation=size|none # The minimum amount of space guaranteed to a dataset and its descendants
secondarycache=all|none|metadata # Controls what is cached in the secondary cache (L2ARC)
setuid=on|off # Controls whether the setuid bit is respected for the file system
sharesmb=on|off|opts # Controls whether the file system is shared by using Samba USERSHARES and what options are to be used
sharenfs=on|off|opts # Controls whether the file system is shared via NFS, and what options are to be used
logbias=latency|throughput # Provide a hint to ZFS about handling of synchronous requests in this dataset
snapdev=hidden|visible # Controls whether the volume snapshot devices under /dev/zvol/<pool> are hidden or visible
snapdir=hidden|visible # Controls whether the .zfs directory is hidden or visible in the root of the file system as discussed in the Snapshots section
sync=standard|always|disabled # Controls the behavior of synchronous requests (e.g. fsync, O_DSYNC)
version=N|current # The on-disk version of this file system, which is independent of the pool version
volsize=size # For volumes, specifies the logical size of the volume
volmode=default|full|geom|dev|none # This property specifies how volumes should be exposed to the OS
vscan=on|off # Controls whether regular files should be scanned for viruses when a file is opened and closed
xattr=on|off|sa # Controls whether extended attributes are enabled for this file system
zoned=on|off # Controls whether the dataset is managed from a non-global zone
The following three properties cannot be changed after the file system is created, and therefore, should be set when the file system is created
If the properties are not set with the zfs create or zpool create commands, these properties are inherited from the parent dataset. If the parent dataset lacks these properties due to having been created prior to these features being supported, the new file system will have the default values for these properties.
casesensitivity=sensitive|insensitive|mixed # Indicates whether the file name matching algorithm used by the file system should be case-sensitive, case-insensitive, or allow a combination of both styles of matching
normalization=none|formC|formD|formKC|formKD # Indicates whether the file system should perform a unicode normalization of file names whenever two file names are compared, and which normalization algorithm should be used
utf8only=on|off # Indicates whether the file system should reject file names that include characters that are not present in the UTF-8 character code set
MT6750T BIOS
- auto test
- manual test
- single test
- test report
- debug test system
- clear emmc
- version information
- restart
BIOS RESET
vol-Down + power # wait until robot appears
FACTORY RESET
vol-Up + power # wait until robot appears
power + vol-Up # keep power & press only one time on vol-up
SOFTWARE
DOWNLOAD
f-droid
whats-app
ampere
F-DROID
audio recorder
calendar notifications
osm and+
davx5
Dir
Document viewer
firefox klar
k-9 mail
libreoffice viewer
nextcloud
opencamera
openkeychain
pdf creator
simple flashlight
simple gallery pro
sms backup+
tasks
telegram
todo agenda for Android 4 - 7.0
vanilla music
vanilla tag editor
vlc
wikipedia
MTP MOUNT POINT
/run/user/1000/gvfs/... # manjaro
rsync /run/user/1000/gvfs/mtp\:host\=motorola_Moto_G__5__ZY3228VHQG/Carte\ SD/ Downloads/moto-g5/ -av --delete -n # rsync moto-g5
rsync
A fast, versatile, remote (and local) file-copying tool
-v, --verbose # increase verbosity
-q, --quiet # suppress non-error messages
-a, --archive # archive mode; equals -rlptgoD (no -H,-A,-X)
-r, --recursive # recurse into directories
-l, --links # copy symlinks as symlinks
-p, --perms # preserve permissions
-t, --times # preserve modification times
-o, --owner # preserve owner (super-user only)
-g, --group # preserve group
--devices # preserve device files (super-user only)
--specials # preserve special files
-D # same as --devices --specials
-e # specify the remote ssh options to use
examples
rsync -rlptDv --delete -n # same as -av --delete but without preserving group & user
rsync -e "ssh -p 22" # use specified port 22 to connect to remote server
rsync Music/ root@node1:/save/vm/nextcloud/data/aguy/files/perso/music/ -rlptDv --delete -n
https://ss64.com/bash/syntax.html
SYNTAX
command > filename # Redirect command output (stdout) into a file
command > /dev/null # Discard stdout of command
command 2> filename # Redirect error output (stderr) to a file
command 2>&1 # Redirect stderr to stdout (no filename operand; 2>&1 duplicates fd 1 onto fd 2)
command 1>&2 # Redirect stdout to stderr
command >> filename # Redirect command output and APPEND into a file
command < filename # Redirect a file into a command
command1 < <(command2) # Redirect the output of command2 as file input to command1 (process substitution; no space inside "<(")
command1 | tee filename | command2 # Redirect command1 into filename AND command2
command1 | command2 # Redirect stdout of command1 to command2
command1 |& command2 # Redirect BOTH stdout and stderr of command1 to command2 (shorthand for: command1 2>&1 | command2)
command1 & command2 # Run command1 and then run command2 (asynchronous).
command1 ; command2 # Run command1 and afterwards run command2 (synchronous)
command1 && command2 # Run command2 only if command1 is successful (synchronous AND)
command1 || command2 # Run command2 only if command1 is NOT successful
command & # Run command asynchronously in the background (in a subshell).
command &> filename # Redirect every output of command to filename
command > >(tee -a filename1 filename2) # Redirect command output (stdout) to stdout and into filename1 and filename2
# noclobber option can prevent overwriting an existing file
$ set -o noclobber turns ON noclobber
$ set +o noclobber turns OFF noclobber
[n]<word # Redirection of input causes the file whose name results from the expansion of word to be opened for reading on file descriptor n, or the standard input (file descriptor 0) if n is not specified.
[n]>[|]word # Redirection of output causes the file whose name results from the expansion of word to be opened for writing on file descriptor n, or the standard output (file descriptor 1) if n is not specified. If the file does not exist it is created; if it does exist it is truncated to zero size. If the redirection operator is '>', and the noclobber option to the set builtin has been enabled, the redirection will fail if the file whose name results from the expansion of word exists and is a regular file. If the redirection operator is '>|', or the redirection operator is '>' and the noclobber option is not enabled, the redirection is attempted even if the file named by word exists.
[n]>>word # Redirection of output in this fashion causes the file whose name results from the expansion of word to be opened for appending on file descriptor n, or the standard output (file descriptor 1) if n is not specified. If the file does not exist it is created.
# There are three formats for redirecting standard output and standard error:
&>word
>&word
>word 2>&1
ls > dirlist 2>&1 # directs both standard output (file descriptor 1) and standard error (file descriptor 2) to the file dirlist, while the command
ls 2>&1 > dirlist # directs only the standard output to file dirlist, because the standard error was duplicated as standard output before the standard output was redirected to dirlist.
DESCRIPTOR
exec 3< echolist # for reading
exec 3<&- # close fd 3 (opened for reading)
exec 3>&- # close fd 3 (opened for writing)
exec 3<&1 # make fd 3 a copy of stdout
exec 2> >(tee -a /tmp/2) > >(tee -a /tmp/1) 4>&1 # duplicate stderror & stdout in files & 4 in 1
examples
echo 1234567890 > $file # Write string to file
exec 3<> $file # Open $file and assign fd 3 to it
read -n 4 <&3 # Read only 4 characters
echo -n . >&3 # Write a decimal point there
exec 3>&- # Close fd 3
cat $file # show 1234.67890
SPECIAL FILE FOR REDIRECTIONS
/dev/fd/fd # If fd is a valid integer, file descriptor fd is duplicated
/dev/stdin # File descriptor 0 is duplicated
/dev/stdout # File descriptor 1 is duplicated
/dev/stderr # File descriptor 2 is duplicated
/dev/tcp/host/port # If host is a valid hostname or Internet address, and port is an integer port number, Bash attempts to open a TCP connection to the corresponding socket
/dev/udp/host/port # If host is a valid hostname or Internet address, and port is an integer port number, Bash attempts to open a UDP connection to the corresponding socket
HERE DOCUMENTS
This type of redirection instructs the shell to read input from the current source until a line containing only word (with no trailing blanks) is seen. All of the lines read up to that point are then used as the standard input for a command. If the redirection operator is '<<-', then all leading tab characters are stripped from input lines and the line containing delimiter. This allows here-documents within shell scripts to be indented in a natural fashion
<<[-]word
here-document
word
HERE STRINGS
A here string can be considered as a stripped-down form of a here document.
It consists of nothing more than command <<<$word, where $word is expanded and fed to the stdin of command.
command <<<$word
command <<<"$word" # keep formatting
DUPLICATING FILE DESCRIPTORS
[n]<&word
Is used to duplicate input file descriptors. If word expands to one or more digits, the file descriptor denoted by n is made to be a copy of that file descriptor. If the digits in word do not specify a file descriptor open for input, a redirection error occurs. If word evaluates to '-', file descriptor n is closed. If n is not specified, the standard input (file descriptor 0) is used
[n]>&word
Is used similarly to duplicate output file descriptors. If n is not specified, the standard output (file descriptor 1) is used. If the digits in word do not specify a file descriptor open for output, a redirection error occurs. As a special case, if n is omitted, and word does not expand to one or more digits, the standard output and standard error are redirected as described previously
THE REDIRECTION OPERATOR
[n]<>word
causes the file whose name is the expansion of word to be opened for both reading and writing on file descriptor n, or on file descriptor 0 if n is not specified. If the file does not exist, it is created.
PROCESS SUBSTITUTION
>(commands) & <(commands)
The process list is run with its input or output connected to a FIFO or some file in /dev/fd. The name of this file is passed as an argument to the current command as the result of the expansion. If the >(list) form is used, writing to the file will provide input for list. If the <(list) form is used, the file passed as an argument should be read to obtain the output of list. Note that no space can appear between the < or > and the left parenthesis, otherwise the construct would be interpreted as a redirection.
examples
$(< file) is faster than $(cat file)