LIST
ffmpeg -codecs # list all codecs
ffmpeg -encoders # list all encoders
ffmpeg -decoders # list all decoders
ffmpeg -formats # list all formats
TRIM
ffmpeg -accurate_seek -ss $SECONDS -i $FILE -frames:v 1 image.png # Extract frame to image
ffmpeg -i "${file}" -ss 00:00:30 -t 00:00:05 -codec copy ${fileout} # Extract a part a video from -ss for a duration -t
ffmpeg -i "${file}" -ss 00:00:00 -to 00:56:33 -c copy "${fileout}" # trim outside of -ss & -to
ffmpeg -f concat -safe 0 -i <(echo -e "file \"${file1}\"\nfile \"${file2}\"") -c copy ${fileout}
ffmpeg -i "concat:${file1}|${file2}" -codec copy "${fileout}" # join files
AUDIO & VIDEO
ffmpeg -i "${file}" -c:v libx265 -codec:a libopus -b:a 64k -vbr on -compression_level 10 "${path}/${file%.*}.mp4"
batch to encode audio & video
path="/ext/shared/Videos/ffmpeg" && path2="/tmp/ffmpeg-batch-$(date +%s)" && while read file; do echo "ffmpeg -i \"${file}\" -c:v libx265 -codec:a libopus -b:a 64k -vbr on -compression_level 10 \"${file%.*}-resized.mp4\"" >> "${path2}"; done < <(find "${path}" -name "*.*") && chmod +x "${path2}" && echo "Launch: ${path2}"
batch to encode audio & video with crop & scale
crop="W:H:X:Y"
scale="800x720"
scale="800:-1"
path="/home/nikita/Downloads/.tmp/ffmpeg" && path2="/tmp/ffmpeg-batch-$(date +%s)" && while read file; do echo "ffmpeg -i \"${file}\" -filter:v crop=${crop},scale=${scale} -c:v libx265 -codec:a libopus -b:a 64k -vbr on -compression_level 10 \"${file%.*}-resized.mp4\"" >> "${path2}"; done < <(find "${path}" -name "*.webm") && chmod +x "${path2}" && echo "Launch: ${path2}"
AUDIO
# replace audio in video
ffmpeg -i "$file" -i "${file%.mp4}.opus" -c:v copy -c:a copy -map 0:v:0 -map 1:a:0 "${file%.mp4}-audio.mp4"
# batch to replace audio
path="/home/nikita/Downloads/.tmp/ffmpeg" && path2="/tmp/ffmpeg-batch-$(date +%s)" && while read file; do echo "ffmpeg -i \"${file}\" -i \"${file%.mp4}.opus\" -c:v copy -c:a copy -map 0:v:0 -map 1:a:0 \"${file%.mp4}-audio.mp4\"" >> "${path2}"; done < <(find "${path}" -name "*.mp4") && chmod +x "${path2}" && echo "Launch: ${path2}"
# compress audio
ffmpeg -i "$file" -codec:a libopus -b:a 64k -vbr on -compression_level 10 "${file%.*}.opus"
# batch to compress audio
path="/home/nikita/Downloads/.tmp/ffmpeg" && path2="/tmp/ffmpeg-batch-$(date +%s)" && while read file; do echo "ffmpeg -i \"$file\" -c:v copy -codec:a libopus -b:a 64k -vbr on -compression_level 10 \"${file%.mp4}-audio.mp4\"" >> "${path2}"; done < <(find "${path}" -name "*.mp4") && chmod +x "${path2}" && echo "Launch: ${path2}"
ENCODE
ffmpeg -i "$file" -vn -acodec copy $file_out # extract audio
ffmpeg -i "$file" -filter:v scale=720:-1 -c:a copy "$file_out" # resize video, -1 asks to ffmpeg to keep proportion
ffmpeg -i "$file" -filter:v crop=w:h:x:y -c:a copy "$file_out" # crop to w-width:h-height:x-left:y-top, passthru audio
ffmpeg -i "$file" -filter:v crop=w:h:x:y -c:v libx265 -c:a copy "$file_out" # crop & encode encode with h265, passthru audio
ffmpeg -i "$file" -filter:v "crop=w:h:x:y,scale=w_max:h_max530" -c:v libx265 -c:a copy "$file_out" # crop > scale to max w_max/h_max (-1 keeps proportion), encode h265, passthru audio
OTHERS
ffmpeg -i $file -hide_banner # info
ffmpeg -accurate_seek -ss $SECONDS -i $FILE -frames:v 1 image.bmp # Extract frame to image
FFPROBE
get info from file
ffprobe -i $file
examples
create batch & launch it to crop, scale & encode files from file list
crop="W:H:X:Y"
scale="800x720"
scale="800:-1"
path="/home/nikita/_new/ffmpeg" && path2="/tmp/ffmpeg-batch-$(date +%s)" && while read file; do name="${file##*/}"; path="${file%/*}"; echo "ffmpeg -i \"${file}\" -filter:v "crop=${crop},scale=${scale}" -c:v libx265 -c:a copy \"${path}/${name%.*}-resized.mp4\"" >> "${path2}"; done < <(find "${path}" -name "*.mp4") && chmod +x "${path2}" && echo "Launch: ${path2}"
create batch & launch it to only encode the files found by the search
path="/home/nikita/_new/ffmpeg" && path2="/tmp/ffmpeg-batch-$(date +%s)" && while read file; do name="${file##*/}"; path="${file%/*}"; echo "ffmpeg -i \"${file}\" -c:v libx265 -c:a copy \"${path}/${name%.*}.mp4\"" >> "${path2}"; done < <(find "${path}" -name "*.mkv") && chmod +x "${path2}" && echo "Launch: ${path2}"
AUDACITY
Export /Export Audio/(external program)
ffmpeg -i - -codec:a libopus -b:a 64k -vbr on -compression_level 10 "%f" # output path must not contain spaces
mimikatz
start
log mimikatz.log
lsadump
cd ${path_hive}
log c:\lsadump.log
lsadump::sam /system:SYSTEM /sam:SAM
exit
hivexsh
hivexsh [-options] [hivefile]
Provides a simple shell for navigating Windows Registry 'hive' files
options
-d # Enable lots of debug messages. If you find a Registry file that this program cannot parse, please enable this option and post the complete output and the Registry hive file in your bug report.
-f filename # Read commands from "filename" instead of stdin. To write a hivexsh script, use: #!/usr/bin/hivexsh -f
-u # Use heuristics to tolerate certain levels of corruption within hives. This is unsafe but may allow exporting/merging valid keys/values from an otherwise corrupted hive.
-w # If this option is given, then writes are allowed to the hive (see "commit" command below, and the discussion of modifying hives in "WRITING TO HIVE FILES" in hivex(3)). Important Note: Even if you specify this option, nothing is written to a hive unless you call the "commit" command. If you exit the shell without committing, all changes will be discarded. If this option is not given, then write commands are disabled.
commands
add name # Add a subkey named "name" below the current node. The name may contain spaces and punctuation characters, and does not need to be quoted.
cd path # Change to the subkey "path". Use Windows-style backslashes to separate path elements, and start with a backslash in order to start from the root of the hive. For example: cd \Microsoft\Windows NT\CurrentVersion
close | unload # Close the currently loaded hive. If you modified the hive, all uncommitted writes are lost when you call this command (or if the shell exits). You have to call "commit" to write changes.
commit [newfile] # Commit changes to the hive. If the optional "newfile" parameter is supplied, then the hive is written to that file, else the original file is overwritten.
del # Delete the current node and everything beneath it. The current directory is moved up one level (as if you did "cd ..") after this command.
exit | quit # Exit the shell.
load hivefile # Load the binary hive named "hivefile". The currently loaded hive, if any, is closed. The current directory is changed back to the root node.
ls # List the subkeys of the current hive Registry key. Note this command does not take any arguments.
lsval [key] # List the (key, value) pairs of the current hive Registry key. If no argument is given then all pairs are displayed. If "key" is given, then the value of the named key is displayed. If "@" is given, then the value of the default key is displayed.
setval nrvals # This command replaces all (key, value) pairs at the current node with the values in subsequent input. "nrvals" is the number of values (ie. (key, value) pairs), and any existing values at this node are deleted. So "setval 0" just deletes any values at the current node.
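example
# a minimal read-only session (hive path and key are assumptions)
hivexsh ${path_hive}/SOFTWARE
cd \Microsoft\Windows NT\CurrentVersion
lsval ProductName
exit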
hivexget
hivexget hivefile PATH [NAME]
Get subkey from a Windows Registry binary "hive" file
example
hivexget ${path_hive}/SAM "SAM\Domains\Account\Users\000003E9" V
hivexml
hivexml [-dk] HIVE > FILE
Convert Windows Registry binary "hive" into XML
options
-d # Enable lots of debug messages. If you find a Registry file that this program cannot parse, please enable this option and post the complete output and the Registry file in your bug report.
-k # Keep going even if we find errors in the Registry file. This skips over any parts of the Registry that we cannot read.
-u # Use heuristics to tolerate certain levels of corruption within hives. This is unsafe but may allow exporting/merging valid keys/values from an otherwise corrupted hive.
Install
sudo apt install -y libhivex-bin
https://helpmanual.io/man8/chntpw/
chntpw
chntpw [options] <samfile> [systemfile] [securityfile] [otherreghive] [...]
Utility to overwrite passwords of Windows systems
usage
chntpw -i $hive
options
-u username # Username or username ID (RID) to change. The default is 'Administrator'.
-l # List all users in the SAM database and exit.
-i # Interactive Menu system: list all users (as per -l option) and then ask for the user to change.
-e # Registry editor with limited capabilities (but it does include write support). For a slightly more powerful editor see reged
-d # Use buffer debugger instead (hex editor)
-L # Log all changed filenames to /tmp/changed. When this option is set the program automatically saves the changes in the hive files without prompting the user. Be careful when using the -L option as a root user in a multiuser system. The filename is fixed and this can be used by malicious users (dropping a symlink with the same name) to overwrite system files.
-N # Do not allocate more information, only allow the editing of existing values with same size.
-E # Do not expand the hive file (safe mode).
commands
hive [<n>] # list loaded hives or switch to hive number n
cd <key> # change current key
ls | dir [<key>] # show subkeys & values
cat | type <value> # show key value
dpi <value> # show decoded DigitalProductId value
hex <value> # hexdump of value data
ck [<keyname>] # Show key's class data, if it has any
nk <keyname> # add key
dk <keyname> # delete key (must be empty)
ed <value> # Edit value
nv <type#> <valuename> # Add value
dv <valuename> # Delete value
delallv # Delete all values in current key
rdel <keyname> # Recursively delete key & subkeys
ek <filename> <prefix> <keyname> # export key to <filename> (Windows .reg file format)
debug # enter buffer hexeditor
st [<hexaddr>] # debug function: show struct info
q # quit
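example
# a sketch; hive path is an assumption
chntpw -l ${path_hive}/SAM # list users
chntpw -u Administrator ${path_hive}/SAM # edit the Administrator account (blanking the password is the most reliable choice)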
reged
reged [options] -x <registryhivefile> <prefixstring> <key> <output.reg>
reged [options] -I <registryhivefile> <prefixstring> <input.reg>
reged [options] -e <registryhivefile>
Utility to export/import and edit Windows registry hives
usage
reged -x SYSTEM 'HKEY_LOCAL_MACHINE\SYSTEM' 'ControlSet001\Control\Lsa\Skew1' test.reg
modes
-x <registryhivefile> <prefixstring> <key> <output.reg> # Xport. Where <prefixstring> for example is HKEY_LOCAL_MACHINE\SOFTWARE, <key> is the key to dump (recursively), \ or \\ means all keys in hive. Only one .reg and one hive file supported at the same time
-I <registryhivefile> <prefixstring> <input.reg> # Import from .reg file. Where <prefixstring> for example is HKEY_LOCAL_MACHINE\SOFTWARE. Only one .reg and one hive file supported at the same time
-e <registryhive> ... # Interactively edit one or more registry hive files
options
-L # Log changed filenames to /tmp/changed, also auto-saves
-C # Auto-save (commit) changed hives without asking
-N # No allocate mode, only allow edit of existing values with same size
-E # No expand mode, do not expand hive file (safe mode)
-t # Debug trace of allocated blocks
-v # Some more verbose messages
sampasswd
sampasswd [options] -u user <samfile>
Reset passwords of users in the SAM user database
options
-r # Reset the user's password.
-a # Reset all users. If this option is used there is no need to specify -u.
-u <user> # User to change. The user value can be provided as a username, or a RID number in hexadecimal (if the username is preceded with '0x').
-l # Lists the users in the SAM database.
-H # Human-readable output (by default the program prints a parsable table).
-N # Do not allocate more information, only allow the editing of existing values with same size.
-E # Do not expand the hive file (safe mode).
-t # Print debug information of allocated blocks.
-v # Print verbose information and debug messages.
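example
# a sketch; hive path is an assumption
sampasswd -l ${path_hive}/SAM # list users
sampasswd -r -u Administrator ${path_hive}/SAM # reset the Administrator password
sampasswd -r -a ${path_hive}/SAM # reset every user's password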
Install
sudo apt install -y chntpw
volatility3
install
python3 -m pip install -U pip
python3 -m pip install -U volatility3
cd /usr/local/bin && sudo ln -sv vol vol3; cd
help
volatility [-h] [-c CONFIG] [--parallelism [{processes,threads,off}]] [-e EXTEND] [-p PLUGIN_DIRS] [-s SYMBOL_DIRS] [-v] [-l LOG] [-o OUTPUT_DIR] [-q]
[-r RENDERER] [-f FILE] [--write-config] [--clear-cache] [--cache-path CACHE_PATH] [--offline] [--single-location SINGLE_LOCATION]
[--stackers [STACKERS [STACKERS ...]]] [--single-swap-locations [SINGLE_SWAP_LOCATIONS [SINGLE_SWAP_LOCATIONS ...]]]
plugin ...
An open-source memory forensics framework
-c CONFIG, --config CONFIG # Load the configuration from a json file
--parallelism [{processes,threads,off}] # Enables parallelism (defaults to off if no argument given)
-e EXTEND, --extend EXTEND # Extend the configuration with a new (or changed) setting
-p PLUGIN_DIRS, --plugin-dirs PLUGIN_DIRS # Semi-colon separated list of paths to find plugins
-s SYMBOL_DIRS, --symbol-dirs SYMBOL_DIRS # Semi-colon separated list of paths to find symbols
-v, --verbosity # Increase output verbosity
-l LOG, --log LOG # Log output to a file as well as the console
-o OUTPUT_DIR, --output-dir OUTPUT_DIR # Directory in which to output any generated files
-q, --quiet # Remove progress feedback
-r RENDERER, --renderer RENDERER # Determines how to render the output (quick, csv, pretty, json, jsonl)
-f FILE, --file FILE # Shorthand for --single-location=file:// if single-location is not defined
--write-config # Write configuration JSON file out to config.json
--clear-cache # Clears out all short-term cached items
--cache-path CACHE_PATH # Change the default path (/home/tsurugi/.cache/volatility3) used to store the cache
--offline # Do not search online for additional JSON files
--single-location SINGLE_LOCATION # Specifies a base location on which to stack
--stackers [STACKERS [STACKERS ...]] # List of stackers
--single-swap-locations [SINGLE_SWAP_LOCATIONS [SINGLE_SWAP_LOCATIONS ...]] # Specifies a list of swap layer URIs for use with single-location
windows
windows.bigpools.BigPools # List big page pools
windows.cachedump.Cachedump # Dumps lsa secrets from memory
windows.callbacks.Callbacks # Lists kernel callbacks and notification routines
windows.cmdline.CmdLine # Lists process command line arguments
windows.crashinfo.Crashinfo
windows.dlllist.DllList # Lists the loaded modules in a particular windows memory image
windows.driverirp.DriverIrp # List IRPs for drivers in a particular windows memory image
windows.driverscan.DriverScan # Scans for drivers present in a particular windows memory image
windows.dumpfiles.DumpFiles # Dumps cached file contents from Windows memory samples
windows.envars.Envars # Display process environment variables
windows.filescan.FileScan # Scans for file objects present in a particular windows memory image
windows.getservicesids.GetServiceSIDs # Lists process token sids
windows.getsids.GetSIDs # Print the SIDs owning each process
windows.handles.Handles # Lists process open handles
windows.hashdump.Hashdump # Dumps user hashes from memory
windows.info.Info # Show OS & kernel details of the memory sample being analyzed
windows.lsadump.Lsadump # Dumps lsa secrets from memory
windows.malfind.Malfind # Lists process memory ranges that potentially contain injected code
windows.memmap.Memmap # Prints the memory map
windows.modscan.ModScan # Scans for modules present in a particular windows memory image.
windows.modules.Modules # Lists the loaded kernel modules
windows.mutantscan.MutantScan # Scans for mutexes present in a particular windows memory image
windows.netscan.NetScan # Scans for network objects present in a particular windows memory image
windows.netstat.NetStat # Traverses network tracking structures present in a particular windows memory image.
windows.poolscanner.PoolScanner # A generic pool scanner plugin
windows.privileges.Privs # Lists process token privileges
windows.pslist.PsList # Lists the processes present in a particular windows memory image
windows.psscan.PsScan # Scans for processes present in a particular windows memory image
windows.pstree.PsTree # Plugin for listing processes in a tree based on their parent process ID
windows.registry.certificates.Certificates # Lists the certificates in the registry's Certificate Store
windows.registry.hivelist.HiveList # Lists the registry hives present in a particular memory image
windows.registry.hivescan.HiveScan # Scans for registry hives present in a particular windows memory image.
windows.registry.printkey.PrintKey # Lists the registry keys under a hive or specific key value
windows.registry.userassist.UserAssist # Print userassist registry keys and information
windows.skeleton_key_check.Skeleton_Key_Check # Looks for signs of Skeleton Key malware
windows.ssdt.SSDT # Lists the system call table
windows.statistics.Statistics
windows.strings.Strings # Reads output from the strings command and indicates which process(es) each string belongs to
windows.svcscan.SvcScan # Scans for windows services
windows.symlinkscan.SymlinkScan # Scans for links present in a particular windows memory image
windows.vadinfo.VadInfo # Lists process memory ranges
windows.vadyarascan.VadYaraScan # Scans all the Virtual Address Descriptor memory maps using yara
windows.verinfo.VerInfo # Lists version information from PE files
windows.virtmap.VirtMap # Lists virtual mapped sections
linux
linux.bash.Bash # Recovers bash command history from memory
linux.check_afinfo.Check_afinfo # Verifies the operation function pointers of network protocols
linux.check_creds.Check_creds # Checks if any processes are sharing credential structures
linux.check_idt.Check_idt # Checks if the IDT has been altered
linux.check_modules.Check_modules # Compares module list to sysfs info, if available
linux.check_syscall.Check_syscall # Check system call table for hooks
linux.elfs.Elfs # Lists all memory mapped ELF files for all processes
linux.keyboard_notifiers.Keyboard_notifiers # Parses the keyboard notifier call chain
linux.kmsg.Kmsg # Kernel log buffer reader
linux.lsmod.Lsmod # Lists loaded kernel modules
linux.lsof.Lsof # Lists open files for all processes
linux.malfind.Malfind # Lists process memory ranges that potentially contain injected code
linux.proc.Maps # Lists all memory maps for all processes
linux.pslist.PsList # Lists the processes present in a particular linux memory image
linux.pstree.PsTree # Plugin for listing processes in a tree based on their parent process ID
linux.tty_check.tty_check # Checks tty devices for hooks
mac
mac.bash.Bash # Recovers bash command history from memory
mac.check_syscall.Check_syscall # Check system call table for hooks
mac.check_sysctl.Check_sysctl # Check sysctl handlers for hooks
mac.check_trap_table.Check_trap_table # Check mach trap table for hooks
mac.ifconfig.Ifconfig # Lists network interface information for all devices
mac.kauth_listeners.Kauth_listeners # Lists kauth listeners and their status
mac.kauth_scopes.Kauth_scopes # Lists kauth scopes and their status
mac.kevents.Kevents # Lists event handlers registered by processes
mac.list_files.List_Files # Lists all open file descriptors for all processes
mac.lsmod.Lsmod # Lists loaded kernel modules
mac.lsof.Lsof # Lists all open file descriptors for all processes
mac.malfind.Malfind # Lists process memory ranges that potentially contain injected code
mac.mount.Mount # A module containing a collection of plugins that produce data typically found in Mac's mount command
mac.netstat.Netstat # Lists all network connections for all processes
mac.proc_maps.Maps # Lists process memory ranges
mac.psaux.Psaux # Recovers program command line arguments
mac.pslist.PsList # Lists the processes present in a particular mac memory image
mac.pstree.PsTree # Plugin for listing processes in a tree based on their parent process ID
mac.socket_filters.Socket_filters # Enumerates kernel socket filters
mac.timers.Timers # Check for malicious kernel timers
mac.trustedbsd.Trustedbsd # Checks for malicious trustedbsd modules
mac.vfsevents.VFSevents # Lists processes that are filtering file system events
others
banners.Banners # Attempts to identify potential linux banners in an image
configwriter.ConfigWriter # Runs the automagics and both prints and outputs configuration in the output directory
frameworkinfo.FrameworkInfo # Plugin to list the various modular components of Volatility
isfinfo.IsfInfo # Determines information about the currently available ISF files, or a specific one
layerwriter.LayerWriter # Runs the automagics and writes out the primary layer produced by the stacker
timeliner.Timeliner # Runs all relevant plugins that provide time related information and orders the results by time
yarascan.YaraScan # Scans kernel memory using yara rules (string or file)
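examples
# typical invocations (memory image name is an assumption)
vol3 -f mem.dmp windows.info # identify OS & kernel of the sample
vol3 -f mem.dmp windows.pstree # process tree
vol3 -f mem.dmp -r csv windows.pslist > pslist.csv # render a plugin's output as CSV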
windows notifications
file=/vol6/Users/Angela/AppData/Local/Microsoft/Windows/Notifications/wpndatabase.db
sqlitebrowser ${file}
SELECT datetime((ArrivalTime/10000000)-11644473600, 'unixepoch') AS ArrivalTime,
datetime((ExpiryTime/10000000)-11644473600, 'unixepoch') AS ExpiryTime,
Type, HandlerId, Notification.Id, Payload, Tag, "Group", "Order", PrimaryId, HandlerType, WNFEventName, CreatedTime as HandlerCreatedTime, ModifiedTime as HandlerModifiedTime
FROM Notification LEFT JOIN NotificationHandler ON Notification.HandlerId = NotificationHandler.RecordId
regripper
regripper [-r Reg hive file] [-f profile] [-p plugin] [options]
Parse Windows Registry files, using either a single module, or a profile
Special
regripper -l -c|sort|column -t -s, # show plugin list as a table sorted by plugin name
regripper -l -c|sort -t, -k3 -k1|column -t -s, # show plugin list as a table sorted by hive then plugin
regripper -p winver -r SOFTWARE # get version of windows
regripper -p timezone -r SYSTEM # get timezone information from the SYSTEM hive
regripper -a -r SYSTEM # run all SYSTEM-specific plugins (full analysis)
Useful
-a # Automatically run hive-specific plugins
-l # list all plugins
-f [profile] # use the profile
-p [plugin] # use the plugin
All
-r [hive] # Registry hive file to parse
-d # Check to see if the hive is dirty
-g # Guess the hive file type
-a # Automatically run hive-specific plugins
-aT # Automatically run hive-specific TLN plugins
-f [profile] # use the profile
-p [plugin] # use the plugin
-l # list all plugins
-c # Output plugin list in CSV format (use with -l)
-s systemname # system name (TLN support)
-u username # User name (TLN support)
-uP # Update default profiles
Plugins
Plugin Version Hive Description
adobe 20200522 NTUSER.DAT Gets user's Adobe app cRecentFiles values
allowedenum 20200511 NTUSER.DAT Software Extracts AllowedEnumeration values to determine hidden special folders
amcache 20200515 amcache Parse AmCache.hve file
amcache_tln 20180311 amcache Parse AmCache.hve file
appassoc 20200515 NTUSER.DAT Gets contents of user's ApplicationAssociationToasts key
appcertdlls 20200427 System Get entries from AppCertDlls key
appcompatcache 20200428 System Parse files from System hive AppCompatCache
appcompatcache_tln 20190112 System Parse files from System hive AppCompatCache
appcompatflags 20200525 NTUSER.DAT Software Extracts AppCompatFlags for Windows.
appinitdlls 20200427 Software Gets contents of AppInit_DLLs value
appkeys 20200517 NTUSER.DAT Software Extracts AppKeys entries.
appkeys_tln 20180920 NTUSER.DAT Software Extracts AppKeys entries.
applets 20200525 NTUSER.DAT Gets contents of user's Applets key
applets_tln 20120613 NTUSER.DAT Gets contents of user's Applets key (TLN)
apppaths 20200511 NTUSER.DAT Software Gets content of App Paths subkeys
apppaths_tln 20130429 NTUSER.DAT Software Gets content of App Paths subkeys (TLN)
appspecific 20200515 NTUSER.DAT Gets contents of user's Intellipoint\AppSpecific subkeys
appx 20200427 NTUSER.DAT USRCLASS.DAT Checks for persistence via Universal Windows Platform Apps
appx_tln 20191014 NTUSER.DAT USRCLASS.DAT Checks for persistence via Universal Windows Platform Apps
arpcache 20200515 NTUSER.DAT Retrieves CurrentVersion\App Management\ARPCache entries
at 20200525 Software Checks Software hive for AT jobs
attachmgr 20200525 NTUSER.DAT Checks user's keys that manage the Attachment Manager functionality
attachmgr_tln 20130425 NTUSER.DAT Checks user's keys that manage the Attachment Manager functionality (TLN)
at_tln 20140821 Software Checks Software hive for AT jobs
audiodev 20200525 Software Gets audio capture/render devices
auditpol 20200515 Security Get audit policy from the Security hive file
backuprestore 20200517 System Gets the contents of the FilesNotToSnapshot, KeysNotToRestore, and FilesNotToBackup keys
bam 20200427 System Parse files from System hive BAM Services
bam_tln 20180225 System Parse files from System hive BAM Services
base 20200427 All Parse base info from hive
baseline 20130211 All Scans a hive file, checking sizes of binary value data
btconfig 20200526 Software Determines BlueTooth devices 'seen' by BroadComm drivers
bthenum 20200515 System Get BTHENUM subkey info
bthport 20200517 System Gets Bluetooth-connected devices from System hive
bthport_tln 20180705 System Gets Bluetooth-connected devices from System hive; TLN output
cached 20200525 NTUSER.DAT Gets cached Shell Extensions from NTUSER.DAT hive
cached_tln 20150608 NTUSER.DAT Gets cached Shell Extensions from NTUSER.DAT hive (TLN)
calibrator 20200427 Software Checks DisplayCalibrator value (possible bypass assoc with LockBit ransomware)
clsid 20200526 Software USRCLASS.DAT Get list of CLSID/registered classes
clsid_tln 20200526 Software USRCLASS.DAT Get list of CLSID/registered classes
cmdproc 20200515 NTUSER.DAT Autostart - get Command Processor\AutoRun value from NTUSER.DAT hive
cmdproc_tln 20130425 NTUSER.DAT Autostart - get Command Processor\AutoRun value from NTUSER.DAT hive (TLN)
cmd_shell 20200515 Software Gets shell open cmds for various file types
codepage 20200519 system Checks codepage value
comdlg32 20200517 NTUSER.DAT Gets contents of user's ComDlg32 key
compdesc 20200511 NTUSER.DAT Gets contents of user's ComputerDescriptions key
compname 20090727 System Gets ComputerName and Hostname values from System hive
cred 20200427 system Checks for UseLogonCredential value
cred_tln 20200402 system Checks UseLogonCredential value
dafupnp 20200525 System Parses data from networked media streaming devices
dcom 20200525 Software Check DCOM Ports
ddo 20140414 NTUSER.DAT Gets user's DeviceDisplayObjects key contents
defender 20200427 Software Get Windows Defender settings
del 20200515 All Parse hive, print deleted keys/values
del_tln 20190506 All Parse hive, print deleted keys/values
devclass 20200525 System Get USB device info from the DeviceClasses keys in the System hive
direct 20200515 Software Searches Direct* keys for MostRecentApplication subkeys
direct_tln 20190911 Software Searches Direct* keys for MostRecentApplication subkeys (TLN)
disablelastaccess 20200517 System Get NTFSDisableLastAccessUpdate value
disablemru 20190924 NTUSER.DAT Software Checks settings disabling user's MRUs
disableremotescm 20200513 System Gets DisableRemoteScmEndpoints value from System hive
disablesr 20200515 Software Gets the value that turns System Restore either on or off
drivers32 20200525 Software Get values from the Drivers32 key
emdmgmt 20200511 Software Gets contents of EMDMgmt subkeys and values
environment 20200512 System NTUSER.DAT Get environment vars from NTUSER.DAT & System hives
execpolicy 20200517 Software Gets PowerShell Execution Policy
featureusage 20200511 NTUSER.DAT Extracts user's FeatureUsage data.
fileless 20200525 All Scans a hive file looking for fileless malware entries
findexes 20200525 All Scans a hive file looking for binary value data that contains MZ
gpohist 20200525 Software NTUSER.DAT Collects system/user GPO history
gpohist_tln 20150529 Software NTUSER.DAT Collects system/user GPO history (TLN)
heap 20200427 Software Checks HeapLeakDetection\DiagnosedApplications Subkeys
heidisql 20201227 NTUSER.DAT Gets user's heidisql data
ica_sessions 20200528 Software ARETE ONLY - Extracts Citrix ICA Session info
identities 20200525 NTUSER.DAT Extracts values from Identities key; NTUSER.DAT
imagedev 20140104 System --
imagefile 20200515 Software Checks ImageFileExecutionOptions subkeys values
injectdll64 20200427 NTUSER.DAT Software Retrieve values set to weaken Chrome security
inprocserver 20200427 Software Checks CLSID InProcServer32 values for indications of malware
installer 20200517 Software Determines product install information
ips 20200518 System Get IP Addresses and domains (DHCP, static)
jumplistdata 20200517 NTUSER.DAT Gets contents of user's JumpListData key
killsuit 20200427 Software Check for indications of Danderspritz Killsuit installation
killsuit_tln 20200414 Software Check for indications of Danderspritz Killsuit installation
knowndev 20200515 NTUSER.DAT Gets user's KnownDevices key contents
landesk 20200517 Software Get list of programs monitored by LANDESK - Software hive
landesk_tln 20130214 Software Get list of programs monitored by LANDESK from Software hive
lastloggedon 20200517 Software Gets LastLoggedOn* values from LogonUI key
licenses 20200526 Software Get contents of HKLM/Software/Licenses key
listsoft 20200517 NTUSER.DAT Lists contents of user's Software key
load 20200517 NTUSER.DAT Gets load and run values from user hive
logonstats 20200517 NTUSER.DAT Gets contents of user's LogonStats key
lsa 20200517 System Lists specific contents of LSA key
lxss 20200511 NTUSER.DAT Gets WSL config.
lxss_tln 20140723 NTUSER.DAT Gets WSL config.
macaddr 20200515 System Software --
mixer 20200517 NTUSER.DAT Checks user's audio mixer settings
mixer_tln 20141112 NTUSER.DAT Checks user's audio mixer info
mmc 20200517 NTUSER.DAT Get contents of user's MMC\Recent File List key
mmc_tln 20120828 NTUSER.DAT Get contents of user's MMC\Recent File List key (TLN)
mmo 20200517 NTUSER.DAT Checks NTUSER for Multimedia\Other values [malware]
mndmru 20200517 NTUSER.DAT Get contents of user's Map Network Drive MRU
mndmru_tln 20120829 NTUSER.DAT Get user's Map Network Drive MRU (TLN)
mountdev 20200517 System Return contents of System hive MountedDevices key
mountdev2 20200517 System Return contents of System hive MountedDevices key
mp2 20200526 NTUSER.DAT Gets user's MountPoints2 key contents
mp2_tln 20200525 NTUSER.DAT Gets user's MountPoints2 key contents
mpmru 20200517 NTUSER.DAT Gets user's Media Player RecentFileList values
msis 20200517 Software Determine MSI packages installed on the system
msoffice 20200518 NTUSER.DAT Get user's MSOffice content
msoffice_tln 20200518 NTUSER.DAT Get user's MSOffice content
muicache 20200525 NTUSER.DAT USRCLASS.DAT Gets EXEs from user's MUICache key
muicache_tln 20130425 NTUSER.DAT USRCLASS.DAT Gets EXEs from user's MUICache key (TLN)
nation 20200517 ntuser.dat Gets region information from HKCU
netlogon 20200515 System Parse values for machine account password changes
netsh 20200515 Software Gets list of NetSH helper DLLs
networkcards 20200518 Software Get NetworkCards Info
networklist 20200518 Software Collects network info from NetworkList key
networklist_tln 20150812 Software Collects network info from NetworkList key (TLN)
networksetup2 20191004 System Get NetworkSetup2 subkey info
nic2 20200525 System Gets NIC info from System hive
ntds 20200427 System Parse Services NTDS key for specific persistence values
null 20160119 All Check key/value names in a hive for leading null char
oisc 20091125 NTUSER.DAT Gets contents of user's Office Internet Server Cache
onedrive 20200515 NTUSER.DAT Gets contents of user's OneDrive key
onedrive_tln 20190823 NTUSER.DAT Gets contents of user's OneDrive key
osversion 20200511 NTUSER.DAT Checks for OSVersion value
osversion_tln 20120608 NTUSER.DAT Checks for OSVersion value (TLN)
outlook_homepage 20201002 NTUSER.DAT Software Retrieve values set to attack Outlook WebView Homepage
pagefile 20140505 System Get info on pagefile(s)
pending 20130711 System Gets contents of PendingFileRenameOperations value
pendinggpos 20200427 NTUSER.DAT Gets contents of user's PendingGPOs key
photos 20200525 USRCLASS.DAT Shell/BagMRU traversal in Win7 USRCLASS.DAT hives
portdev 20090118 Software Parses Windows Portable Devices key contents
powershellcore 20200525 Software Extracts PowerShellCore settings
prefetch 20200515 System Gets the Prefetch Parameters
printdemon 20200514 Software Gets value assoc with printer ports and descriptions
printmon 20200427 System Lists installed Print Monitors
printmon_tln 20191122 System Lists installed Print Monitors
processor_architecture 20140505 System Get the processor architecture from the System's environment key
profilelist 20200518 Software Get content of ProfileList key
profiler 20200525 NTUSER.DAT System Environment profiler information
pslogging 20200515 NTUSER.DAT Software Extracts PowerShell logging settings
psscript 20200525 Software NTUSER.DAT Get PSScript.ini values
putty 20200515 NTUSER.DAT Extracts the saved SshHostKeys for PuTTY.
rdpport 20200526 System Queries System hive for RDP Port
recentapps 20200515 NTUSER.DAT Gets contents of user's RecentApps key
recentapps_tln 20190513 NTUSER.DAT Gets contents of user's RecentApps key
recentdocs 20200427 NTUSER.DAT Gets contents of user's RecentDocs key
recentdocs_tln 20140220 NTUSER.DAT Gets contents of user's RecentDocs key (TLN)
remoteaccess 20200517 System Get RemoteAccess AccountLockout settings
rlo 20200517 All Parse hive, check key/value names for RLO character
routes 20200526 System Get persistent routes from the Registry
run 20200511 Software NTUSER.DAT [Autostart] Get autostart key contents from Software hive
runmru 20200525 NTUSER.DAT Gets contents of user's RunMRU key
runmru_tln 20120828 NTUSER.DAT Gets contents of user's RunMRU key (TLN)
runonceex 20200427 Software Gets contents of RunOnceEx values
runvirtual 20200427 NTUSER.DAT Software Gets RunVirtual entries
runvirtual_tln 20191211 NTUSER.DAT Software Gets RunVirtual entries
ryuk_gpo 20200427 Software Get GPO policy settings from Software hive related to Ryuk
samparse 20200825 SAM Parse SAM file for user & group mbrshp info
samparse_tln 20200826 SAM Parse SAM file for user acct info (TLN)
ScanButton 20131210 System Get Scan Button information
schedagent 20200518 Software Get SchedulingAgent key contents
scriptleturl 20200525 Software USRCLASS.DAT Check CLSIDs for ScriptletURL subkeys
searchscopes 20200517 NTUSER.DAT Gets contents of user's SearchScopes key
secctr 20200517 Software Get data from Security Center key
secrets 20200517 Security Get the last write time for the Policy\Secrets key
secrets_tln 20140814 Security Get the last write time for the Policy\Secrets key
securityproviders 20200526 System Gets SecurityProvider value from System hive
services 20191024 System Lists services/drivers in Services key by LastWrite times
sevenzip 20210329 NTUSER.DAT Gets records of histories from 7-Zip keys
sfc 20200517 Software Get SFC values
shares 20200525 System Get list of shares from System hive file
shc 20200427 NTUSER.DAT Gets SHC entries from user hive
shellbags 20200428 USRCLASS.DAT Shell/BagMRU traversal in Win7+ USRCLASS.DAT hives
shellbags_tln 20180702 USRCLASS.DAT Shell/BagMRU traversal in Win7 USRCLASS.DAT hives
shellfolders 20200515 NTUSER.DAT Gets user's shell folders values
shelloverlay 20100308 Software Gets ShellIconOverlayIdentifiers values
shimcache 20200428 System Parse file refs from System hive AppCompatCache data
shimcache_tln 20190112 System Parse file refs from System hive AppCompatCache data
shutdown 20200518 System Gets ShutdownTime value from System hive
sizes 20200517 All Scans a hive file looking for binary value data of a min size (5000)
slack 20200517 All Parse hive, print slack space, retrieve keys/values
slack_tln 20190506 All Parse hive, print slack space, retrieve keys/values
source_os 20200511 System Parse Source OS subkey values
speech 20200427 NTUSER.DAT Get values from user's Speech key
speech_tln 20191010 NTUSER.DAT Get values from user's Speech key
spp_clients 20130429 Software Determines volumes monitored by VSS
srum 20200518 Software Gets contents of SRUM subkeys
ssid 20200515 Software Get WZCSVC SSID Info
susclient 20200518 Software Extracts SusClient* info, including HDD SN (if avail)
svc 20200525 System Lists Services key contents by LastWrite time (CSV)
svcdll 20200525 System Lists Services keys with ServiceDll values
svc_tln 20130911 System Lists Services key contents by LastWrite time (CSV)
syscache 20200515 syscache Parse SysCache.hve file
syscache_csv 20200515 syscache
syscache_tln 20190516 syscache
sysinternals 20080324 NTUSER.DAT Checks for SysInternals apps keys
sysinternals_tln 20080324 NTUSER.DAT Checks for SysInternals apps keys (TLN)
systemindex 20200518 Software Gets systemindex\..\Paths info from Windows Search key
taskcache 20200427 Software Checks TaskCache\Tree root keys (not subkeys)
taskcache_tln 20200416 Software Checks TaskCache\Tree root keys (not subkeys)
tasks 20200427 Software Checks TaskCache\Tasks subkeys
tasks_tln 20200416 Software Checks TaskCache\Tasks subkeys
termcert 20200526 System Gets Terminal Server certificate
termserv 20200506 System Software Gets Terminal Server settings from System and Software hives
thispcpolicy 20200511 Software Gets ThisPCPolicy values
timezone 20200518 System Get TimeZoneInformation key contents
tracing 20200511 Software Gets list of apps that can be traced
tracing_tln 20120608 Software Gets list of apps that can be traced (TLN)
tsclient 20200518 NTUSER.DAT Displays contents of user's Terminal Server Client\Default key
tsclient_tln 20120827 NTUSER.DAT Displays contents of user's Terminal Server Client key (TLN)
typedpaths 20200526 NTUSER.DAT Gets contents of user's typedpaths key
typedpaths_tln 20120828 NTUSER.DAT Gets contents of user's typedpaths key (TLN)
typedurls 20200526 NTUSER.DAT Returns contents of user's TypedURLs key.
typedurlstime 20200526 NTUSER.DAT Returns contents of user's TypedURLsTime key.
typedurlstime_tln 20120613 NTUSER.DAT Returns contents of Win8 user's TypedURLsTime key (TLN).
typedurls_tln 20120827 NTUSER.DAT Returns MRU for user's TypedURLs key (TLN)
uac 20200427 Software Get Select User Account Control (UAC) Values from HKLM\SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System
uacbypass 20200511 USRCLASS.DAT Software Get possible UAC bypass settings
uninstall 20200525 Software NTUSER.DAT Gets contents of Uninstall keys from Software, NTUSER.DAT hives
uninstall_tln 20120523 Software NTUSER.DAT Gets contents of Uninstall keys from Software, NTUSER.DAT hives(TLN format)
usb 20200515 System Get USB key info
usbdevices 20200525 System Parses Enum\USB key for USB & WPD devices
usbstor 20200515 System Get USBStor key info
userassist 20170204 NTUSER.DAT Displays contents of UserAssist subkeys
userassist_tln 20180710 NTUSER.DAT Displays contents of UserAssist subkeys in TLN format
volinfocache 20200518 Software Gets VolumeInfoCache from Windows Search key
wab 20200427 Software Get WAB DLLPath settings
wab_tln 20191122 Software Get WAB DLLPath settings
watp 20200427 Software Gets contents of Windows Advanced Threat Protection key
wbem 20200511 Software Get some contents from WBEM key
wc_shares 20200515 NTUSER.DAT Gets contents of user's WorkgroupCrawler/Shares subkeys
winlogon_tln 20130429 Software Alerts on values from the WinLogon key (TLN)
winrar 20200526 NTUSER.DAT Get WinRAR\ArcHistory entries
winrar_tln 20120829 NTUSER.DAT Get WinRAR\ArcHistory entries (TLN)
winscp 20201227 NTUSER.DAT Gets user's WinSCP 2 data
winver 20200525 Software Get Windows version & build info
winzip 20200526 NTUSER.DAT Get WinZip extract and filemenu values
wordwheelquery 20200823 NTUSER.DAT Gets contents of user's WordWheelQuery key
wordwheelquery_tln 20200824 NTUSER.DAT Gets contents of user's WordWheelQuery key
wow64 20200515 Software Gets contents of WOW64\x86 key
wpdbusenum 20200515 System Get WpdBusEnum subkey info
wsh_settings 20200517 Software Gets WSH Settings
Install
see foralyse
reglookup
reglookup [-v] [-s] [-p <PATH_FILTER>] [-t <TYPE_FILTER>] <REGISTRY_FILE>
Print windows registry elements to stdout in a CSV-like format
Special
for hive in SAM SECURITY SOFTWARE SYSTEM $(find /vol6/ -iname ntuser.dat); do echo $hive; reglookup -i $hive > /share/examen/disk/hive/reglookup_${hive//\//_}; done # run from the hive directory (e.g. /vol6/Windows/System32/config)
Useful
-p # restrict output to elements below this path.
-H # disables header row.
-s # enables security descriptor output.
All
-v # sets verbose mode.
-h # enables header row. (default)
-H # disables header row.
-s # enables security descriptor output.
-S # disables security descriptor output. (default)
-p # restrict output to elements below this path.
-t # restrict results to this specific data type.
-i # includes parent key modification times with child values.
reglookup-timeline
reglookup-timeline [-H] [-V] <REGISTRY_FILE> [<REGISTRY_FILE> ...]
Builds timelines for forensic investigations, a wrapper for reglookup
Special
cd /vol6/Windows/System32/config && hives="SAM SECURITY SOFTWARE SYSTEM $(find /vol6/ -iname ntuser.dat)" && reglookup-timeline -v $hives > /share/examen/disk/hive/reglookup-tl # complete timeline
sed -n '/^2021-09-09 18:1/,$p' reglookup-tl > reglookup-tl-select # select part of timeline
All
-H # Omit header line
-V # Include values with parent timestamps
reglookup-recover
reglookup-recover [options] <REGISTRY_FILE>
Attempts to scour a Windows registry hive for deleted data structures and outputs those found in a CSV-like format
All
-v # sets verbose mode.
-h # enables header row. (default)
-H # disables header row.
-l # enables leftover(raw) cell output.
-L # disables leftover(raw) cell output. (default)
-r # enables raw cell output for parsed cells.
-R # disables raw cell output for parsed cells. (default)
Install
sudo apt install reglookup
theme
windows version
regripper -p winver -r $path_hive/SOFTWARE
reglookup -p "Microsoft/Windows NT/CurrentVersion" $path_hive/SOFTWARE | column -t -s,
user password
path_hive=/vol6/Windows/System32/config
path2=/cases/examen/artefacts
# get user id
reglookup -p SAM/Domains/Account/Users ${path_hive}/SAM | grep -i angela # select 0x.....
# data
uid=000003E9
hivexget ${path_hive}/SAM "SAM\Domains\Account\Users\000003E9" V | hexdump -ve '8/1 "%02X"' > ${path2}/sam-user-v.hexdump
hivexget ${path_hive}/SAM "SAM\Domains\Account" F | hexdump -ve '8/1 "%02X"' > ${path2}/sam-f.hexdump
hivexget ${path_hive}/SYSTEM "ControlSet001\Control\Lsa\JD" lookup | hexdump -ve '8/1 "%02X"' > ${path2}/system-jd.hexdump
hivexget ${path_hive}/SYSTEM "ControlSet001\Control\Lsa\Skew1" SkewMatrix | hexdump -ve '8/1 "%02X"' > ${path2}/system-skew.hexdump
hivexget ${path_hive}/SYSTEM "ControlSet001\Control\Lsa\GBG" GrafBlumGroup | hexdump -ve '8/1 "%02X"' > ${path2}/system-gbg.hexdump
hivexget ${path_hive}/SYSTEM "ControlSet001\Control\Lsa\Data" Pattern | hexdump -ve '8/1 "%02X"' > ${path2}/system-data.hexdump
for file in ${path2}/*; do echo $file; cat $file; echo; done
regripper
reglookup
reglookup-timeline
pathhive=$device/Windows/System32/config
pathreport=/share/examen/disk
cd $pathhive
reglookup-timeline SAM SECURITY SOFTWARE SYSTEM > $pathreport/reglookup-timeline
windows
security center
disable
[HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\SecurityHealthService]"Start"=dword:00000004
[HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\wscsvc]"Start"=dword:00000004
enable
[HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\SecurityHealthService]"Start"=dword:00000002
[HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\wscsvc]"Start"=dword:00000002
cmp [OPTION]... FILE1 [FILE2 [SKIP1 [SKIP2]]]
Compare two files byte by byte
Special
cmp $file1 $file2 # compare 2 binary files
cmp -l $file1 $file2 | wc -l # get number of differences
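cmp -l $file1 $file2 | head # show the first differing bytes: offset (decimal, starting at 1) and both byte values in octal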
Useful
-b, --print-bytes # print differing bytes
-n, --bytes=LIMIT # compare at most LIMIT bytes
All
-b, --print-bytes # print differing bytes
-i, --ignore-initial=SKIP # skip first SKIP bytes of both inputs
-i, --ignore-initial=SKIP1:SKIP2 # skip first SKIP1 bytes of FILE1 and first SKIP2 bytes of FILE2
-l, --verbose # output byte numbers and differing byte values
-n, --bytes=LIMIT # compare at most LIMIT bytes
-s, --quiet, --silent # suppress all normal output
mount
info
file ${file} # show information
fdisk -x ${file} # show information
qemu-img info ${file} # show information on a virtual disk
guestfish --rw -a $file
run
list-filesystems
sudo modprobe nbd
sudo qemu-nbd -c /dev/nbd0 ${file} -f qcow2
sudo fdisk /dev/nbd0 -l
sudo qemu-nbd -d /dev/nbd0
parted ${file}
print
losetup -a # show mounted devices in /dev/loopX
resize
qemu-img resize -f raw ${file} 20972568064 # resize disk to 20972568064 bytes (correct disk size)
parted ${file}
select # select disk (interactive menu)
resizepart # resize partition (interactive menu)
mount/umount
guestmount --add ${file} --mount /dev/sda1 /vms/data
guestunmount /vms/data
sudo modprobe nbd
sudo qemu-nbd -c /dev/nbd0 ${file} -f qcow2
sudo fdisk /dev/nbd0 -l
sudo mount /dev/nbd0p1 /vms/data
sudo umount /vms/data
sudo qemu-nbd -d /dev/nbd0
sudo mount -o ro,loop,offset=$((1126400*512)) ${file} /mnt # mount disk partition with the partition offset
sudo umount /mnt # umount disk
sudo losetup --find --show ${file} # mount disk in /dev/loopX and show /dev/loopX
sudo losetup --find --show --offset ${offset} ${file} # mount partition/disk with offset in /dev/loopX and show /dev/loopX
sudo losetup -d /dev/loopX # umount disk
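# where the offset above comes from (a sketch): read the partition's start sector and multiply by the sector size
sudo fdisk -l ${file} # note the partition's Start sector and the sector size (usually 512)
offset=$((START_SECTOR*512)) # START_SECTOR is the value read from the fdisk output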
info
https://opensource.com/article/18/3/creating-bash-completion-script
COMPREPLY
an array variable used to store the completions. The completion mechanism uses this variable to display its contents as completions
COMPREPLY=( $(compgen -W "now tomorrow never" -- ${COMP_WORDS[COMP_CWORD]}) ) # propose given words at each let choose the first completion from given words and repeat it after (replace)
COMPREPLY=( $(compgen -W "now tomorrow never" "${COMP_WORDS[1]}") ) # let choose the first completion from given words and repeat it after (replace)
complete
the complete command registers a completion specification for a command
complete -A directory $cmd # provide completion for directory
complete -d $cmd # provide completion for directory
complete -D # define the default completion, applied to commands that have no specific spec (no command name needed)
complete -f $cmd # provide completion for file
complete -W "$words" $cmd # Wordlist, provide the list of words for completion to command $cmd
complete -F _foo $cmd # use function _foo to generate completions for command $cmd
compopt # builtin that modifies completion options for the completion in progress
https://helpmanual.io/man1/bash/
variables
COMP_WORDS # an array of the individual words on the current command line (index 0 is the command itself)
COMP_CWORD # an index into the COMP_WORDS array pointing to the word the cursor is on
COMP_LINE # the current command line
tricks
exec bash # reload completions
examples
qemu-img
#!/usr/bin/env bash
_qemuimg_comp()
{
COMPREPLY=()
local cur=${COMP_WORDS[COMP_CWORD]}
local prev="${COMP_WORDS[COMP_CWORD-1]}"
local opts='amend bench bitmap check commit compare convert create dd info map measure snapshot rebase resize'
local formats='blkdebug blklogwrites blkverify bochs cloop compress copy-before-write copy-on-read dmg file ftp ftps gluster host_cdrom host_device http https iscsi iser luks nbd nfs null-aio null
-co nvme parallels preallocate qcow qcow2 qed quorum raw rbd replication snapshot-access ssh throttle vdi vhdx vmdk vpc vvfat'
#echo "COMP_LINE=$COMP_LINE" >> /tmp/qemu
#echo "COMP_WORDS=$COMP_WORDS[@] | COMP_CWORD=$COMP_CWORD" >> /tmp/qemu
#echo "cur=$cur | prev=$prev" >> /tmp/qemu
if [ ${COMP_CWORD} -eq 1 ]; then
COMPREPLY=( $(compgen -W "${opts}" -- "${cur}" ) )
return 0
elif [[ $prev =~ -[oOf] ]]; then
COMPREPLY=( $(compgen -W "${formats}" -- "${cur}" ) )
else
COMPREPLY=( $(compgen -f -- "${cur}") )
if [ -d "${COMPREPLY}" ]; then
compopt -o nospace
COMPREPLY=${COMPREPLY}/
fi
fi
} &&
complete -F _qemuimg_comp qemu-img
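# to activate: source this file in the current shell, or drop it into /etc/bash_completion.d/ (conventional location; may vary by distro)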
haconf
#!/usr/bin/env bash
#
# Bash completion function for the 'haconf' command.
_haconf()
{
local cur prev path_enabled path_available opts
path_enabled="/etc/haproxy/conf-enabled"
path_available="/etc/haproxy/conf-available"
__disabled() {
local confs conf notused
confs="$(ls "${path_available}")"
for conf in ${confs}; do
! [ -h "${path_enabled}/${conf}" ] && notused="${notused} ${conf}"
done
echo ${notused}
}
__enabled() {
ls ${path_enabled}
}
COMPREPLY=()
cur=${COMP_WORDS[COMP_CWORD]}
prev=${COMP_WORDS[COMP_CWORD-1]}
# primary commands
opts='check clear enable disable list reload'
# level 1 for commands
if [ $COMP_CWORD -eq 1 ]; then
COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") )
return 0
# level 2 for arguments
else
case $prev in
enable)
COMPREPLY=( $(compgen -W "$(__disabled)" -- "$cur" ) )
return 0
;;
disable)
COMPREPLY=( $(compgen -W "$(__enabled)" -- "$cur" ) )
return 0
;;
esac
fi
}
complete -F _haconf haconf
xxd [options] [infile [outfile]]
xxd -r [-s [-]offset] [-c cols] [-ps] [infile [outfile]]
ASCII, decimal, hexadecimal, octal dump
Special
xxd -p -c 10000 # plain hex dump, 10000 bytes per line
xxd -p -u -c 10000 # plain hex dump, 10000 bytes per line, uppercase
xxd -s 0x200 -l 0x200 dump.vmdk | xxd -r # dump a byte range back as raw bytes (view a slice of the file)
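# edit bytes via a hexdump round-trip (a sketch; dump.hex and patched.bin are assumed names)
xxd $file > dump.hex && $EDITOR dump.hex && xxd -r dump.hex > patched.bin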
Useful
-s [+][-]seek # start at <seek> bytes abs. (or +: rel.) infile offset
-l len # stop after <len> octets
-r # reverse operation: convert (or patch) hexdump into binary
-r -s off # revert with <off> added to file positions found in hexdump
-u # use upper case hex letters
All
-a # toggle autoskip: A single '*' replaces nul-lines. Default off
-b # binary digit dump (incompatible with -ps,-i,-r). Default hex
-C # capitalize variable names in C include file style (-i)
-c cols # format <cols> octets per line. Default 16 (-i: 12, -ps: 30)
-E # show characters in EBCDIC. Default ASCII
-e # little-endian dump (incompatible with -ps,-i,-r)
-g bytes # number of octets per group in normal output. Default 2 (-e: 4)
-i # output in C include file style
-l len # stop after <len> octets
-o off # add <off> to the displayed file position
-ps # output in postscript plain hexdump style
-r # reverse operation: convert (or patch) hexdump into binary
-r -s off # revert with <off> added to file positions found in hexdump
-d # show offset in decimal instead of hex
-s [+][-]seek # start at <seek> bytes abs. (or +: rel.) infile offset
-u # use upper case hex letters
Install
sudo apt install xxd
pev
https://pev.sourceforge.io/doc/manual/en_us/ch06.html
ofs2rva
ofs2rva <offset> FILE
Convert raw file offset to RVA
Example
ofs2rva 0x1b9b8 calc.exe
pedis
pedis OPTIONS FILE
Disassemble PE sections and functions (by default, until a RET or LEAVE instruction is found)
--att # set AT&T syntax
-e, --entrypoint # disassemble entrypoint
-f, --format <text|csv|xml|html> # change output format (default: text)
-m, --mode <16|32|64> # disassembly mode (default: auto)
-i <number> # number of instructions to be disassembled
-n <number> # number of bytes to be disassembled
-o, --offset <offset> # disassemble at specified file offset
-r, --rva <rva> # disassemble at specified RVA
-s, --section <section_name> # disassemble entire section given
pehash
pehash OPTIONS FILE
Calculate hashes of PE pieces
-f, --format <text|csv|xml|html> # change output format (default: text)
-a, --all # hash file, sections and headers with md5, sha1, sha256, ssdeep and imphash
-c, --content # hash only the file content (default)
-h, --header <dos|coff|optional> # hash only the header with the specified name
-s, --section <section_name> # hash only the section with the specified name
--section-index <section_index> # hash only the section at the specified index (1..n)
peres
peres OPTIONS FILE
Show information about resource section and extract it
-a, --all # Show all information, statistics and extract resources
-i, --info # Show resources information
-s, --statistics # Show resources statistics
-x, --extract # Extract resources
-v, --file-version # Show File Version from PE resource directory
pesec
pesec [OPTIONS] FILE
Check for security features in PE files
-f, --format <text|csv|xml|html> # change output format (default: text)
-c, --certoutform <text|pem> # specifies the certificate output format (default: text)
-o, --certout <filename> # specifies the output filename to write certificates to (default: stdout)
pescan
pescan OPTIONS FILE
Search for suspicious things in PE files
-f, --format <text|html|xml|csv|json> # change output format (default: text)
-v, --verbose # show more info about items found
readpe
readpe OPTIONS FILE
Show PE file headers
-A, --all # full output (default)
-H, --all-headers # print all PE headers
-S, --all-sections # print all PE sections headers
-f, --format <text|csv|xml|html> # change output format (default: text)
-d, --dirs # show data directories
-h, --header <dos|coff|optional> # show specific header
-i, --imports # show imported functions
-e, --exports # show exported functions
rva2ofs
rva2ofs <rva> FILE
Convert RVA to raw file offset
Example
rva2ofs 0x12db cards.dll
Install
sudo apt install pev
binwalk [OPTIONS] [FILE1] [FILE2] ...
tool for searching binary images for embedded files and executable code
Special
binwalk $file # Get signatures (same as -B)
binwalk --hexdump --red $file1 $file2 # show only different lines
binwalk --raw $str $file # search for the byte string $str in file. use "\x00" escapes for hex characters ("\x37" for 7)
binwalk --entropy $file # calculate file entropy
binwalk --signature $file # scan for common file signatures (same as the default scan)
binwalk --extract $file && tree _${file}.extracted/ # extract files and show files in tree
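# carve bytes manually at an offset binwalk reported (offset and size are example values)
dd if=$file of=carved.bin bs=1 skip=1048576 count=4096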
Useful
-W, --hexdump # Perform a hexdump / diff of a file or files
-i, --red # Only show lines containing bytes that are different among all files
-U, --blue # Only show lines containing bytes that are different among some files
-e, --extract # Automatically extract known file types
-E, --entropy # Calculate file entropy
All
Signature Scan Options:
-B, --signature # Scan target file(s) for common file signatures
-R, --raw=<str> # Scan target file(s) for the specified sequence of bytes
-A, --opcodes # Scan target file(s) for common executable opcode signatures
-m, --magic=<file> # Specify a custom magic file to use
-b, --dumb # Disable smart signature keywords
-I, --invalid # Show results marked as invalid
-x, --exclude=<str> # Exclude results that match <str>
-y, --include=<str> # Only show results that match <str>
Extraction Options:
-e, --extract # Automatically extract known file types
-D, --dd=<type:ext:cmd> # Extract <type> signatures, give the files an extension of <ext>, and execute <cmd>
-M, --matryoshka # Recursively scan extracted files
-d, --depth=<int> # Limit matryoshka recursion depth (default: 8 levels deep)
-C, --directory=<str> # Extract files/folders to a custom directory (default: current working directory)
-j, --size=<int> # Limit the size of each extracted file
-n, --count=<int> # Limit the number of extracted files
-r, --rm # Delete carved files after extraction
-z, --carve # Carve data from files, but don't execute extraction utilities
-V, --subdirs # Extract into sub-directories named by the offset
Entropy Options:
-E, --entropy # Calculate file entropy
-F, --fast # Use faster, but less detailed, entropy analysis
-J, --save # Save plot as a PNG
-Q, --nlegend # Omit the legend from the entropy plot graph
-N, --nplot # Do not generate an entropy plot graph
-H, --high=<float> # Set the rising edge entropy trigger threshold (default: 0.95)
-L, --low=<float> # Set the falling edge entropy trigger threshold (default: 0.85)
Binary Diffing Options:
-W, --hexdump # Perform a hexdump / diff of a file or files
-G, --green # Only show lines containing bytes that are the same among all files
-i, --red # Only show lines containing bytes that are different among all files
-U, --blue # Only show lines containing bytes that are different among some files
-u, --similar # Only display lines that are the same between all files
-w, --terse # Diff all files, but only display a hex dump of the first file
Raw Compression Options:
-X, --deflate # Scan for raw deflate compression streams
-Z, --lzma # Scan for raw LZMA compression streams
-P, --partial # Perform a superficial, but faster, scan
-S, --stop # Stop after the first result
General Options:
-l, --length=<int> # Number of bytes to scan
-o, --offset=<int> # Start scan at this file offset
-O, --base=<int> # Add a base address to all printed offsets
-K, --block=<int> # Set file block size
-g, --swap=<int> # Reverse every n bytes before scanning
-f, --log=<file> # Log results to file
-c, --csv # Log results to file in CSV format
-t, --term # Format output to fit the terminal window
-q, --quiet # Suppress output to stdout
-v, --verbose # Enable verbose output
-h, --help # Show help output
-a, --finclude=<str> # Only scan files whose names match this regex
-p, --fexclude=<str> # Do not scan files whose names match this regex
-s, --status=<int> # Enable the status server on the specified port
Install
sudo apt install binwalk
balbuzard
balbuzard [options] <filename> [filename2 ...]
malware analysis tool to extract patterns of interest and crack obfuscation such as XOR
Special
balbuzard $file # summarize all findings
balbuzard $file -v|grep ^---- -A2 # show all sections
Useful
-c CSV, --csv=CSV # export results to a CSV file
-r # find files recursively in subdirectories.
All
-h, --help # show this help message and exit
-c CSV, --csv=CSV # export results to a CSV file
-v # verbose display, with hex view.
-r # find files recursively in subdirectories.
-z ZIP_PASSWORD, --zip=ZIP_PASSWORD # if the file is a zip archive, open first file from it, using the provided password (requires Python 2.6+)
-f ZIP_FNAME, --zipfname=ZIP_FNAME # if the file is a zip archive, file(s) to be opened within the zip. Wildcards * and ? are supported. (default:*)
Install
pip2 install -U balbuzard
bbcrack
bbcrack [options] <filename>
uses a new algorithm based on patterns of interest to bruteforce typical malware obfuscation such as XOR, ROL, ADD and various combinations, in order to guess which algorithms/keys have been used
All
-l LEVEL, --level=LEVEL # select transforms with level 1, 2 or 3 and below
-i INCLEVEL, --inclevel=INCLEVEL # select transforms only with level 1, 2 or 3 (incremental)
-k KEEP, --keep=KEEP # number of transforms to keep after stage 1
-s SAVE, --save=SAVE # number of transforms to save to files after stage 2
-t TRANSFORM, --transform=TRANSFORM # only check specific transforms (comma separated list, or "-t list" to display all available transforms)
-z ZIP_PASSWORD, --zip=ZIP_PASSWORD # if the file is a zip archive, open first file from it, using the provided password (requires Python 2.6+)
-p # profiling: measure time spent on each pattern.
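example (a hedged sketch; sample.bin is a placeholder file)
bbcrack -t list # display all available transforms
bbcrack -l 1 sample.bin # quick pass, level-1 transforms only
bbcrack -l 3 -k 30 -s 5 sample.bin # deep pass: keep 30 transforms after stage 1, save 5 files after stage 2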
bbharvest
bbharvest [options] <filename>
extracts all patterns of interest found when applying typical malware obfuscation transforms such as XOR, ROL, ADD and various combinations, trying all possible keys. It is especially useful when several keys or several transforms are used in a single file
All
-l LEVEL, --level=LEVEL # select transforms level 1, 2 or 3
-i INCLEVEL, --inclevel=INCLEVEL # select transforms only with level 1, 2 or 3 (incremental)
-c CSV, --csv=CSV # export results to a CSV file
-t TRANSFORM, --transform=TRANSFORM # only check specific transforms (comma separated list, or "-t list" to display all available transforms)
-z ZIP_PASSWORD, --zip=ZIP_PASSWORD # if the file is a zip archive, open first file from it, using the provided password (requires Python 2.6+)
-p # profiling: measure time spent on each pattern.
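example (a hedged sketch; sample.bin is a placeholder file)
bbharvest -l 1 -c found.csv sample.bin # harvest patterns for all level-1 transforms & export them to CSV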
bbtrans
bbtrans [options] <filename>
can apply any of the transforms from bbcrack (XOR, ROL, ADD and various combinations) to a file
All
-t TRANSFORM, --transform=TRANSFORM # transform to be applied (or "-t list" to display all available transforms)
-p PARAMS, --params=PARAMS # parameters for transform (comma separated list)
-z ZIP_PASSWORD, --zip=ZIP_PASSWORD # if the file is a zip archive, open first file from it, using the provided password (requires Python 2.6+)
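example (a hedged sketch; the transform name & key are assumptions, run "-t list" first)
bbtrans -t list # display all available transforms
bbtrans -t xor -p 4a sample.bin # assumed usage: XOR every byte with key 0x4A (sample.bin is a placeholder)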
hexdump [-bcCdovx] [-e format_string] [-f format_file] [-n length] [-s offset] file ...
ASCII, decimal, hexadecimal, octal dump
Special
hexdump -v # do not use * to replace duplicate lines
hexdump -ve '"%02X"' # convert to uppercase hexadecimal
hexdump -ve '8/1 "%02X"' # convert to uppercase hexadecimal, classic 8-byte grouping
hexdump -e '"%08_ax""|"' -e '16/1 "%02x ""|"' -e '16/1 "%_p""|\n"' # 1-byte units
hexdump -e '"%08_ax""|"' -e '8/2 "%04x ""|"' -e '16/1 "%_p""|\n"' # 2-byte units
hexdump -e '"%08_ax""|"' -e '4/4 "%08x ""|"' -e '16/1 "%_p""|\n"' # 4-byte units
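example (a hedged sketch combining -C, -s & -n from below; offsets are arbitrary)
hexdump -C -s 0x40 -n 64 $file # canonical hex+ASCII dump of 64 bytes starting at offset 0x40
hexdump -v -e '4/4 "%08x ""\n"' $file # 4-byte words; -v keeps duplicate lines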
Useful
-C # Canonical hex+ASCII display. Display the input offset in hexadecimal, followed by sixteen space-separated, two column, hexadecimal bytes, followed by the same sixteen bytes in %_p format enclosed in ``|'' characters. Calling the command hd implies this option.
-n length # Interpret only length bytes of input.
-s offset # Skip offset bytes from the beginning of the input. By default, offset is interpreted as a decimal number.
-v # Cause hexdump to display all input data. Without the -v option, any number of groups of output lines, which would be identical to the immediately preceding group of output lines (except for the input offsets), are replaced with a line comprised of a single asterisk.
All
-b # One-byte octal display. Display the input offset in hexadecimal, followed by sixteen space-separated, three column, zero-filled, bytes of input data, in octal, per line.
-c # One-byte character display. Display the input offset in hexadecimal, followed by sixteen space-separated, three column, space-filled, characters of input data per line.
-C # Canonical hex+ASCII display. Display the input offset in hexadecimal, followed by sixteen space-separated, two column, hexadecimal bytes, followed by the same sixteen bytes in %_p format enclosed in ``|'' characters. Calling the command hd implies this option.
-d # Two-byte decimal display. Display the input offset in hexadecimal, followed by eight space-separated, five column, zero-filled, two-byte units of input data, in unsigned decimal, per line.
-e format_string # Specify a format string to be used for displaying data.
-f format_file # Specify a file that contains one or more newline separated format strings. Empty lines and lines whose first non-blank character is a hash mark (#) are ignored.
-n length # Interpret only length bytes of input.
-o # Two-byte octal display. Display the input offset in hexadecimal, followed by eight space-separated, six column, zero-filled, two byte quantities of input data, in octal, per line.
-s offset # Skip offset bytes from the beginning of the input. By default, offset is interpreted as a decimal number. With a leading 0x or 0X, offset is interpreted as a hexadecimal number, otherwise, with a leading 0, offset is interpreted as an octal number. Appending the character b, k, or m to offset causes it to be interpreted as a multiple of 512, 1024, or 1048576, respectively.
-v # Cause hexdump to display all input data. Without the -v option, any number of groups of output lines, which would be identical to the immediately preceding group of output lines (except for the input offsets), are replaced with a line comprised of a single asterisk.
-x # Two-byte hexadecimal display. Display the input offset in hexadecimal, followed by eight, space separated, four column, zero-filled, two-byte quantities of input data, in hexadecimal, per line.
Install
sudo apt install bsdmainutils
rabin2 [-AceghHiIsSMzlpRrLxvhqQTuUwV] [-a arch] [-b bits] [-B addr] [-C fmt:C:[D]] [-D lang sym|-] [-f subbin] [-k query] [-K algo] [-O binop] [-o str] [-m addr] [-@ addr] [-n str] [-X fmt file ...] file
Binary program info extractor
Special
rabin2 -H $file | grep -i timedate # compilation date
rabin2 -H $file | grep -i sizeofcode # size of code
rabin2 -i $file | grep -i " $dll " | wc -l # count imported functions in specific dll
rabin2 -i $file | awk '{print $5}' | grep -v '^\(lib\|\)$' | sort -u # show all imported libs (dll)
rabin2 -s $file | grep -i " $dll " | wc -l # count symbols functions in specific dll
rabin2 -H $file|grep -iA2 debug # debugger detection present
rabin2 -g Program|grep -i debug # details about debugger detection present
rabin2 -z $file | sed -n "/$str1/,/$str2/p" | sed 's/^.* ascii *//' > $fileout # extract data between 2 strings in file
Useful
-H # Show header fields (see ih command in r2)
-g # Show all possible information
-I # Show binary info (iI in r2)
-i # Show imports (symbols imported from libraries) (ii)
-R # Show relocations
-s # Show exported symbols
-S # Show sections
-SS # Show segments
-t # Show file hashes
-T # Show Certificates
-U # Show Resources
-z # Show strings inside .data section (like gnu strings does)
-x # Extract all sub binaries from a fat binary (f.ex: fatmach0)
-X format file ... # Package a fat or zip containing all the files passed (fat, zip)
-l # List linked libraries to the binary
-e # Show entrypoints for disk and on-memory
All
-@ addr # Show information (symbol, section, import) of the given address
-A # List sub-binaries and their associated arch-bits pairs
-a arch # Set arch (x86, arm, .. accepts underscore for bits x86_32)
-b bits # Set bits (32, 64, ...)
-B addr # Override baddr
-c # List classes
-cc # List classes in header format
-C [fmt:C[:D]] # Create [elf,mach0,pe] for arm and x86-32/64 tiny binaries, where 'C' is a hexpair list of the code bytes and ':D' is an optional concatenation describing the bytes for the data section
-d # Show debug/dwarf information
-D lang symbolname # Demangle symbol name (or - to read from stdin) for lang (cxx, swift, java, ..)
-e # Show entrypoints for disk and on-memory
-ee # Show constructor/destructors (extended entrypoints)
-f subbin # Select sub-binary architecture. Useful for fat-mach0 binaries
-F binfmt # Force to use that bin plugin (ignore header check)
-g # Show all possible information
-G addr # Load address . offset to header
-h # Show usage help message.
-H # Show header fields (see ih command in r2)
-I # Show binary info (iI in r2)
-i # Show imports (symbols imported from libraries) (ii)
-j # Output in json
-k query # Perform SDB query on loaded file
-K algo # Select a rahash2 checksum algorithm to be performed on sections listing (and maybe others in the future) i.e 'rabin2 -K md5 -S /bin/ls'
-l # List linked libraries to the binary
-L # List supported bin plugins
-M # Show address of 'main' symbol
-m addr # Show source line reference from a given address
-N minlen:maxlen # Force minimum and maximum number of chars per string (see -z and -zz). if (strlen>minlen && (!maxlen || strlen<=maxlen))
-n str # Show information (symbol, section, import) at string offset
-o str # Output file/folder for write operations (out by default)
-O binop # Perform binary operation on target binary (dump, resize, change sections, ...) see '-O help' for more information
-p # Disable VA. Show physical addresses
-P # Show debug/pdb information
-PP # Download pdb file for binary
-q # Be quiet, just show fewer data
-qq # Show less info (no offset/size for -z for ex.)
-Q # Show load address used by dlopen (non-aslr libs)
-r # Show output in radare format
-R # Show relocations
-s # Show exported symbols
-S # Show sections
-SS # Show segments
-t # Show file hashes
-T # Show Certificates
-u # Unfiltered (no rename duplicated symbols/sections)
-U # Show Resources
-v # Show version information
-V # Show binary version information
-w # Show try/catch blocks
-x # Extract all sub binaries from a fat binary (f.ex: fatmach0)
-X format file ... # Package a fat or zip containing all the files passed (fat, zip)
-z # Show strings inside .data section (like gnu strings does)
-Z # Guess size of binary program
-zz # Shows strings from raw bins
-zzz # Dump raw strings to stdout (for huge files)
Install
sudo apt install radare2
objdump <option(s)> <file(s)>
Display information from object <file(s)>
Useful
objdump Program -x|sed -n '1,/.rdata section/p' # dump all headers, stop at the .rdata section
objdump Program -s|grep -A1 ^Contents # preview the first line of each section's contents
objdump Program -sj $section # full contents of a single section, section=".data"
-a, --archive-headers # Display archive header information
-f, --file-headers # Display the contents of the overall file header
-h, --[section-]headers # Display the contents of the section headers
-x, --all-headers # Display the contents of all headers
-s, --full-contents # Display the full contents of all sections requested
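example (a hedged sketch; Program is the placeholder binary used above)
objdump -d -j .text -M intel Program | head -40 # disassemble only .text, Intel syntax
objdump -C -t Program # symbol table with demangled C++ names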
All
At least one of the following switches must be given:
-a, --archive-headers # Display archive header information
-f, --file-headers # Display the contents of the overall file header
-p, --private-headers # Display object format specific file header contents
-P, --private=OPT,OPT... # Display object format specific contents
-h, --[section-]headers # Display the contents of the section headers
-x, --all-headers # Display the contents of all headers
-d, --disassemble # Display assembler contents of executable sections
-D, --disassemble-all # Display assembler contents of all sections
--disassemble=<sym> # Display assembler contents from <sym>
-S, --source # Intermix source code with disassembly
--source-comment[=<txt>] # Prefix lines of source code with <txt>
-s, --full-contents # Display the full contents of all sections requested
-g, --debugging # Display debug information in object file
-e, --debugging-tags # Display debug information using ctags style
-G, --stabs # Display (in raw form) any STABS info in the file
-W[lLiaprmfFsoRtUuTgAckK] or --dwarf[=rawline,=decodedline,=info,=abbrev,=pubnames,=aranges,=macro,=frames, =frames-interp,=str,=loc,=Ranges,=pubtypes, =gdb_index,=trace_info,=trace_abbrev,=trace_aranges, =addr,=cu_index,=links,=follow-links] # Display DWARF info in the file
--ctf=SECTION # Display CTF info from SECTION
-t, --syms # Display the contents of the symbol table(s)
-T, --dynamic-syms # Display the contents of the dynamic symbol table
-r, --reloc # Display the relocation entries in the file
-R, --dynamic-reloc # Display the dynamic relocation entries in the file
@<file> # Read options from <file>
-v, --version # Display this program's version number
-i, --info # List object formats and architectures supported
-H, --help # Display this information
The following switches are optional:
-b, --target=BFDNAME # Specify the target object format as BFDNAME
-m, --architecture=MACHINE # Specify the target architecture as MACHINE
-j, --section=NAME # Only display information for section NAME
-M, --disassembler-options=OPT # Pass text OPT on to the disassembler
-EB --endian=big # Assume big endian format when disassembling
-EL --endian=little # Assume little endian format when disassembling
--file-start-context # Include context from start of file (with -S)
-I, --include=DIR # Add DIR to search list for source files
-l, --line-numbers # Include line numbers and filenames in output
-F, --file-offsets # Include file offsets when displaying information
-C, --demangle[=STYLE] # Decode mangled/processed symbol names. The STYLE, if specified, can be `auto', `gnu', `lucid', `arm', `hp', `edg', `gnu-v3', `java' or `gnat'
--recurse-limit # Enable a limit on recursion whilst demangling. [Default]
--no-recurse-limit # Disable a limit on recursion whilst demangling
-w, --wide # Format output for more than 80 columns
-z, --disassemble-zeroes # Do not skip blocks of zeroes when disassembling
--start-address=ADDR # Only process data whose address is >= ADDR
--stop-address=ADDR # Only process data whose address is < ADDR
--prefix-addresses # Print complete address alongside disassembly
--[no-]show-raw-insn # Display hex alongside symbolic disassembly
--insn-width=WIDTH # Display WIDTH bytes on a single line for -d
--adjust-vma=OFFSET # Add OFFSET to all displayed section addresses
--special-syms # Include special symbols in symbol dumps
--inlines # Print all inlines for source line (with -l)
--prefix=PREFIX # Add PREFIX to absolute paths for -S
--prefix-strip=LEVEL # Strip initial directory names for -S
--dwarf-depth=N # Do not display DIEs at depth N or greater
--dwarf-start=N # Display DIEs starting with N, at the same depth or deeper
--dwarf-check # Make additional dwarf internal consistency checks.
--ctf-parent=SECTION # Use SECTION as the CTF parent
--visualize-jumps # Visualize jumps by drawing ASCII art lines
--visualize-jumps=color # Use colors in the ASCII art
--visualize-jumps=extended-color # Use extended 8-bit color codes
--visualize-jumps=off # Disable jump visualization
Install
sudo apt install binutils
clamscan [options] [file/directory/-]
Scan files and directories for viruses
Useful
-i --infected # Only print infected files
-r --recursive[=yes/no(*)] # Scan subdirectories recursively
-f --file-list=FILE FILE # Scan files from FILE
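example (a hedged sketch; $dir & the quarantine path are placeholders)
clamscan -r -i -l /tmp/clamscan.log "$dir" # recursive scan, print & log only infected files
clamscan -r --move=/tmp/quarantine "$dir" # move infected files to a quarantine directory (must exist)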
All
-a --archive-verbose # Show filenames inside scanned archives
--stdout # Write to stdout instead of stderr. Does not affect 'debug' messages.
--no-summary # Disable summary at end of scanning
-i --infected # Only print infected files
--suppress-ok-results -o # Skip printing OK files
--bell # Sound bell on virus detection
--tempdir=DIRECTORY # Create temporary files in DIRECTORY
--leave-temps[=yes/no(*)] # Do not remove temporary files
--gen-json[=yes/no(*)] # Generate JSON description of scanned file(s). JSON will be printed and also dropped to the temp directory if --leave-temps is enabled.
-d --database=FILE/DIR FILE/DIR # Load virus database from FILE or load all supported db files from DIR
--official-db-only[=yes/no(*)] # Only load official signatures
-l --log=FILE FILE # Save scan report to FILE
-r --recursive[=yes/no(*)] # Scan subdirectories recursively
-z --allmatch[=yes/no(*)] # Continue scanning within file after finding a match
--cross-fs[=yes(*)/no] # Scan files and directories on other filesystems
--follow-dir-symlinks[=0/1(*)/2] # Follow directory symlinks (0 = never, 1 = direct, 2 = always)
--follow-file-symlinks[=0/1(*)/2] # Follow file symlinks (0 = never, 1 = direct, 2 = always)
-f --file-list=FILE FILE # Scan files from FILE
--remove[=yes/no(*)] # Remove infected files. Be careful!
--move=DIRECTORY # Move infected files into DIRECTORY
--copy=DIRECTORY # Copy infected files into DIRECTORY
--exclude=REGEX # Don't scan file names matching REGEX
--exclude-dir=REGEX # Don't scan directories matching REGEX
--include=REGEX # Only scan file names matching REGEX
--include-dir=REGEX # Only scan directories matching REGEX
--bytecode[=yes(*)/no] # Load bytecode from the database
--bytecode-unsigned[=yes/no(*)] # Load unsigned bytecode **Caution**: You should NEVER run bytecode signatures from untrusted sources. Doing so may result in arbitrary code execution.
--bytecode-timeout=N # Set bytecode timeout (in milliseconds)
--statistics[=none(*)/bytecode/pcre] # Collect and print execution statistics
--detect-pua[=yes/no(*)] # Detect Possibly Unwanted Applications
--exclude-pua=CAT # Skip PUA sigs of category CAT
--include-pua=CAT # Load PUA sigs of category CAT
--detect-structured[=yes/no(*)] # Detect structured data (SSN, Credit Card)
--structured-ssn-format=X # SSN format (0=normal,1=stripped,2=both)
--structured-ssn-count=N # Min SSN count to generate a detect
--structured-cc-count=N # Min CC count to generate a detect
--structured-cc-mode=X # CC mode (0=credit debit and private label, 1=credit cards only
--scan-mail[=yes(*)/no] # Scan mail files
--phishing-sigs[=yes(*)/no] # Enable email signature-based phishing detection
--phishing-scan-urls[=yes(*)/no] # Enable URL signature-based phishing detection
--heuristic-alerts[=yes(*)/no] # Heuristic alerts
--heuristic-scan-precedence[=yes/no(*)] # Stop scanning as soon as a heuristic match is found
--normalize[=yes(*)/no] # Normalize html, script, and text files. Use normalize=no for yara compatibility
--scan-pe[=yes(*)/no] # Scan PE files
--scan-elf[=yes(*)/no] # Scan ELF files
--scan-ole2[=yes(*)/no] # Scan OLE2 containers
--scan-pdf[=yes(*)/no] # Scan PDF files
--scan-swf[=yes(*)/no] # Scan SWF files
--scan-html[=yes(*)/no] # Scan HTML files
--scan-xmldocs[=yes(*)/no] # Scan xml-based document files
--scan-hwp3[=yes(*)/no] # Scan HWP3 files
--scan-archive[=yes(*)/no] # Scan archive files (supported by libclamav)
--alert-broken[=yes/no(*)] # Alert on broken executable files (PE & ELF)
--alert-broken-media[=yes/no(*)] # Alert on broken graphics files (JPEG, TIFF, PNG, GIF)
--alert-encrypted[=yes/no(*)] # Alert on encrypted archives and documents
--alert-encrypted-archive[=yes/no(*)] # Alert on encrypted archives
--alert-encrypted-doc[=yes/no(*)] # Alert on encrypted documents
--alert-macros[=yes/no(*)] # Alert on OLE2 files containing VBA macros
--alert-exceeds-max[=yes/no(*)] # Alert on files that exceed max file size, max scan size, or max recursion limit
--alert-phishing-ssl[=yes/no(*)] # Alert on emails containing SSL mismatches in URLs
--alert-phishing-cloak[=yes/no(*)] # Alert on emails containing cloaked URLs
--alert-partition-intersection[=yes/no(*)] # Alert on raw DMG image files containing partition intersections
--nocerts # Disable authenticode certificate chain verification in PE files
--dumpcerts # Dump authenticode certificate chain in PE files
--max-scantime=#n # Scan time longer than this will be skipped and assumed clean (milliseconds)
--max-filesize=#n # Files larger than this will be skipped and assumed clean
--max-scansize=#n # The maximum amount of data to scan for each container file (**)
--max-files=#n # The maximum number of files to scan for each container file (**)
--max-recursion=#n # Maximum archive recursion level for container file (**)
--max-dir-recursion=#n # Maximum directory recursion level
--max-embeddedpe=#n # Maximum size file to check for embedded PE
--max-htmlnormalize=#n # Maximum size of HTML file to normalize
--max-htmlnotags=#n # Maximum size of normalized HTML file to scan
--max-scriptnormalize=#n # Maximum size of script file to normalize
--max-ziptypercg=#n # Maximum size zip to type reanalyze
--max-partitions=#n # Maximum number of partitions in disk image to be scanned
--max-iconspe=#n # Maximum number of icons in PE file to be scanned
--max-rechwp3=#n # Maximum recursive calls to HWP3 parsing function
--pcre-match-limit=#n # Maximum calls to the PCRE match function.
--pcre-recmatch-limit=#n # Maximum recursive calls to the PCRE match function.
--pcre-max-filesize=#n # Maximum size file to perform PCRE subsig matching.
--disable-cache # Disable caching and cache checks for hash sums of scanned files.
-h --help # Show this help
--debug # Enable libclamav's debug messages
--quiet # Only output error messages
-v --verbose # Be verbose
-V --version # Print version number
Install
sudo apt install clamav
pescan OPTIONS FILE
Search for suspicious things in PE files
Useful
-f, --format <text|html|xml|csv|json> # change output format (default: text)
All
-f, --format <text|html|xml|csv|json> # change output format (default: text)
-v, --verbose # show more info about items found
-V, --version # show version and exit
--help # show this help and exit
Install
sudo apt install pev
strings [option(s)] [file(s)]
Display printable strings in [file(s)] (stdin by default)
Useful
-s --output-separator=<string> # String used to separate strings in output
All
-a - --all # Scan the entire file, not just the data section [default]
-d --data # Only scan the data sections in the file
-f --print-file-name # Print the name of the file before each string
-n --bytes=[number] # Locate & print any NUL-terminated sequence of at least [number] characters (default 4); -<number> is an alias
-t --radix={o,d,x} # Print the location of the string in base 8, 10 or 16
-w --include-all-whitespace # Include all whitespace as valid string characters
-o # An alias for --radix=o
-T --target=<BFDNAME> # Specify the binary file format
-e --encoding={s,S,b,l,B,L} # Select character size and endianness: s = 7-bit, S = 8-bit, {b,l} = 16-bit, {B,L} = 32-bit
@<file> # Read options from <file>
-h --help # Display this information
-v -V --version # Print the program's version number
file [OPTION...] [FILE...]
Determine type of FILEs.
Useful
-z, --uncompress # try to look inside compressed files
-F, --separator STRING # use string as separator instead of `:'
All
-m, --magic-file LIST # use LIST as a colon-separated list of magic number files
-z, --uncompress # try to look inside compressed files
-Z, --uncompress-noreport # only print the contents of compressed files
-b, --brief # do not prepend filenames to output lines
-c, --checking-printout # print the parsed form of the magic file, use in conjunction with -m to debug a new magic file before installing it
-e, --exclude TEST # exclude TEST from the list of test to be performed for file. Valid tests are: apptype, ascii, cdf, compress, csv, elf, encoding, soft, tar, json, text, tokens
-f, --files-from FILE # read the filenames to be examined from FILE
-F, --separator STRING # use string as separator instead of `:'
-i, --mime # output MIME type strings (--mime-type and --mime-encoding)
--apple # output the Apple CREATOR/TYPE
--extension # output a slash-separated list of extensions
--mime-type # output the MIME type
--mime-encoding # output the MIME encoding
-k, --keep-going # don't stop at the first match
-l, --list # list magic strength
-L, --dereference # follow symlinks (default if POSIXLY_CORRECT is set)
-h, --no-dereference # don't follow symlinks (default if POSIXLY_CORRECT is not set) (default)
-n, --no-buffer # do not buffer output
-N, --no-pad # do not pad output
-0, --print0 # terminate filenames with ASCII NUL
-p, --preserve-date # preserve access times on files
-P, --parameter # set file engine parameter limits
indir 15 recursion limit for indirection
name 30 use limit for name/use magic
elf_notes 256 max ELF notes processed
elf_phnum 128 max ELF prog sections processed
elf_shnum 32768 max ELF sections processed
-r, --raw # don't translate unprintable chars to \ooo
-s, --special-files # treat special (block/char devices) files as ordinary ones
-S, --no-sandbox # disable system call sandboxing
-C, --compile # compile file specified by -m
-d, --debug # print debugging messages
--help # display this help and exit
-v, --version # output version information and exit
Install
see foralyse in https://code.ambau.fr
info
variables
file=/share/memory/dump
profile=Win7SP0x86
vol2 -f $file --profile $profile
# Options
--output dot/greptext/html/json/sqlite/text/xlsx
vol2 --info # get all information from volatility
vol2 --info|sed -n '/^Profiles/,/^$/ p' # available profiles
vol2 --info|sed -n '/^Address/,/^$/ p' # available address spaces
vol2 --info|sed -n '/^Scanner/,/^$/ p' # available scanners
vol2 --info|sed -n '/^Plugins/,/^$/ p' # available plugins
vol2 --info|sed -n '/^Plugins/,/^$/ p'|grep -v '^mac_\|^linux_' # windows plugins
vol2 --info|sed -n '/^Plugins/,/^$/ p'|grep '^linux_' # linux plugins
vol2 --info|sed -n '/^Plugins/,/^$/ p'|grep '^mac_' # mac plugins
special
hash
vol2 hashdump -f ${dump} --profile=${profile} -y ${offset_system} -s ${offset_sam}
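# the SYSTEM & SAM offsets above usually come from hivelist; a common workflow sketch:
vol2 hivelist -f ${dump} --profile=${profile} # note the virtual offsets of the SYSTEM & SAM hives
vol2 hashdump -f ${dump} --profile=${profile} -y ${offset_system} -s ${offset_sam} > hashes.txt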
plugins
cmd
clipboard # Extract the contents of the windows clipboard
cmdline # Display process command-line arguments
cmdscan # Extract command history by scanning for _COMMAND_HISTORY
consoles # Extract command history by scanning for _CONSOLE_INFORMATION
device
devicetree # Show device tree
mbrparser # Scans for and parses potential Master Boot Records (MBRs)
dll
dlldump -D PATH # Dump DLLs from a process address space to PATH
-p PID # specify a process by its PID
-o OFFSET # specify a process by its virtual OFFSET
dlllist # Print list of loaded dlls for each process
-p PID # specify a process by its PID
ldrmodules # Detect unlinked DLLs
dump
cachedump # Dumps cached domain hashes from memory
dumpcerts # Dump RSA private and public SSL keys
dlldump -D PATH # Dump DLLs from a process address space to PATH
-p PID # specify a process by its PID
-o OFFSET # specify a process by its virtual OFFSET
dumpfiles # Extract memory mapped and cached files
hashdump # Dumps passwords hashes (LM/NTLM) from memory
hivedump # Prints out a hive
lsadump # Dump (decrypted) LSA secrets from the registry
procdump # Dump a process to an executable file sample
-o OFFSET, --offset=OFFSET # EPROCESS offset (in hex) in the physical address space
-p PID, --pid=PID # Operate on these Process IDs (comma-separated)
-n NAME, --name=NAME # Operate on these process names (regex)
-D DUMP_DIR, --dump-dir=DUMP_DIR # Directory in which to dump executable files
executable
impscan # Scan for calls to imported functions
-p PID, --pid=PID # Process ID (leave off to scan kernel memory)
-o OFFSET, --offset=OFFSET # EPROCESS offset (in hex) in the physical address space
-b BASE, --base=BASE # Base address in process memory if --pid is supplied, otherwise an address in kernel space
-s SIZE, --size=SIZE # Size of memory to scan
joblinks # Print process job link information
malfind # Find hidden and injected code
privs # Display process privileges
shimcache # Parses the Application Compatibility Shim Cache registry key
verinfo # Prints out the version information from PE images
file
dumpfiles # Extract memory mapped and cached files
filescan # Pool scanner for file objects
mftparser # Scans for and parses potential MFT (Master File Table) entries
notepad # List currently displayed notepad text
hive
amcache # Print AmCache information
hivescan # Pool scanner for registry hives
hivedump # Prints out a hive
hivelist # Print list of registry hives
printkey # Print a registry key, and its subkeys and values
shimcache # Parses the Application Compatibility Shim Cache registry key
shutdowntime # Print ShutdownTime of machine from registry
userassist # Print userassist registry keys and information
hook
apihooks # Detect API hooks in process and kernel memory
driverirp # Driver IRP hook detection
eventhooks # Print details on windows event hooks
messagehooks # List desktop and thread window message hooks
image
imageinfo # get OS info & suggested profiles
kdbgscan # Search for and dump potential KDBG values
memory
bigpools # Dump the big page pools using BigPagePoolScanner
cachedump # Dumps cached domain hashes from memory
hpakextract # Extract physical memory from an HPAK file
hpakinfo # Info on an HPAK file
memdump # Dump the addressable memory for a process
memmap # Print the memory map
patcher # Patches memory based on page scans
raw2dmp # Converts a physical memory sample to a windbg crash dump
module
drivermodule # Associate driver objects to kernel modules
moddump # Dump a kernel driver to an executable file sample
modscan # Pool scanner for kernel modules
modules # Print list of loaded modules
timers # Print kernel timers and associated module DPCs
unloadedmodules # Print list of unloaded modules
network
connections # Print list of open connections [Windows XP and 2003 Only]
connscan # Pool scanner for tcp connections
netscan # list of connections
sockets # Print list of open sockets
sockscan # Pool scanner for tcp socket objects
password
dumpcerts # Dump RSA private and public SSL keys
hashdump # Dumps passwords hashes (LM/NTLM) from memory
truecryptmaster # Recover TrueCrypt 7.1a Master Keys
truecryptpassphrase # TrueCrypt Cached Passphrase Finder
truecryptsummary # TrueCrypt Summary
process
envars # Display process environment variables
getsids # Print the SIDs owning each process
handles # Print list of open handles for each process
privs # Display process privileges
procdump # Dump a process to an executable file sample
pslist # Print all running processes by following the EPROCESS lists
-P # print for physical offset
psscan # Pool scanner for process objects
pstree # Print process list as a tree
psxview # Find hidden processes with various process listings
thrdscan # Pool scanner for thread objects
threads # Investigate _ETHREAD and _KTHREADs
service
getservicesids # Get the names of services in the Registry and return Calculated SID
servicediff # List Windows services (ala Plugx)
svcscan # Scan for Windows services
system
auditpol # Prints out the Audit Policies from HKLM\SECURITY\Policy\PolAdtEv
bioskbd # Reads the keyboard buffer from Real Mode memory
callbacks # Print system-wide notification routines
crashinfo # Dump crash-dump information
driverirp # Driver IRP hook detection
driverscan # Pool scanner for driver objects
envars # Display process environment variables
evtlogs # Extract Windows Event Logs (XP/2003 only)
kpcrscan # Search for and dump potential KPCR values
machoinfo # Dump Mach-O file format information
mutantscan # Pool scanner for mutex objects
objtypescan # Scan for Windows object type objects
screenshot # Save a pseudo-screenshot based on GDI windows (require PIL)
shutdowntime # Print ShutdownTime of machine from registry
symlinkscan # Pool scanner for symlink objects
timeline
timeliner # Creates a timeline from various artifacts in memory
timers # Print kernel timers and associated module DPCs
user
atoms # Print session and window station atom tables
atomscan # Pool scanner for atom tables
clipboard # Extract the contents of the windows clipboard
deskscan # Pool scanner for tagDESKTOP (desktops)
gahti # Dump the USER handle type information
sessions # List details on _MM_SESSION_SPACE (user logon sessions)
userassist # Print userassist registry keys and information
userhandles # Dump the USER handle tables
vad
vaddump # Dumps out the vad sections to a file
vadinfo # Dump the VAD info
vadtree # Walk the VAD tree and display in tree format
vadwalk # Walk the VAD tree
virtual
qemuinfo # Dump Qemu information
vboxinfo # Dump virtualbox information
vmwareinfo # Dump VMware VMSS/VMSN information
volshell
Use addrspace() for Kernel/Virtual AS
Use addrspace().base for Physical AS
Use proc() to get the current process object
proc().get_process_address_space() for the current process AS
proc().get_load_modules() for the current process DLLs
addrspace() # Get the current kernel/virtual address space.
cc(offset=None, pid=None, name=None, physical=False) # Change current shell context.
db(address, length=128, space=None) # Print bytes as canonical hexdump.
dd(address, length=128, space=None) # Print dwords at address.
dis(address, length=128, space=None, mode=None) # Disassemble code at a given address.
dq(address, length=128, space=None) # Print qwords at address.
dt(objct, address=None, space=None, recursive=False, depth=0) # Describe an object or show type info.
find(needle, max=1, shift=0, skip=0, count=False, length=128)
getmods() # Generator for kernel modules (scripting).
getprocs() # Generator of process objects (scripting).
hh(cmd=None) # Get help on a command.
list_entry(head, objname, offset=-1, fieldname=None, forward=True, space=None) # Traverse a _LIST_ENTRY.
modules() # Print loaded modules in a table view.
proc() # Get the current process object.
ps() # Print active processes in a table view.
sc() # Show the current context.
For help on a specific command, type 'hh(<command>)'
windows
windows # Print Desktop Windows (verbose details)
wintree # Print Z-Order Desktop Windows Tree
wndscan # Pool scanner for window stations
others
editbox # Displays information about Edit controls. (Listbox experimental.)
gditimers # Print installed GDI timers and callbacks
gdt # Display Global Descriptor Table
idt # Display Interrupt Descriptor Table
hibinfo # Dump hibernation file information
imagecopy # copy the physical address space to a raw image: vol2 -f $file --profile $profile imagecopy -O $file-converted
iehistory # Reconstruct Internet Explorer cache / history
poolpeek # Configurable pool scanner plugin
shellbags # Prints ShellBags info
strings # Match physical offsets to virtual addresses (may take a while, VERY verbose)
yarascan # Scan process or kernel memory with Yara signatures
Operators
See wireshark
Useful
select
tshark -r ${dump} -Tfields -e ip.src # get all source ip addresses
tshark -r ${dump} -Tfields -e ip.proto # get all IP protocol numbers
tshark -r ${file} -Tfields -e http.user_agent # get all HTTP user-agent strings
filter
tshark -r ${dump} -Tfields -e ip.src # get all source ip addresses
tshark -r ${dump} -Y http -w filtered.pcapng # filter all http streams to file
tshark -r ${dump} -Y "not ip.addr == 93.184.221.240" -w filtered.pcapng # filter by IP address
tshark -r ${dump} -Y "frame.time_epoch >= 1631211000" -w filtered.pcapng # filter by frame time epoch
help
tshark [ -i <capture interface>|- ] [ -f <capture filter> ] [ -2 ] [ -r <infile> ] [ -w <outfile>|- ] [ options ] [ <filter> ]
tshark -G [ <report type> ] [ --elastic-mapping-filter <protocols> ]
Dump and analyze network traffic
Capture interface
-i <interface>, --interface <interface> # name or idx of interface (def: first non-loopback)
-f <capture filter> # packet filter in libpcap filter syntax
-s <snaplen>, --snapshot-length <snaplen> # packet snapshot length (def: appropriate maximum)
-p, --no-promiscuous-mode # don't capture in promiscuous mode
-I, --monitor-mode # capture in monitor mode, if available
-B <buffer size>, --buffer-size <buffer size> # size of kernel buffer (def: 2MB)
-y <link type>, --linktype <link type> # link layer type (def: first appropriate)
--time-stamp-type <type> # timestamp method for interface
-D, --list-interfaces # print list of interfaces and exit
-L, --list-data-link-types # print list of link-layer types of iface and exit
--list-time-stamp-types # print list of timestamp types for iface and exit
Capture stop conditions
-c <packet count> # stop after n packets (def: infinite)
-a <autostop cond.> ..., --autostop <autostop cond.> ...
duration:NUM - stop after NUM seconds
filesize:NUM - stop this file after NUM KB
files:NUM - stop after NUM files
packets:NUM - stop after NUM packets
Capture output
-b <ringbuffer opt.> ..., --ring-buffer <ringbuffer opt.>
duration:NUM - switch to next file after NUM secs
filesize:NUM - switch to next file after NUM KB
files:NUM - ringbuffer: replace after NUM files
packets:NUM - switch to next file after NUM packets
interval:NUM - switch to next file when the time is an exact multiple of NUM secs
Input file
-r <infile>, --read-file <infile>
Processing
-2 # perform a two-pass analysis
-M <packet count> # perform session auto reset
-R <read filter>, --read-filter <read filter> # packet Read filter in Wireshark display filter syntax (requires -2)
-Y <display filter>, --display-filter <display filter> # packet displaY filter in Wireshark display filter syntax
-n # disable all name resolutions (def: all enabled)
-N <name resolve flags> # enable specific name resolution(s): "mnNtdv"
-d <layer_type>==<selector>,<decode_as_protocol> ... # "Decode As", see the man page for details Example: tcp.port==8888,http
-H <hosts file> # read a list of entries from a hosts file, which will then be written to a capture file. (Implies -W n)
--enable-protocol <proto_name> # enable dissection of proto_name
--disable-protocol <proto_name> # disable dissection of proto_name
--enable-heuristic <short_name> # enable dissection of heuristic protocol
--disable-heuristic <short_name> # disable dissection of heuristic protocol
Output
-w <outfile|-> # write packets to a pcapng-format file named "outfile" (or '-' for stdout)
--capture-comment <comment> # set the capture file comment, if supported
-C <config profile> # start with specified configuration profile
-F <output file type> # set the output file type, default is pcapng; an empty "-F" option will list the file types
-V # add output of packet tree (Packet Details)
-O <protocols> # Only show packet details of these protocols, comma separated
-P, --print # print packet summary even when writing to a file
-S <separator> # the line separator to print between packets
-x # add output of hex and ASCII dump (Packet Bytes)
-T pdml|ps|psml|json|jsonraw|ek|tabs|text|fields|? # format of text output (def: text)
-j <protocolfilter> # protocols layers filter if -T ek|pdml|json selected (e.g. "ip ip.flags text", filter does not expand child nodes, unless child is specified also in the filter)
-J <protocolfilter> # top level protocol filter if -T ek|pdml|json selected (e.g. "http tcp", filter which expands all child nodes)
-e <field> # field to print if -Tfields selected (e.g. tcp.port, _ws.col.Info) this option can be repeated to print multiple fields
-E<fieldsoption>=<value> # set options for output when -Tfields selected:
bom=y|n # print a UTF-8 BOM
header=y|n # switch headers on and off
separator=/t|/s|<char> # select tab, space, printable character as separator
occurrence=f|l|a # print first, last or all occurrences of each field
aggregator=,|/s|<char> # select comma, space, printable character as aggregator
quote=d|s|n # select double, single, no quotes for values
-t a|ad|adoy|d|dd|e|r|u|ud|udoy # output format of time stamps (def: r: rel. to first)
-u s|hms # output format of seconds (def: s: seconds)
-l # flush standard output after each packet
-q # be more quiet on stdout (e.g. when using statistics)
-Q # only log true errors to stderr (quieter than -q)
-g # enable group read access on the output file(s)
-W n # Save extra information in the file, if supported. n = write network address resolution information
-X <key>:<value> # eXtension options, see the man page for details
-U tap_name # PDUs export mode, see the man page for details
-z <statistics> # various statistics, see the man page for details
--export-objects <protocol>,<destdir> # save exported objects for a protocol to a directory named "destdir"
--color # color output text similarly to the Wireshark GUI, requires a terminal with 24-bit color support Also supplies color attributes to pdml and psml formats (Note that attributes are nonstandard)
--no-duplicate-keys # If -T json is specified, merge duplicate keys in an object into a single key with as value a json array containing all values
--elastic-mapping-filter <protocols> # If -G elastic-mapping is specified, put only the specified protocols within the mapping file
Miscellaneous
-h, --help # display this help and exit
-v, --version # display version info and exit
-o <name>:<value> ... # override preference setting
-K <keytab> # keytab file to use for kerberos decryption
-G [report] # dump one of several available reports and exit; default report="fields"; use "-G help" for more help
COLOR
base
# normal
m=0; for i in {16..128}; do echo -en "\e[${m};${i}m${i}\e[0m" ; done; echo
# bold
m=1; for i in {16..128}; do echo -en "\e[${m};${i}m${i}\e[0m" ; done; echo
# all
for j in {0..5}; do echo "- ${j}"; for i in {16..256}; do echo -en "\e[${j};${i}m${i}\e[0m" ; done; echo; done; echo
256
for i in {16..255}; do echo -en "$i \e[38;5;${i}m#\e[0m" ; done; echo
Installation
yay -S maxima # xmaxima
yay -S ttf-mathtype wxmaxima # wxmaxima
USB
RTL8821CU
https://github.com/brektrou/rtl8821CU
RTL88x2BU / RTL8822BU
manjaro
https://github.com/cilynx/rtl88x2BU_WiFi_linux_v5.3.1_27678.20180430_COEX20180427-5959
ubuntu
INSTALL
MANJARO
yay -S autofs sshfs
CONFIGURATION
SSHFS
SSH KEY
Do not forget to put the root key on the remote server !!
autofs uses root rights to connect
MANJARO
/etc/autofs/auto.master.d/cluster.autofs
echo "/home/cluster /etc/autofs/auto.sshfs uid=1000,gid=1000, --timeout=30, --ghost" | sudo tee /etc/autofs/auto.master.d/cluster.autofs
/etc/autofs/auto.sshfs
echo "node1 -fstype=fuse,port=2002,rw,allow_other :sshfs\#root@node1\:/" | sudo tee /etc/autofs/auto.sshfs
TEST SSHFS
path=/tmp/node1
mkdir -p ${path}
sshfs root@node1:/ ${path}
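# after editing the maps, restart autofs & trigger the mount (assumes the systemd unit is named autofs)
sudo systemctl restart autofs
ls /home/cluster/node1 # accessing the path triggers the automount defined above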
MAN
systemd-resolve [OPTIONS...] HOSTNAME|ADDRESS...
systemd-resolve [OPTIONS...] --service [[NAME] TYPE] DOMAIN
systemd-resolve [OPTIONS...] --openpgp EMAIL@DOMAIN...
systemd-resolve [OPTIONS...] --statistics
systemd-resolve [OPTIONS...] --reset-statistics
Resolve domain names, IPv4 and IPv6 addresses, DNS records, and services.
-h --help # Show this help
--version # Show package version
--no-pager # Do not pipe output into a pager
-4 # Resolve IPv4 addresses
-6 # Resolve IPv6 addresses
-i --interface=INTERFACE # Look on interface
-p --protocol=PROTO|help # Look via protocol
-t --type=TYPE|help # Query RR with DNS type
-c --class=CLASS|help # Query RR with DNS class
--service # Resolve service (SRV)
--service-address=BOOL # Resolve address for services (default: yes)
--service-txt=BOOL # Resolve TXT records for services (default: yes)
--openpgp # Query OpenPGP public key
--tlsa # Query TLS public key
--cname=BOOL # Follow CNAME redirects (default: yes)
--search=BOOL # Use search domains for single-label names (default: yes)
--raw[=payload|packet] # Dump the answer as binary data
--legend=BOOL # Print headers and additional info (default: yes)
--statistics # Show resolver statistics
--reset-statistics # Reset resolver statistics
--status # Show link and server status
--flush-caches # Flush all local DNS caches
--reset-server-features # Forget learnt DNS server feature levels
--set-dns=SERVER # Set per-interface DNS server address
--set-domain=DOMAIN # Set per-interface search domain
--set-llmnr=MODE # Set per-interface LLMNR mode
--set-mdns=MODE # Set per-interface MulticastDNS mode
--set-dnsovertls=MODE # Set per-interface DNS-over-TLS mode
--set-dnssec=MODE # Set per-interface DNSSEC mode
--set-nta=DOMAIN # Set per-interface DNSSEC NTA
--revert # Revert per-interface configuration
TRICKS
LXC
bind DNS from host to containers
dynamically
# for selected interface
resolvectl --interface lxdbr0 dnssec set no
# add DNS configuration to lxd domain
resolvectl dns lxdbr0 "$(lxc network show lxdbr0 | sed -n 's|.*ipv4\.address: \(.*\)/.*|\1|p')"
resolvectl domain lxdbr0 '~lxd'
# old style
# systemd-resolve --interface lxdbr0 --set-dnssec no #~ old style
#sed -i 's|^.\?DNSSEC=.*$|DNSSEC=allow-downgrade|' /etc/systemd/resolved.conf # global / not too advisable
#systemd-resolve --interface lxdbr0 --set-domain '~lxd' --set-dns "$(lxc network show lxdbr0 | sed -n 's|.*ipv4\.address: \(.*\)/.*|\1|p')"
persistently
path="/etc/systemd/resolved.conf.d/"
[ -d "${path}" ] || mkdir -p "${path}"
cidr="$(lxc network show lxdbr0 | sed -n 's|.*ipv4\.address: \(.*\)/.*|\1|p')"
echo "# Configuration file for lxdbr0
[Resolve]
DNS=${cidr}
Domains=lxd
DNSSEC=no" > "${path}/lxd.conf"
start & enable service
[ "$(systemctl status systemd-resolved.service)" = "inactive" ] && systemctl start systemd-resolved.service
[ "$(systemctl is-enabled systemd-resolved.service)" ] && systemctl enable systemd-resolved.service
test
resolvectl query sp20-www.lxd
#systemd-resolve -i lxdbr0 sp20-www.lxd
Use TAB to select options & values !!
journalctl -p emerg|alert|crit|err|warning|notice|info|debug # print only messages of the given level
-u $service # show logs for unit apache2
_PID=1 # show journal for one PID
<command> # show journal for one command (ex: /usr/sbin/apache2)
OTHERS
systemd-cgls [OPTIONS...] [CGROUP...]
Recursively show control group contents
-a --all # Show all groups, including empty
-u --unit # Show the subtrees of specified system units
--user-unit # Show the subtrees of specified user units
-l --full # Do not ellipsize output
-k # Include kernel threads in output
-M --machine= # Show container
systemd-cgtop [OPTIONS...] [CGROUP]
Show top control groups by their resource usage
-p --order=path # Order by path
-t --order=tasks # Order by number of tasks/processes
-c --order=cpu # Order by CPU load (default)
-m --order=memory # Order by memory load
-i --order=io # Order by IO load
-r --raw # Provide raw (not human-readable) numbers
--cpu=percentage # Show CPU usage as percentage (default)
--cpu=time # Show CPU usage as time
-P # Count userspace processes instead of tasks (excl. kernel)
-k # Count all processes instead of tasks (incl. kernel)
--recursive=BOOL # Sum up process count recursively
-d --delay=DELAY # Delay between updates
-n --iterations=N # Run for N iterations before exiting
-b --batch # Run in batch mode, accepting no input
--depth=DEPTH # Maximum traversal depth (default: 3)
-M --machine= # Show container
systemd-analyze [OPTIONS...] {COMMAND} ...
Profile systemd, show unit dependencies, check unit files
-h --help # Show this help
--version # Show package version
--no-pager # Do not pipe output into a pager
--system # Operate on system systemd instance
--user # Operate on user systemd instance
-H --host=[USER@]HOST # Operate on remote host
-M --machine=CONTAINER # Operate on local container
--order # Show only order in the graph
--require # Show only requirement in the graph
--from-pattern=GLOB # Show only origins in the graph
--to-pattern=GLOB # Show only destinations in the graph
--fuzz=SECONDS # Also print services which finished SECONDS earlier than the latest in the branch
--man[=BOOL] # Do [not] check for existence of man pages
--generators[=BOOL] # Do [not] run unit generators (requires privileges)
Commands:
time # Print time spent in the kernel
blame # Print list of running units ordered by time to init
critical-chain [UNIT...] # Print a tree of the time critical chain of units
plot # Output SVG graphic showing service initialization
dot [UNIT...] # Output dependency graph in man:dot(1) format
log-level [LEVEL] # Get/set logging threshold for manager
log-target [TARGET] # Get/set logging target for manager
dump # Output state serialization of service manager
syscall-filter [NAME...] # Print list of syscalls in seccomp filter
verify FILE... # Check unit files for correctness
calendar SPEC... # Validate repetitive calendar time events
service-watchdogs [BOOL] # Get/set service watchdog state
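# a hedged boot-profiling sketch with the commands above
systemd-analyze time # overall kernel + userspace startup time
systemd-analyze blame | head # slowest units first
systemd-analyze critical-chain multi-user.target # time-critical chain for one target
systemd-analyze plot > boot.svg # SVG timeline of service initialization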
jq
path
# get path for a specific value
paths as $path | select(getpath($path) == "10.0.0.159") | $path
# get paths in string format
[paths|map(if type=="number" then "[]" else tostring end)|join(".")|split(".[]")|join("[]")]|unique|map("."+.)|.[]
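# worked example of the value-path lookup above (inline sample data)
data='{"net":{"eth0":{"addresses":[{"address":"10.0.0.159"}]}}}'
echo "$data" | jq -c 'paths as $path | select(getpath($path) == "10.0.0.159") | $path'
# output: ["net","eth0","addresses",0,"address"]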
insert data
data='{"ipv4":"192.168.0.1"}
{"ipv4":"192.168.0.100"}
{"ipv4":"192.168.0.102"}'
echo "$data" | jq -r 'select(.ipv4 == "192.168.0.1") |= . + {"hostname":"toto"}'
LXC
pretty print
# pretty print
lxc list --format=json $ctname$ | jq -C
search
# search in keys recursively & get values for key
lxc list --format json|jq -r '..|.hwaddr?|select(.!=null and .!="")'
# search in keys recursively & get flatten path
lxc list --format json | jq -c 'paths | select(.[-1] == "hwaddr")'
# search by type
jq 'path(recurse(if type|. == "array" or . =="object" then .[] else empty end))'
# search image by alias
lxc image list --format=json | jq -r '.[].aliases[] | select(.name | test("'-1635.*$'")).name'
name
# print name
lxc list --format=json | jq -r '.[].name'
# print selected name for exact name alpine
lxc list --format=json | jq -r '.[] | select(.name == "alpine311").name'
# print selected name for alpine.* in regexp format
lxc list --format=json | jq -r '.[] | select(.name | test("alpine.*")).name'
# display names of running containers
lxc list --format=json | jq -r '.[] | select(.status == "Running").name'
# display names of running containers with a selected name
lxc list --format=json | jq -r '.[] | select(.status == "Running") | select(.name == "alpine314").name'
# display name of containers which have attached profile $profile
lxc list --format=json | jq -r '.[] | select(.profiles | any(contains("'$profile'"))).name'
IP
Display first IP address for specified: interface
# Display IP address of the first network interface of containers which have an interface named 'eth0'
lxc list --format=json | jq -r '.[].state.network.eth0.addresses[0].address'
# Display IP mac address & IP address for containers which have eth0 named interface
lxc list --format json|jq '.[].state.network.eth0 | {(.hwaddr): .addresses[0].address}'
Display IP address for specified: name, scope, family
Display IP address for container named 'alpine311$' with network interface='eth0', with scope='global' & family='inet'
lxc list --format=json alpine311$ | jq -r '.[].state.network.eth0?.addresses[]
| select(.scope == "global" and .family == "inet")
| .address'
Display pairs 'name + Ip address' for specified: interface, family
Display 'name + address' for containers with network, interface='eth0', scope='global' & family='inet'
lxc list --format=json | jq -r '.[] |
select (.state.network != null) |
(.name + " " + (
.state.network.eth0.addresses[] |
select(.family == "inet") |
.address
)
)'
Display profile names and users in json format
lxc profile list -f json|jq -r '.[] | (.name, .used_by)'
Display name, ipv4, hwaddr for running containers with network in json format
lxc list --format=json | jq -r '.[] |
select (.state.network != null) |
{
"name": .name,
"ip": (
.state.network.eth0.addresses[] |
select(.family == "inet") |
.address
),
"hwaddr": .state.network.eth0.hwaddr
}'
EXAMPLES
CONTAINER
BACKUP
# backup server
CMD = /usr/local/bs/backup-server
OPTS = --vz --vz-dump --vz-cleanlog
LOG_FILE = /var/log/server/cron-backup-server
DATE = date +%Y%m%d-%T
# every week, on Monday at 02:00
#*/1 * * * * root echo "$($DATE) /usr/local/bs/backup-server --vz --vz-dump" >> $LOG_FILE
0 2 * * 1 root echo "$($DATE) /usr/local/bs/backup-server --vz --vz-dump --vz-cleanlog" >> $LOG_FILE
# every week, on Monday at 02:01
1 2 * * 1 root test -x $CMD && $CMD $OPTS >> $LOG_FILE
55 2 * * 1 root echo "$($DATE) /usr/sbin/vzctl stop 105" >> $LOG_FILE
56 2 * * 1 root /usr/sbin/vzctl stop 105 >> $LOG_FILE
# every hour
#0 */1 * * * root test -x $CMD && $CMD $OPT
MARIADB
# cron to dump mariadb databases
CMD = /usr/local/bs/mysql-dump-slave
OPTS = "db_pwd=txccR_OH2Goal9 path2=/var/share/mariadb/save"
LOG_FILE = /var/log/server/cron-mariadb
DATE = date +%Y%m%d-%T
0 1 * * * root echo "$($DATE) /usr/local/bs/mysql-dump-slave db_pwd= path2=/var/share/mariadb/save" >> $LOG_FILE # every day at 01:00
1 1 * * * root test -x $CMD && $CMD $OPTS >> $LOG_FILE # every day at 01:01
SERVER
BACKUP
# cron to sync server backup from HOST to local
HOST = ns388803
DATE = date +%Y%m%d-%T
LOG_FILE = /var/log/server/cron-ns388803-backup
# every day at 03:00
0 3 * * * root echo "$($DATE) rsync -e 'ssh -p2002' -av root@${HOST}:/save/backup/ /save/${HOST}/backup/" >> $LOG_FILE
# every day at 03:01
1 3 * * * root rsync -e 'ssh -p2002' -av root@${HOST}:/save/backup/ /save/${HOST}/backup/ >> $LOG_FILE
MARIADB
# cron to sync mariadb dump from HOST to local
HOST = ns388803
DATE = date +%Y%m%d-%T
LOG_FILE = /var/log/server/cron-ns388803-mariadb
# every day at 03:30
30 3 * * * root echo "$($DATE) rsync -e 'ssh -p2002' -av root@${HOST}:/save/mariadb/ /save/${HOST}/mariadb/" >> $LOG_FILE
# every day at 03:31
31 3 * * * root rsync -e 'ssh -p2002' -av root@${HOST}:/save/mariadb/ /save/${HOST}/mariadb/ >> $LOG_FILE
# cron to sync mail from HOST to local
HOST = ns388803
DATE = date +%Y%m%d-%T
LOG_FILE = /var/log/server/cron-ns388803-mail
# every day at 03:50
50 3 * * * root echo "$($DATE) rsync -e 'ssh -p2002' -av root@${HOST}:/vm/share/mail/ /save/${HOST}/mail/" >> $LOG_FILE
# every day at 03:51
51 3 * * * root rsync -e 'ssh -p2002' -av root@${HOST}:/vm/share/mail/ /save/${HOST}/mail/ >> $LOG_FILE
Create an archive of files from a named tree
Creates an archive of the specified format containing the tree structure for the named tree, and writes it out to the standard output. If <prefix> is specified it is prepended to the filenames in the archive
git archive behaves differently when given a tree ID versus when given a commit ID or tag ID. In the first case the current time is used as the modification time of each file in the archive. In the latter case the commit time as recorded in the referenced commit object is used instead. Additionally the commit ID is stored in a global extended pax header if the tar format is used; it can be extracted using git get-tar-commit-id. In ZIP files it is stored as a file comment
git archive [--format=<fmt>] [--list] [--prefix=<prefix>/] [<extra>] [-o <file> | --output=<file>] [--worktree-attributes] [--remote=<repo> [--exec=<git-upload-archive>]] <tree-ish> [<path>...]
--format=<fmt> # Format of the resulting archive: tar or zip. If this option is not given, and the output file is specified, the format is inferred from the filename if possible (e.g. writing to "foo.zip" makes the output to be in the zip format). Otherwise the output format is tar
-l, --list # Show all available formats
-v, --verbose # Report progress to stderr
--prefix=<prefix>/ # Prepend <prefix>/ to each filename in the archive
-o <file>, --output=<file> # Write the archive to <file> instead of stdout
--worktree-attributes # Look for attributes in .gitattributes files in the working tree as well (see the section called “ATTRIBUTES”)
<extra> # This can be any options that the archiver backend understands. See next section
--remote=<repo> # Instead of making a tar archive from the local repository, retrieve a tar archive from a remote repository. Note that the remote repository may place restrictions on which sha1 expressions may be allowed in <tree-ish>. See git-upload-archive(1) for details
--exec=<git-upload-archive> # Used with --remote to specify the path to the git-upload-archive on the remote side
<tree-ish> # The tree or commit to produce an archive for
<path> # Without an optional path parameter, all files and subdirectories of the current working directory are included in the archive. If one or more paths are specified, only these are included
EXAMPLES
# Create a tar archive that contains the contents of the latest commit on the current branch, and extract it in the /var/tmp/junk directory
git archive --format=tar --prefix=junk/ HEAD | (cd /var/tmp/ && tar xf -)
# Create a compressed tarball for v1.4.0 release
git archive --format=tar --prefix=git-1.4.0/ v1.4.0 | gzip >git-1.4.0.tar.gz
# Same as above, but using the builtin tar.gz handling
git archive --format=tar.gz --prefix=git-1.4.0/ v1.4.0 >git-1.4.0.tar.gz
# Same as above, but the format is inferred from the output file
git archive --prefix=git-1.4.0/ -o git-1.4.0.tar.gz v1.4.0
# Create a compressed tarball for v1.4.0 release, but without a global extended pax header
git archive --format=tar --prefix=git-1.4.0/ v1.4.0^{tree} | gzip >git-1.4.0.tar.gz
# Put everything in the current head’s Documentation/ directory into git-1.4.0-docs.zip, with the prefix git-docs/
git archive --format=zip --prefix=git-docs/ HEAD:Documentation/ > git-1.4.0-docs.zip
# Create a Zip archive that contains the contents of the latest commit on the current branch. Note that the output format is inferred by the extension of the output file
git archive -o latest.zip HEAD
# Configure a "tar.xz" format for making LZMA-compressed tarfiles. Use it with --format=tar.xz, or by naming an output file like -o foo.tar.xz
git config tar.tar.xz.command "xz -c"
TRICKS
Create a zip containing only your changed files, while keeping the directory tree they live in
git archive -o delta.zip develop $(git diff --name-only V1.0.0^)
Drawback of this method: deleted files are not removed, and the command above will fail on them.
So add a filter on the files. Example taking only files that were Added, Copied, Modified, Renamed or Type-changed (ACMRT):
git archive -o delta.zip develop $(git diff --name-only --diff-filter=ACMRT V1.0.0^)
Reapply commits on top of another base tip
If <branch> is specified, git rebase will perform an automatic git switch <branch> before doing anything else. Otherwise it remains on the current branch
      A---B---C topic
     /
D---E---F---G master
git checkout topic && git rebase master
git rebase master topic
# rebase (from) master (for) topic
              A'--B'--C' topic
             /
D---E---F---G master
----------------------------------------------------
o---o---o---o---o master
     \
      o---o---o---o---o next
                         \
                          o---o---o topic
git rebase --onto master next topic
# rebase (onto) master (from) next (for) topic
o---o---o---o---o master
    | \
    |  o'--o'--o' topic
     \
      o---o---o---o---o next
git rebase [-i | --interactive] [<options>] [--exec <cmd>] [--onto <newbase> | --keep-base] [<upstream> [<branch>]]
git rebase [-i | --interactive] [<options>] [--exec <cmd>] [--onto <newbase>] --root [<branch>]
git rebase (--continue | --skip | --abort | --quit | --edit-todo | --show-current-patch)
--onto <newbase> # Starting point at which to create the new commits. If the --onto option is not specified, the starting point is <upstream>. May be any valid commit, and not just an existing branch name
--keep-base # Set the starting point at which to create the new commits to the merge base of <upstream> <branch>. Running git rebase --keep-base <upstream> <branch> is equivalent to running git rebase --onto <upstream>... <upstream>
<upstream> # Upstream branch to compare against. May be any valid commit, not just an existing branch name. Defaults to the configured upstream for the current branch
<branch> # Working branch; defaults to HEAD
--continue # Restart the rebasing process after having resolved a merge conflict (a minimal flow is sketched at the end of this section)
--abort # Abort the rebase operation and reset HEAD to the original branch. If <branch> was provided when the rebase operation was started, then HEAD will be reset to <branch>. Otherwise HEAD will be reset to where it was when the rebase operation was started
--quit # Abort the rebase operation but HEAD is not reset back to the original branch. The index and working tree are also left unchanged as a result
--keep-empty # Keep the commits that do not change anything relative to their parents in the result
--allow-empty-message # By default, rebasing commits with an empty message will fail. This option overrides that behavior, allowing commits with empty messages to be rebased
--skip # Restart the rebasing process by skipping the current patch
--edit-todo # Edit the todo list during an interactive rebase
--show-current-patch # Show the current patch in an interactive rebase or when rebase is stopped because of conflicts. This is the equivalent of git show REBASE_HEAD
-m, --merge # Use merging strategies to rebase. When the recursive (default) merge strategy is used, this allows rebase to be aware of renames on the upstream side
-s <strategy>, --strategy=<strategy> # Use the given merge strategy. If there is no -s option git merge-recursive is used instead. This implies --merge
-X <strategy-option>, --strategy-option=<strategy-option> # Pass the <strategy-option> through to the merge strategy. This implies --merge and, if no strategy has been specified, -s recursive. Note the reversal of ours and theirs as noted above for the -m option
--rerere-autoupdate, --no-rerere-autoupdate # Allow the rerere mechanism to update the index with the result of auto-conflict resolution if possible
-S[<keyid>], --gpg-sign[=<keyid>] # GPG-sign commits. The keyid argument is optional and defaults to the committer identity; if specified, it must be stuck to the option without a space
-q, --quiet # Be quiet. Implies --no-stat
-v, --verbose # Be verbose. Implies --stat
--stat # Show a diffstat of what changed upstream since the last rebase. The diffstat is also controlled by the configuration option rebase.stat
-n, --no-stat # Do not show a diffstat as part of the rebase process
--no-verify # This option bypasses the pre-rebase hook. See also githooks(5)
--verify # Allows the pre-rebase hook to run, which is the default. This option can be used to override --no-verify. See also githooks(5)
-C<n> # Ensure at least <n> lines of surrounding context match before and after each change. When fewer lines of surrounding context exist they all must match. By default no context is ever ignored
--no-ff, --force-rebase, -f # Individually replay all rebased commits instead of fast-forwarding over the unchanged ones. This ensures that the entire history of the rebased branch is composed of new commits
--fork-point, --no-fork-point # Use reflog to find a better common ancestor between <upstream> and <branch> when calculating which commits have been introduced by <branch>
--ignore-whitespace, --whitespace=<option> # These flags are passed to the git apply program (see git-apply(1)) that applies the patch
--committer-date-is-author-date, --ignore-date # These flags are passed to git am to easily change the dates of the rebased commits (see git-am(1))
--signoff # Add a Signed-off-by: trailer to all the rebased commits. Note that if --interactive is given then only commits marked to be picked, edited or reworded will have the trailer added
-i, --interactive # Make a list of the commits which are about to be rebased. Let the user edit that list before rebasing. This mode can also be used to split commits (see SPLITTING COMMITS below)
-r, --rebase-merges[=(rebase-cousins|no-rebase-cousins)] # By default, a rebase will simply drop merge commits from the todo list, and put the rebased commits into a single, linear branch. With --rebase-merges, the rebase will instead try to preserve the branching structure within the commits that are to be rebased, by recreating the merge commits
-p, --preserve-merges # [DEPRECATED: use --rebase-merges instead] Recreate merge commits instead of flattening the history by replaying commits a merge commit introduces. Merge conflict resolutions or manual amendments to merge commits are not preserved
-x <cmd>, --exec <cmd> # Append "exec <cmd>" after each line creating a commit in the final history. <cmd> will be interpreted as one or more shell commands. Any command that fails will interrupt the rebase, with exit code 1
--root # Rebase all commits reachable from <branch>, instead of limiting them with an <upstream>. This allows you to rebase the root commit(s) on a branch
--autosquash, --no-autosquash # When the commit log message begins with "squash! ..." (or "fixup! ..."), and there is already a commit in the todo list that matches the same ..., automatically modify the todo list of rebase -i so that the commit marked for squashing comes right after the commit to be modified, and change the action of the moved commit from pick to squash (or fixup); see the autosquash sketch after this options list
--autostash, --no-autostash # Automatically create a temporary stash entry before the operation begins, and apply it after the operation ends. This means that you can run rebase on a dirty worktree. However, use with care: the final stash application after a successful rebase might result in non-trivial conflicts
--reschedule-failed-exec, --no-reschedule-failed-exec # Automatically reschedule exec commands that failed. This only makes sense in interactive mode (or when an --exec option was provided)
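A minimal autosquash sketch (<sha> is the commit to amend):
git commit --fixup=<sha> # records a commit whose message starts with "fixup! ..."
git rebase -i --autosquash <sha>^ # the todo list opens with the fixup moved right after <sha>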
INCOMPATIBLE OPTIONS
The following options:
--committer-date-is-author-date
--ignore-date
--whitespace
--ignore-whitespace
-C
are incompatible with the following options:
--merge
--strategy
--strategy-option
--allow-empty-message
--[no-]autosquash
--rebase-merges
--preserve-merges
--interactive
--exec
--keep-empty
--edit-todo
--root when used in combination with --onto
In addition, the following pairs of options are incompatible:
--preserve-merges and --interactive
--preserve-merges and --signoff
--preserve-merges and --rebase-merges
--keep-base and --onto
--keep-base and --root
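A minimal sketch of the conflict-resolution flow referenced by --continue/--skip/--abort:
git rebase master # stops at the first conflicting commit
# edit the conflicted files, then
git add <file> # mark the conflict as resolved
git rebase --continue # replay the remaining commits
git rebase --skip # or drop the current patch
git rebase --abort # or give up and restore the original branch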
Switch branches
Switch to a specified branch. The working tree and the index are updated to match the branch. All new commits will be added to the tip of this branch
Optionally a new branch could be created with either -c, -C, automatically from a remote branch of same name (see --guess), or detach the working tree from any branch with --detach, along with switching
Switching branches does not require a clean index and working tree (i.e. no differences compared to HEAD). The operation is aborted however if the operation leads to loss of local changes, unless told otherwise with --discard-changes or --merge
git switch [<options>] [--no-guess] <branch>
git switch [<options>] --detach [<start-point>]
git switch [<options>] (-c|-C) <new-branch> [<start-point>]
git switch [<options>] --orphan <new-branch>
<branch> # Branch to switch to
<new-branch> # Name for the new branch
<start-point> # The starting point for the new branch. Specifying a <start-point> allows you to create a branch based on some other point in history than where HEAD currently points. (Or, in the case of --detach, allows you to inspect and detach from some other point.)
-c <new-branch>, --create <new-branch> # Create a new branch named <new-branch> starting at <start-point> before switching to the branch
-C <new-branch>, --force-create <new-branch> # Similar to --create except that if <new-branch> already exists, it will be reset to <start-point>
-d, --detach # Switch to a commit for inspection and discardable experiments
--guess, --no-guess # If <branch> is not found but there does exist a tracking branch in exactly one remote (call it <remote>) with a matching name, treat as equivalent to
git switch -c <branch> --track <remote>/<branch>
-f, --force # An alias for --discard-changes
--discard-changes # Proceed even if the index or the working tree differs from HEAD. Both the index and working tree are restored to match the switching target
-m, --merge # If you have local modifications to files that differ between the current branch and the branch you are switching to, the command normally refuses to switch in order to preserve your modifications. With this option, a three-way merge between the current branch, your working tree contents, and the new branch is done instead, and you end up on the new branch
--conflict=<style> # The same as --merge option above, but changes the way the conflicting hunks are presented, overriding the merge.conflictStyle configuration variable
--progress, --no-progress # Progress status is reported on the standard error stream by default when it is attached to a terminal, unless --quiet is specified. This flag enables progress reporting even if not attached to a terminal, regardless of --quiet
-t, --track # When creating a new branch, set up "upstream" configuration. -c is implied. See --track in git-branch(1) for details
--no-track # Do not set up "upstream" configuration, even if the branch.autoSetupMerge configuration variable is true
--orphan <new-branch> # Create a new orphan branch, named <new-branch>. All tracked files are removed
--ignore-other-worktrees # git switch refuses when the wanted ref is already checked out by another worktree. This option makes it check the ref out anyway. In other words, the ref can be held by more than one worktree
--recurse-submodules, --no-recurse-submodules # Using --recurse-submodules will update the content of all initialized submodules according to the commit recorded in the superproject
EXAMPLES
The following command switches to the "master" branch:
git switch master
After working in the wrong branch, switching to the correct branch would be done using:
git switch mytopic
However, your "wrong" branch and correct "mytopic" branch may differ in files that you have modified locally, in which case the above switch would fail like this:
git switch mytopic
error: You have local changes to 'frotz'; not switching branches
You can give the -m flag to the command, which would try a three-way merge:
git switch -m mytopic
Auto-merging frotz
After this three-way merge, the local modifications are not registered in your index file, so git diff would show you what changes you made since the tip of the new branch
To switch back to the previous branch before we switched to mytopic (i.e. "master" branch):
git switch -
You can grow a new branch from any commit. For example, switch to "HEAD~3" and create branch "fixup":
git switch -c fixup HEAD~3
Switched to a new branch 'fixup'
If you want to start a new branch from a remote branch of the same name:
git switch new-topic
Branch 'new-topic' set up to track remote branch 'new-topic' from 'origin'
Switched to a new branch 'new-topic'
To check out commit HEAD~3 for temporary inspection or experiment without creating a new branch:
git switch --detach HEAD~3
HEAD is now at 9fc9555312 Merge branch 'cc/shared-index-permbits'
git-restore - Restore working tree files
Restore specified paths in the working tree with some contents from a restore source. If a path is tracked but does not exist in the restore source, it will be removed to match the source
The command can also be used to restore the content in the index with --staged, or restore both the working tree and the index with --staged --worktree
By default, the restore sources for working tree and the index are the index and HEAD respectively. --source could be used to specify a commit as the restore source
git restore [<options>] [--source=<tree>] [--staged] [--worktree] [--] <pathspec>...
git restore [<options>] [--source=<tree>] [--staged] [--worktree] --pathspec-from-file=<file> [--pathspec-file-nul]
git restore (-p|--patch) [<options>] [--source=<tree>] [--staged] [--worktree] [--] [<pathspec>...]
-s <tree>, --source=<tree> # Restore the working tree files with the content from the given tree. It is common to specify the source tree by naming a commit, branch or tag associated with it.
-p, --patch # Interactively select hunks in the difference between the restore source and the restore location. See the “Interactive Mode” section of git-add(1) to learn how to operate the --patch mode.
-W, --worktree, -S, --staged # Specify the restore location. If neither option is specified, by default the working tree is restored. Specifying --staged will only restore the index. Specifying both restores both.
-q, --quiet # Quiet, suppress feedback messages. Implies --no-progress.
--progress, --no-progress # Progress status is reported on the standard error stream by default when it is attached to a terminal, unless --quiet is specified. This flag enables progress reporting even if not attached to a terminal, regardless of --quiet.
--ours, --theirs # When restoring files in the working tree from the index, use stage #2 (ours) or #3 (theirs) for unmerged paths.
-m, --merge # When restoring files on the working tree from the index, recreate the conflicted merge in the unmerged paths.
--conflict=<style> # The same as --merge option above, but changes the way the conflicting hunks are presented, overriding the merge.conflictStyle configuration variable. Possible values are "merge" (default) and "diff3" (in addition to what is shown by # "merge" style, shows the original contents).
--ignore-unmerged # When restoring files on the working tree from the index, do not abort the operation if there are unmerged entries and neither --ours, --theirs, --merge or --conflict is specified. Unmerged paths on the working tree are left alone.
--ignore-skip-worktree-bits # In sparse checkout mode, the default is to only update entries matched by <pathspec> and sparse patterns in $GIT_DIR/info/sparse-checkout. This option ignores the sparse patterns and unconditionally restores any files in <pathspec>.
--overlay, --no-overlay # In overlay mode, the command never removes files when restoring. In no-overlay mode, tracked files that do not appear in the --source tree are removed, to make them match <tree> exactly. The default is no-overlay mode.
--pathspec-from-file=<file> # Pathspec is passed in <file> instead of commandline args. If <file> is exactly - then standard input is used. Pathspec elements are separated by LF or CR/LF. Pathspec elements can be quoted as explained for the configuration variable core.quotePath (see git-config(1)). See also --pathspec-file-nul and global --literal-pathspecs.
--pathspec-file-nul # Only meaningful with --pathspec-from-file. Pathspec elements are separated with NUL character and all other characters are taken literally (including newlines and quotes).
-- # Do not interpret any more arguments as options.
<pathspec>... # Limits the paths affected by the operation.
EXAMPLES
The following sequence switches to the master branch, reverts the Makefile to two revisions back, deletes hello.c by mistake, and gets it back from the index
git switch master
git restore --source master~2 Makefile # take a file out of another commit
rm -f hello.c
git restore hello.c # restore hello.c from the index
If you want to restore all C source files to match the version in the index, you can say
git restore '*.c'
Note the quotes around *.c. The file hello.c will also be restored, even though it is no longer in the working tree, because the file globbing is used to match entries in the index (not in the working tree by the shell).
To restore all files in the current directory
git restore .
or to restore all working tree files with top pathspec magic (see gitglossary(7))
git restore :/
To restore a file in the index to match the version in HEAD (this is the same as using git-reset(1))
git restore --staged hello.c
or you can restore both the index and the working tree (this is the same as using git-checkout(1))
git restore --source=HEAD --staged --worktree hello.c
or the short form which is more practical but less readable:
git restore -s@ -SW hello.c
https://medium.com/@cq94/zfs-vous-connaissez-vous-devriez-1d2611e7dad6
TOC
chapter | designation |
---|---|
ADD | Adds the specified virtual devices to the given pool |
ATTACH | Attaches new_device to the existing device |
CLEAR | Clears device errors in a pool |
CREATE | Creates a new storage pool containing the virtual devices specified on the command line |
DESTROY | Destroys the given pool, freeing up any devices for other use |
DETACH | Detaches device from a mirror |
EVENTS | Lists all recent events generated by the ZFS kernel modules |
EXPORT | Exports the given pools from the system |
GET | Retrieves the given list of properties (or all properties if all is used) for the specified storage pool(s) |
HISTORY | Displays the command history of the specified pool(s) or all pools if no pool is specified |
IMPORT-LIST | Lists pools available to import |
IMPORT-ALL | Imports all pools found in the search directories |
IMPORT | Imports a specific pool |
IOSTAT | Displays I/O statistics for the given pools/vdevs |
LABELCLEAR | Removes ZFS label information from the specified device |
LIST | Lists the given pools along with a health status and space usage |
OFFLINE | Takes the specified physical device offline |
ONLINE | Brings the specified physical device online |
REGUID | Generates a new unique identifier for the pool |
REOPEN | Reopen all the vdevs associated with the pool |
REMOVE | Removes the specified device from the pool |
REPLACE | Replaces old_device with new_device |
SCRUB | Begins a scrub or resumes a paused scrub |
SET | Sets the given property on the specified pool |
SPLIT | Splits devices off pool creating newpool |
STATUS | Displays the detailed health status for the given pools |
UPGRADE-DISPLAY-NOT | Displays pools which do not have all supported features enabled and pools formatted using a legacy ZFS version number |
UPGRADE-DISPLAY | Displays legacy ZFS versions supported by the current software |
UPGRADE | Enables all supported features on the given pool |
PROPERTIES | Available properties |
ADD
Adds the specified virtual devices to the given pool
The vdev specification is described in the Virtual Devices section. The behavior of the -f option, and the device checks performed are described in the zpool create subcommand
zpool add [-fgLnP] [-o property=value] pool vdev...
-f # Forces use of vdevs, even if they appear in use or specify a conflicting replication level
-g # Display vdev GUIDs instead of the normal device names
-L # Display real paths for vdevs resolving all symbolic links
-n # Displays the configuration that would be used without actually adding the vdevs
-P # Display real paths for vdevs instead of only the last component of the path
-o property=value # Sets the given pool properties
ATTACH
Attaches new_device to the existing device
The existing device cannot be part of a raidz configuration. If device is not currently part of a mirrored configuration, device automatically transforms into a two-way mirror of device and new_device. If device is part of a two-way mirror, attaching new_device creates a three-way mirror, and so on. In either case, new_device begins to resilver immediately
zpool attach [-f] [-o property=value] pool device new_device
-f # Forces use of new_device, even if it appears to be in use
-o property=value # Sets the given pool properties
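Sketch turning a single-disk pool into a two-way mirror (pool name 'tank' and device names are examples):
zpool attach tank /dev/sdb /dev/sdc # /dev/sdc starts to resilver immediately
zpool status tank # follow the resilver progress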
CLEAR
Clears device errors in a pool
If no arguments are specified, all device errors within the pool are cleared. If one or more devices is specified, only those errors associated with the specified device or devices are cleared
zpool clear pool [device]
CREATE
Creates a new storage pool containing the virtual devices specified on the command line
The pool name must begin with a letter, and can only contain alphanumeric characters as well as underscore ("_"), dash ("-"), colon (":"), space (" "), and period ("."). The pool names mirror, raidz, spare and log are reserved, as are names beginning with the pattern c[0-9]. The vdev specification is described in the Virtual Devices section
zpool create [-dfn] [-m mountpoint] [-o property=value]... [-o feature@feature=value]... [-O file-system-property=value]... [-R root] [-t tname] pool vdev...
-d # Do not enable any features on the new pool
-f # Forces use of vdevs, even if they appear in use or specify a conflicting replication level
-m mountpoint # Sets the mount point for the root dataset
-n # Displays the configuration that would be used without actually creating the pool
-o property=value # Sets the given pool properties
-o feature@feature=value # Sets the given pool feature
-O file-system-property=value # Sets the given file system properties in the root file system of the pool
-R root # Equivalent to -o cachefile=none -o altroot=root
-t tname # Sets the in-core pool name to tname while the on-disk name will be the name specified as the pool name pool
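A few creation sketches (pool name 'tank' and devices are examples):
zpool create tank mirror /dev/sdb /dev/sdc # mirrored pool
zpool create -n tank raidz /dev/sdb /dev/sdc /dev/sdd # dry run, print the would-be layout
zpool create -m /data tank /dev/sdb # mount the root dataset on /data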
DESTROY
Destroys the given pool, freeing up any devices for other use
This command tries to unmount any active datasets before destroying the pool
zpool destroy [-f] pool
-f # Forces any active datasets contained within the pool to be unmounted
DETACH
Detaches device from a mirror
The operation is refused if there are no other valid replicas of the data
zpool detach pool device
EVENTS
Lists all recent events generated by the ZFS kernel modules
These events are consumed by the zed(8) and used to automate administrative tasks such as replacing a failed device with a hot spare. For more information about the subclasses and event payloads that can be generated see the zfs-events(5) man page
zpool events
-c # Clear all previous events
-f # Follow mode
-H # Scripted mode. Do not display headers, and separate fields by a single tab instead of arbitrary space
-v # Print the entire payload for each event
EXPORT
Exports the given pools from the system
All devices are marked as exported, but are still considered in use by other subsystems. The devices can be moved between systems (even those of different endianness) and imported as long as a sufficient number of devices are present
zpool export [-a] [-f] pool...
-a # Exports all pools imported on the system
-f # Forcefully unmount all datasets, using the unmount -f command
GET
Retrieves the given list of properties (or all properties if all is used) for the specified storage pool(s)
These properties are displayed with the following fields:
- name Name of storage pool
- property Property name
- value Property value
- source Property source, either 'default' or 'local'
zpool get [-Hp] [-o field[,field]...] all|property[,property]... pool...
-H # Scripted mode. Do not display headers, and separate fields by a single tab instead of arbitrary space
-o field # A comma-separated list of columns to display. name,property,value,source is the default value
-p # Display numbers in parsable (exact) values
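Usage sketch (pool name 'tank' is an example):
zpool get all tank # all properties with name/property/value/source columns
zpool get -H -o value size tank # single parsable value, handy in scripts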
HISTORY
Displays the command history of the specified pool(s) or all pools if no pool is specified
zpool history [-il] [pool]...
-i # Displays internally logged ZFS events in addition to user initiated events
-l # Displays log records in long format, which in addition to the standard format includes the user name, the hostname, and the zone in which the operation was performed
IMPORT-LIST
Lists pools available to import
zpool import [-D] [-c cachefile|-d dir]
-c cachefile # Reads configuration from the given cachefile that was created with the cachefile pool property
-d dir # Searches for devices or files in dir
-D # Lists destroyed pools only
IMPORT-ALL
Imports all pools found in the search directories
Identical to the previous command, except that all pools with a sufficient number of devices available are imported. Destroyed pools, pools that were previously destroyed with the zpool destroy command, will not be imported unless the -D option is specified
zpool import -a [-DfmN] [-F [-n] [-T] [-X]] [-c cachefile|-d dir] [-o mntopts] [-o property=value]... [-R root] [-s]
-a # Searches for and imports all pools found
-c cachefile # Reads configuration from the given cachefile that was created with the cachefile pool property
-d dir # Searches for devices or files in dir
-D # Imports destroyed pools only
-f # Forces import, even if the pool appears to be potentially active
-F # Recovery mode for a non-importable pool
-m # Allows a pool to import when there is a missing log device
-n # Used with the -F recovery option
-N # Import the pool without mounting any file systems
-o mntopts # Comma-separated list of mount options to use when mounting datasets within the pool
-o property=value # Sets the specified property on the imported pool
-R root # Sets the cachefile property to none and the altroot property to root
-s # Scan using the default search path, the libblkid cache will not be consulted
-X # Used with the -F recovery option
-T # Specify the txg to use for rollback
IMPORT
Imports a specific pool
A pool can be identified by its name or the numeric identifier. If newpool is specified, the pool is imported using the name newpool. Otherwise, it is imported with the same name as its exported name
zpool import [-Dfm] [-F [-n] [-t] [-T] [-X]] [-c cachefile|-d dir] [-o mntopts] [-o property=value]... [-R root] [-s] pool|id [newpool]
-c cachefile # Reads configuration from the given cachefile that was created with the cachefile pool property
-d dir # Searches for devices or files in dir. The -d option can be specified multiple times. This option is incompatible with the -c option.
-D # Imports destroyed pool. The -f option is also required
-f # Forces import, even if the pool appears to be potentially active
-F # Recovery mode for a non-importable pool
-m # Allows a pool to import when there is a missing log device
-n # Used with the -F recovery option
-o mntopts # Comma-separated list of mount options to use when mounting datasets within the pool
-o property=value # Sets the specified property on the imported pool
-R root # Sets the cachefile property to none and the altroot property to root
-s # Scan using the default search path, the libblkid cache will not be consulted
-X # Used with the -F recovery option
-T # Specify the txg to use for rollback
-t # Used with newpool
IOSTAT
Displays I/O statistics for the given pools/vdevs
You can pass in a list of pools, a pool and list of vdevs in that pool, or a list of any vdevs from any pool. If no items are specified, statistics for every pool in the system are shown. When given an interval, the statistics are printed every interval seconds until ^C is pressed. If count is specified, the command exits after count reports are printed. The first report printed is always the statistics since boot regardless of whether interval and count are passed. However, this behavior can be suppressed with the -y flag. Also note that the units of K, M, ... that are printed in the report are in base 1024. To get the raw values, use the -p flag
zpool iostat [[[-c SCRIPT] [-lq]]|-rw] [-T u|d] [-ghHLpPvy] [[pool...]|[pool vdev...]|[vdev...]] [interval [count]]
-c [SCRIPT1[,SCRIPT2]...] # Run a script (or scripts) on each vdev and include the output as a new column in the zpool iostat output
-T u|d # Display a time stamp
-g # Display vdev GUIDs instead of the normal device names
-H # Scripted mode
-L # Display real paths for vdevs resolving all symbolic links
-p # Display numbers in parsable (exact) values
-P # Display full paths for vdevs instead of only the last component of the path
-r # Print request size histograms for the leaf ZIOs
-v # Verbose statistics. Reports usage statistics for individual vdevs within the pool, in addition to the pool-wide statistics
-l # Include average latency statistics:
- total_wait: Average total IO time (queuing + disk IO time)
- disk_wait: Average disk IO time (time reading/writing the disk)
- syncq_wait: Average amount of time IO spent in synchronous priority queues. Does not include disk time
- asyncq_wait: Average amount of time IO spent in asynchronous priority queues. Does not include disk time
- scrub: Average queuing time in scrub queue. Does not include disk time
-q # Include active queue statistics
- syncq_read/write: Current number of entries in synchronous priority queues
- asyncq_read/write: Current number of entries in asynchronous priority queues
- scrubq_read: Current number of entries in scrub queue
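Usage sketch (pool name 'tank' is an example):
zpool iostat 5 # pool-wide statistics every 5 seconds until ^C
zpool iostat -v tank 5 3 # per-vdev statistics for 'tank', 3 reports
zpool iostat -y 5 # omit the first since-boot report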
LABELCLEAR
Removes ZFS label information from the specified device
The device must not be part of an active pool configuration
zpool labelclear [-f] device
-f # Treat exported or foreign devices as inactive
LIST
Lists the given pools along with a health status and space usage
If no pools are specified, all pools in the system are listed. When given an interval, the information is printed every interval seconds until ^C is pressed. If count is specified, the command exits after count reports are printed
zpool list [-HgLpPv] [-o property[,property]...] [-T u|d] [pool]... [interval [count]]
-g # Display vdev GUIDs instead of the normal device names
-H # Scripted mode. Do not display headers, and separate fields by a single tab instead of arbitrary space
-o property,... # Print only specified properties. Default list is name, size, alloc, free, fragmentation, expandsize, capacity, dedupratio, health, altroot
-L # Display real paths for vdevs resolving all symbolic links
-p # Display numbers in parsable (exact) values
-P # Display full paths for vdevs instead of only the last component of the path
-T u|d # Display a time stamp
-v # Verbose statistics
OFFLINE
Takes the specified physical device offline
While the device is offline, no attempt is made to read or write to the device. This command is not applicable to spares
zpool offline [-f] [-t] pool device...
-f # Force fault. Instead of offlining the disk, put it into a faulted state
-t # Temporary. Upon reboot, the specified physical device reverts to its previous state
ONLINE
Brings the specified physical device online
This command is not applicable to spares or cache devices
zpool online [-e] pool device...
-e # Expand the device to use all available space
REGUID
Generates a new unique identifier for the pool
You must ensure that all devices in this pool are online and healthy before performing this action
zpool reguid pool
REOPEN
Reopen all the vdevs associated with the pool
zpool reopen pool
REMOVE
Removes the specified device from the pool
This command currently only supports removing hot spares, cache, and log devices. A mirrored log device can be removed by specifying the top-level mirror for the log. Non-log devices that are part of a mirrored configuration can be removed using the zpool detach command. Non-redundant and raidz devices cannot be removed from a pool
zpool remove pool device...
REPLACE
Replaces old_device with new_device
This is equivalent to attaching new_device, waiting for it to resilver, and then detaching old_device
The size of new_device must be greater than or equal to the minimum size of all the devices in a mirror or raidz configuration
zpool replace [-f] [-o property=value] pool device [new_device]
-f # Forces use of new_device, even if it appears to be in use
-o property=value # Sets the given pool properties. See the Properties section for a list of valid properties that can be set
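Sketch replacing a failed disk (pool name 'tank' and devices are examples):
zpool replace tank /dev/sdb /dev/sdd # attach new device, resilver, then detach the old one
zpool status tank # follow the resilver progress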
SCRUB
Begins a scrub or resumes a paused scrub
The scrub examines all data in the specified pools to verify that it checksums correctly. For replicated (mirror or raidz) devices, ZFS automatically repairs any damage discovered during the scrub. The zpool status command reports the progress of the scrub and summarizes the results of the scrub upon completion
zpool scrub [-s | -p] pool...
-s # Stop scrubbing
-p # Pause scrubbing
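A scrub is easy to schedule from cron (pool name 'tank' is an example):
# every first day of the month at 4h00
0 4 1 * * root zpool scrub tank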
SET
Sets the given property on the specified pool
zpool set property=value pool
SPLIT
Splits devices off pool creating newpool
All vdevs in pool must be mirrors and the pool must not be in the process of resilvering. At the time of the split, newpool will be a replica of pool. By default, the last device in each mirror is split from pool to create newpool
zpool split [-gLnP] [-o property=value]... [-R root] pool newpool [device ...]
-g # Display vdev GUIDs instead of the normal device names
-L # Display real paths for vdevs resolving all symbolic links
-n # Do dry run, do not actually perform the split
-P # Display full paths for vdevs instead of only the last component of the path
-o property=value # Sets the specified property for newpool
-R root # Set altroot for newpool to root and automatically import it
STATUS
Displays the detailed health status for the given pools
If no pool is specified, then the status of each pool in the system is displayed. For more information on pool and device health, see the Device Failure and Recovery section.
If a scrub or resilver is in progress, this command reports the percentage done and the estimated time to completion. Both of these are only approximate, because the amount of data in the pool and the other workloads on the system can change
zpool status [-c [SCRIPT1[,SCRIPT2]...]] [-gLPvxD] [-T u|d] [pool]... [interval [count]]
-c [SCRIPT1[,SCRIPT2]...] # Run a script (or scripts) on each vdev and include the output as a new column in the zpool status output
-g # Display vdev GUIDs instead of the normal device names
-L # Display real paths for vdevs resolving all symbolic links
-P # Display full paths for vdevs instead of only the last component of the path
-D # Display a histogram of deduplication statistics
-T u|d # Display a time stamp. -u for the internal representation of time, -d for standard date format
-v # Displays verbose data error information, printing out a complete list of all data errors since the last complete pool scrub
-x # Only display status for pools that are exhibiting errors or are otherwise unavailable
UPGRADE-DISPLAY-NOT
Displays pools which do not have all supported features enabled and pools formatted using a legacy ZFS version number
These pools can continue to be used, but some features may not be available. Use zpool upgrade -a to enable all features on all pools
zpool upgrade
UPGRADE-DISPLAY
Displays legacy ZFS versions supported by the current software
See zpool-features(5) for a description of the feature flags supported by the current software
zpool upgrade -v
UPGRADE
Enables all supported features on the given pool
Once this is done, the pool will no longer be accessible on systems that do not support feature flags. See zpool-features(5) for details on compatibility with systems that support feature flags, but do not support all features enabled on the pool
zpool upgrade [-V version] -a|pool...
-a # Enables all supported features on all pools.
-V version # Upgrade to the specified legacy version. If the -V flag is specified, no features will be enabled on the pool
PROPERTIES
available # Amount of storage available within the pool
capacity # Percentage of pool space used
expandsize # Amount of uninitialized space within the pool or device that can be used to increase the total capacity of the pool
fragmentation # The amount of fragmentation in the pool
free # The amount of free space available in the pool
freeing # After a file system or snapshot is destroyed, the space it was using is returned to the pool asynchronously. freeing is the amount of space remaining to be reclaimed. Over time freeing will decrease while free increases
health # The current health of the pool. Health can be one of ONLINE, DEGRADED, FAULTED, OFFLINE, REMOVED, UNAVAIL
guid # A unique identifier for the pool
size # Total size of the storage pool
unsupported@feature_guid # Information about unsupported features that are enabled on the pool. See zpool-features(5) for details
used # Amount of storage space used within the pool
The following property can be set at creation time and import time:
altroot # Alternate root directory. If set, this directory is prepended to any mount points within the pool
The following property can be set only at import time:
readonly=on|off # If set to on, the pool will be imported in read-only mode
The following properties can be set at creation time and import time, and later changed with the zpool set command:
ashift=ashift # Pool sector size exponent, as a power of 2 (internally referred to as ashift)
autoexpand=on|off # Controls automatic pool expansion when the underlying LUN is grown. If set to on, the pool will be resized according to the size of the expanded device
autoreplace=on|off # Controls automatic device replacement. If set to off, device replacement must be initiated by the administrator by using the zpool replace command. If set to on, any new device, found in the same physical location as a device that previously belonged to the pool, is automatically formatted and replaced. The default behavior is off
bootfs=(unset)|pool/dataset # Identifies the default bootable dataset for the root pool
cachefile=path|none # Controls the location of where the pool configuration is cached. Discovering all pools on system startup requires a cached copy of the configuration data that is stored on the root file system
comment=text # A text string consisting of printable ASCII characters that will be stored such that it is available even if the pool becomes faulted. An administrator can provide additional information about a pool using this property
dedupditto=number # Threshold for the number of block ditto copies
delegation=on|off # Controls whether a non-privileged user is granted access based on the dataset permissions defined on the dataset
failmode=wait|continue|panic # Controls the system behavior in the event of catastrophic pool failure
wait # Blocks all I/O access until the device connectivity is recovered and the errors are cleared
continue # Returns EIO to any new write I/O requests but allows reads to any of the remaining healthy devices. Any write requests that have yet to be committed to disk would be blocked
panic # Prints out a message to the console and generates a system crash dump
feature@feature_name=enabled # The value of this property is the current state of feature_name. The only valid value when setting this property is enabled which moves feature_name to the enabled state
listsnapshots=on|off # Controls whether information about snapshots associated with this pool is output when zfs list is run without the -t option. The default value is off
version=version # The current on-disk version of the pool. This can be increased, but never decreased
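Property usage sketch (pool name 'tank' and device are examples):
zpool set autoexpand=on tank # let the pool grow with the underlying LUN
zpool get autoexpand,ashift tank # check current values and their source
zpool create -o ashift=12 tank /dev/sdb # 4K sectors (2^12) at creation time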
https://www.tutorialspoint.com/yaml/index.htm
TOC
chapter |
---|
SCALAR |
COLLECTION |
COMMENT |
DOCUMENT |
REFERENCE |
DIRECTIVE |
PyYAML |
“YAML Ain’t Markup Language”
The name YAML stands for "YAML Ain't Markup Language". While that immediately sets it apart from XML, it does not tell us what YAML actually is. According to its specification, YAML is a data serialization language designed to be human-readable and to work well with modern programming languages for everyday tasks
SCALAR
string
- Chaîne
- "3"
- Chaîne sur
une ligne
- "Guillemets doubles\t"
- 'Guillemets simples\t'
Parsing the above leads to the following observations:
- Accented characters are handled; more generally, Unicode is supported
- Line breaks inside strings are not preserved; as in HTML or XML, they are replaced by spaces
- Double quotes handle escape sequences, such as \t for a tab
- Single quotes do not handle escape sequences, which are rendered literally
- The escape sequences supported by YAML include the classic ones, plus many others that can be found in the YAML specification
Unicode escapes:
\xNN : 8-bit Unicode characters, where NN is a hexadecimal number
\uNNNN : 16-bit Unicode characters
\UNNNNNNNN : 32-bit Unicode characters
integer
canonique: 12345
decimal: +12_345
sexagesimal: 3:25:45
octal: 030071
hexadecimal: 0x3039
float
canonique: 1.23015e+3
exponentielle: 12.3015e+02
sexagesimal: 20:30.15
fixe: 1_230.15
infini negatif: -.inf
pas un nombre: .NaN
date
canonique: 2001-12-15T02:59:43.1Z
iso8601: 2001-12-14t21:59:43.10-05:00
espace: 2001-12-14 21:59:43.10 -5
date: 2002-12-14
others
nul: null
nul bis: ~
vrai: true
vrai bis: yes
vrai ter: on
faux: false
faux bis: no
faux ter: off
COLLECTION
List
- beans
- chocolat
- ham
[beans, chocolat, ham]
Associative Array
croissants: 2
chocolatines: 1
jambon: 0
{ croissants: 2, chocolatines: 1, jambon: 0}
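Both collection types can be nested and mixed freely; a small sketch:
petit dejeuner:
  boissons: [cafe, the]
  viennoiseries:
    - croissant
    - chocolatine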
COMMENT
# This is a comment
DOCUMENT
# a first document, started by --- & ended by ...
---
first document
...
---
second document
...
REFERENCE
&ref : Defines the anchor 'ref'
*ref : References the anchor 'ref'
# define the reference
monday: &p 'patatoes'
# use the reference via its pointer
tuesday: *p
wednesday: *p
DIRECTIVE
YAML
Gives the version of YAML used
%YAML 1.1
---
Tag
Predefined Tag
A tag specifies the data type of a node
omap : an ordered map (a list of single-key maps)
null: !!null
integer: !!int 3
float: !!float 1.2
string: !!str string
boolean: !!bool true
binary: !!binary dGVzdA==
map: !!map { key: value }
seq: !!seq [ element1, element2 ]
set: !!set { element1, element2 }
omap: !!omap [ key: value ]
Personal Tag
Define a tag & use it
%TAG !person! tag:myfirst,2020:bar
---
- !person
nom: Simpson
prenom: Omer
PyYAML
http://sweetohm.net/article/introduction-yaml.html
Install a YAML library such as PyYAML (optionally with the LibYAML bindings)
Read Yaml
Load the file passed as argument & print the first document in it
#!/usr/bin/env python
# encoding: UTF-8
import sys
import yaml

# SafeLoader refuses arbitrary tags, which is safer than the default loader
print(yaml.load(open(sys.argv[1]), Loader=yaml.SafeLoader))
To load all documents (entire file) you can use the method 'yaml.load_all()'
for document in yaml.load_all(documents, Loader=yaml.SafeLoader):
    print(document)
Write Yaml
Uses method yaml.dump()
#!/usr/bin/env python
# encoding: UTF-8
import yaml

recette = {
    'nom': 'sushi',
    'ingredients': ['riz', 'vinaigre', 'sucre', 'sel', 'thon', 'saumon'],
    'temps de cuisson': 10,
    'difficulte': 'difficile'
}
print(yaml.dump(recette))
Class Serialization
#!/usr/bin/env python
# encoding: UTF-8
import yaml

class Personne(object):
    def __init__(self, nom, age):
        self.nom = nom
        self.age = age

    def __repr__(self):
        return "%s(nom=%r, age=%r)" % \
            (self.__class__.__name__, self.nom, self.age)

print(yaml.dump(Personne('Robert', 25), default_flow_style=False))
==>
!!python/object:__main__.Personne
age: 25
nom: Robert
Class Unserialization
#!/usr/bin/env python
# encoding: UTF-8
import yaml

class Personne(object):
    def __init__(self, nom, age):
        self.nom = nom
        self.age = age

    def __repr__(self):
        return "%s(nom=%r, age=%r)" % \
            (self.__class__.__name__, self.nom, self.age)

# UnsafeLoader is needed to instantiate arbitrary python objects (PyYAML >= 5.1)
print(yaml.load("""
!!python/object:__main__.Personne
nom: Robert
age: 25
""", Loader=yaml.UnsafeLoader))
==>
Personne(nom='Robert', age=25)
https://mikefarah.gitbook.io/yq/
yq is a lightweight and portable command-line YAML processor
It aims to be the jq or sed of yaml files
yq [flags]
yq [command]
SUBCOMMANDS
alias | subcommand | Designation |
---|---|---|
x | COMPARE | Deeply compares two yaml files |
d | DELETE | Deletes the nodes matching the given path expression from the YAML file |
h | HELP | Help provides help for any command in the application |
m | MERGE | Updates the yaml file by adding/updating the path(s) and value(s) from additional yaml file(s) |
n | NEW | Creates a new yaml w.r.t the given path and value |
p | PREFIX | Prefixes w.r.t to the yaml file at the given path |
r | READ | Outputs the value of the given path in the yaml file to STDOUT |
v | VALIDATE | Tests the syntax of a file |
w | WRITE | Updates the yaml file w.r.t the given path and value |
 | TRICKS | |
Global options
-h, --help # help for yq
-C, --colors # print with colors
-I, --indent int # sets indent level for output (default 2)
-P, --prettyPrint # pretty print
-j, --tojson # output as json. By default it prints a json document in one line, use the prettyPrint flag to print a formatted doc.
-v, --verbose # verbose mode
-V, --version # Print version information and quit
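The global flags combine with any subcommand; for example with read (things.yaml is a placeholder):
yq r -j things.yaml 'a.b' # print the node at a.b as JSON
yq r -j -P things.yaml 'a.b' # same, pretty-printed
yq r -C things.yaml 'a.b' # colored YAML output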
COMPARE
Deeply compares two yaml files, prints the difference
Use with prettyPrint flag to ignore formatting differences
yq compare [yaml_file_a] [yaml_file_b] [flags]
-D, --defaultValue string # default value printed when there are no results
-d, --doc string # process document index number (0 based, * for all documents) (default "0")
-h, --help # help for compare
-p, --printMode string # print mode (v (values, default), p (paths), pv (path and value pairs) (default "v")
examples
yq x - data2.yml # reads from stdin
yq x -pp dataA.yaml dataB.yaml '**' # compare paths
yq x -d1 dataA.yaml dataB.yaml 'a.b.c'
DELETE
Deletes the nodes matching the given path expression from the YAML file
Outputs to STDOUT unless the inplace flag is used, in which case the file is updated instead
yq delete [yaml_file] [path_expression] [flags]
-d, --doc string # process document index number (0 based, * for all documents) (default "0")
-h, --help # help for delete
-i, --inplace # update the yaml file inplace
examples
yq delete things.yaml 'a.b.c'
yq delete things.yaml 'a.*.c'
yq delete things.yaml 'a.(child.subchild==co*).c'
yq delete things.yaml 'a.**'
yq delete --inplace things.yaml 'a.b.c'
yq delete --inplace -- things.yaml '--key-starting-with-dash' # need to use '--' to stop processing arguments as flags
yq d -i things.yaml 'a.b.c'
HELP
Help provides help for any command in the application
Simply type yq help [path to command] for full details
yq help [command] [flags]
-h, --help # help for help
MERGE
Updates the yaml file by adding/updating the path(s) and value(s) from additional yaml file(s)
Outputs to STDOUT unless the inplace flag is used, in which case the file is updated instead.
If overwrite flag is set then existing values will be overwritten using the values from each additional yaml file.
If append flag is set then existing arrays will be merged with the arrays from each additional yaml file.
yq merge [initial_yaml_file] [additional_yaml_file]... [flags]
-a, --append # update the yaml file by appending array values
-c, --autocreate # automatically create any missing entries (default true)
-d, --doc string # process document index number (0 based, * for all documents) (default "0")
-h, --help # help for merge
-i, --inplace # update the yaml file inplace
-x, --overwrite # update the yaml file by overwriting existing values
examples
yq merge things.yaml other.yaml
yq merge --inplace things.yaml other.yaml
yq m -i things.yaml other.yaml
yq m --overwrite things.yaml other.yaml
yq m -i -x things.yaml other.yaml
yq m -i -a things.yaml other.yaml
yq m -i --autocreate=false things.yaml other.yaml
NEW
Creates a new yaml w.r.t the given path and value
Outputs to STDOUT
Create Scripts:
Note that you can give a create script to perform more sophisticated yaml creation. This follows the same format as the update script
yq new [path] [value] [flags]
-h, --help # help for new
-s, --script string # yaml script for creating yaml
-t, --tag string # set yaml tag (e.g. !!int)
examples
yq new 'a.b.c' cat
yq n 'a.b.c' --tag '!!str' true # force 'true' to be interpreted as a string instead of bool
yq n 'a.b[+]' cat
yq n -- '--key-starting-with-dash' cat # need to use '--' to stop processing arguments as flags
yq n --script create_script.yaml
PREFIX
Prefixes the yaml file w.r.t the given path
Outputs to STDOUT unless the inplace flag is used, in which case the file is updated instead
yq prefix [yaml_file] [path] [flags]
-d, --doc string # process document index number (0 based, * for all documents) (default "0")
-h, --help # help for prefix
-i, --inplace # update the yaml file inplace
examples
yq prefix things.yaml 'a.b.c'
yq prefix --inplace things.yaml 'a.b.c'
yq prefix --inplace -- things.yaml '--key-starting-with-dash' # need to use '--' to stop processing arguments as flags
yq p -i things.yaml 'a.b.c'
yq p --doc 2 things.yaml 'a.b.d'
yq p -d2 things.yaml 'a.b.d'
READ
Outputs the value of the given path in the yaml file to STDOUT
yq read [yaml_file] [path_expression] [flags]
-c, --collect # collect results into array
-D, --defaultValue string # default value printed when there are no results
-d, --doc string # process document index number (0 based, * for all documents) (default "0")
-X, --explodeAnchors # explode anchors
-h, --help # help for read
-l, --length # print length of results
-p, --printMode string # print mode (v (values, default), p (paths), pv (path and value pairs) (default "v")
examples
yq read things.yaml 'a.b.c'
yq r - 'a.b.c' # reads from stdin
yq r things.yaml 'a.*.c'
yq r things.yaml 'a.**.c' # deep splat
yq r things.yaml 'a.(child.subchild==co*).c'
yq r -d1 things.yaml 'a.array[0].blah'
yq r things.yaml 'a.array[*].blah'
yq r -- things.yaml '--key-starting-with-dashes.blah'
VALIDATE
Tests the syntax of a file
yq validate [yaml_file] [flags]
-d, --doc string # process document index number (0 based, * for all documents) (default "0")
-h, --help # help for validate
examples
yq v sample.yaml
yq v - # reads from stdin
WRITE
Updates the yaml file w.r.t the given path and value
Outputs to STDOUT unless the inplace flag is used, in which case the file is updated instead
Appending a value to an array ([+]) adds the value to the end of the array
Update Scripts:
Note that you can give an update script to perform a more sophisticated update. The update script format is a list of update commands (update or delete), like so:
- command: update
  path: b.c
  value:
    #great
    things: frog # wow!
- command: delete
  path: b.d
yq write [yaml_file] [path_expression] [value] [flags]
-d, --doc string # process document index number (0 based, * for all documents) (default "0")
-f, --from string # yaml file for updating yaml (as-is)
-h, --help # help for write
-i, --inplace # update the yaml file inplace
-s, --script string # yaml script for updating yaml
-t, --tag string # set yaml tag (e.g. !!int)
examples
yq write things.yaml 'a.b.c' true
yq write things.yaml 'a.*.c' true
yq write things.yaml 'a.**' true
yq write things.yaml 'a.(child.subchild==co*).c' true
yq write things.yaml 'a.b.c' --tag '!!str' true # force 'true' to be interpreted as a string instead of bool
yq write things.yaml 'a.b.c' --tag '!!float' 3
yq write --inplace -- things.yaml 'a.b.c' '--cat' # need to use '--' to stop processing arguments as flags
yq w -i things.yaml 'a.b.c' cat
yq w -i -s update_script.yaml things.yaml
yq w things.yaml 'a.b.d[+]' foo # appends a new node to the 'd' array
yq w --doc 2 things.yaml 'a.b.d[+]' foo # updates the 3rd document of the yaml file
TRICKS
LXC
pretty print
# pretty print
lxc list --format=yaml ${ctname} | yq r - -C
name
# print name
lxc list --format=yaml ${ctname} | yq r - '.name'
# print selected name for alpine.* in regexp format
lxc list --format=yaml | yq r - 'name==alpine*'
# display names of running containers
lxc list --format yaml | yq r - 'status==Running.name'
# display name of containers which have attached profile $profile
?
https://www.atlassian.com/fr/git/tutorials
https://git-scm.com/docs
TOC
chapter | |
---|---|
REFERENCE | USED |
- URL | - ADD |
- VALUES | - ARCHIVE |
GITHUB | - BRANCH |
GPG | - CHECKOUT |
GITIGNORE | - CLONE |
TRICKS | - COMMIT |
- CONFIG | |
- LOG | |
- MERGE | |
- PULL | |
- PUSH | |
- REMOTE | |
- RESET | |
- SUBMODULE | |
- SWITCH | |
- TAG |
REFERENCE
URL
https://<fqdn>/<user>/<project> # https://github.com/aguytech/Shaarli
git@<fqdn>:<user>/<project>.git # git@github.com:aguytech/Shaarli.git
VALUES
git rev-parse --symbolic-full-name --abbrev-ref @{upstream} # print value for upstream
git rev-parse --symbolic-full-name --abbrev-ref @{push} # print value for push
git for-each-ref --format='%(refname:short) <- %(upstream:short)' refs/heads # show all upstream
git for-each-ref --format='%(upstream:short)' "$(git symbolic-ref -q HEAD)" # idem
git for-each-ref --format='%(refname:short) <- %(push:short)' refs/heads # show all upstream
git for-each-ref --format='%(push:short)' "$(git symbolic-ref -q HEAD)" # idem
USED
ADD
git add -i / --interactive # add with interactively mode
git add -u / --update # Update the index for already tracked files (only where it already has an entry matching <pathspec>)
git add -A / --all / --no-ignore-removal # add all files
ARCHIVE
git archive -l # list available formats
git archive --format tar.gz -9 -o "$(git br --show-current).$(date +%s).tar.gz" <branch> # create an archive from local <branch> with best compression -9 & in format tar.gz
BRANCH
https://stackoverflow.com/questions/11266478/git-add-remote-branch
list
git branch / git br # print list of local branches
git br -v # print information about local branches
git br -vv # print full information about local branches
git branch -a -vv # print full information about all branches
git br --show-current # show name of current branch
git br -r # print list of remote branches for all repositories
git br -rlv <remote>/<pattern> # list remote branches for <remote> repository & with name matched <pattern>
create/delete
git br <branch> # create a local branch
git br <branch> <remote>/<remote_branch> # create local branch from remote
git br -u <remote>/<remote_branch> <branch> # attach a local branch to remote existing one
git br --set-upstream-to=<remote>/<remote_branch> <branch> # idem
# Equal to: git checkout <branch> && git push --set-upstream <remote> <branch>
git br -m <branch> <new_branch> # rename local branch && 'git push'
git br -d <branch> # delete local branch
git br -rd <remote>/<branch> # delete remote branch
CHECKOUT
git co -b <branch> # create a branch from HEAD and switch to it
git co -t <repo>/<branch> -b <branch> # create a local branch from <repo>/<branch>
git co --orphan=<branch> # create an orphan branch (without history)
git co --detach <branch> # check out a commit for inspection and discardable experiments
CLONE
git clone <url> # clone a repository
git clone <url> <alias> # clone a repository & give it an alias
git clone -b <branch> <url> # clone only one branch from repository
git clone -b v0.11-snippets --single-branch --no-tags git@github.com:aguytech/Shaarli.git shaarli-snippets # clone from a repository a single branch
COMMIT
# amend
git commit --amend --no-edit # amends a commit without changing its commit message
git commit --amend -m "message" # amends a commit with a new message
CONFIG
git config <variable> # show variable and his value
git config --global core.editor vim # set selected editor
git config -l # list all config variables
git config -l --show-origin # list all config variables with their origins
git config -l --name-only # list only the names of all config variables
git config -l --local # list config variables local to the current repository
git config -l --global # list global (per-user) config variables
git config -l --system # list system-wide config variables
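# the short forms br, co, st & ci used through this document are not built-in; a minimal sketch to define them as aliases
git config --global alias.br branch
git config --global alias.co checkout
git config --global alias.st status
git config --global alias.ci commit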
LOG
git log # show logs
git log -n3 <repo>/<branch> # show only the last 3 commits
git log --pretty=format:'%h' -n1 <repo>/<branch> # show short sha of last commit
git log --name-only # with file names
git log --name-status # with file names and their status
git log --stat # with file names and their statistics (diffstat)
git reflog # show logs with a reference (sha) view
MERGE
git merge -m "message" <branch> # merge <branch> into the current one, using "message" for the merge commit
git merge --allow-unrelated-histories <branch> # allow merging a branch with no common history
PULL
If you tried a pull which resulted in complex conflicts and would want to start over, you can recover with git reset
git pull # update the current local branch from its remote
git pull <remote> # update the current local branch from the selected remote
git pull <remote> <branch> # merge the remote branch into the current branch
<=>
git fetch origin
git merge origin/next
git pull --rebase # fetch the latest remote changes & rebase your local commits on top of them (instead of merging)
pull all submodules
git submodule foreach git pull
git submodule foreach git pull origin master
git submodule foreach 'git pull origin master || true' # tolerate submodules without updates
PUSH
git push -u <remote> <branch> # set the upstream for the current local branch & push it to the remote (creating the remote branch if needed); useful when a branch created locally must be published to another repository
git push --tags # push tags also
git push -d <remote> <branch> # delete remote branch
REMOTE
git remote rename <name> <new_name> # rename a remote source
git remote add <name> <url> # add a remote source to the repository
git remote add -t <branch> <name> <url> # add a remote source tracking only the given branch
git remote remove <name> # remove/delete a remote source from the repository
RESET
git reset --merge # resets the index and updates the files in the working tree that are different between <commit> and HEAD
git reset --hard <commit_sha> # reset branch to commit_sha, 'git reflog' is a good way to find commit_sha
git reset --hard HEAD~1
SWITCH
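# a minimal sketch of the common forms; git switch covers the branch-switching uses of git checkout
git switch <branch> # switch to an existing local branch
git switch -c <branch> # create a branch & switch to it
git switch - # switch back to the previous branch
git switch --detach <commit> # inspect a commit on a detached HEAD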
TAG
git tag -l # List all tags
git tag -a <tag> -m "message" # create an unsigned, annotated tag
git tag -s <tag> -m "message" # create a signed tag with a message (define the default key first with git config --global user.signingkey)
git tag -s <tag> -u <keyid> -m <message> # Creates a signed tag with a specified key user
git tag -d <tag> # Delete existing tags with the given names
git tag -v <tag> # Verify the GPG signature of the given tag names
git push --delete origin <tag> # delete tag in origin
# rename tag
git tag new old
git tag -d old
git push origin new :old
git pull --prune --tags # for coworkers
SUBMODULE
git submodule add <url> # add a submodule to the current repository
GITHUB
create a local git repository and publish it to github
touch README.md
git init
git add *
git status
git commit -m "First commit"
git remote add origin <url>
git push -u origin master
GPG
https://kamarada.github.io/en/2019/07/14/using-git-with-ssh-keys/
GITHUB
import the public key to github
test
ssh -T git@github.com # test ssh connection
ssh -T -p 443 git@ssh.github.com # test ssh connection over port 443 (https)
set the default key
git config --global user.signingkey <keyid>
change remote url for remote existing repository
git remote -v # print https://github.com/user/project
git remote set-url origin git@github.com:user/project.git # change the connection url to use ssh
git remote -v # print git@github.com:user/project.git
delete tags
git tag -d [tag]
git push origin :[tag]
# or, using the full ref to avoid ambiguity with a branch of the same name
git tag -d [tag]
git push origin :refs/tags/[tag]
GITIGNORE
https://www.atlassian.com/git/tutorials/saving-changes/gitignore
pattern
**/path # match directories anywhere in the repository, relative definition
*.pattern # an asterisk matches zero or more characters
!pattern # prefixing a pattern with an exclamation mark negates it
/pattern # matches files only in the repository root
path/ # appending a slash indicates the pattern is a directory
debug?.log # a question mark matches exactly one character
debug[0-9].log # square brackets match a single character from a specified range like [01] [a-z] [A-Z]
debug[!01].log # inside brackets, an exclamation mark matches any character except one from the specified set
logs/**/debug.log # a double asterisk matches zero or more directories like logs/*day/debug.log
example
*.ba[kt]
*~
!myfile.a # include file in repo
tmp/ # exclude all files in directory tmp
head/**/*.tmp # exclude all files *.tmp in subdirectory of head
TRICKS
create an orphan repo from another one
with origin pointing to a remote server
repo_local="shaarli-snippets"
tmp_branch="dev"
origin="github"
url_origin="git@github.com:aguytech/Shaarli-snippets.git"
upstream="shaarli"
url_upstream="git@github.com:aguytech/Shaarli.git"
upstream_branch="v0.11-snippets" # remote branch to track
mkdir -p "$repo_local"
cd "$repo_local"
git init
# remote
git remote add "$origin" "$url_origin"
git remote add -t "$upstream_branch" "$upstream" "$url_upstream"
git remote -v
git config --get-regexp '^remote'
# upstream
git fetch "$upstream"
git co --orphan="$tmp_branch" "$upstream"/"$upstream_branch"
git st
git ci -m "Initialize branch from $upstream/$upstream_branch $(git log --pretty=format:'%h' -n 1 "$upstream"/"$upstream_branch")"
# origin
git push --set-upstream "$origin" "$tmp_branch"
git co -b master
git push --set-upstream "$origin" master
git br -vv
git br -rlv "github/*"
# archive
git archive --format tar.gz -9 -o "master.$(date +%s).tar.gz" master
add a submodule
git submodule add $url
git diff --cached $submodule
git diff --cached --submodule
git commit -m "Add $submodule module"
clone with submodules
git clone --recurse-submodules <url> # clone & check out the repository together with all its submodules
GIT URLS
In general, URLs contain information about the transport protocol, the address of the remote server, and the path to the repository. Depending on the
transport protocol, some of this information may be absent.
Git supports ssh, git, http, and https protocols (in addition, ftp, and ftps can be used for fetching, but this is inefficient and deprecated; do not use
it).
The native transport (i.e. git:// URL) does no authentication and should be used with caution on unsecured networks.
The following syntaxes may be used with them:
- ssh://[user@]host.xz[:port]/path/to/repo.git/
- git://host.xz[:port]/path/to/repo.git/
- http[s]://host.xz[:port]/path/to/repo.git/
- ftp[s]://host.xz[:port]/path/to/repo.git/
An alternative scp-like syntax may also be used with the ssh protocol:
- [user@]host.xz:path/to/repo.git/
This syntax is only recognized if there are no slashes before the first colon. This helps differentiate a local path that contains a colon. For example the
local path foo:bar could be specified as an absolute path or ./foo:bar to avoid being misinterpreted as an ssh url.
The ssh and git protocols additionally support ~username expansion:
- ssh://[user@]host.xz[:port]/~[user]/path/to/repo.git/
- git://host.xz[:port]/~[user]/path/to/repo.git/
- [user@]host.xz:/~[user]/path/to/repo.git/
For local repositories, also supported by Git natively, the following syntaxes may be used:
- /path/to/repo.git/
- file:///path/to/repo.git/
These two syntaxes are mostly equivalent, except when cloning, when the former implies the --local option
git clone, git fetch and git pull, but not git push, will also accept a suitable bundle file
When Git doesn’t know how to handle a certain transport protocol, it attempts to use the remote-<transport> remote helper, if one exists. To explicitly
request a remote helper, the following syntax may be used:
- <transport>::<address>
where <address> may be a path, a server and path, or an arbitrary URL-like string recognized by the specific remote helper being invoked
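a sketch, assuming the git-remote-hg helper is installed (example.org is a placeholder)
git clone "hg::https://example.org/some-repo"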
If there are a large number of similarly-named remote repositories and you want to use a different format for them (such that the URLs you use will be rewritten into URLs that work), you can create a configuration section of the form:
[url "<actual url base>"]
insteadOf = <other url base>
For example, with this:
[url "git://git.host.xz/"]
insteadOf = host.xz:/path/to/
insteadOf = work:
a URL like "work:repo.git" or like "host.xz:/path/to/repo.git" will be rewritten in any context that takes a URL to be "git://git.host.xz/repo.git".
If you want to rewrite URLs for push only, you can create a configuration section of the form:
[url "<actual url base>"]
pushInsteadOf = <other url base>
For example, with this:
[url "ssh://example.org/"]
pushInsteadOf = git://example.org/
a URL like "git://example.org/path/to/repo.git" will be rewritten to "ssh://example.org/path/to/repo.git" for pushes, but pulls will still use the original URL.
REMOTES
The name of one of the following can be used instead of a URL as <repository> argument:
- a remote in the Git configuration file: $GIT_DIR/config,
- a file in the $GIT_DIR/remotes directory, or
- a file in the $GIT_DIR/branches directory.
All of these also allow you to omit the refspec from the command line because they each contain a refspec which git will use by default.
Named remote in configuration file
You can choose to provide the name of a remote which you had previously configured using git-remote(1), git-config(1) or even by a manual edit to the
$GIT_DIR/config file. The URL of this remote will be used to access the repository. The refspec of this remote will be used by default when you do not
provide a refspec on the command line. The entry in the config file would appear like this:
[remote "<name>"]
url = <url>
pushurl = <pushurl>
push = <refspec>
fetch = <refspec>
The <pushurl> is used for pushes only. It is optional and defaults to <url>.
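a filled-in sketch (the remote name & urls are examples only)
[remote "work"]
    url = git@example.org:team/project.git
    pushurl = git@example.org:team/project.git
    push = refs/heads/master:refs/heads/master
    fetch = +refs/heads/*:refs/remotes/work/*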
Named file in $GIT_DIR/remotes
You can choose to provide the name of a file in $GIT_DIR/remotes. The URL in this file will be used to access the repository. The refspec in this file will
be used as default when you do not provide a refspec on the command line. This file should have the following format:
URL: one of the above URL formats
Push: <refspec>
Pull: <refspec>
Push: lines are used by git push and Pull: lines are used by git pull and git fetch. Multiple Push: and Pull: lines may be specified for additional branch
mappings.
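a hypothetical $GIT_DIR/remotes/work file (urls & refs are examples only)
URL: git@example.org:team/project.git
Push: refs/heads/master:refs/heads/master
Pull: refs/heads/devel:refs/heads/devel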
Named file in $GIT_DIR/branches
You can choose to provide the name of a file in $GIT_DIR/branches. The URL in this file will be used to access the repository. This file should have the
following format:
<url>#<head>
<url> is required; #<head> is optional.
Depending on the operation, git will use one of the following refspecs, if you don’t provide one on the command line. <branch> is the name of this file in
$GIT_DIR/branches and <head> defaults to master.
git fetch uses:
refs/heads/<head>:refs/heads/<branch>
git push uses:
HEAD:refs/heads/<head>
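a hypothetical $GIT_DIR/branches/work file containing "git://example.org/project.git#dev" would give
git fetch work # uses refs/heads/dev:refs/heads/work
git push work # uses HEAD:refs/heads/dev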
https://git-scm.com/book/fr/v2/Utilitaires-Git-Sous-modules
Inspects, updates and manages submodules
git submodule
add [-b <branch>] [-f|--force] [--name <name>] [--reference <repository>] [--depth <depth>] [--] <repository> [<path>] # Add the given repository as a submodule at the given path to the changeset to be committed next to the current project: the current project is termed the "superproject".
status [--cached] [--recursive] [--] [<path>...] # Show the status of the submodules. This will print the SHA-1 of the currently checked out commit for each submodule, along with the submodule path and the output of git describe for the SHA-1.
init [--] [<path>...] # Initialize the submodules recorded in the index (which were added and committed elsewhere) by setting submodule.$name.url in .git/config.
deinit [-f|--force] (--all|[--] <path>...) # Unregister the given submodules, i.e. remove the whole submodule.$name section from .git/config together with their work tree.
update [--init] [--remote] [-N|--no-fetch] [--[no-]recommend-shallow] [-f|--force] [--checkout|--rebase|--merge] [--reference <repository>] [--depth <depth>] [--recursive] [--jobs <n>] # Update the registered submodules to match what the superproject expects by cloning missing submodules, fetching missing commits in submodules and updating the working tree of the submodules.
checkout # the commit recorded in the superproject will be checked out in the submodule on a detached HEAD.
rebase # the current branch of the submodule will be rebased onto the commit recorded in the superproject.
merge # the commit recorded in the superproject will be merged into the current branch in the submodule.
none # the submodule is not updated.
set-branch (-b|--branch) <branch> [--] <path>, set-branch (-d|--default) [--] <path> # Sets the default remote tracking branch for the submodule.
set-url [--] <path> <newurl> # Sets the URL of the specified submodule to <newurl>. Then, it will automatically synchronize the submodule’s new remote URL configuration.
summary [--cached|--files] [(-n|--summary-limit) <n>] [commit] [--] [<path>...] # Show commit summary between the given commit (defaults to HEAD) and working tree/index.
foreach [--recursive] <command> # Evaluates an arbitrary shell command in each checked out submodule.
sync [--recursive] [--] [<path>...] # Synchronizes submodules' remote URL configuration setting to the value specified in .gitmodules.
absorbgitdirs # If a git directory of a submodule is inside the submodule, move the git directory of the submodule into its superproject’s $GIT_DIR/modules path and then connect the git directory and its working directory by setting the core.worktree and adding a .git file pointing to the git directory embedded in the superprojects git directory.
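typical update invocations, a minimal sketch
git submodule update --init --recursive # clone & check out every submodule at the commit recorded in the superproject
git submodule update --remote --merge # move each submodule to the latest commit of its tracking branch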
Create, list, delete or verify a tag object signed with GPG
Add a tag reference in refs/tags/, unless -d/-l/-v is given to delete, list or verify tags
Unless -f is given, the named tag must not yet exist
If one of -a, -s, or -u <keyid> is passed, the command creates a tag object, and requires a tag message. Unless -m <msg> or -F <file> is given, an editor is started for the user to type in the tag message
If -m <msg> or -F <file> is given and -a, -s, and -u <keyid> are absent, -a is implied
Otherwise, a tag reference that points directly at the given object (i.e., a lightweight tag) is created
A GnuPG signed tag object will be created when -s or -u <keyid> is used. When -u <keyid> is not used, the committer identity for the current user is used to find the GnuPG key for signing. The configuration variable gpg.program is used to specify custom GnuPG binary
Tag objects (created with -a, -s, or -u) are called "annotated" tags; they contain a creation date, the tagger name and e-mail, a tagging message, and an optional GnuPG signature. Whereas a "lightweight" tag is simply a name for an object (usually a commit object)
Annotated tags are meant for release while lightweight tags are meant for private or temporary object labels. For this reason, some git commands for naming objects (like git describe) will ignore lightweight tags by default
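the difference in practice, a minimal sketch (tag names are examples)
git tag tmp-label # lightweight: just a ref pointing at the current commit
git tag -a v1.0 -m "release 1.0" # annotated: a tag object with tagger, date & message
git describe # by default resolves against annotated tags only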
git tag [-a | -s | -u <keyid>] [-f] [-m <msg> | -F <file>] [-e] <tagname> [<commit> | <object>]
git tag -d <tagname>...
git tag [-n[<num>]] -l [--contains <commit>] [--no-contains <commit>] [--points-at <object>] [--column[=<options>] | --no-column] [--create-reflog] [--sort=<key>] [--format=<format>] [--[no-]merged [<commit>]] [<pattern>...]
git tag -v [--format=<format>] <tagname>...
-a, --annotate # Make an unsigned, annotated tag object
-s, --sign # Make a GPG-signed tag, using the default e-mail address’s key
--no-sign # Override tag.gpgSign configuration variable that is set to force each and every tag to be signed
-u <keyid>, --local-user=<keyid> # Make a GPG-signed tag, using the given key
-f, --force # Replace an existing tag with the given name (instead of failing)
-d, --delete # Delete existing tags with the given names
-v, --verify # Verify the GPG signature of the given tag names
-n<num> # <num> specifies how many lines from the annotation, if any, are printed when using -l
-l, --list # List tags
--sort=<key> # Sort based on the key given
--color[=<when>] # Respect any colors specified in the --format option
-i, --ignore-case # Sorting and filtering tags are case insensitive
--column[=<options>], --no-column # Display tag listing in columns
--contains [<commit>] # Only list tags which contain the specified commit (HEAD if not specified)
--no-contains [<commit>] # Only list tags which don’t contain the specified commit (HEAD if not specified)
--merged [<commit>] # Only list tags whose commits are reachable from the specified commit (HEAD if not specified), incompatible with --no-merged
--no-merged [<commit>] # Only list tags whose commits are not reachable from the specified commit (HEAD if not specified), incompatible with --merged
--points-at <object> # Only list tags of the given object (HEAD if not specified)
-m <msg>, --message=<msg> # Use the given tag message (instead of prompting)
-F <file>, --file=<file> # Take the tag message from the given file
-e, --edit # The message taken from file with -F and command line with -m are usually used as the tag message unmodified; this option lets you further edit the message taken from these sources
--cleanup=<mode> # This option sets how the tag message is cleaned up
--create-reflog # Create a reflog for the tag
--format=<format> # A string that interpolates %(fieldname) from a tag ref being shown and the object it points at
<tagname> # The name of the tag to create, delete, or describe
<commit>, <object> # The object that the new tag will refer to, usually a commit
Fetch from and integrate with another repository or a local branch
Incorporates changes from a remote repository into the current branch. In its default mode, git pull is shorthand for git fetch followed by git merge FETCH_HEAD
More precisely, git pull runs git fetch with the given parameters and calls git merge to merge the retrieved branch heads into the current branch. With
--rebase, it runs git rebase instead of git merge
<repository> should be the name of a remote repository as passed to git-fetch(1). <refspec> can name an arbitrary remote ref (for example, the name of a tag) or even a collection of refs with corresponding remote-tracking branches (e.g., refs/heads/*:refs/remotes/origin/*), but usually it is the name of a branch in the remote repository
Default values for <repository> and <branch> are read from the "remote" and "merge" configuration for the current branch as set by git-branch(1) --track.
Assume the following history exists and the current branch is "master":
      A---B---C master on origin
     /
D---E---F---G master
    ^
    origin/master in your repository
Then "git pull" will fetch and replay the changes from the remote master branch since it diverged from the local master (i.e., E) until its current commit (C) on top of master and record the result in a new commit along with the names of the two parent commits and a log message from the user describing the changes.
      A---B---C origin/master
     /         \
D---E---F---G---H master
See git-merge(1) for details, including how conflicts are presented and handled.
In Git 1.7.0 or later, to cancel a conflicting merge, use git reset --merge. Warning: In older versions of Git, running git pull with uncommitted changes is discouraged: while possible, it leaves you in a state that may be hard to back out of in the case of a conflict.
If any of the remote changes overlap with local uncommitted changes, the merge will be automatically canceled and the work tree untouched. It is generally best to get any local changes in working order before pulling or stash them away with git-stash(1).
git pull [<options>] [<repository> [<refspec>...]]
-q, --quiet # This is passed to both underlying git-fetch to squelch reporting of during transfer, and underlying git-merge to squelch output during merging
-v, --verbose # Pass --verbose to git-fetch and git-merge
--[no-]recurse-submodules[=yes|on-demand|no] # This option controls if new commits of all populated submodules should be fetched and updated, too
Options related to merging
--commit, --no-commit # Perform the merge and commit the result
--edit, -e, --no-edit # Invoke an editor before committing successful mechanical merge to further edit the auto-generated merge message, so that the user can explain and justify the merge
--cleanup=<mode> # This option determines how the merge message will be cleaned up before committing
--ff, --no-ff, --ff-only # Specifies how a merge is handled when the merged-in history is already a descendant of the current history
-S[<keyid>], --gpg-sign[=<keyid>] # GPG-sign the resulting merge commit
--log[=<n>], --no-log # In addition to branch names, populate the log message with one-line descriptions from at most <n> actual commits that are being merged
--signoff, --no-signoff # Add Signed-off-by line by the committer at the end of the commit log message
--stat, -n, --no-stat # Show a diffstat at the end of the merge
--squash, --no-squash # Produce the working tree and index state as if a real merge happened (except for the merge information), but do not actually make a commit, move the HEAD, or record $GIT_DIR/MERGE_HEAD (to cause the next git commit command to create a merge commit)
--no-verify # This option bypasses the pre-merge and commit-msg hooks
-s <strategy>, --strategy=<strategy> # Use the given merge strategy; can be supplied more than once to specify them in the order they should be tried
-X <option>, --strategy-option=<option> # Pass merge strategy specific option through to the merge strategy
--verify-signatures, --no-verify-signatures # Verify that the tip commit of the side branch being merged is signed with a valid key, i.e. a key that has a valid uid
--summary, --no-summary # Synonyms to --stat and --no-stat; these are deprecated and will be removed in the future
--allow-unrelated-histories # By default, git merge command refuses to merge histories that do not share a common ancestor
-r, --rebase[=false|true|merges|preserve|interactive] # When true, rebase the current branch on top of the upstream branch after fetching
--no-rebase # Override earlier --rebase
--autostash, --no-autostash # Before starting rebase, stash local modifications away (see git-stash(1)) if needed, and apply the stash entry when done
Options related to fetching
--all # Fetch all remotes
-a, --append # Append ref names and object names of fetched refs to the existing contents of .git/FETCH_HEAD
--depth=<depth> # Limit fetching to the specified number of commits from the tip of each remote branch history
--deepen=<depth> # Similar to --depth, except it specifies the number of commits from the current shallow boundary instead of from the tip of each remote branch history
--shallow-since=<date> # Deepen or shorten the history of a shallow repository to include all reachable commits after <date>
--shallow-exclude=<revision> # Deepen or shorten the history of a shallow repository to exclude commits reachable from a specified remote branch or tag
--unshallow # If the source repository is complete, convert a shallow repository to a complete one, removing all the limitations imposed by shallow repositories
--update-shallow # By default when fetching from a shallow repository, git fetch refuses refs that require updating .git/shallow. This option updates .git/shallow and accepts such refs
--negotiation-tip=<commit|glob> # By default, Git will report, to the server, commits reachable from all local refs to find common commits in an attempt to reduce the size of the to-be-received packfile
-f, --force # When git fetch is used with <src>:<dst> refspec it may refuse to update the local branch as discussed in the <refspec> part of the git-fetch(1) documentation
-k, --keep # Keep downloaded pack
--no-tags # By default, tags that point at objects that are downloaded from the remote repository are fetched and stored locally. This option disables this automatic tag following
-u, --update-head-ok # By default git fetch refuses to update the head which corresponds to the current branch
--upload-pack <upload-pack> # When given, and the repository to fetch from is handled by git fetch-pack, --exec=<upload-pack> is passed to the command to specify non-default path for the command run on the other end
--progress # Progress status is reported on the standard error stream by default when it is attached to a terminal, unless -q is specified
-o <option>, --server-option=<option> # Transmit the given string to the server when communicating using protocol version 2
--show-forced-updates # By default, git checks if a branch is force-updated during fetch
--no-show-forced-updates # By default, git checks if a branch is force-updated during fetch
-4, --ipv4 # Use IPv4 addresses only, ignoring IPv6 addresses
-6, --ipv6 # Use IPv6 addresses only, ignoring IPv4 addresses
<repository> # The "remote" repository that is the source of a fetch or pull operation
<refspec> # Specifies which refs to fetch and which local refs to update
examples
Update the remote-tracking branches for the repository you cloned from, then merge one of them into your current branch:
git pull
git pull origin
Normally the branch merged in is the HEAD of the remote repository, but the choice is determined by the branch.<name>.remote and branch.<name>.merge
options; see git-config(1) for details.
Merge into the current branch the remote branch next:
git pull origin next
This leaves a copy of next temporarily in FETCH_HEAD, but does not update any remote-tracking branches. Using remote-tracking branches, the same can be
done by invoking fetch and merge:
git fetch origin
git merge origin/next
If you tried a pull which resulted in complex conflicts and would want to start over, you can recover with git reset
Download objects and refs from another repository
Fetch branches and/or tags (collectively, "refs") from one or more other repositories, along with the objects necessary to complete their histories. Remote-tracking branches are updated (see the description of <refspec> below for ways to control this behavior)
By default, any tag that points into the histories being fetched is also fetched; the effect is to fetch tags that point at branches that you are interested in. This default behavior can be changed by using the --tags or --no-tags options or by configuring remote.<name>.tagOpt. By using a refspec that fetches tags explicitly, you can fetch tags that do not point into branches you are interested in as well
git fetch can fetch from either a single named repository or URL, or from several repositories at once if <group> is given and there is a remotes.<group> entry in the configuration file
When no remote is specified, by default the origin remote will be used, unless there’s an upstream branch configured for the current branch
The names of refs that are fetched, together with the object names they point at, are written to .git/FETCH_HEAD. This information may be used by
scripts or other git commands, such as git-pull(1)
git fetch [<options>] [<repository> [<refspec>...]]
git fetch [<options>] <group>
git fetch --multiple [<options>] [(<repository> | <group>)...]
git fetch --all [<options>]
--all # Fetch all remotes
-a, --append # Append ref names and object names of fetched refs to the existing contents of .git/FETCH_HEAD. Without this option old data in .git/FETCH_HEAD will be overwritten
--depth=<depth> # Limit fetching to the specified number of commits from the tip of each remote branch history. If fetching to a shallow repository created by git clone with --depth=<depth> option, deepen or shorten the history to the specified number of commits. Tags for the deepened commits are not fetched
--deepen=<depth> # Similar to --depth, except it specifies the number of commits from the current shallow boundary instead of from the tip of each remote branch history
--shallow-since=<date> # Deepen or shorten the history of a shallow repository to include all reachable commits after <date>
--shallow-exclude=<revision> # Deepen or shorten the history of a shallow repository to exclude commits reachable from a specified remote branch or tag. This option can be specified multiple times
--unshallow # If the source repository is complete, convert a shallow repository to a complete one, removing all the limitations imposed by shallow repositories
--update-shallow # By default when fetching from a shallow repository, git fetch refuses refs that require updating .git/shallow. This option updates .git/shallow and accept such refs
--negotiation-tip=<commit|glob> # By default, Git will report, to the server, commits reachable from all local refs to find common commits in an attempt to reduce the size of the to-be-received packfile. If specified, Git will only report commits reachable from the given tips. This is useful to speed up fetches when the user knows which local ref is likely to have commits in common with the upstream ref being fetched
--dry-run # Show what would be done, without making any changes
-f, --force # When git fetch is used with <src>:<dst> refspec it may refuse to update the local branch as discussed in the <refspec> part below. This option overrides that check
-k, --keep # Keep downloaded pack
--multiple # Allow several <repository> and <group> arguments to be specified. No <refspec>s may be specified
--[no-]auto-gc # Run git gc --auto at the end to perform garbage collection if needed. This is enabled by default
--[no-]write-commit-graph # Write a commit-graph after fetching. This overrides the config setting fetch.writeCommitGraph
-p, --prune # Before fetching, remove any remote-tracking references that no longer exist on the remote. Tags are not subject to pruning if they are fetched only because of the default tag auto-following or due to a --tags option. However, if tags are fetched due to an explicit refspec (either on the command line or in the remote configuration, for example if the remote was cloned with the --mirror option), then they are also subject to pruning. Supplying --prune-tags is a shorthand for providing the tag refspec
-P, --prune-tags # Before fetching, remove any local tags that no longer exist on the remote if --prune is enabled. This option should be used more carefully, unlike --prune it will remove any local references (local tags) that have been created. This option is a shorthand for providing the explicit tag refspec along with --prune, see the discussion about that in its documentation
-n, --no-tags # By default, tags that point at objects that are downloaded from the remote repository are fetched and stored locally. This option disables this automatic tag following. The default behavior for a remote may be specified with the remote.<name>.tagOpt setting
--refmap=<refspec> # When fetching refs listed on the command line, use the specified refspec (can be given more than once) to map the refs to remote-tracking branches, instead of the values of remote.*.fetch configuration variables for the remote repository
-t, --tags # Fetch all tags from the remote (i.e., fetch remote tags refs/tags/* into local tags with the same name), in addition to whatever else would otherwise be fetched. Using this option alone does not subject tags to pruning, even if --prune is used (though tags may be pruned anyway if they are also the destination of an explicit refspec; see --prune)
--recurse-submodules[=yes|on-demand|no] # This option controls if and under what conditions new commits of populated submodules should be fetched too. It can be used as a boolean option to completely disable recursion when set to no or to unconditionally recurse into all populated submodules when set to yes, which is the default when this option is used without any value. Use on-demand to only recurse into a populated submodule when the superproject retrieves a commit that updates the submodule’s reference to a commit that isn’t already in the local submodule clone
-j, --jobs=<n> # Number of parallel children to be used for all forms of fetching
--no-recurse-submodules # Disable recursive fetching of submodules (this has the same effect as using the --recurse-submodules=no option)
--set-upstream # If the remote is fetched successfully, pull and add upstream (tracking) reference, used by argument-less git-pull(1) and other commands. For more information, see branch.<name>.merge and branch.<name>.remote in git-config(1)
--submodule-prefix=<path> # Prepend <path> to paths printed in informative messages such as "Fetching submodule foo". This option is used internally when recursing over submodules
--recurse-submodules-default=[yes|on-demand] # This option is used internally to temporarily provide a non-negative default value for the --recurse-submodules option. All other methods of configuring fetch’s submodule recursion (such as settings in gitmodules(5) and git-config(1)) override this option, as does specifying --[no-]recurse-submodules directly
-u, --update-head-ok # By default git fetch refuses to update the head which corresponds to the current branch. This flag disables the check. This is purely for the internal use for git pull to communicate with git fetch, and unless you are implementing your own Porcelain you are not supposed to use it
--upload-pack <upload-pack> # When given, and the repository to fetch from is handled by git fetch-pack, --exec=<upload-pack> is passed to the command to specify non-default path for the command run on the other end
-q, --quiet # Pass --quiet to git-fetch-pack and silence any other internally used git commands. Progress is not reported to the standard error stream
-v, --verbose # Be verbose
--progress # Progress status is reported on the standard error stream by default when it is attached to a terminal, unless -q is specified. This flag forces progress status even if the standard error stream is not directed to a terminal
-o <option>, --server-option=<option> # Transmit the given string to the server when communicating using protocol version 2. The given string must not contain a NUL or LF character. The server’s handling of server options, including unknown ones, is server-specific. When multiple --server-option=<option> are given, they are all sent to the other side in the order listed on the command line
--show-forced-updates # By default, git checks if a branch is force-updated during fetch. This can be disabled through fetch.showForcedUpdates, but the --show-forced-updates option guarantees this check occurs
--no-show-forced-updates # By default, git checks if a branch is force-updated during fetch. Pass --no-show-forced-updates or set fetch.showForcedUpdates to false to skip this check for performance reasons. If used during git-pull the --ff-only option will still check for forced updates before attempting a fast-forward update
-4, --ipv4 # Use IPv4 addresses only, ignoring IPv6 addresses
-6, --ipv6 # Use IPv6 addresses only, ignoring IPv4 addresses
<repository> # The "remote" repository that is the source of a fetch or pull operation. This parameter can be either a URL or the name of a remote
<group> # A name referring to a list of repositories as the value of remotes.<group> in the configuration file
<refspec> # Specifies which refs to fetch and which local refs to update. When no <refspec>s appear on the command line, the refs to fetch are read from remote.<repository>.fetch variables instead
examples
Update the remote-tracking branches
git fetch origin
The above command copies all branches from the remote refs/heads/ namespace and stores them to the local refs/remotes/origin/ namespace,
unless the remote.<repository>.fetch option is used to specify a non-default refspec
Using refspecs explicitly:
git fetch origin +pu:pu maint:tmp
This updates (or creates, as necessary) branches pu and tmp in the local repository by fetching from the branches (respectively) pu and maint
from the remote repository
The pu branch will be updated even if it does not fast-forward, because it is prefixed with a plus sign; tmp will not be
Peek at a remote’s branch, without configuring the remote in your local repository
git fetch git://git.kernel.org/pub/scm/git/git.git maint
git log FETCH_HEAD
The first command fetches the maint branch from the repository at git://git.kernel.org/pub/scm/git/git.git and the second command uses
FETCH_HEAD to examine the branch with git-log(1). The fetched objects will eventually be removed by git’s built-in housekeeping
Update remote refs along with associated objects
Updates remote refs using local refs, while sending objects necessary to complete the given refs
You can make interesting things happen to a repository every time you push into it, by setting up hooks there. See documentation for git-receive-pack(1)
When the command line does not specify where to push with the <repository> argument, branch.*.remote configuration for the current branch is
consulted to determine where to push. If the configuration is missing, it defaults to origin
When the command line does not specify what to push with <refspec>... arguments or --all, --mirror, --tags options, the command finds the default <refspec> by consulting remote.*.push configuration, and if it is not found, honors push.default configuration to decide what to push (see git-config(1) for the meaning of push.default)
When neither the command-line nor the configuration specify what to push, the default behavior is used, which corresponds to the simple value for
push.default: the current branch is pushed to the corresponding upstream branch, but as a safety measure, the push is aborted if the upstream
branch does not have the same name as the local one
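to pin this behavior explicitly, a sketch
git config --global push.default simple # push the current branch to its same-named upstream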
git push [--all | --mirror | --tags] [--follow-tags] [--atomic] [-n | --dry-run] [--receive-pack=<git-receive-pack>] [--repo=<repository>] [-f | --force] [-d | --delete] [--prune] [-v | --verbose] [-u | --set-upstream] [-o <string> | --push-option=<string>] [--[no-]signed|--signed=(true|false|if-asked)] [--force-with-lease[=<refname>[:<expect>]]] [--no-verify] [<repository> [<refspec>...]]
<repository> # The "remote" repository that is destination of a push operation. This parameter can be either a URL (see the section GIT URLS below) or the name of a remote (see the section REMOTES below)
<refspec>... # Specify what destination ref to update with what source object. The format of a <refspec> parameter is an optional plus +, followed by the source object <src>, followed by a colon :, followed by the destination ref <dst>
--all # Push all branches (i.e. refs under refs/heads/); cannot be used with other <refspec>
--prune # Remove remote branches that don’t have a local counterpart. For example a remote branch tmp will be removed if a local branch with the same name doesn’t exist any more. This also respects refspecs, e.g. git push --prune remote refs/heads/*:refs/tmp/* would make sure that remote refs/tmp/foo will be removed if refs/heads/foo doesn’t exist
--mirror # Instead of naming each ref to push, specifies that all refs under refs/ (which includes but is not limited to refs/heads/, refs/remotes/, and refs/tags/) be mirrored to the remote repository. Newly created local refs will be pushed to the remote end, locally updated refs will be force updated on the remote end, and deleted refs will be removed from the remote end. This is the default if the configuration option remote.<remote>.mirror is set
-n, --dry-run # Do everything except actually send the updates
--porcelain # Produce machine-readable output. The output status line for each ref will be tab-separated and sent to stdout instead of stderr. The full symbolic names of the refs will be given
-d, --delete # All listed refs are deleted from the remote repository. This is the same as prefixing all refs with a colon
--tags # All refs under refs/tags are pushed, in addition to refspecs explicitly listed on the command line
--follow-tags # Push all the refs that would be pushed without this option, and also push annotated tags in refs/tags that are missing from the remote but are pointing at commit-ish that are reachable from the refs being pushed. This can also be specified with configuration variable push.followTags. For more information, see push.followTags in git-config(1)
--[no-]signed, --signed=(true|false|if-asked) # GPG-sign the push request to update refs on the receiving side, to allow it to be checked by the hooks and/or be logged. If false or --no-signed, no signing will be attempted. If true or --signed, the push will fail if the server does not support signed pushes. If set to if-asked, sign if and only if the server supports signed pushes. The push will also fail if the actual call to gpg --sign fails. See git-receive-pack(1) for the details on the receiving end
--[no-]atomic # Use an atomic transaction on the remote side if available. Either all refs are updated, or on error, no refs are updated. If the server does not support atomic pushes the push will fail
-o <option>, --push-option=<option> # Transmit the given string to the server, which passes them to the pre-receive as well as the post-receive hook. The given string must not contain a NUL or LF character. When multiple --push-option=<option> are given, they are all sent to the other side in the order listed on the command line. When no --push-option=<option> is given from the command line, the values of configuration variable push.pushOption are used instead
--receive-pack=<git-receive-pack>, --exec=<git-receive-pack> # Path to the git-receive-pack program on the remote end. Sometimes useful when pushing to a remote repository over ssh, and you do not have the program in a directory on the default $PATH
--[no-]force-with-lease, --force-with-lease=<refname>, --force-with-lease=<refname>:<expect> # Usually, "git push" refuses to update a remote ref that is not an ancestor of the local ref used to overwrite it. This option overrides this restriction if the current value of the remote ref is the expected value. "git push" fails otherwise
-f, --force # Usually, the command refuses to update a remote ref that is not an ancestor of the local ref used to overwrite it. Also, when --force-with-lease option is used, the command refuses to update a remote ref whose current value does not match what is expected
--repo=<repository> # This option is equivalent to the <repository> argument. If both are specified, the command-line argument takes precedence
-u, --set-upstream # For every branch that is up to date or successfully pushed, add upstream (tracking) reference, used by argument-less git-pull(1) and other commands. For more information, see branch.<name>.merge in git-config(1)
--[no-]thin # These options are passed to git-send-pack(1). A thin transfer significantly reduces the amount of sent data when the sender and receiver share many of the same objects in common. The default is --thin
-q, --quiet # Suppress all output, including the listing of updated refs, unless an error occurs. Progress is not reported to the standard error stream
-v, --verbose # Run verbosely
--progress # Progress status is reported on the standard error stream by default when it is attached to a terminal, unless -q is specified. This flag forces progress status even if the standard error stream is not directed to a terminal
--no-recurse-submodules, --recurse-submodules=check|on-demand|only|no # May be used to make sure all submodule commits used by the revisions to be pushed are available on a remote-tracking branch. If check is used Git will verify that all submodule commits that changed in the revisions to be pushed are available on at least one remote of the submodule. If any commits are missing the push will be aborted and exit with non-zero status. If on-demand is used all submodules that changed in the revisions to be pushed will be pushed. If on-demand was not able to push all necessary revisions it will also be aborted and exit with non-zero status. If only is used all submodules will be recursively pushed while the superproject is left unpushed. A value of no or using --no-recurse-submodules can be used to override the push.recurseSubmodules configuration variable when no submodule recursion is required
--[no-]verify # Toggle the pre-push hook (see githooks(5)). The default is --verify, giving the hook a chance to prevent the push. With --no-verify, the hook is bypassed completely
-4, --ipv4 # Use IPv4 addresses only, ignoring IPv6 addresses
-6, --ipv6 # Use IPv6 addresses only, ignoring IPv4 addresses
OUTPUT
The output of "git push" depends on the transport method used; this section describes the output when pushing over the Git protocol (either
locally or via ssh)
The status of the push is output in tabular form, with each line representing the status of a single ref. Each line is of the form:
- <flag> <summary> <from> -> <to> (<reason>)
If --porcelain is used, then each line of the output is of the form:
- <flag> \t <from>:<to> \t <summary> (<reason>)
The status of up-to-date refs is shown only if --porcelain or --verbose option is used
flag
(space) # for a successfully pushed fast-forward;
+ # for a successful forced update;
- # for a successfully deleted ref;
* # for a successfully pushed new ref;
! # for a ref that was rejected or failed to push; and
= # for a ref that was up to date and did not need pushing
summary
For a successfully pushed ref
the summary shows the old and new values of the ref in a form suitable for using as an argument to git log (this is <old>..<new> in most cases, and <old>...<new> for forced non-fast-forward updates)
For a failed update, more details are given:
rejected # Git did not try to send the ref at all, typically because it is not a fast-forward and you did not force the update
remote rejected # The remote end refused the update. Usually caused by a hook on the remote side, or because the remote repository has one of the following safety options in effect: receive.denyCurrentBranch (for pushes to the checked out branch), receive.denyNonFastForwards (for forced non-fast-forward updates), receive.denyDeletes or receive.denyDeleteCurrent
remote failure # The remote end did not report the successful update of the ref, perhaps because of a temporary error on the remote side, a break in the network connection, or other transient error
from # The name of the local ref being pushed, minus its refs/<type>/ prefix. In the case of deletion, the name of the local ref is omitted
to # The name of the remote ref being updated, minus its refs/<type>/ prefix
reason # A human-readable explanation. In the case of successfully pushed refs, no explanation is needed. For a failed ref, the reason for failure is
described
examples
git push
# Works like git push <remote>, where <remote> is the current branch’s remote (or origin, if no remote is configured for the current branch)
git push origin
# Without additional configuration, pushes the current branch to the configured upstream (remote.origin.merge configuration variable) if it has the same name as the current branch, and errors out without pushing otherwise
# The default behavior of this command when no <refspec> is given can be configured by setting the push option of the remote, or the push.default configuration variable
# For example, to default to pushing only the current branch to origin use git config remote.origin.push HEAD. Any valid <refspec> (like the ones in the examples below) can be configured as the default for git push origin
git push origin :
# Push "matching" branches to origin. See <refspec> in the OPTIONS section above for a description of "matching" branches
git push origin master
# Find a ref that matches master in the source repository (most likely, it would find refs/heads/master), and update the same ref (e.g. refs/heads/master) in origin repository with it. If master did not exist remotely, it would be created
git push origin HEAD
# A handy way to push the current branch to the same name on the remote
git push mothership master:satellite/master dev:satellite/dev
# Use the source ref that matches master (e.g. refs/heads/master) to update the ref that matches satellite/master (most probably refs/remotes/satellite/master) in the mothership repository; do the same for dev and satellite/dev
# See the section describing <refspec>... above for a discussion of the matching semantics
# This is to emulate git fetch run on the mothership using git push that is run in the opposite direction in order to integrate the work done on satellite, and is often necessary when you can only make connection in one way (i.e. satellite can ssh into mothership but mothership cannot initiate connection to satellite because the latter is behind a firewall or does not run sshd)
# After running this git push on the satellite machine, you would ssh into the mothership and run git merge there to complete the emulation of git pull that were run on mothership to pull changes made on satellite
git push origin HEAD:master
# Push the current branch to the remote ref matching master in the origin repository. This form is convenient to push the current branch without thinking about its local name
git push origin master:refs/heads/experimental
# Create the branch experimental in the origin repository by copying the current master branch. This form is only needed to create a new branch or tag in the remote repository when the local name and the remote name are different; otherwise, the ref name on its own will work
git push origin :experimental
# Find a ref that matches experimental in the origin repository (e.g. refs/heads/experimental), and delete it
git push origin +dev:master
# Update the origin repository’s master branch with the dev branch, allowing non-fast-forward updates. This can leave unreferenced commits dangling in the origin repository. Consider the following situation, where a fast-forward is not possible:
o---o---o---A---B origin/master
         \
          X---Y---Z dev
The above command would change the origin repository to
          A---B (unnamed branch)
         /
o---o---o---X---Y---Z master
# Commits A and B would no longer belong to a branch with a symbolic name, and so would be unreachable. As such, these commits would be removed by a git gc command on the origin repository
List references in a remote repository
Displays references available in a remote repository along with the associated commit IDs
git ls-remote [--heads] [--tags] [--refs] [--upload-pack=<exec>] [-q | --quiet] [--exit-code] [--get-url] [--sort=<key>] [--symref] [<repository> [<refs>...]]
-h, --heads, -t, --tags # Limit to only refs/heads and refs/tags, respectively. These options are not mutually exclusive; when given both, references stored in refs/heads and refs/tags are displayed
--refs # Do not show peeled tags or pseudorefs like HEAD in the output
-q, --quiet # Do not print remote URL to stderr
--upload-pack=<exec> # Specify the full path of git-upload-pack on the remote host. This allows listing references from repositories accessed via SSH and where the SSH daemon does not use the PATH configured by the user
--exit-code # Exit with status "2" when no matching refs are found in the remote repository. Usually the command exits with status "0" to indicate it successfully talked with the remote repository, whether it found any matching refs
--get-url # Expand the URL of the given remote repository taking into account any "url.<base>.insteadOf" config setting (See git-config(1)) and exit without talking to the remote
--symref # In addition to the object pointed by it, show the underlying ref pointed by it when showing a symbolic ref. Currently, upload-pack only shows the symref HEAD, so it will be the only one shown by ls-remote
--sort=<key> # Sort based on the key given. Prefix - to sort in descending order of the value. Supports "version:refname" or "v:refname" (tag names are treated as versions). The "version:refname" sort order can also be affected by the "versionsort.suffix" configuration variable. See git-for- each-ref(1) for more sort options, but be aware keys like committerdate that require access to the objects themselves will not work for refs whose objects have not yet been fetched from the remote, and will give a missing object error
-o <option>, --server-option=<option> # Transmit the given string to the server when communicating using protocol version 2. The given string must not contain a NUL or LF character. When multiple --server-option=<option> are given, they are all sent to the other side in the order listed on the command line
<repository> # The "remote" repository to query. This parameter can be either a URL or the name of a remote (see the GIT URLS and REMOTES sections of git- fetch(1))
<refs>... # When unspecified, all references, after filtering done with --heads and --tags, are shown. When <refs>... are specified, only references matching the given patterns are displayed
examples
git ls-remote --tags ./
git ls-remote http://www.kernel.org/pub/scm/git/git.git master pu rc
git remote add korg http://www.kernel.org/pub/scm/git/git.git
git ls-remote --tags korg v\*