# Code excerpts from mandiant/flare-fakenet-ng (fakenet/diverters/diverterbase.py)
# Source: https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/diverterbase.py
# Language: Python | License: Apache-2.0
def getExecuteCmd(self, proto, port):
"""Get the ExecuteCmd format string specified by the operator.
Args:
proto: The protocol name
port: The port number
Returns:
The format string if applicable
None, otherwise
"""
listener = self.getListenerMeta(proto, port)
if listener:
return listener.cmd_template
def _isWhiteListMiss(self, thing, whitelist):
"""Check if thing is NOT in whitelist.
Args:
thing: thing to check whitelist for
whitelist: list of entries
Returns:
True if thing is NOT in whitelist
False otherwise, or if there is no whitelist
"""
if not whitelist:
return False
return thing not in whitelist
def _isBlackListHit(self, thing, blacklist):
"""Check if thing is in blacklist.
Args:
thing: thing to check blacklist for
blacklist: list of entries
Returns:
True if thing is in blacklist
False otherwise, or if there is no blacklist
"""
if not blacklist:
return False
return thing in blacklist
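# ---------------------------------------------------------------------------
# Illustrative sketch (not part of diverterbase.py): the two helpers above are
# deliberately asymmetric when no list is configured -- both return False, so
# an absent whitelist never causes a "miss" and an absent blacklist never
# causes a "hit". A minimal standalone rendition:

def _is_whitelist_miss(thing, whitelist):
    return bool(whitelist) and thing not in whitelist

def _is_blacklist_hit(thing, blacklist):
    return bool(blacklist) and thing in blacklist

assert _is_whitelist_miss('evil.exe', ['good.exe']) is True
assert _is_whitelist_miss('good.exe', ['good.exe']) is False
assert _is_whitelist_miss('anything.exe', []) is False   # no whitelist => no miss
assert _is_blacklist_hit('evil.exe', ['evil.exe']) is True
assert _is_blacklist_hit('evil.exe', []) is False        # no blacklist => no hit
# ---------------------------------------------------------------------------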
def isProcessWhiteListMiss(self, proto, port, proc):
"""Check if proc is OUTSIDE the process WHITElist for a port.
Args:
proto: The protocol name
port: The port number
proc: The process name
Returns:
False if no listener on this port
Return value of _isWhiteListMiss otherwise
"""
listener = self.getListenerMeta(proto, port)
if not listener:
return False
return self._isWhiteListMiss(proc, listener.proc_wl)
def isProcessBlackListHit(self, proto, port, proc):
"""Check if proc is IN the process BLACKlist for a port.
Args:
proto: The protocol name
port: The port number
proc: The process name
Returns:
False if no listener on this port
Return value of _isBlackListHit otherwise
"""
listener = self.getListenerMeta(proto, port)
if not listener:
return False
return self._isBlackListHit(proc, listener.proc_bl)
def isHostWhiteListMiss(self, proto, port, host):
"""Check if host is OUTSIDE the process WHITElist for a port.
Args:
proto: The protocol name
port: The port number
host: The host (IP address or name)
Returns:
False if no listener on this port
Return value of _isWhiteListMiss otherwise
"""
listener = self.getListenerMeta(proto, port)
if not listener:
return False
return self._isWhiteListMiss(host, listener.host_wl)
def isHostBlackListHit(self, proto, port, host):
"""Check if host is IN the process BLACKlist for a port.
Args:
proto: The protocol name
port: The port number
host: The host (IP address or name)
Returns:
False if no listener on this port
Return value of _isBlackListHit otherwise
"""
listener = self.getListenerMeta(proto, port)
if not listener:
return False
return self._isBlackListHit(host, listener.host_bl)
def isDistinct(self, prev, bound_ips):
"""Not quite inequality.
Requires list of bound IPs for that IP protocol version and recognizes
when a foreign-destined packet was redirected to localhost or to an IP
occupied by an adapter local to the system to be able to suppress
output of these near-duplicates.
"""
return ((not prev) or (self.pid != prev.pid) or
(self.comm != prev.comm) or (self.port != prev.port) or
((self.ip != prev.ip) and (self.ip not in bound_ips)))
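# ---------------------------------------------------------------------------
# Illustrative sketch (not part of diverterbase.py): isDistinct suppresses
# near-duplicate log lines for redirected traffic. The hypothetical Conn
# namedtuple below stands in for the connection object that carries the
# pid/comm/ip/port attributes used above.

from collections import namedtuple

Conn = namedtuple('Conn', ['pid', 'comm', 'ip', 'port'])

def is_distinct(cur, prev, bound_ips):
    return ((not prev) or (cur.pid != prev.pid) or
            (cur.comm != prev.comm) or (cur.port != prev.port) or
            ((cur.ip != prev.ip) and (cur.ip not in bound_ips)))

bound_ips = ['127.0.0.1', '192.168.1.5']
prev = Conn(1234, 'malware.exe', '8.8.8.8', 80)
# Same pid/comm/port, but the destination was redirected to a local bound IP:
cur = Conn(1234, 'malware.exe', '192.168.1.5', 80)
assert not is_distinct(cur, prev, bound_ips)   # near-duplicate, suppressed
# ---------------------------------------------------------------------------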
def __init__(self, diverter_config, listeners_config, ip_addrs,
logging_level=logging.INFO):
"""Initialize the DiverterBase.
TODO: Replace the sys.exit() calls from this function with exceptions
or some other mechanism appropriate for allowing the user of this class
to programmatically detect and handle these cases in their own way.
This may entail moving configuration parsing to a method with a return
value, or modifying fakenet.py to handle Diverter exceptions.
Args:
diverter_config: A dict of [Diverter] config section
listeners_config: A dict of listener configuration sections
ip_addrs: dictionary keyed by integers 4 and 6, with each element
being a list and each list member being a str that is an ASCII
representation of an IP address that is associated with a local
interface on this system.
logging_level: Optional integer logging level such as logging.DEBUG
Returns:
None
"""
# For fine-grained control of subclass debug output. Does not control
# debug output from DiverterBase. To see DiverterBase debug output,
# pass logging.DEBUG as the logging_level argument to init_base.
self.pdebug_level = 0
self.pdebug_labels = dict()
# Override in Windows implementation
self.running_on_windows = False
self.pid = os.getpid()
self.ip_addrs = ip_addrs
self.pcap = None
self.pcap_filename = ''
self.pcap_lock = None
self.logger = logging.getLogger('Diverter')
self.logger.setLevel(logging_level)
# Network Based Indicators
self.nbis = {}
# Index remote Process IDs for MultiHost operations
self.remote_pid_counter = 0
# Maps Proxy initiated source ports to original source ports
self.proxy_sport_to_orig_sport_map = {}
# Maps (proxy_sport, orig_sport) to pkt SSL encryption
self.is_proxied_pkt_ssl_encrypted = {}
# Rate limiting for displaying pid/comm/proto/IP/port
self.last_conn = None
portlists = ['BlackListPortsTCP', 'BlackListPortsUDP']
stringlists = ['HostBlackList']
idlists = ['BlackListIDsICMP']
self.configure(diverter_config, portlists, stringlists, idlists)
self.listeners_config = dict((k.lower(), v)
for k, v in listeners_config.items())
# Local IP address
self.external_ip = socket.gethostbyname(socket.gethostname())
self.loopback_ip = socket.gethostbyname('localhost')
# Sessions cache
# NOTE: A dictionary of source ports mapped to destination address,
# port tuples
self.sessions = dict()
# Manage logging of foreign-destined packets
self.nonlocal_ips_already_seen = []
self.log_nonlocal_only_once = True
# Port forwarding table, for looking up original unbound service ports
# when sending replies to foreign endpoints that have attempted to
# communicate with unbound ports. Allows fixing up source ports in
# response packets. Similar to the `sessions` member of the Windows
# Diverter implementation.
self.port_fwd_table = dict()
self.port_fwd_table_lock = threading.Lock()
# Track conversations that will be ignored so that e.g. an RST response
# from a closed port does not erroneously trigger port forwarding and
# silence later replies to legitimate clients.
self.ignore_table = dict()
self.ignore_table_lock = threading.Lock()
# IP forwarding table, for looking up original foreign destination IPs
# when sending replies to local endpoints that have attempted to
# communicate with other machines e.g. via hard-coded C2 IP addresses.
self.ip_fwd_table = dict()
self.ip_fwd_table_lock = threading.Lock()
# Ports bound by FakeNet-NG listeners
self.listener_ports = ListenerPorts()
# Parse listener configurations
self.parse_listeners_config(listeners_config)
#######################################################################
# Diverter settings
# Default TCP/UDP listeners
self.default_listener = dict()
# Global TCP/UDP port blacklist
self.blacklist_ports = {'TCP': [], 'UDP': []}
# Global ICMP ID blacklist
self.blacklist_ids = {'ICMP': []}
# Global process blacklist
# TODO: Allow PIDs
self.blacklist_processes = []
self.whitelist_processes = []
# Global host blacklist
# TODO: Allow domain resolution
self.blacklist_hosts = []
# Parse diverter config
self.parse_diverter_config()
slists = ['DebugLevel', ]
self.reconfigure(portlists=[], stringlists=slists)
dbg_lvl = 0
if self.is_configured('DebugLevel'):
for label in self.getconfigval('DebugLevel'):
label = label.upper()
if label == 'OFF':
dbg_lvl = 0
break
if label not in DLABELS_INV:
self.logger.warning('No such DebugLevel as %s' % (label))
else:
dbg_lvl |= DLABELS_INV[label]
self.set_debug_level(dbg_lvl, DLABELS)
#######################################################################
# Network verification - Implemented in OS-specific mixin
# Check active interfaces
if not self.check_active_ethernet_adapters():
self.logger.critical('ERROR: No active ethernet interfaces '
'detected!')
self.logger.critical(' Please enable a network interface.')
sys.exit(1)
# Check configured ip addresses
if not self.check_ipaddresses():
self.logger.critical('ERROR: No interface had IP address '
'configured!')
self.logger.critical(' Please configure an IP address on '
'network interface.')
sys.exit(1)
# Check configured gateways
gw_ok = self.check_gateways()
if not gw_ok:
self.logger.warning('WARNING: No gateways configured!')
if self.is_set('fixgateway'):
gw_ok = self.fix_gateway()
if not gw_ok:
self.logger.warning('Cannot fix gateway')
if not gw_ok:
self.logger.warning(' Please configure a default ' +
'gateway or route in order to intercept ' +
'external traffic.')
self.logger.warning(' Current interception abilities ' +
'are limited to local traffic.')
# Check configured DNS servers
dns_ok = self.check_dns_servers()
if not dns_ok:
self.logger.warning('WARNING: No DNS servers configured!')
if self.is_set('fixdns'):
dns_ok = self.fix_dns()
if not dns_ok:
self.logger.warning('Cannot fix DNS')
if not dns_ok:
self.logger.warning(' Please configure a DNS server ' +
'in order to allow network resolution.')
# OS-specific Diverters must initialize e.g. WinDivert,
# libnetfilter_queue, pf/alf, etc.
def set_debug_level(self, lvl, labels={}):
"""Enable debug output if necessary, set the debug output level, and
maintain a reference to the dictionary of labels to print when a given
logging level is encountered.
Args:
lvl: An int mask of all debug logging levels
labels: A dict of int => str assigning names to each debug level
Returns:
None
"""
if lvl:
self.logger.setLevel(logging.DEBUG)
self.pdebug_level = lvl
self.pdebug_labels = labels
def pdebug(self, lvl, s):
"""Log only the debug trace messages that have been enabled via
set_debug_level.
Args:
lvl: An int indicating the debug level of this message
s: The message
Returns:
None
"""
if self.pdebug_level & lvl:
label = self.pdebug_labels.get(lvl)
prefix = '[' + label + '] ' if label else '[some component] '
self.logger.debug(prefix + str(s))
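# ---------------------------------------------------------------------------
# Illustrative sketch (not part of diverterbase.py): pdebug_level is a
# bitmask, so independent debug facilities can be toggled separately. The
# names mirror the D* labels used throughout this file, but the values here
# are assumptions chosen only to make the demo self-contained.

DIPNAT = 1 << 0
DDPF = 1 << 1
DLABELS = {DIPNAT: 'IPNAT', DDPF: 'DPF'}

enabled = DIPNAT | DDPF      # as a set_debug_level() call might produce

def pdebug_demo(enabled_mask, lvl, msg):
    if enabled_mask & lvl:   # same gating test as pdebug above
        print('[%s] %s' % (DLABELS.get(lvl, 'some component'), msg))

pdebug_demo(enabled, DIPNAT, 'redirect decision trace')  # printed
pdebug_demo(DIPNAT, DDPF, 'port-forward trace')          # filtered out
# ---------------------------------------------------------------------------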
def check_privileged(self):
"""UNIXy and Windows-oriented check for superuser privileges.
Returns:
True if superuser, else False
"""
try:
privileged = (os.getuid() == 0)
except AttributeError:
privileged = (ctypes.windll.shell32.IsUserAnAdmin() != 0)
return privileged
def parse_listeners_config(self, listeners_config):
"""Parse listener config sections.
TODO: Replace the sys.exit() calls from this function with exceptions
or some other mechanism appropriate for allowing the user of this class
to programmatically detect and handle these cases in their own way.
This may entail modifying fakenet.py.
Args:
listeners_config: A dict of listener configuration sections
Returns:
None
"""
#######################################################################
# Populate diverter ports and process filters from the configuration
for listener_name, listener_config in listeners_config.items():
if 'port' in listener_config:
port = int(listener_config['port'])
hidden = (listener_config.get('hidden', 'false').lower() ==
'true')
if 'protocol' not in listener_config:
self.logger.error('ERROR: Protocol not defined for ' +
'listener %s', listener_name)
sys.exit(1)
protocol = listener_config['protocol'].upper()
if protocol not in ['TCP', 'UDP']:
self.logger.error('ERROR: Invalid protocol %s for ' +
'listener %s', protocol, listener_name)
sys.exit(1)
listener = ListenerMeta(protocol, port, hidden)
###############################################################
# Process filtering configuration
if ('processwhitelist' in listener_config and
'processblacklist' in listener_config):
self.logger.error('ERROR: Listener can\'t have both ' +
'process whitelist and blacklist.')
sys.exit(1)
elif 'processwhitelist' in listener_config:
self.logger.debug('Process whitelist:')
whitelist = listener_config['processwhitelist']
listener.setProcessWhitelist(whitelist)
# for port in self.port_process_whitelist[protocol]:
# self.logger.debug(' Port: %d (%s) Processes: %s',
# port, protocol, ', '.join(
# self.port_process_whitelist[protocol][port]))
elif 'processblacklist' in listener_config:
self.logger.debug('Process blacklist:')
blacklist = listener_config['processblacklist']
listener.setProcessBlacklist(blacklist)
# for port in self.port_process_blacklist[protocol]:
# self.logger.debug(' Port: %d (%s) Processes: %s',
# port, protocol, ', '.join(
# self.port_process_blacklist[protocol][port]))
###############################################################
# Host filtering configuration
if ('hostwhitelist' in listener_config and
'hostblacklist' in listener_config):
self.logger.error('ERROR: Listener can\'t have both ' +
'host whitelist and blacklist.')
sys.exit(1)
elif 'hostwhitelist' in listener_config:
self.logger.debug('Host whitelist:')
host_whitelist = listener_config['hostwhitelist']
listener.setHostWhitelist(host_whitelist)
# for port in self.port_host_whitelist[protocol]:
# self.logger.debug(' Port: %d (%s) Hosts: %s', port,
# protocol, ', '.join(
# self.port_host_whitelist[protocol][port]))
elif 'hostblacklist' in listener_config:
self.logger.debug('Host blacklist:')
host_blacklist = listener_config['hostblacklist']
listener.setHostBlacklist(host_blacklist)
# for port in self.port_host_blacklist[protocol]:
# self.logger.debug(' Port: %d (%s) Hosts: %s', port,
# protocol, ', '.join(
# self.port_host_blacklist[protocol][port]))
# Listener metadata is now configured, add it to the dictionary
self.listener_ports.addListener(listener)
###############################################################
# Execute command configuration
if 'executecmd' in listener_config:
template = listener_config['executecmd'].strip()
# Would prefer not to get into the middle of a debug
# session and learn that a typo has ruined the day, so we
# test beforehand to make sure all the user-specified
# insertion strings are valid.
test = self._build_cmd(template, 0, 'test', '1.2.3.4',
12345, '4.3.2.1', port)
if not test:
self.logger.error(('Terminating due to incorrectly ' +
'configured ExecuteCmd for ' +
'listener %s') % (listener_name))
sys.exit(1)
listener.setExecuteCmd(template)
self.logger.debug('Port %d (%s) ExecuteCmd: %s', port,
protocol,
template)
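# ---------------------------------------------------------------------------
# Illustrative sketch (not part of diverterbase.py): the shape of input that
# parse_listeners_config expects -- one dict per listener section with at
# least 'port' and 'protocol', plus optional filtering and ExecuteCmd keys.
# Key names follow the lookups above; the section names and values are
# invented for illustration.

listeners_config = {
    'httplistener': {
        'port': '80',
        'protocol': 'TCP',
        'hidden': 'false',
        'processblacklist': 'chrome.exe, firefox.exe',
        'executecmd': 'notify.py --proc {procname} --dst {dst_addr}:{dst_port}',
    },
    'dnslistener': {
        'port': '53',
        'protocol': 'UDP',
    },
}
# ---------------------------------------------------------------------------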
def build_cmd(self, pkt, pid, comm):
"""Retrieve the ExecuteCmd directive if applicable and build the
command to execute.
Args:
pkt: An fnpacket.PacketCtx or derived object
pid: Process ID associated with the packet
comm: Process name (command) that sent the packet
Returns:
A str that is the resultant command to execute
"""
cmd = None
template = self.listener_ports.getExecuteCmd(pkt.proto, pkt.dport)
if template:
cmd = self._build_cmd(template, pid, comm, pkt.src_ip, pkt.sport,
pkt.dst_ip, pkt.dport)
return cmd
def _build_cmd(self, tmpl, pid, comm, src_ip, sport, dst_ip, dport):
"""Build a command based on the template specified in an ExecuteCmd
config directive, applying the parameters as needed.
Accepts individual arguments instead of an fnpacket.PacketCtx so that
the Diverter can test any ExecuteCmd directives at configuration time
without having to synthesize a fnpacket.PacketCtx or construct a
NamedTuple to satisfy the requirement for such an argument.
Args:
tmpl: A str containing the body of the ExecuteCmd config directive
pid: Process ID associated with the packet
comm: Process name (command) that sent the packet
src_ip: The source IP address that originated the packet
sport: The source port that originated the packet
dst_ip: The destination IP that the packet was directed at
dport: The destination port that the packet was directed at
Returns:
A str that is the resultant command to execute
"""
cmd = None
try:
cmd = tmpl.format(
pid=str(pid),
procname=str(comm),
src_addr=str(src_ip),
src_port=str(sport),
dst_addr=str(dst_ip),
dst_port=str(dport))
except KeyError as e:
self.logger.error(('Failed to build ExecuteCmd for port %d due ' +
'to erroneous format key: %s') %
(dport, str(e)))
return cmd
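# ---------------------------------------------------------------------------
# Illustrative sketch (not part of diverterbase.py): the six insertion keys an
# ExecuteCmd template may reference, and the KeyError that _build_cmd traps
# when the operator's template contains a typo. Template text is made up.

template = ('analyze.sh --pid {pid} --from {src_addr}:{src_port} '
            '--to {dst_addr}:{dst_port} ({procname})')
cmd = template.format(pid='1234', procname='malware.exe',
                      src_addr='1.2.3.4', src_port='12345',
                      dst_addr='4.3.2.1', dst_port='80')
print(cmd)

try:
    'run.sh {dst_adr}'.format(pid='0', procname='test',     # typo: dst_adr
                              src_addr='1.2.3.4', src_port='12345',
                              dst_addr='4.3.2.1', dst_port='80')
except KeyError as e:
    print('erroneous format key:', e)   # what the pre-flight test catches
# ---------------------------------------------------------------------------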
def execute_detached(self, execute_cmd):
"""OS-agnostic asynchronous subprocess creation.
Executes the process with the appropriate subprocess.Popen parameters
for UNIXy or Windows platforms to isolate the process from FakeNet-NG
to prevent it from being interrupted by termination of FakeNet-NG,
Ctrl-C, etc.
Args:
execute_cmd: A str that is the command to execute
Side-effects:
Creates the specified process.
Returns:
Success => an int that is the pid of the new process
Failure => None
"""
DETACHED_PROCESS = 0x00000008
cflags = DETACHED_PROCESS if self.running_on_windows else 0
cfds = False if self.running_on_windows else True
shl = False if self.running_on_windows else True
def ign_sigint():
# Prevent KeyboardInterrupt in FakeNet-NG's console from
# terminating child processes
signal.signal(signal.SIGINT, signal.SIG_IGN)
preexec = None if self.running_on_windows else ign_sigint
try:
pid = subprocess.Popen(execute_cmd, creationflags=cflags,
shell=shl,
close_fds=cfds,
preexec_fn=preexec).pid
except Exception as e:
self.logger.error('Exception of type %s' % (str(type(e))))
self.logger.error('Error: Failed to execute command: %s',
execute_cmd)
self.logger.error(' %s', e)
else:
return pid
def parse_diverter_config(self):
"""Parse [Diverter] config section.
Args: N/A
Side-effects:
Diverter members (whitelists, pcap, etc.) initialized.
Returns:
None
"""
# SingleHost vs MultiHost mode
self.network_mode = 'SingleHost' # Default
self.single_host_mode = True
if self.is_configured('networkmode'):
self.network_mode = self.getconfigval('networkmode')
available_modes = ['singlehost', 'multihost']
# Constrain argument values
if self.network_mode.lower() not in available_modes:
self.logger.error('NetworkMode must be one of %s' %
(available_modes))
sys.exit(1)
# Adjust previously assumed mode if operator specifies MultiHost
if self.network_mode.lower() == 'multihost':
self.single_host_mode = False
if (self.getconfigval('processwhitelist') and
self.getconfigval('processblacklist')):
self.logger.error('ERROR: Diverter can\'t have both process ' +
'whitelist and blacklist.')
sys.exit(1)
if self.is_set('dumppackets'):
self.pcap_filename = '%s_%s.pcap' % (self.getconfigval(
'dumppacketsfileprefix', 'packets'),
time.strftime('%Y%m%d_%H%M%S'))
self.logger.info('Capturing traffic to %s', self.pcap_filename)
self.pcap = dpkt.pcap.Writer(open(self.pcap_filename, 'wb'),
linktype=dpkt.pcap.DLT_RAW)
self.pcap_lock = threading.Lock()
# Do not redirect blacklisted processes
if self.is_configured('processblacklist'):
self.blacklist_processes = [process.strip() for process in
self.getconfigval('processblacklist').split(',')]
self.logger.debug('Blacklisted processes: %s', ', '.join(
[str(p) for p in self.blacklist_processes]))
if self.logger.level == logging.INFO:
self.logger.info('Hiding logs from blacklisted processes')
# Only redirect whitelisted processes
if self.is_configured('processwhitelist'):
self.whitelist_processes = [process.strip() for process in
self.getconfigval('processwhitelist').split(',')]
self.logger.debug('Whitelisted processes: %s', ', '.join(
[str(p) for p in self.whitelist_processes]))
# Do not redirect blacklisted hosts
if self.is_configured('hostblacklist'):
self.blacklist_hosts = self.getconfigval('hostblacklist')
self.logger.debug('Blacklisted hosts: %s', ', '.join(
[str(p) for p in self.getconfigval('hostblacklist')]))
# Redirect all traffic
self.default_listener = {'TCP': None, 'UDP': None}
if self.is_set('redirectalltraffic'):
if self.is_unconfigured('defaulttcplistener'):
self.logger.error('ERROR: No default TCP listener specified ' +
'in the configuration.')
sys.exit(1)
elif self.is_unconfigured('defaultudplistener'):
self.logger.error('ERROR: No default UDP listener specified ' +
'in the configuration.')
sys.exit(1)
elif not (self.getconfigval('defaulttcplistener').lower() in
self.listeners_config):
self.logger.error('ERROR: No configuration exists for ' +
'default TCP listener %s',
self.getconfigval('defaulttcplistener'))
sys.exit(1)
elif not (self.getconfigval('defaultudplistener').lower() in
self.listeners_config):
self.logger.error('ERROR: No configuration exists for ' +
'default UDP listener %s',
self.getconfigval('defaultudplistener'))
sys.exit(1)
else:
default_listener = self.getconfigval('defaulttcplistener').lower()
default_port = self.listeners_config[default_listener]['port']
self.default_listener['TCP'] = int(default_port)
self.logger.debug('Using default listener %s on port %d',
self.getconfigval('defaulttcplistener').lower(),
self.default_listener['TCP'])
default_listener = self.getconfigval('defaultudplistener').lower()
default_port = self.listeners_config[default_listener]['port']
self.default_listener['UDP'] = int(default_port)
self.logger.debug('Using default listener %s on port %d',
self.getconfigval('defaultudplistener').lower(),
self.default_listener['UDP'])
# Re-marshall these into a readily usable form...
# Do not redirect blacklisted TCP ports
if self.is_configured('blacklistportstcp'):
self.blacklist_ports['TCP'] = \
self.getconfigval('blacklistportstcp')
self.logger.debug('Blacklisted TCP ports: %s', ', '.join(
[str(p) for p in self.getconfigval('BlackListPortsTCP')]))
# Do not redirect blacklisted UDP ports
if self.is_configured('blacklistportsudp'):
self.blacklist_ports['UDP'] = \
self.getconfigval('blacklistportsudp')
self.logger.debug('Blacklisted UDP ports: %s', ', '.join(
[str(p) for p in self.getconfigval('BlackListPortsUDP')]))
# Do not redirect blacklisted ICMP IDs
if self.is_configured('blacklistidsicmp'):
self.blacklist_ids['ICMP'] = \
self.getconfigval('blacklistidsicmp')
self.logger.debug('Blacklisted ICMP IDs: %s', ', '.join(
[str(c) for c in self.getconfigval('BlackListIDsICMP')]))
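# ---------------------------------------------------------------------------
# Illustrative sketch (not part of diverterbase.py): the [Diverter] options
# consumed above, rendered as a dict. Key names mirror the is_configured()/
# is_set()/getconfigval() lookups in this method; the values are examples
# only, and the configure() plumbing that parses port/string lists is omitted.

diverter_config = {
    'networkmode': 'SingleHost',
    'redirectalltraffic': 'Yes',
    'defaulttcplistener': 'RawTCPListener',
    'defaultudplistener': 'RawUDPListener',
    'dumppackets': 'Yes',
    'dumppacketsfileprefix': 'packets',
    'processblacklist': 'java.exe',
    'hostblacklist': '6.6.6.6',
    'blacklistportstcp': '139',
    'blacklistportsudp': '67, 68, 137, 138, 1900, 5355',
    'blacklistidsicmp': '',
    'debuglevel': 'Off',
}
# ---------------------------------------------------------------------------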
def write_pcap(self, pkt):
"""Writes a packet to the pcap.
Args:
pkt: A fnpacket.PacketCtx or derived object
Returns:
None
Side-effects:
Calls dpkt.pcap.Writer.writepkt to persist the octets
"""
if self.pcap and self.pcap_lock:
with self.pcap_lock:
mangled = 'mangled' if pkt.mangled else 'initial'
self.pdebug(DPCAP, 'Writing %s packet %s' %
(mangled, pkt.hdrToStr2()))
self.pcap.writepkt(pkt.octets)
def formatPkt(self, pkt, pid, comm):
"""Format a packet analysis log line for DGENPKTV.
Args:
pkt: A fnpacket.PacketCtx or derived object
pid: Process ID associated with the packet
comm: Process executable name
Returns:
A str containing the log line
"""
logline = ''
if pkt.proto == 'UDP':
fmt = '| {label} {proto} | {pid:>6} | {comm:<8} | {src:>15}:{sport:<5} | {dst:>15}:{dport:<5} | {length:>5} | {flags:<11} | {seqack:<35} |'
logline = fmt.format(
label=pkt.label,
proto=pkt.proto,
pid=str(pid),
comm=str(comm),
src=pkt.src_ip,
sport=pkt.sport,
dst=pkt.dst_ip,
dport=pkt.dport,
length=len(pkt),
flags='',
seqack='',
)
elif pkt.proto == 'TCP':
tcp = pkt._hdr.data
sa = 'Seq=%d, Ack=%d' % (tcp.seq, tcp.ack)
f = []
if (tcp.flags & dpkt.tcp.TH_RST) != 0:
f.append('RST')
if (tcp.flags & dpkt.tcp.TH_SYN) != 0:
f.append('SYN')
if (tcp.flags & dpkt.tcp.TH_ACK) != 0:
f.append('ACK')
if (tcp.flags & dpkt.tcp.TH_FIN) != 0:
f.append('FIN')
if (tcp.flags & dpkt.tcp.TH_PUSH) != 0:
f.append('PSH')
fmt = '| {label} {proto} | {pid:>6} | {comm:<8} | {src:>15}:{sport:<5} | {dst:>15}:{dport:<5} | {length:>5} | {flags:<11} | {seqack:<35} |'
logline = fmt.format(
label=pkt.label,
proto=pkt.proto,
pid=str(pid),
comm=str(comm),
src=pkt.src_ip,
sport=pkt.sport,
dst=pkt.dst_ip,
dport=pkt.dport,
length=len(pkt),
flags=','.join(f),
seqack=sa,
)
else:
fmt = '| {label} {proto} | {pid:>6} | {comm:<8} | {src:>15}:{sport:<5} | {dst:>15}:{dport:<5} | {length:>5} | {flags:<11} | {seqack:<35} |'
logline = fmt.format(
label=pkt.label,
proto='UNK',
pid=str(pid),
comm=str(comm),
src=str(pkt.src_ip),
sport=str(pkt.sport),
dst=str(pkt.dst_ip),
dport=str(pkt.dport),
length=len(pkt),
flags='',
seqack='',
)
return logline
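# ---------------------------------------------------------------------------
# Illustrative sketch (not part of diverterbase.py): decoding TCP flag bits
# the way formatPkt does, without requiring dpkt. The constants match the
# dpkt.tcp.TH_* values used above.

TH_FIN, TH_SYN, TH_RST, TH_PUSH, TH_ACK = 0x01, 0x02, 0x04, 0x08, 0x10

def tcp_flag_names(flags):
    names = []
    for bit, name in ((TH_RST, 'RST'), (TH_SYN, 'SYN'), (TH_ACK, 'ACK'),
                      (TH_FIN, 'FIN'), (TH_PUSH, 'PSH')):
        if flags & bit:
            names.append(name)
    return ','.join(names)

print(tcp_flag_names(TH_SYN | TH_ACK))   # 'SYN,ACK'
# ---------------------------------------------------------------------------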
def check_should_ignore(self, pkt, pid, comm):
"""Indicate whether a packet should be passed without mangling.
Checks whether the packet matches black and whitelists, or whether it
signifies an FTP Active Mode connection.
Args:
pkt: A fnpacket.PacketCtx or derived object
pid: Process ID associated with the packet
comm: Process executable name
Returns:
True if the packet should be left alone, else False.
"""
src_ip = pkt.src_ip0
sport = pkt.sport0
dst_ip = pkt.dst_ip0
dport = pkt.dport0
if not self.is_set('redirectalltraffic'):
self.pdebug(DIGN, 'Ignoring %s packet %s' %
(pkt.proto, pkt.hdrToStr()))
return True
# SingleHost mode checks
if self.single_host_mode:
if comm:
# Check process blacklist
if comm in self.blacklist_processes:
self.pdebug(DIGN, ('Ignoring %s packet from process %s ' +
'in the process blacklist.') % (pkt.proto,
comm))
self.pdebug(DIGN, ' %s' %
(pkt.hdrToStr()))
return True
# Check process whitelist
elif (len(self.whitelist_processes) and (comm not in
self.whitelist_processes)):
self.pdebug(DIGN, ('Ignoring %s packet from process %s ' +
'not in the process whitelist.') % (pkt.proto,
comm))
self.pdebug(DIGN, ' %s' %
(pkt.hdrToStr()))
return True
# Check per-listener blacklisted process list
elif self.listener_ports.isProcessBlackListHit(
pkt.proto, dport, comm):
self.pdebug(DIGN, ('Ignoring %s request packet from ' +
'process %s in the listener process ' +
'blacklist.') % (pkt.proto, comm))
self.pdebug(DIGN, ' %s' %
(pkt.hdrToStr()))
return True
# Check per-listener whitelisted process list
elif self.listener_ports.isProcessWhiteListMiss(
pkt.proto, dport, comm):
self.pdebug(DIGN, ('Ignoring %s request packet from ' +
'process %s not in the listener process ' +
'whitelist.') % (pkt.proto, comm))
self.pdebug(DIGN, ' %s' %
(pkt.hdrToStr()))
return True
# MultiHost mode checks
else:
pass # None as of yet
# Checks independent of mode
# Forwarding blacklisted port
if pkt.proto:
if set(self.blacklist_ports[pkt.proto]).intersection([sport, dport]):
self.pdebug(DIGN, 'Forwarding blacklisted port %s packet:' %
(pkt.proto))
self.pdebug(DIGN, ' %s' % (pkt.hdrToStr()))
return True
# Check host blacklist
global_host_blacklist = self.getconfigval('hostblacklist')
if global_host_blacklist and dst_ip in global_host_blacklist:
self.pdebug(DIGN, ('Ignoring %s packet to %s in the host ' +
'blacklist.') % (str(pkt.proto), dst_ip))
self.pdebug(DIGN, ' %s' % (pkt.hdrToStr()))
self.logger.error('IGN: host blacklist match')
return True
# Check the port host whitelist
if self.listener_ports.isHostWhiteListMiss(pkt.proto, dport, dst_ip):
self.pdebug(DIGN, ('Ignoring %s request packet to %s not in ' +
'the listener host whitelist.') % (pkt.proto,
dst_ip))
self.pdebug(DIGN, ' %s' % (pkt.hdrToStr()))
return True
# Check the port host blacklist
if self.listener_ports.isHostBlackListHit(pkt.proto, dport, dst_ip):
self.pdebug(DIGN, ('Ignoring %s request packet to %s in the ' +
'listener host blacklist.') % (pkt.proto, dst_ip))
self.pdebug(DIGN, ' %s' % (pkt.hdrToStr()))
return True
# Duplicated from diverters/windows.py:
# HACK: FTP Active Mode Handling
# Check if a listener is initiating a new connection from a
# non-diverted port and add it to blacklist. This is done to handle a
# special use-case of FTP ACTIVE mode where FTP server is initiating a
# new connection for which the response may be redirected to a default
# listener. NOTE: Additional testing can be performed to check if this
# is actually a SYN packet
if pid == self.pid:
if (
((dst_ip in self.ip_addrs[pkt.ipver]) and
(not dst_ip.startswith('127.'))) and
((src_ip in self.ip_addrs[pkt.ipver]) and
(not src_ip.startswith('127.'))) and
(not self.listener_ports.intersectsWithPorts(pkt.proto, [sport, dport]))
):
self.pdebug(DIGN | DFTP, 'Listener initiated %s connection' %
(pkt.proto))
self.pdebug(DIGN | DFTP, ' %s' % (pkt.hdrToStr()))
self.pdebug(DIGN | DFTP, ' Blacklisting port %d' % (sport))
self.blacklist_ports[pkt.proto].append(sport)
return True
return False
def check_log_icmp(self, crit, pkt):
"""Log an ICMP packet if the header was parsed as ICMP.
Args:
crit: A DivertParms object
pkt: An fnpacket.PacketCtx or derived object
Returns:
None
"""
if (pkt.is_icmp and (not self.running_on_windows or
pkt.icmp_id not in self.blacklist_ids["ICMP"])):
self.logger.info('ICMP type %d code %d %s' % (
pkt.icmp_type, pkt.icmp_code, pkt.hdrToStr()))
def getOriginalDestPort(self, orig_src_ip, orig_src_port, proto):
"""Return original destination port, or None if it was not redirected.
The proxy listener uses this method to obtain and provide port
information to listeners in the taste() callback as an extra hint as to
whether the traffic may be appropriate for parsing by that listener.
Args:
orig_src_ip: A str that is the ASCII representation of the peer IP
orig_src_port: An int that is the source port of the peer
Returns:
The original destination port if the packet was redirected
None, otherwise
"""
orig_src_key = fnpacket.PacketCtx.gen_endpoint_key(proto, orig_src_ip,
orig_src_port)
with self.port_fwd_table_lock:
return self.port_fwd_table.get(orig_src_key)
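# ---------------------------------------------------------------------------
# Illustrative sketch (not part of diverterbase.py): the port forwarding table
# is keyed by an endpoint identity derived from (proto, ip, port) and read
# under a lock. The key format below is a hypothetical stand-in; the real one
# comes from fnpacket.PacketCtx.gen_endpoint_key.

import threading

port_fwd_table = {}
port_fwd_table_lock = threading.Lock()

def endpoint_key(proto, ip, port):          # hypothetical key format
    return '%s/%s:%d' % (proto, ip, port)

with port_fwd_table_lock:
    port_fwd_table[endpoint_key('TCP', '10.0.0.2', 54321)] = 6667  # orig dport

with port_fwd_table_lock:
    print(port_fwd_table.get(endpoint_key('TCP', '10.0.0.2', 54321)))  # 6667
# ---------------------------------------------------------------------------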
def maybe_redir_ip(self, crit, pkt, pid, comm):
"""Conditionally redirect foreign destination IPs to localhost.
On Linux, this is used only under SingleHost mode.
Args:
crit: DivertParms object
pkt: fnpacket.PacketCtx or derived object
pid: int process ID associated with the packet
comm: Process name (command) that sent the packet
Side-effects:
May mangle the packet by modifying the destination IP to point to a
loopback or external interface IP local to the system where
FakeNet-NG is running.
Returns:
None
"""
if self.check_should_ignore(pkt, pid, comm):
return
self.pdebug(DIPNAT, 'Condition 1 test')
# Condition 1: If the remote IP address is foreign to this system,
# then redirect it to a local IP address.
if self.single_host_mode and (pkt.dst_ip not in self.ip_addrs[pkt.ipver]):
self.pdebug(DIPNAT, 'Condition 1 satisfied')
with self.ip_fwd_table_lock:
self.ip_fwd_table[pkt.skey] = pkt.dst_ip
newdst = self.getNewDestinationIp(pkt.src_ip)
self.pdebug(DIPNAT, 'REDIRECTING %s to IP %s' %
(pkt.hdrToStr(), newdst))
pkt.dst_ip = newdst
else:
# Delete any stale entries in the IP forwarding table: If the
# local endpoint appears to be reusing a client port that was
# formerly used to connect to a foreign host (but not anymore),
# then remove the entry. This prevents a packet hook from
# faithfully overwriting the source IP on a later packet to
# conform to the foreign endpoint's stale connection IP when
# the host is reusing the port number to connect to an IP
# address that is local to the FakeNet system.
with self.ip_fwd_table_lock:
if pkt.skey in self.ip_fwd_table:
self.pdebug(DIPNAT, ' - DELETING ipfwd key entry: %s' %
(pkt.skey))
del self.ip_fwd_table[pkt.skey]
def maybe_fixup_srcip(self, crit, pkt, pid, comm):
"""Conditionally fix up the source IP address if the remote endpoint
had their connection IP-forwarded.
Check is based on whether the remote endpoint corresponds to a key in
the IP forwarding table.
Args:
crit: DivertParms object
pkt: fnpacket.PacketCtx or derived object
pid: int process ID associated with the packet
comm: Process name (command) that sent the packet
Side-effects:
May mangle the packet by modifying the source IP to reflect the
original destination IP that was overwritten by maybe_redir_ip.
Returns:
None
"""
# Condition 4: If the local endpoint (IP/port/proto) combo
# corresponds to an endpoint that initiated a conversation with a
# foreign endpoint in the past, then fix up the source IP for this
# incoming packet with the last destination IP that was requested
# by the endpoint.
self.pdebug(DIPNAT, "Condition 4 test: was remote endpoint IP fwd'd?")
with self.ip_fwd_table_lock:
if self.single_host_mode and (pkt.dkey in self.ip_fwd_table):
self.pdebug(DIPNAT, 'Condition 4 satisfied')
self.pdebug(DIPNAT, ' = FOUND ipfwd key entry: ' + pkt.dkey)
new_srcip = self.ip_fwd_table[pkt.dkey]
self.pdebug(DIPNAT, 'MASQUERADING %s from IP %s' %
(pkt.hdrToStr(), new_srcip))
pkt.src_ip = new_srcip
else:
self.pdebug(DIPNAT, ' ! NO SUCH ipfwd key entry: ' + pkt.dkey)
def maybe_redir_port(self, crit, pkt, pid, comm):
"""Conditionally send packets to the default listener for this proto.
Args:
crit: DivertParms object
pkt: fnpacket.PacketCtx or derived object
pid: int process ID associated with the packet
comm: Process name (command) that sent the packet
Side-effects:
May mangle the packet by modifying the destination port to point to
the default listener.
Returns:
None
"""
# Pre-condition 1: there must be a default listener for this protocol
default = self.default_listener.get(pkt.proto)
if not default:
return
# Pre-condition 2: destination must not be present in port forwarding
# table (prevents masqueraded ports responding to unbound ports from
# being mistaken as starting a conversation with an unbound port).
with self.port_fwd_table_lock:
# Uses dkey to cross-reference
if pkt.dkey in self.port_fwd_table:
return
# Proxy-related check: is the dport bound by a listener that is hidden?
dport_hidden_listener = crit.dport_hidden_listener
# Condition 2: If the packet is destined for an unbound port, then
redirect it to a bound port and save the old destination port in
# the port forwarding table keyed by the source endpoint identity.
bound_ports = self.listener_ports.getPortList(pkt.proto)
if dport_hidden_listener or self.decide_redir_port(pkt, bound_ports):
self.pdebug(DDPFV, 'Condition 2 satisfied: Packet destined for '
'unbound port or hidden listener')
# Post-condition 1: General ignore conditions are not met, or this
# is part of a conversation that is already being ignored.
#
# Placed after the decision to redirect for three reasons:
# 1.) We want to ensure that the else condition below has a chance
# to check whether to delete a stale port forwarding table
# entry.
# 2.) Checking these conditions is, on average, more expensive than
# checking if the packet would be redirected in the first
# place.
# 3.) Reporting of packets that are being ignored (i.e. not
# redirected), which is integrated into this check, should only
# appear when packets would otherwise have been redirected.
# Is this conversation already being ignored for DPF purposes?
with self.ignore_table_lock:
if ((pkt.dkey in self.ignore_table) and
(self.ignore_table[pkt.dkey] == pkt.sport)):
# This is a reply (e.g. a TCP RST) from the
# non-port-forwarded server that the non-port-forwarded
# client was trying to talk to. Leave it alone.
return
if self.check_should_ignore(pkt, pid, comm):
with self.ignore_table_lock:
self.ignore_table[pkt.skey] = pkt.dport
return
# Record the foreign endpoint and old destination port in the port
# forwarding table
self.pdebug(DDPFV, ' + ADDING portfwd key entry: ' + pkt.skey)
with self.port_fwd_table_lock:
self.port_fwd_table[pkt.skey] = pkt.dport
self.pdebug(DDPF, 'Redirecting %s to go to port %d' %
(pkt.hdrToStr(), default))
pkt.dport = default
else:
# Delete any stale entries in the port forwarding table: If the
# foreign endpoint appears to be reusing a client port that was
# formerly used to connect to an unbound port on this server,
# remove the entry. This prevents the OUTPUT or other packet
# hook from faithfully overwriting the source port to conform
# to the foreign endpoint's stale connection port when the
# foreign host is reusing the port number to connect to an
# already-bound port on the FakeNet system.
self.delete_stale_port_fwd_key(pkt.skey)
if crit.first_packet_new_session:
self.addSession(pkt)
# Execute command if applicable
self.maybeExecuteCmd(pkt, pid, comm)
def maybe_fixup_sport(self, crit, pkt, pid, comm):
"""Conditionally fix up source port if the remote endpoint had their
connection port-forwarded to the default listener.
Check is based on whether the remote endpoint corresponds to a key in
the port forwarding table.
Args:
crit: DivertParms object
pkt: fnpacket.PacketCtx or derived object
pid: int process ID associated with the packet
comm: Process name (command) that sent the packet
Side-effects:
May mangle the packet by modifying the source port to masquerade
traffic coming from the default listener to look as if it is coming
from the port that the client originally requested.
Returns:
pkt.hdr (the mangled header) if the packet was modified
None otherwise
"""
hdr_modified = None
# Condition 3: If the remote endpoint (IP/port/proto) combo
# corresponds to an endpoint that initiated a conversation with an
# unbound port in the past, then fix up the source port for this
# outgoing packet with the last destination port that was requested
# by that endpoint. The term "endpoint" is (ab)used loosely here to
# apply to UDP host/port/proto combos and any other protocol that
# may be supported in the future.
new_sport = None
self.pdebug(DDPFV, "Condition 3 test: was remote endpoint port fwd'd?")
with self.port_fwd_table_lock:
new_sport = self.port_fwd_table.get(pkt.dkey)
if new_sport:
self.pdebug(DDPFV, 'Condition 3 satisfied: must fix up ' +
'source port')
self.pdebug(DDPFV, ' = FOUND portfwd key entry: ' + pkt.dkey)
self.pdebug(DDPF, 'MASQUERADING %s to come from port %d' %
(pkt.hdrToStr(), new_sport))
pkt.sport = new_sport
else:
self.pdebug(DDPFV, ' ! NO SUCH portfwd key entry: ' + pkt.dkey)
return pkt.hdr if pkt.mangled else None
def decide_redir_port(self, pkt, bound_ports):
"""Decide whether to redirect a port.
Optimized logic derived by truth table + k-map. See docs/internals.md
for details.
Args:
pkt: fnpacket.PacketCtx or derived object
bound_ports: Set of ports that are bound for this protocol
Returns:
True if the packet must be redirected to the default listener
False otherwise
"""
# A, B, C, D for easy manipulation; full names for readability only.
a = src_local = (pkt.src_ip in self.ip_addrs[pkt.ipver])
c = sport_bound = pkt.sport in (bound_ports)
d = dport_bound = pkt.dport in (bound_ports)
if self.pdebug_level & DDPFV:
# Unused logic term not calculated except for debug output
b = dst_local = (pkt.dst_ip in self.ip_addrs[pkt.ipver])
self.pdebug(DDPFV, 'src %s (%s)' %
(str(pkt.src_ip), ['foreign', 'local'][a]))
self.pdebug(DDPFV, 'dst %s (%s)' %
(str(pkt.dst_ip), ['foreign', 'local'][b]))
self.pdebug(DDPFV, 'sport %s (%sbound)' %
(str(pkt.sport), ['un', ''][c]))
self.pdebug(DDPFV, 'dport %s (%sbound)' %
(str(pkt.dport), ['un', ''][d]))
# Convenience function: binary representation of a bool
def bn(x):
return '1' if x else '0' # Bool -> binary
self.pdebug(DDPFV, 'abcd = ' + bn(a) + bn(b) + bn(c) + bn(d))
return (not a and not d) or (not c and not d)
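# ---------------------------------------------------------------------------
# Illustrative sketch (not part of diverterbase.py): enumerating the truth
# table behind (not a and not d) or (not c and not d). The expression factors
# to: redirect only when the destination port is unbound AND either the source
# is foreign or the source port is also unbound.

from itertools import product

for a, c, d in product([False, True], repeat=3):
    redir = (not a and not d) or (not c and not d)
    # Equivalent factored form: not d and (not a or not c)
    assert redir == ((not d) and ((not a) or (not c)))
    print('src_local=%d sport_bound=%d dport_bound=%d -> redirect=%d'
          % (a, c, d, redir))
# ---------------------------------------------------------------------------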
def addSession(self, pkt):
"""Add a connection to the sessions hash table.
Args:
pkt: fnpacket.PacketCtx or derived object
Returns:
None
"""
session = namedtuple('session', ['dst_ip', 'dport', 'pid',
'comm', 'dport0', 'proto'])
pid, comm = self.get_pid_comm(pkt)
self.sessions[pkt.sport] = session(pkt.dst_ip, pkt.dport, pid,
comm, pkt._dport0, pkt.proto)
def maybeExecuteCmd(self, pkt, pid, comm):
"""Execute any ExecuteCmd associated with this port/listener.
Args:
pkt: fnpacket.PacketCtx or derived object
pid: int process ID associated with the packet
comm: Process name (command) that sent the packet
Returns:
None
"""
if not pid:
return
execCmd = self.build_cmd(pkt, pid, comm)
if execCmd:
self.logger.info('Executing command: %s' % (execCmd))
self.execute_detached(execCmd)
def mapProxySportToOrigSport(self, proto, orig_sport, proxy_sport,
is_ssl_encrypted):
"""Maps Proxy initiated source ports to their original source ports.
The Proxy listener uses this method to notify the diverter about the
proxy originated source port for the original source port. It also
notifies if the packet uses SSL encryption.
Args:
proto: str protocol of socket created by ProxyListener
orig_sport: int source port that originated the packet
proxy_sport: int source port initiated by Proxy listener
is_ssl_encrypted: bool is the packet SSL encrypted or not
Returns:
None
"""
self.proxy_sport_to_orig_sport_map[(proto, proxy_sport)] = orig_sport
self.is_proxied_pkt_ssl_encrypted[(proto, proxy_sport)] = is_ssl_encrypted
def logNbi(self, sport, nbi, proto, application_layer_proto,
is_ssl_encrypted):
"""Collects the NBIs from all listeners into a dictionary.
All listeners use this method to notify the diverter about any NBI
captured within their scope.
Args:
sport: int port bound by listener
nbi: dict NBI captured within the listener
proto: str protocol used by the listener
application_layer_proto: str Application layer protocol of the pkt
is_ssl_encrypted: bool whether the listener is configured to use SSL
Returns:
None
"""
proxied_nbi = (proto, sport) in self.proxy_sport_to_orig_sport_map
# For proxied nbis, override the listener's is_ssl_encrypted with Proxy
# listener's is_ssl_encrypted, and update the original sport. For
# non-proxied nbis, use listener provided is_ssl_encrypted and sport.
if proxied_nbi:
orig_sport = self.proxy_sport_to_orig_sport_map[(proto, sport)]
is_ssl_encrypted = self.is_proxied_pkt_ssl_encrypted.get((proto, sport))
else:
orig_sport = sport
if self.sessions.get(orig_sport) is None:
return
dst_ip, _, pid, comm, orig_dport, transport_layer_proto = self.sessions.get(orig_sport)
if application_layer_proto == '':
application_layer_proto = transport_layer_proto
# Normalize pid and comm for MultiHost mode
if pid is None and comm is None and self.network_mode.lower() == 'multihost':
self.remote_pid_counter += 1
pid = self.remote_pid_counter
comm = 'Remote Process'
# Craft the dictionary
nbi_entry = {
'transport_layer_proto': transport_layer_proto,
'sport': orig_sport,
'dst_ip': dst_ip,
'dport': orig_dport,
'is_ssl_encrypted': is_ssl_encrypted,
'network_mode': self.network_mode.lower(),
'nbi': nbi
}
application_layer_proto = application_layer_proto.lower()
# If it's a new NBI from an existing process or existing protocol,
# append the nbi, else create new key
self.nbis.setdefault((pid, comm), {}).setdefault(application_layer_proto,
[]).append(nbi_entry)
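# ---------------------------------------------------------------------------
# Illustrative sketch (not part of diverterbase.py): the nested setdefault
# idiom used above groups NBI entries by (pid, comm), then by application
# layer protocol, without pre-creating any keys. Sample values are made up.

nbis = {}
key = (1234, 'malware.exe')
nbis.setdefault(key, {}).setdefault('http', []).append({'uri': '/gate.php'})
nbis.setdefault(key, {}).setdefault('http', []).append({'uri': '/cfg.bin'})
print(len(nbis[key]['http']))   # 2 -- second append reused the existing list
# ---------------------------------------------------------------------------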
|
Collects the NBIs from all listeners into a dictionary.
All listeners use this method to notify the diverter about any NBI
captured within their scope.
Args:
sport: int port bound by listener
nbi: dict NBI captured within the listener
proto: str protocol used by the listener
application_layer_proto: str Application layer protocol of the pkt
is_ssl_encrypted: str is the listener configured to use SSL or not
Returns:
None
|
logNbi
|
python
|
mandiant/flare-fakenet-ng
|
fakenet/diverters/diverterbase.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/diverterbase.py
|
Apache-2.0
|
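A minimal sketch of the dictionary shape that logNbi() builds; the pid, comm, and NBI values below are illustrative assumptions, not captured FakeNet output:

# self.nbis is keyed first by (pid, comm), then by application-layer
# protocol, each holding a list of NBI entries.
nbis = {}
pid, comm = 1234, 'sample.exe'          # hypothetical process
nbi_entry = {
    'transport_layer_proto': 'tcp',
    'sport': 49152,
    'dst_ip': '192.0.2.1',
    'dport': 80,
    'is_ssl_encrypted': False,
    'network_mode': 'singlehost',
    'nbi': {'user_agent': 'Mozilla/5.0'},
}
nbis.setdefault((pid, comm), {}).setdefault('http', []).append(nbi_entry)
assert nbis[(1234, 'sample.exe')]['http'] == [nbi_entry]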
def prettyPrintNbi(self):
"""Convenience method to print all NBIs in appropriate format upon
fakenet session termination. Called by stop() method of diverter.
"""
banner = r"""
NNNNNNNN NNNNNNNNBBBBBBBBBBBBBBBBB IIIIIIIIII
N:::::::N N::::::NB::::::::::::::::B I::::::::I
N::::::::N N::::::NB::::::BBBBBB:::::B I::::::::I
N:::::::::N N::::::NBB:::::B B:::::BII::::::II
N::::::::::N N::::::N B::::B B:::::B I::::I ssssssssss
N:::::::::::N N::::::N B::::B B:::::B I::::I ss::::::::::s
N:::::::N::::N N::::::N B::::BBBBBB:::::B I::::I ss:::::::::::::s
N::::::N N::::N N::::::N B:::::::::::::BB I::::I s::::::ssss:::::s
N::::::N N::::N:::::::N B::::BBBBBB:::::B I::::I s:::::s ssssss
N::::::N N:::::::::::N B::::B B:::::B I::::I s::::::s
N::::::N N::::::::::N B::::B B:::::B I::::I s::::::s
N::::::N N:::::::::N B::::B B:::::B I::::I ssssss s:::::s
N::::::N N::::::::NBB:::::BBBBBB::::::BII::::::IIs:::::ssss::::::s
N::::::N N:::::::NB:::::::::::::::::B I::::::::Is::::::::::::::s
N::::::N N::::::NB::::::::::::::::B I::::::::I s:::::::::::ss
NNNNNNNN NNNNNNNBBBBBBBBBBBBBBBBB IIIIIIIIII sssssssssss
========================================================================
Network-Based Indicators Summary
========================================================================
"""
indent = " "
self.logger.info(banner)
process_counter = 0
for process_info, values in self.nbis.items():
process_counter += 1
self.logger.info(f"[{process_counter}] Process ID: "
f"{process_info[0]}, Process Name: {process_info[1]}")
for application_layer_proto, nbi_entry in values.items():
self.logger.info(f"{indent*2} Protocol: "
f"{application_layer_proto}")
nbi_counter = 0
for attributes in nbi_entry:
nbi_counter += 1
self.logger.info(f"{indent*3}{nbi_counter}.Transport Layer "
f"Protocol: {attributes['transport_layer_proto']}")
self.logger.info(f"{indent*4}Source port: {attributes['sport']}")
self.logger.info(f"{indent*4}Destination IP: {attributes['dst_ip']}")
self.logger.info(f"{indent*4}Destination port: {attributes['dport']}")
self.logger.info(f"{indent*4}SSL encrypted: "
f"{attributes['is_ssl_encrypted']}")
self.logger.info(f"{indent*4}Network mode: "
f"{attributes['network_mode']}")
for key, v in attributes['nbi'].items():
if v is not None:
# Let's convert the NBI value to str if it's not already
if isinstance(v, bytes):
v = v.decode('utf-8')
# Let's print maximum 40 characters for NBI values
v = (v[:40]+"...") if len(v)>40 else v
self.logger.info(f"{indent*6}-{key}: {v}")
self.logger.info("\r")
self.logger.info("\r")
|
Convenience method to print all NBIs in appropriate format upon
fakenet session termination. Called by stop() method of diverter.
|
prettyPrintNbi
|
python
|
mandiant/flare-fakenet-ng
|
fakenet/diverters/diverterbase.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/diverterbase.py
|
Apache-2.0
|
def generate_html_report(self):
"""Generates an interactive HTML report containing NBI summary saved
to the main working directory of flare-fakenet-ng. Called by stop() method
of diverter.
"""
if getattr(sys, 'frozen', False) and hasattr(sys, '_MEIPASS'):
# Inside a Pyinstaller bundle
fakenet_dir_path = os.path.dirname(sys.executable)
else:
fakenet_dir_path = os.fspath(Path(__file__).parents[1])
template_file = os.path.join(fakenet_dir_path, "configs", "html_report_template.html")
template_loader = jinja2.FileSystemLoader(searchpath=os.path.dirname(template_file))
template_env = jinja2.Environment(loader=template_loader)
template = template_env.get_template(os.path.basename(template_file))
timestamp = time.strftime('%Y%m%d_%H%M%S')
output_filename = f"report_{timestamp}.html"
with open(output_filename, "w") as output_file:
output_file.write(template.render(nbis=self.nbis))
self.logger.info(f"Generated new HTML report: {output_filename}")
|
Generates an interactive HTML report containing NBI summary saved
to the main working directory of flare-fakenet-ng. Called by stop() method
of diverter.
|
generate_html_report
|
python
|
mandiant/flare-fakenet-ng
|
fakenet/diverters/diverterbase.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/diverterbase.py
|
Apache-2.0
|
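For reference, a standalone sketch of the Jinja2 rendering performed above; the search path, output filename, and empty nbis payload are illustrative assumptions:

import jinja2

loader = jinja2.FileSystemLoader(searchpath='configs')
env = jinja2.Environment(loader=loader)
template = env.get_template('html_report_template.html')
html = template.render(nbis={})  # nbis: the dict assembled by logNbi()
with open('report_example.html', 'w') as output_file:
    output_file.write(html)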
def isProcessBlackListed(self, proto, sport=None, process_name=None, dport=None):
"""Checks if a process is blacklisted.
Expected arguments are either:
- process_name and dport, or
- sport
"""
pid = None
if self.single_host_mode and proto is not None:
if process_name is None or dport is None:
if sport is None:
return False, process_name, pid
orig_sport = self.proxy_sport_to_orig_sport_map.get((proto, sport), sport)
session = self.sessions.get(orig_sport)
if session:
pid = session.pid
process_name = session.comm
dport = session.dport0
else:
return False, process_name, pid
# Check process blacklist
if process_name in self.blacklist_processes:
self.pdebug(DIGN, ('Ignoring %s packet from process %s ' +
'in the process blacklist.') % (proto,
process_name))
return True, process_name, pid
# Check per-listener blacklisted process list
if self.listener_ports.isProcessBlackListHit(
proto, dport, process_name):
self.pdebug(DIGN, ('Ignoring %s request packet from ' +
'process %s in the listener process ' +
'blacklist.') % (proto, process_name))
return True, process_name, pid
return False, process_name, pid
|
Checks if a process is blacklisted.
Expected arguments are either:
- process_name and dport, or
- sport
|
isProcessBlackListed
|
python
|
mandiant/flare-fakenet-ng
|
fakenet/diverters/diverterbase.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/diverterbase.py
|
Apache-2.0
|
def logNbi(self, sport, nbi, proto, application_layer_proto,
is_ssl_encrypted):
"""Delegate the logging of NBIs to the diverter.
This method forwards the provided NBI information to the logNbi() method
in the underlying diverter object. Called by all listeners to log NBIs.
"""
self.__diverter.logNbi(sport, nbi, proto, application_layer_proto,
is_ssl_encrypted)
|
Delegate the logging of NBIs to the diverter.
This method forwards the provided NBI information to the logNbi() method
in the underlying diverter object. Called by all listeners to log NBIs.
|
logNbi
|
python
|
mandiant/flare-fakenet-ng
|
fakenet/diverters/diverterbase.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/diverterbase.py
|
Apache-2.0
|
def mapProxySportToOrigSport(self, proto, orig_sport, proxy_sport,
is_ssl_encrypted):
"""Delegate the mapping of proxy sport to original sport to the
diverter.
This method forwards the provided parameters to the
mapProxySportToOrigSport() method in the underlying diverter object.
Called by ProxyListener to report the mapping between proxy initiated
source port and original source port.
"""
self.__diverter.mapProxySportToOrigSport(proto, orig_sport, proxy_sport,
is_ssl_encrypted)
|
Delegate the mapping of proxy sport to original sport to the
diverter.
This method forwards the provided parameters to the
mapProxySportToOrigSport() method in the underlying diverter object.
Called by ProxyListener to report the mapping between proxy initiated
source port and original source port.
|
mapProxySportToOrigSport
|
python
|
mandiant/flare-fakenet-ng
|
fakenet/diverters/diverterbase.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/diverterbase.py
|
Apache-2.0
|
def configure(self, config_dict, portlists=[], stringlists=[], idlists=[]):
"""Parse configuration.
Does three things:
1.) Turn dictionary keys to lowercase
2.) Turn string lists into arrays for quicker access
3.) Expand port range specifications
"""
self._dict = dict((k.lower(), v) for k, v in config_dict.items())
for entry in portlists:
portlist = self.getconfigval(entry)
if portlist:
expanded = self._expand_ports(portlist)
self.setconfigval(entry, expanded)
for entry in stringlists:
stringlist = self.getconfigval(entry)
if stringlist:
expanded = [s.strip() for s in stringlist.split(',')]
self.setconfigval(entry, expanded)
for entry in idlists:
idlist = self.getconfigval(entry)
if idlist:
expanded = [int(c) for c in idlist.split(',')]
self.setconfigval(entry, expanded)
|
Parse configuration.
Does three things:
1.) Turn dictionary keys to lowercase
2.) Turn string lists into arrays for quicker access
3.) Expand port range specifications
|
configure
|
python
|
mandiant/flare-fakenet-ng
|
fakenet/diverters/fnconfig.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/fnconfig.py
|
Apache-2.0
|
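_expand_ports() is not shown above; the function below is a plausible stand-in (an assumption, not the project's implementation) illustrating what "expand port range specifications" means, with a hypothetical spec string:

def expand_ports(portlist):
    # '80, 443, 8000-8002' -> [80, 443, 8000, 8001, 8002]
    ports = []
    for part in portlist.split(','):
        part = part.strip()
        if '-' in part:
            lo, hi = part.split('-', 1)
            ports.extend(range(int(lo), int(hi) + 1))
        else:
            ports.append(int(part))
    return ports

assert expand_ports('80, 443, 8000-8002') == [80, 443, 8000, 8001, 8002]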
def _parseIp(self):
"""Parse IP src/dst fields and next-layer fields if recognized."""
if self._is_ip:
self._src_ip0 = self._src_ip = socket.inet_ntoa(self._hdr.src)
self._dst_ip0 = self._dst_ip = socket.inet_ntoa(self._hdr.dst)
self.proto = self.handled_protocols.get(self.proto_num)
# If this is a transport protocol we handle...
if self.proto:
self._tcpudpcsum0 = self._hdr.data.sum
self._sport0 = self._sport = self._hdr.data.sport
self._dport0 = self._dport = self._hdr.data.dport
self.skey = self._genEndpointKey(self._src_ip, self._sport)
self.dkey = self._genEndpointKey(self._dst_ip, self._dport)
|
Parse IP src/dst fields and next-layer fields if recognized.
|
_parseIp
|
python
|
mandiant/flare-fakenet-ng
|
fakenet/diverters/fnpacket.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/fnpacket.py
|
Apache-2.0
|
def _calcCsums(self):
"""The roundabout dance of inducing dpkt to recalculate checksums..."""
self._hdr.sum = 0
self._hdr.data.sum = 0
# This has the side-effect of invoking dpkt.in_cksum() et al:
str(self._hdr)
|
The roundabout dance of inducing dpkt to recalculate checksums...
|
_calcCsums
|
python
|
mandiant/flare-fakenet-ng
|
fakenet/diverters/fnpacket.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/fnpacket.py
|
Apache-2.0
|
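The same idiom in standalone form (packet fields illustrative; note that Python 3 dpkt serializes via bytes() where the original used str()): zeroing the checksum fields and serializing makes dpkt recompute them as a side effect.

import dpkt

ip = dpkt.ip.IP(src=b'\x7f\x00\x00\x01', dst=b'\x7f\x00\x00\x01',
                p=dpkt.ip.IP_PROTO_TCP)
ip.data = dpkt.tcp.TCP(sport=12345, dport=80)
ip.sum = 0        # Zeroed checksums signal dpkt to recalculate...
ip.data.sum = 0
bytes(ip)         # ...and serialization triggers the recalculation
assert ip.sum != 0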
def _iptables_format(self, chain, iface, argfmt):
"""Format iptables command line with optional interface restriction.
Parameters
----------
chain : string
One of 'OUTPUT', 'POSTROUTING', 'INPUT', or 'PREROUTING', used for
deciding the correct flag (-i versus -o)
iface : string or NoneType
Name of interface to restrict the rule to (e.g. 'eth0'), or None
argfmt : string
Format string for remaining iptables arguments. This format string
will not be included in format string evaluation but is appended
as-is to the iptables command.
"""
flag_iface = ''
if iface:
if chain in ['OUTPUT', 'POSTROUTING']:
flag_iface = '-o'
elif chain in ['INPUT', 'PREROUTING']:
flag_iface = '-i'
else:
raise NotImplementedError('Unanticipated chain %s' % (chain))
self._addcmd = 'iptables -I {chain} {flag_if} {iface} {fmt}'
self._addcmd = self._addcmd.format(chain=chain, flag_if=flag_iface,
iface=(iface or ''), fmt=argfmt)
self._remcmd = 'iptables -D {chain} {flag_if} {iface} {fmt}'
self._remcmd = self._remcmd.format(chain=chain, flag_if=flag_iface,
iface=(iface or ''), fmt=argfmt)
|
Format iptables command line with optional interface restriction.
Parameters
----------
chain : string
One of 'OUTPUT', 'POSTROUTING', 'INPUT', or 'PREROUTING', used for
deciding the correct flag (-i versus -o)
iface : string or NoneType
Name of interface to restrict the rule to (e.g. 'eth0'), or None
argfmt : string
Format string for remaining iptables arguments. This format string
will not be included in format string evaluation but is appended
as-is to the iptables command.
|
_iptables_format
|
python
|
mandiant/flare-fakenet-ng
|
fakenet/diverters/linutil.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/linutil.py
|
Apache-2.0
|
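For instance (argument values illustrative), calling the method with chain='PREROUTING', iface='eth0', and argfmt='-j NFQUEUE --queue-num 0' yields:

# self._addcmd: 'iptables -I PREROUTING -i eth0 -j NFQUEUE --queue-num 0'
# self._remcmd: 'iptables -D PREROUTING -i eth0 -j NFQUEUE --queue-num 0'
# With iface=None, the {flag_if} and {iface} slots collapse to empty
# strings, leaving extra spaces that the shell tolerates.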
def start(self, timeout_sec=0.5):
"""Binds to the netfilter queue number specified in the ctor, obtains
the netlink socket, sets a timeout of <timeout_sec>, and starts the
thread procedure which checks _stopflag every time the netlink socket
times out.
"""
# Execute iptables to add the rule
ret = self._rule.add()
if ret != 0:
return False
self._rule_added = True
# Bind the specified callback to the specified queue
try:
self._nfqueue.bind(self.qno, self._callback)
self._bound = True
except OSError as e:
self.logger.error('Failed to start queue for %s: %s' %
(str(self), str(e)))
except RuntimeWarning as e:
self.logger.error('Failed to start queue for %s: %s' %
(str(self), str(e)))
if not self._bound:
return False
# Facilitate _stopflag monitoring and thread joining
self._sk = socket.fromfd(
self._nfqueue.get_fd(), socket.AF_UNIX, socket.SOCK_STREAM)
self._sk.settimeout(timeout_sec)
# Start a thread to run the queue and monitor the stop flag
self._thread = threading.Thread(target=self._threadproc)
self._thread.daemon = True
self._stopflag = False
try:
self._thread.start()
self._started = True
except RuntimeError as e:
self.logger.error('Failed to start queue thread: %s' % (str(e)))
return self._started
|
Binds to the netfilter queue number specified in the ctor, obtains
the netlink socket, sets a timeout of <timeout_sec>, and starts the
thread procedure which checks _stopflag every time the netlink socket
times out.
|
start
|
python
|
mandiant/flare-fakenet-ng
|
fakenet/diverters/linutil.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/linutil.py
|
Apache-2.0
|
def parse(self, multi=False, max_col=None):
"""Rip through the file and call cb to extract field(s).
        Specify multi if you want to collect an array instead of exiting the
        first time the callback returns anything.
        Only specify max_col if you are uncertain whether the maximum column
        number you will access exists. For procfs files, this should remain
        None.
"""
retval = list() if multi else None
try:
            with open(self.path, 'r') as f:
                n = 0  # Line counter for error reporting
                while True:
                    line = f.readline()
                    n += 1
                    # EOF case
                    if not len(line):
                        break
                    # Insufficient columns => ValueError
                    if max_col and (len(line.split()) < max_col):
                        raise ValueError(('Line %d in %s has fewer than %d '
                                          'columns') %
                                         (n, self.path, max_col))
# Skip header lines
if self.skip:
self.skip -= 1
continue
cb_retval = self.cb(line.split())
if cb_retval:
if multi:
retval.append(cb_retval)
else:
retval = cb_retval
break
except IOError as e:
            self.logger.error('Failed accessing %s: %s' % (self.path, str(e)))
# All or nothing
retval = [] if multi else None
return retval
|
Rip through the file and call cb to extract field(s).
Specify multi if you want to collect an array instead of exiting the
first time the callback returns anything.
Only specify max_col if you are uncertain whether the maximum column
number you will access exists. For procfs files, this should remain
None.
|
parse
|
python
|
mandiant/flare-fakenet-ng
|
fakenet/diverters/linutil.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/linutil.py
|
Apache-2.0
|
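A hedged usage sketch: the fields parse() references (self.path, self.skip, self.cb) suggest a constructor taking a procfs path, a header-line skip count, and a callback. The class name, constructor signature, and callback below are assumptions for illustration:

def inode_if_port(fields, port_tag=':0035'):
    # fields: whitespace-split columns of one /proc/net/tcp line;
    # return the inode (column 9) when the local address ends in :0035 (DNS)
    if len(fields) > 9 and fields[1].endswith(port_tag):
        return int(fields[9], 10)

# reader = ProcfsReader('/proc/net/tcp', 1, inode_if_port)  # skip 1 header
# inodes = reader.parse(multi=True)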
def linux_get_current_nfnlq_bindings(self):
"""Determine what NFQUEUE queue numbers (if any) are already bound by
existing libnfqueue client processes.
Although iptables rules may exist specifying other queues in addition
to these, the netfilter team does not support using libiptc (such as
via python-iptables) to detect that condition, so code that does so may
break in the future. Shelling out to iptables and parsing its output
for NFQUEUE numbers is not an attractive option. The practice of
checking the currently bound NetFilter netlink queue bindings is a
compromise. Note that if an iptables rule specifies an NFQUEUE number
that is not yet bound by any process in the system, the results are
undefined. We can add FakeNet arguments to be passed to the Diverter
for giving the user more control if it becomes necessary.
"""
procfs_path = '/proc/net/netfilter/nfnetlink_queue'
qnos = list()
try:
with open(procfs_path, 'r') as f:
lines = f.read().split('\n')
for line in lines:
line = line.strip()
if line:
queue_nr = int(line.split()[0], 10)
self.pdebug(DNFQUEUE, ('Found NFQUEUE #' +
str(queue_nr) + ' per ') + procfs_path)
qnos.append(queue_nr)
except IOError as e:
self.logger.debug(('Failed to open %s to enumerate netfilter '
'netlink queues, caller may proceed as if '
'none are in use: %s') %
(procfs_path, str(e)))
return qnos
|
Determine what NFQUEUE queue numbers (if any) are already bound by
existing libnfqueue client processes.
Although iptables rules may exist specifying other queues in addition
to these, the netfilter team does not support using libiptc (such as
via python-iptables) to detect that condition, so code that does so may
break in the future. Shelling out to iptables and parsing its output
for NFQUEUE numbers is not an attractive option. The practice of
checking the currently bound NetFilter netlink queue bindings is a
compromise. Note that if an iptables rule specifies an NFQUEUE number
that is not yet bound by any process in the system, the results are
undefined. We can add FakeNet arguments to be passed to the Diverter
for giving the user more control if it becomes necessary.
|
linux_get_current_nfnlq_bindings
|
python
|
mandiant/flare-fakenet-ng
|
fakenet/diverters/linutil.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/linutil.py
|
Apache-2.0
|
def linux_iptables_redir_iface(self, iface):
"""Linux-specific iptables processing for interface-based redirect
rules.
returns:
tuple(bool, list(IptCmdTemplate))
Status of the operation and any successful iptables rules that will
need to be undone.
"""
iptables_rules = []
rule = IptCmdTemplateRedir(iface)
ret = rule.add()
if ret != 0:
self.logger.error('Failed to create PREROUTING/REDIRECT ' +
'rule for %s, stopping...' % (iface))
return (False, iptables_rules)
iptables_rules.append(rule)
return (True, iptables_rules)
|
Linux-specific iptables processing for interface-based redirect
rules.
returns:
tuple(bool, list(IptCmdTemplate))
Status of the operation and any successful iptables rules that will
need to be undone.
|
linux_iptables_redir_iface
|
python
|
mandiant/flare-fakenet-ng
|
fakenet/diverters/linutil.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/linutil.py
|
Apache-2.0
|
def linux_remove_iptables_rules(self, rules):
"""Execute the iptables command to remove each rule that was
successfully added.
"""
failed = []
for rule in rules:
ret = rule.remove()
if ret != 0:
failed.append(rule)
return failed
|
Execute the iptables command to remove each rule that was
successfully added.
|
linux_remove_iptables_rules
|
python
|
mandiant/flare-fakenet-ng
|
fakenet/diverters/linutil.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/linutil.py
|
Apache-2.0
|
def linux_find_processes(self, names):
"""But what if a blacklisted process spawns after we call
this? We'd have to call this every time we do anything.
"""
pids = []
proc_pid_dirs = glob.glob('/proc/[0-9]*/')
comm_file = ''
for proc_pid_dir in proc_pid_dirs:
comm_file = os.path.join(proc_pid_dir, 'comm')
try:
with open(comm_file, 'r') as f:
comm = f.read().strip()
if comm in names:
pid = int(proc_pid_dir.split('/')[-2], 10)
pids.append(pid)
except IOError as e:
# Silently ignore
pass
return pids
|
But what if a blacklisted process spawns after we call
this? We'd have to call this every time we do anything.
|
linux_find_processes
|
python
|
mandiant/flare-fakenet-ng
|
fakenet/diverters/linutil.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/linutil.py
|
Apache-2.0
|
def _linux_find_sock_by_endpoint_unsafe(self, ipver, proto_name, ip, port,
local=True):
"""Search /proc/net/tcp for a socket whose local (field 1, zero-based)
or remote (field 2) address matches ip:port and return the
corresponding inode (field 9).
Fields referenced above are zero-based.
Example contents of /proc/net/tcp (wrapped and double-spaced)
sl local_address rem_address st tx_queue rx_queue tr tm->when
retrnsmt uid timeout inode
0: 0100007F:0277 00000000:0000 0A 00000000:00000000 00:00000000
00000000 0 0 53320 1 0000000000000000 100 0 0 10 0
1: 00000000:021A 00000000:0000 0A 00000000:00000000 00:00000000
00000000 0 0 11125 1 0000000000000000 100 0 0 10 0
2: 00000000:1A0B 00000000:0000 0A 00000000:00000000 00:00000000
00000000 39 0 11175 1 0000000000000000 100 0 0 10 0
3: 0100007F:8071 0100007F:1F90 01 00000000:00000000 00:00000000
00000000 1000 0 58661 1 0000000000000000 20 0 0 10 -1
4: 0100007F:1F90 0100007F:8071 01 00000000:00000000 00:00000000
00000000 1000 0 58640 1 0000000000000000 20 4 30 10 -1
Returns inode
"""
INODE_COLUMN = 9
# IPv6 untested
suffix = '6' if (ipver == 6) else ''
procfs_path = '/proc/net/' + proto_name.lower() + suffix
inode = None
port_tag = self._port_for_proc_net_tcp(port)
match_column = 1 if local else 2
local_column = 1
remote_column = 2
try:
with open(procfs_path) as f:
f.readline() # Discard header
while True:
line = f.readline()
if not len(line):
break
fields = line.split()
# Local matches can be made based on port only
if local and fields[local_column].endswith(port_tag):
inode = int(fields[INODE_COLUMN], 10)
self.pdebug(DPROCFS, 'MATCHING CONNECTION: %s' %
(line.strip()))
break
# Untested: Remote matches must be more specific and
# include the IP address. Hence, an "endpoint tag" is
# constructed to match what would appear in
# /proc/net/{tcp,udp}{,6}
elif not local:
endpoint_tag = self._ip_port_for_proc_net_tcp(ipver,
ip, port)
if fields[remote_column] == endpoint_tag:
inode = int(fields[INODE_COLUMN], 10)
self.pdebug(DPROCFS, 'MATCHING CONNECTION: %s' %
(line.strip()))
except IOError as e:
self.logger.error('No such protocol/IP ver (%s) or error: %s' %
(procfs_path, str(e)))
return inode
|
Search /proc/net/tcp for a socket whose local (field 1, zero-based)
or remote (field 2) address matches ip:port and return the
corresponding inode (field 9).
Fields referenced above are zero-based.
Example contents of /proc/net/tcp (wrapped and double-spaced)
sl local_address rem_address st tx_queue rx_queue tr tm->when
retrnsmt uid timeout inode
0: 0100007F:0277 00000000:0000 0A 00000000:00000000 00:00000000
00000000 0 0 53320 1 0000000000000000 100 0 0 10 0
1: 00000000:021A 00000000:0000 0A 00000000:00000000 00:00000000
00000000 0 0 11125 1 0000000000000000 100 0 0 10 0
2: 00000000:1A0B 00000000:0000 0A 00000000:00000000 00:00000000
00000000 39 0 11175 1 0000000000000000 100 0 0 10 0
3: 0100007F:8071 0100007F:1F90 01 00000000:00000000 00:00000000
00000000 1000 0 58661 1 0000000000000000 20 0 0 10 -1
4: 0100007F:1F90 0100007F:8071 01 00000000:00000000 00:00000000
00000000 1000 0 58640 1 0000000000000000 20 4 30 10 -1
Returns inode
|
_linux_find_sock_by_endpoint_unsafe
|
python
|
mandiant/flare-fakenet-ng
|
fakenet/diverters/linutil.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/linutil.py
|
Apache-2.0
|
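Illustrative stand-ins for the helpers used above (_port_for_proc_net_tcp and _ip_port_for_proc_net_tcp are not shown, so these bodies are assumptions): /proc/net/tcp encodes IPv4 addresses as little-endian hex and ports as big-endian hex.

import socket
import struct

def port_tag(port):
    return ':%04X' % port

def endpoint_tag(ip, port):
    le_ip = struct.unpack('<I', socket.inet_aton(ip))[0]
    return '%08X:%04X' % (le_ip, port)

assert port_tag(631) == ':0277'                            # entry 0 above
assert endpoint_tag('127.0.0.1', 8080) == '0100007F:1F90'  # entry 3's remote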
def linux_get_pid_comm_by_endpoint(self, ipver, proto_name, ip, port):
"""Obtain a pid and executable name associated with an endpoint.
NOTE: procfs does not allow us to answer questions like "who just
called send()?"; only questions like "who owns a socket associated with
this local port?" Since fork() etc. can result in multiple ownership,
the real answer may be that multiple processes actually own the socket.
This implementation stops at the first match and hence may not give a
perfectly accurate answer in those cases. In practice, this may be
adequate, or it may need to be revisited to return a list of (pid,comm)
tuples to take into account cases where multiple processes have the
same inode open.
"""
pid, comm = None, None
# 1. Find the inode number associated with this socket
inode = self.linux_find_sock_by_endpoint(ipver, proto_name, ip, port)
if inode:
# 2. Search for a /proc/<pid>/fd/<fd> that has this inode open.
proc_fds_glob = '/proc/[0-9]*/fd/*'
proc_fd_paths = glob.glob(proc_fds_glob)
for fd_path in proc_fd_paths:
candidate = self._linux_get_sk_ino_for_fd_file(fd_path)
if candidate and (candidate == inode):
# 3. Record the pid and executable name
try:
pid = int(fd_path.split('/')[-3], 10)
comm = self.linux_get_comm_by_pid(pid)
                    # Not interested in paths lacking a numeric pid component
except ValueError:
pass
return pid, comm
|
Obtain a pid and executable name associated with an endpoint.
NOTE: procfs does not allow us to answer questions like "who just
called send()?"; only questions like "who owns a socket associated with
this local port?" Since fork() etc. can result in multiple ownership,
the real answer may be that multiple processes actually own the socket.
This implementation stops at the first match and hence may not give a
perfectly accurate answer in those cases. In practice, this may be
adequate, or it may need to be revisited to return a list of (pid,comm)
tuples to take into account cases where multiple processes have the
same inode open.
|
linux_get_pid_comm_by_endpoint
|
python
|
mandiant/flare-fakenet-ng
|
fakenet/diverters/linutil.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/linutil.py
|
Apache-2.0
|
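A sketch of step 2 under stated assumptions: on Linux, each /proc/<pid>/fd/<fd> entry backed by a socket is a symlink whose target reads 'socket:[<inode>]'. The helper name mirrors the one called above, but the body here is illustrative:

import os
import re

def linux_get_sk_ino_for_fd_file(fd_path):
    try:
        target = os.readlink(fd_path)   # e.g. 'socket:[53320]'
    except OSError:
        return None                     # fd closed or permission denied
    m = re.match(r'socket:\[(\d+)\]$', target)
    return int(m.group(1)) if m else None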
def handle_nonlocal(self, nfqpkt):
"""Handle comms sent to IP addresses that are not bound to any adapter.
This allows analysts to observe when malware is communicating with
hard-coded IP addresses in MultiHost mode.
"""
try:
pkt = LinuxPacketCtx('handle_nonlocal', nfqpkt)
self.handle_pkt(pkt, self.nonlocal_net_cbs, [])
if pkt.mangled:
nfqpkt.set_payload(pkt.octets)
# Catch-all exceptions are usually bad practice, agreed, but
# python-netfilterqueue has a catch-all that will not print enough
# information to troubleshoot with, so if there is going to be a
# catch-all exception handler anyway, it might as well be mine so that
# I can print out the stack trace before I lose access to this valuable
# debugging information.
except Exception:
self.logger.error('Exception: %s' % (traceback.format_exc()))
raise
nfqpkt.accept()
|
Handle comms sent to IP addresses that are not bound to any adapter.
This allows analysts to observe when malware is communicating with
hard-coded IP addresses in MultiHost mode.
|
handle_nonlocal
|
python
|
mandiant/flare-fakenet-ng
|
fakenet/diverters/linux.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/linux.py
|
Apache-2.0
|
def handle_incoming(self, nfqpkt):
"""Incoming packet hook.
Specific to incoming packets:
5.) If SingleHost mode:
a.) Conditionally fix up source IPs to support IP forwarding for
otherwise foreign-destined packets
4.) Conditionally mangle destination ports to implement port forwarding
for unbound ports to point to the default listener
No return value.
"""
try:
pkt = LinuxPacketCtx('handle_incoming', nfqpkt)
self.handle_pkt(pkt, self.incoming_net_cbs,
self.incoming_trans_cbs)
if pkt.mangled:
nfqpkt.set_payload(pkt.octets)
except Exception:
self.logger.error('Exception: %s' % (traceback.format_exc()))
raise
nfqpkt.accept()
|
Incoming packet hook.
Specific to incoming packets:
5.) If SingleHost mode:
a.) Conditionally fix up source IPs to support IP forwarding for
otherwise foreign-destined packets
4.) Conditionally mangle destination ports to implement port forwarding
for unbound ports to point to the default listener
No return value.
|
handle_incoming
|
python
|
mandiant/flare-fakenet-ng
|
fakenet/diverters/linux.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/linux.py
|
Apache-2.0
|
def handle_outgoing(self, nfqpkt):
"""Outgoing packet hook.
Specific to outgoing packets:
4.) If SingleHost mode:
a.) Conditionally log packets destined for foreign IP addresses
(the corresponding check for MultiHost mode is called by
handle_nonlocal())
b.) Conditionally mangle destination IPs for otherwise foreign-
destined packets to implement IP forwarding
5.) Conditionally fix up mangled source ports to support port
forwarding
No return value.
"""
try:
pkt = LinuxPacketCtx('handle_outgoing', nfqpkt)
self.handle_pkt(pkt, self.outgoing_net_cbs,
self.outgoing_trans_cbs)
if pkt.mangled:
nfqpkt.set_payload(pkt.octets)
except Exception:
self.logger.error('Exception: %s' % (traceback.format_exc()))
raise
nfqpkt.accept()
|
Outgoing packet hook.
Specific to outgoing packets:
4.) If SingleHost mode:
a.) Conditionally log packets destined for foreign IP addresses
(the corresponding check for MultiHost mode is called by
handle_nonlocal())
b.) Conditionally mangle destination IPs for otherwise foreign-
destined packets to implement IP forwarding
5.) Conditionally fix up mangled source ports to support port
forwarding
No return value.
|
handle_outgoing
|
python
|
mandiant/flare-fakenet-ng
|
fakenet/diverters/linux.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/linux.py
|
Apache-2.0
|
def check_log_nonlocal(self, crit, pkt):
"""Conditionally log packets having a foreign destination.
Each foreign destination will be logged only once if the Linux
Diverter's internal log_nonlocal_only_once flag is set. Otherwise, any
foreign destination IP address will be logged each time it is observed.
"""
if pkt.dst_ip not in self.ip_addrs[pkt.ipver]:
self.pdebug(DNONLOC, 'Nonlocal %s' % pkt.hdrToStr())
first_sighting = (pkt.dst_ip not in self.nonlocal_ips_already_seen)
if first_sighting:
self.nonlocal_ips_already_seen.append(pkt.dst_ip)
# Log when a new IP is observed OR if we are not restricted to
# logging only the first occurrence of a given nonlocal IP.
if first_sighting or (not self.log_nonlocal_only_once):
self.logger.info(
'Received nonlocal IPv%d datagram destined for %s' %
(pkt.ipver, pkt.dst_ip))
return None
|
Conditionally log packets having a foreign destination.
Each foreign destination will be logged only once if the Linux
Diverter's internal log_nonlocal_only_once flag is set. Otherwise, any
foreign destination IP address will be logged each time it is observed.
|
check_log_nonlocal
|
python
|
mandiant/flare-fakenet-ng
|
fakenet/diverters/linux.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/linux.py
|
Apache-2.0
|
def redirIcmpIpUnconditionally(self, crit, pkt):
"""Redirect ICMP to loopback or external IP if necessary.
On Windows, we can't conveniently use an iptables REDIRECT rule to get
ICMP packets sent back home for free, so here is some code.
"""
if (pkt.is_icmp and
pkt.icmp_id not in self.blacklist_ids["ICMP"] and
pkt.dst_ip not in [self.loopback_ip, self.external_ip]):
self.logger.info('Modifying ICMP packet (type %d, code %d):' %
(pkt.icmp_type, pkt.icmp_code))
self.logger.info(' from: %s' % (pkt.hdrToStr()))
pkt.dst_ip = self.getNewDestinationIp(pkt.src_ip)
self.logger.info(' to: %s' % (pkt.hdrToStr()))
return pkt
|
Redirect ICMP to loopback or external IP if necessary.
On Windows, we can't conveniently use an iptables REDIRECT rule to get
ICMP packets sent back home for free, so here is some code.
|
redirIcmpIpUnconditionally
|
python
|
mandiant/flare-fakenet-ng
|
fakenet/diverters/windows.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/windows.py
|
Apache-2.0
|
def fix_gateway(self):
"""Check if there is a gateway configured on any of the Ethernet
interfaces. If that's not the case, then locate configured IP address
and set a gateway automatically. This is necessary for VMWare Host-Only
DHCP server which leaves default gateway empty.
"""
fixed = False
for adapter in self.get_adapters_info():
# Look for a DHCP interface with a set IP address but no gateway
# (Host-Only)
if self.check_ipaddresses_interface(adapter) and adapter.DhcpEnabled:
(ip_address, netmask) = next(self.get_ipaddresses_netmask(adapter))
# set the gateway ip address to be that of the virtual network adapter
# https://docs.vmware.com/en/VMware-Workstation-Pro/17/com.vmware.ws.using.doc/GUID-9831F49E-1A83-4881-BB8A-D4573F2C6D91.html
gw_address = ip_address[:ip_address.rfind('.')] + '.1'
interface_name = self.get_adapter_friendlyname(adapter.Index)
# Don't set gateway on loopback interfaces (e.g. Npcap Loopback
# Adapter)
if not "loopback" in interface_name.lower():
self.adapters_dhcp_restore.append(interface_name)
cmd_set_gw = "netsh interface ip set address name=\"%s\" static %s %s %s" % (
interface_name, ip_address, netmask, gw_address)
# Configure gateway
try:
subprocess.check_call(cmd_set_gw, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except subprocess.CalledProcessError as e:
self.logger.error(" Failed to set gateway %s on interface %s."
% (gw_address, interface_name))
else:
self.logger.info(" Setting gateway %s on interface %s"
% (gw_address, interface_name))
fixed = True
return fixed
|
Check if there is a gateway configured on any of the Ethernet
interfaces. If that's not the case, then locate configured IP address
and set a gateway automatically. This is necessary for VMWare Host-Only
DHCP server which leaves default gateway empty.
|
fix_gateway
|
python
|
mandiant/flare-fakenet-ng
|
fakenet/diverters/winutil.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/winutil.py
|
Apache-2.0
|
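The '.1' gateway derivation above in isolation, with an illustrative host-only address:

ip_address = '192.168.56.128'
gw_address = ip_address[:ip_address.rfind('.')] + '.1'
assert gw_address == '192.168.56.1'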
def fix_dns(self):
"""Check if there is a DNS server on any of the Ethernet interfaces. If
that's not the case, then locate configured IP address and set a DNS
server automatically.
"""
fixed = False
for adapter in self.get_adapters_info():
if self.check_ipaddresses_interface(adapter):
ip_address = next(self.get_ipaddresses(adapter))
dns_address = ip_address
interface_name = self.get_adapter_friendlyname(adapter.Index)
# Don't set DNS on loopback interfaces (e.g. Npcap Loopback
# Adapter)
if not "loopback" in interface_name.lower():
self.adapters_dns_restore.append(interface_name)
cmd_set_dns = "netsh interface ip set dns name=\"%s\" static %s" % (
interface_name, dns_address)
# Configure DNS server
try:
subprocess.check_output(cmd_set_dns,
shell=True,
stderr=subprocess.PIPE)
except subprocess.CalledProcessError as e:
self.logger.error(" Failed to set DNS %s on interface %s."
% (dns_address, interface_name))
self.logger.error(" netsh failed with error: %s"
% (e.output))
else:
self.logger.info(" Setting DNS %s on interface %s"
% (dns_address, interface_name))
fixed = True
return fixed
|
Check if there is a DNS server on any of the Ethernet interfaces. If
that's not the case, then locate configured IP address and set a DNS
server automatically.
|
fix_dns
|
python
|
mandiant/flare-fakenet-ng
|
fakenet/diverters/winutil.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/winutil.py
|
Apache-2.0
|
def failEarly(self):
"""Raise exceptions upon construction rather than later."""
# Test generating banner
banner_generated = str(self)
# Test generating and getting length of banner
banner_generated_len = len(self)
return banner_generated, banner_generated_len
|
Raise exceptions upon construction rather than later.
|
failEarly
|
python
|
mandiant/flare-fakenet-ng
|
fakenet/listeners/BannerFactory.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/listeners/BannerFactory.py
|
Apache-2.0
|
def __len__(self):
"""Needed for pyftpdlib.
If the length changes between the time when the caller obtains the
length and the time when the caller obtains the latest generated
string, then there is not much that could reasonably be done. It would
be possible to cache the formatted banner with a short expiry so that
temporally clustered __len__() and __repr__() call sequences would view
consistent and coherent string contents, however this seems like
overkill since the use case is really just allowing pyftpdlib to
determine which way to send the response (directly versus push() if the
length exceeds a threshold of 75 characters). In this case, if the
banner string length and contents are inconsistent, it appears that the
only effect will be to erroneously send the message differently. Test
code has been left in place for easy repro in case this proves to be an
issue on some future/other platform.
"""
# Test path: simulate length of 75 but actual string of length 76 (part
# 1/2) to test pyftpdlib/handlers.py:1321
if self.test_pyftpdlib_handler_banner_threshold75:
return self.len_75
# Normal path: return the length of the banner generated by self.fmt()
return len(self.fmt())
|
Needed for pyftpdlib.
If the length changes between the time when the caller obtains the
length and the time when the caller obtains the latest generated
string, then there is not much that could reasonably be done. It would
be possible to cache the formatted banner with a short expiry so that
temporally clustered __len__() and __repr__() call sequences would view
consistent and coherent string contents, however this seems like
overkill since the use case is really just allowing pyftpdlib to
determine which way to send the response (directly versus push() if the
length exceeds a threshold of 75 characters). In this case, if the
banner string length and contents are inconsistent, it appears that the
only effect will be to erroneously send the message differently. Test
code has been left in place for easy repro in case this proves to be an
issue on some future/other platform.
|
__len__
|
python
|
mandiant/flare-fakenet-ng
|
fakenet/listeners/BannerFactory.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/listeners/BannerFactory.py
|
Apache-2.0
|
def genBanner(self, config, bannerdict, defaultbannerkey='!generic'):
"""Select and initialize a banner.
Supported banner escapes:
!<key> - Use the banner whose key in bannerdict is <key>
!random - Use a random banner from bannerdict
!generic - Every listener supporting banners must have a generic
Banners can include literal '\n' or '\t' tokens (slash followed by the
letter n or t) to indicate that a newline or tab should be inserted.
Banners can include {servername} or {tz} to insert the servername or
time zone (hard-coded to 'UTC' as of this writing).
If the user does not specify a banner, then '!generic' is used by
default, resulting in bannerdict['generic'] being used. If the user
specifies a bang escape e.g. '!iis-6', then the banner keyed by that
name will be used. If the user specifies '!random' then a random banner
will be chosen from bannerdict.
Because some banners include the servername as an insertion string,
this method also retrieves the configuration value for ServerName and
incorporates a couple of similar escape sequences:
!random - Randomized servername with random length between 1-15
!gethostname - Use the real hostname
"""
banner = config.get('banner', defaultbannerkey)
servername = config.get('servername', 'localhost')
if servername.startswith('!'):
servername = servername[1:]
if servername.lower() == 'random':
servername = self.randomizeHostname()
elif servername.lower() == 'gethostname':
servername = socket.gethostname()
else:
raise ValueError('ServerName config invalid escape: !%s' %
(servername))
if banner.startswith('!'):
banner = banner[1:]
if banner.lower() == 'random':
banner = random.choice(list(bannerdict.keys()))
elif banner not in bannerdict:
raise ValueError(
'Banner config escape !%s not a valid banner key' %
(banner))
banner = bannerdict[banner]
insertions = {'servername': servername, 'tz': 'UTC'}
return Banner(banner, insertions)
|
Select and initialize a banner.
Supported banner escapes:
!<key> - Use the banner whose key in bannerdict is <key>
!random - Use a random banner from bannerdict
!generic - Every listener supporting banners must have a generic
Banners can include literal '\n' or '\t' tokens (slash followed by the
letter n or t) to indicate that a newline or tab should be inserted.
Banners can include {servername} or {tz} to insert the servername or
time zone (hard-coded to 'UTC' as of this writing).
If the user does not specify a banner, then '!generic' is used by
default, resulting in bannerdict['generic'] being used. If the user
specifies a bang escape e.g. '!iis-6', then the banner keyed by that
name will be used. If the user specifies '!random' then a random banner
will be chosen from bannerdict.
Because some banners include the servername as an insertion string,
this method also retrieves the configuration value for ServerName and
incorporates a couple of similar escape sequences:
!random - Randomized servername with random length between 1-15
!gethostname - Use the real hostname
|
genBanner
|
python
|
mandiant/flare-fakenet-ng
|
fakenet/listeners/BannerFactory.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/listeners/BannerFactory.py
|
Apache-2.0
|
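A hedged usage sketch; the bannerdict contents and config values are illustrative, and the factory/Banner classes are the ones defined in this module:

bannerdict = {
    'generic': '220 {servername} FTP Server ({tz}) ready',
    'iis-6': '220 Microsoft FTP Service',
}
config = {'banner': '!iis-6', 'servername': '!gethostname'}
# factory = BannerFactory()
# banner = factory.genBanner(config, bannerdict)
# str(banner) then yields the selected banner with {servername}/{tz} filled.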
def log_message(self, log_level, is_process_blacklisted, message, *args):
"""The primary objective of this method is to control the log messages
generated for requests from blacklisted processes.
        In a case where the DNS server is the same as the local machine, the DNS
requests from a blacklisted process will reach the DNS listener (which
listens on port 53 locally) nevertheless. As a user may not wish to see
logs from a blacklisted process, messages are logged with level DEBUG.
Executing FakeNet in the verbose mode will print these logs.
"""
if is_process_blacklisted:
self.server.logger.log(logging.DEBUG, message, *args)
else:
self.server.logger.log(log_level, message, *args)
|
The primary objective of this method is to control the log messages
generated for requests from blacklisted processes.
In a case where the DNS server is the same as the local machine, the DNS
requests from a blacklisted process will reach the DNS listener (which
listens on port 53 locally) nevertheless. As a user may not wish to see
logs from a blacklisted process, messages are logged with level DEBUG.
Executing FakeNet in the verbose mode will print these logs.
|
log_message
|
python
|
mandiant/flare-fakenet-ng
|
fakenet/listeners/DNSListener.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/listeners/DNSListener.py
|
Apache-2.0
|
def main():
"""
Run from the flare-fakenet-ng root dir with the following command:
python2 -m fakenet.listeners.HTTPListener
"""
logging.basicConfig(format='%(asctime)s [%(name)15s] %(message)s', datefmt='%m/%d/%y %I:%M:%S %p', level=logging.DEBUG)
config = {'port': '8443', 'usessl': 'Yes', 'webroot': 'fakenet/defaultFiles' }
listener = HTTPListener(config)
listener.start()
###########################################################################
# Run processing
import time
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
###########################################################################
# Run tests
test(config)
|
Run from the flare-fakenet-ng root dir with the following command:
python2 -m fakenet.listeners.HTTPListener
|
main
|
python
|
mandiant/flare-fakenet-ng
|
fakenet/listeners/HTTPListener.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/listeners/HTTPListener.py
|
Apache-2.0
|
def safe_join(root, path):
"""
Joins a path to a root path, even if path starts with '/', using os.sep
"""
# prepending a '/' ensures '..' does not traverse past the root
# of the path
if not path.startswith('/'):
path = '/' + path
normpath = os.path.normpath(path)
return root + normpath
|
Joins a path to a root path, even if path starts with '/', using os.sep
|
safe_join
|
python
|
mandiant/flare-fakenet-ng
|
fakenet/listeners/ListenerBase.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/listeners/ListenerBase.py
|
Apache-2.0
|
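A worked example of the traversal guard (POSIX semantics): once the leading '/' is prepended, normpath() cannot climb above the root, so '..' sequences are neutralized.

import posixpath

assert posixpath.normpath('/../../etc/passwd') == '/etc/passwd'
# Hence safe_join('/srv/webroot', '../../etc/passwd') returns
# '/srv/webroot/etc/passwd', staying inside the web root.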
def abs_config_path(path):
"""
Attempts to return the absolute path of a path from a configuration
setting.
    First tries to take the abspath() of the parameter to see
if it exists relative to the current working directory. If that does
not exist, attempts to find it relative to the 'fakenet' package
directory. Returns None if neither exists.
"""
# Try absolute path first
abspath = os.path.abspath(path)
if os.path.exists(abspath):
return abspath
if getattr(sys, 'frozen', False) and hasattr(sys, '_MEIPASS'):
relpath = os.path.join(os.path.dirname(sys.executable), path)
else:
# Try to locate the location relative to application path
relpath = os.path.join(os.path.dirname(os.path.dirname(__file__)), path)
if os.path.exists(relpath):
return os.path.abspath(relpath)
return None
|
Attempts to return the absolute path of a path from a configuration
setting.
First tries to take the abspath() of the parameter to see
if it exists relative to the current working directory. If that does
not exist, attempts to find it relative to the 'fakenet' package
directory. Returns None if neither exists.
|
abs_config_path
|
python
|
mandiant/flare-fakenet-ng
|
fakenet/listeners/ListenerBase.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/listeners/ListenerBase.py
|
Apache-2.0
|
def main():
"""
Run from the flare-fakenet-ng root dir with the following command:
python2 -m fakenet.listeners.TFTPListener
"""
logging.basicConfig(format='%(asctime)s [%(name)15s] %(message)s', datefmt='%m/%d/%y %I:%M:%S %p', level=logging.DEBUG)
config = {'port': '69', 'protocol': 'udp', 'tftproot': 'defaultFiles'}
listener = TFTPListener(config)
listener.start()
###########################################################################
# Run processing
import time
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
###########################################################################
# Run tests
#test(config)
listener.stop()
|
Run from the flare-fakenet-ng root dir with the following command:
python2 -m fakenet.listeners.TFTPListener
|
main
|
python
|
mandiant/flare-fakenet-ng
|
fakenet/listeners/TFTPListener.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/listeners/TFTPListener.py
|
Apache-2.0
|
def create_cert(self, cn, ca_cert=None, ca_key=None, cert_dir=None):
"""
Create a cert given the common name, a signing CA, CA private key and
the directory output.
return: tuple(None, None) on error
tuple(cert_file_path, key_file_path) on success
"""
f_selfsign = ca_cert is None or ca_key is None
if not cert_dir:
cert_dir = self.abs_config_path(self.config.get('cert_dir'))
else:
cert_dir = os.path.abspath(cert_dir)
cert_file = os.path.join(cert_dir, "%s.crt" % (cn))
key_file = os.path.join(cert_dir, "%s.key" % (cn))
if os.path.exists(cert_file) and os.path.exists(key_file):
return cert_file, key_file
if ca_cert is not None and ca_key is not None:
ca_cert_data = self._load_cert(ca_cert)
if ca_cert_data is None:
return None, None
ca_key_data = self._load_private_key(ca_key)
if ca_key_data is None:
return None, None
# generate crypto keys:
key = crypto.PKey()
key.generate_key(crypto.TYPE_RSA, 2048)
# Create a cert
cert = crypto.X509()
# Setting certificate version to 3. This is required to use certificate
# extensions which have proven necessary when working with browsers
cert.set_version(2)
cert.get_subject().C = "US"
cert.get_subject().CN = cn
cert.set_serial_number(random.randint(1, 0x31337))
now = time.time() / 1000000
na = int(now + self.NOT_AFTER_DELTA_SECONDS)
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(na)
cert.set_pubkey(key)
if f_selfsign:
extensions = [
crypto.X509Extension(b'basicConstraints', True, b'CA:TRUE'),
]
cert.set_issuer(cert.get_subject())
cert.add_extensions(extensions)
cert.sign(key, "sha256")
else:
alt_name = b'DNS:' + cn.encode()
extensions = [
crypto.X509Extension(b'basicConstraints', False, b'CA:FALSE'),
crypto.X509Extension(b'subjectAltName', False, alt_name)
]
cert.set_issuer(ca_cert_data.get_subject())
cert.add_extensions(extensions)
cert.sign(ca_key_data, "sha256")
try:
with open(cert_file, "wb") as cert_file_input:
cert_file_input.write(crypto.dump_certificate(
crypto.FILETYPE_PEM, cert)
)
with open(key_file, "wb") as key_file_output:
key_file_output.write(crypto.dump_privatekey(
crypto.FILETYPE_PEM, key)
)
except IOError:
traceback.print_exc()
return None, None
return cert_file, key_file
|
Create a cert given the common name, a signing CA, CA private key and
the directory output.
return: tuple(None, None) on error
tuple(cert_file_path, key_file_path) on success
|
create_cert
|
python
|
mandiant/flare-fakenet-ng
|
fakenet/listeners/ssl_utils/__init__.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/listeners/ssl_utils/__init__.py
|
Apache-2.0
|
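A minimal sketch for inspecting a certificate produced by create_cert() with pyOpenSSL; the file path is an assumption:

from OpenSSL import crypto

with open('example.com.crt', 'rb') as f:   # illustrative output path
    cert = crypto.load_certificate(crypto.FILETYPE_PEM, f.read())
print(cert.get_subject().CN)   # 'example.com'
print(cert.get_version())      # 2, i.e. an X.509 v3 certificate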
def abs_config_path(self, path):
"""
Attempts to return the absolute path of a path from a configuration
setting.
"""
# Try absolute path first
abspath = os.path.abspath(path)
if os.path.exists(abspath):
return abspath
if getattr(sys, 'frozen', False) and hasattr(sys, '_MEIPASS'):
abspath = os.path.join(os.getcwd(), path)
else:
abspath = os.path.join(os.fspath(Path(__file__).parents[2]), path)
return abspath
|
Attempts to return the absolute path of a path from a configuration
setting.
|
abs_config_path
|
python
|
mandiant/flare-fakenet-ng
|
fakenet/listeners/ssl_utils/__init__.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/listeners/ssl_utils/__init__.py
|
Apache-2.0
|
def get_ips(ipvers):
"""Return IP addresses bound to local interfaces including loopbacks.
Parameters
----------
ipvers : list
IP versions desired (4, 6, or both); ensures the netifaces semantics
        (e.g. netifaces.AF_INET) are localized to this function.
"""
specs = []
results = []
for ver in ipvers:
if ver == 4:
specs.append(netifaces.AF_INET)
elif ver == 6:
specs.append(netifaces.AF_INET6)
else:
raise ValueError('get_ips only supports IP versions 4 and 6')
for iface in netifaces.interfaces():
for spec in specs:
addrs = netifaces.ifaddresses(iface)
# If an interface only has an IPv4 or IPv6 address, then 6 or 4
# respectively will be absent from the keys in the interface
# addresses dictionary.
if spec in addrs:
for link in addrs[spec]:
if 'addr' in link:
results.append(link['addr'])
return results
|
Return IP addresses bound to local interfaces including loopbacks.
Parameters
----------
ipvers : list
IP versions desired (4, 6, or both); ensures the netifaces semantics
(e.g. netifaces.AF_INET) are localized to this function.
|
get_ips
|
python
|
mandiant/flare-fakenet-ng
|
test/test.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/test/test.py
|
Apache-2.0
|
def _irc_evt_handler(self, srv, evt):
"""Check for each case and set the corresponding success flag."""
if evt.type == 'join':
if evt.target.startswith(self.join_chan):
self.join_ok = True
elif evt.type == 'welcome':
if evt.arguments[0].startswith('Welcome to IRC'):
self.welcome_ok = True
elif evt.type == 'privmsg':
if (evt.arguments[0].startswith(self.safehouse) and
evt.source.startswith(self.clouseau)):
self.privmsg_ok = True
elif evt.type == 'pubmsg':
if (evt.arguments[0].startswith(self.black_market) and
evt.target == self.pub_chan):
self.pubmsg_ok = True
|
Check for each case and set the corresponding success flag.
|
_irc_evt_handler
|
python
|
mandiant/flare-fakenet-ng
|
test/test.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/test/test.py
|
Apache-2.0
|
def _irc_script(self, srv):
"""Callback manages individual test cases for IRC."""
# Clear success flags
self.welcome_ok = False
self.join_ok = False
self.privmsg_ok = False
self.pubmsg_ok = False
# This handler should set the success flags in success cases
srv.add_global_handler('join', self._irc_evt_handler)
srv.add_global_handler('welcome', self._irc_evt_handler)
srv.add_global_handler('privmsg', self._irc_evt_handler)
srv.add_global_handler('pubmsg', self._irc_evt_handler)
# Issue all commands, indirectly invoking the event handler for each
# flag
srv.join(self.join_chan)
srv.process_data()
srv.privmsg(self.pub_chan, self.black_market)
srv.process_data()
srv.privmsg(self.clouseau, self.safehouse)
srv.process_data()
srv.quit()
srv.process_data()
if not self.welcome_ok:
raise FakeNetTestException('Welcome test failed')
if not self.join_ok:
raise FakeNetTestException('Join test failed')
if not self.privmsg_ok:
raise FakeNetTestException('privmsg test failed')
if not self.pubmsg_ok:
raise FakeNetTestException('pubmsg test failed')
return all([
self.welcome_ok,
self.join_ok,
self.privmsg_ok,
self.pubmsg_ok
])
|
Callback manages individual test cases for IRC.
|
_irc_script
|
python
|
mandiant/flare-fakenet-ng
|
test/test.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/test/test.py
|
Apache-2.0
|
def _run_irc_script(self, nm, callback):
"""Connect to server and give control to callback."""
r = irc.client.Reactor()
srv = r.server()
srv.connect(self.hostname, self.port, self.nick)
retval = callback(srv)
srv.close()
return retval
|
Connect to server and give control to callback.
|
_run_irc_script
|
python
|
mandiant/flare-fakenet-ng
|
test/test.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/test/test.py
|
Apache-2.0
|
def _filterMatchingTests(self, tests, matchspec):
"""Remove tests that match negative specifications (regexes preceded by
a minus sign) or do not match positive specifications (regexes not
preceded by a minus sign).
Modifies the contents of the tests dictionary.
"""
negatives = []
positives = []
if len(matchspec):
# If the user specifies a minus sign before a regular expression,
# match negatively (exclude any matching tests)
for spec in matchspec:
if spec.startswith('-'):
negatives.append(spec[1:])
else:
positives.append(spec)
# Iterating over tests first, match specifications second to
# preserve the order of the selected tests. Less efficient to
# compile every regex several times, but less confusing.
for testname, test in list(tests.items()):
# First determine if it is to be excluded, in which case,
# remove it and do not evaluate further match specifications.
exclude = False
for spec in negatives:
if bool(re.search(spec, testname)):
exclude = True
if exclude:
tests.pop(testname)
continue
# If the user ONLY specified negative match specifications,
# then admit all tests
if not len(positives):
continue
# Otherwise, only admit if it matches a positive spec
include = False
for spec in positives:
if bool(re.search(spec, testname)):
include = True
break
if not include:
tests.pop(testname)
return
|
Remove tests that match negative specifications (regexes preceded by
a minus sign) or do not match positive specifications (regexes not
preceded by a minus sign).
Modifies the contents of the tests dictionary.
|
_filterMatchingTests
|
python
|
mandiant/flare-fakenet-ng
|
test/test.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/test/test.py
|
Apache-2.0
|
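A standalone illustration of the matchspec semantics with made-up test names; note that specs are regex searches, so 'Redirect' would also match a name like 'NoRedirect':

import re

tests = {'RedirectIPv4': 1, 'RedirectIPv6': 2, 'DNSBasic': 3}
matchspec = ['Redirect', '-IPv6']
negatives = [s[1:] for s in matchspec if s.startswith('-')]
positives = [s for s in matchspec if not s.startswith('-')]
for name in list(tests):
    if any(re.search(s, name) for s in negatives):
        tests.pop(name)                     # excluded by a negative spec
    elif positives and not any(re.search(s, name) for s in positives):
        tests.pop(name)                     # matches no positive spec
assert tests == {'RedirectIPv4': 1}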
def _test_ftp(self, hostname, port=0):
"""Note that the FakeNet-NG Proxy listener won't know what to do with
this client if you point it at some random port, because the client
listens silently for the server 220 welcome message which doesn't give
the Proxy listener anything to work with to decide where to forward it.
"""
fullbuf = ''
m = hashlib.md5()
def update_hash(buf):
m.update(buf)
f = ftplib.FTP()
f.connect(hostname, port)
f.login()
f.set_pasv(False)
f.retrbinary('RETR FakeNet.gif', update_hash)
f.quit()
digest = m.digest()
expected = binascii.unhexlify('a6b78c4791dc8110dec6c55f8a756395')
return (digest == expected)
|
Note that the FakeNet-NG Proxy listener won't know what to do with
this client if you point it at some random port, because the client
listens silently for the server 220 welcome message which doesn't give
the Proxy listener anything to work with to decide where to forward it.
|
_test_ftp
|
python
|
mandiant/flare-fakenet-ng
|
test/test.py
|
https://github.com/mandiant/flare-fakenet-ng/blob/master/test/test.py
|
Apache-2.0
|
def preprocess_input(audio_path, dim_ordering='default'):
'''Reads an audio file and outputs a Mel-spectrogram.
'''
if dim_ordering == 'default':
dim_ordering = K.image_dim_ordering()
assert dim_ordering in {'tf', 'th'}
if librosa_exists():
import librosa
else:
raise RuntimeError('Librosa is required to process audio files.\n' +
'Install it via `pip install librosa` \nor visit ' +
'http://librosa.github.io/librosa/ for details.')
# mel-spectrogram parameters
SR = 12000
N_FFT = 512
N_MELS = 96
HOP_LEN = 256
DURA = 29.12
src, sr = librosa.load(audio_path, sr=SR)
n_sample = src.shape[0]
n_sample_wanted = int(DURA * SR)
# trim the signal at the center
if n_sample < n_sample_wanted: # if too short
src = np.hstack((src, np.zeros((int(DURA * SR) - n_sample,))))
elif n_sample > n_sample_wanted: # if too long
        # Integer division keeps the slice indices ints (Python 3 safe)
        src = src[(n_sample - n_sample_wanted) // 2:
                  (n_sample + n_sample_wanted) // 2]
logam = librosa.logamplitude
melgram = librosa.feature.melspectrogram
x = logam(melgram(y=src, sr=SR, hop_length=HOP_LEN,
n_fft=N_FFT, n_mels=N_MELS) ** 2,
ref_power=1.0)
if dim_ordering == 'th':
x = np.expand_dims(x, axis=0)
elif dim_ordering == 'tf':
x = np.expand_dims(x, axis=3)
return x
|
Reads an audio file and outputs a Mel-spectrogram.
|
preprocess_input
|
python
|
fchollet/deep-learning-models
|
audio_conv_utils.py
|
https://github.com/fchollet/deep-learning-models/blob/master/audio_conv_utils.py
|
MIT
|
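For reference, the constants above pin the output shape: 29.12 s at 12 kHz is 349,440 samples, and with a hop length of 256 librosa's default centered framing produces 1 + floor(349440/256) = 1366 frames, so the Mel-spectrogram is 96 x 1366 before the batch/channel axis is added.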
def decode_predictions(preds, top_n=5):
'''Decode the output of a music tagger model.
# Arguments
preds: 2-dimensional numpy array
top_n: integer in [0, 50], number of items to show
'''
assert len(preds.shape) == 2 and preds.shape[1] == 50
results = []
for pred in preds:
result = zip(TAGS, pred)
result = sorted(result, key=lambda x: x[1], reverse=True)
results.append(result[:top_n])
return results
|
Decode the output of a music tagger model.
# Arguments
preds: 2-dimensional numpy array
top_n: integer in [0, 50], number of items to show
|
decode_predictions
|
python
|
fchollet/deep-learning-models
|
audio_conv_utils.py
|
https://github.com/fchollet/deep-learning-models/blob/master/audio_conv_utils.py
|
MIT
|
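A decoding sketch (random scores stand in for real model output; assumes decode_predictions and its module-level 50-entry TAGS list are importable from audio_conv_utils):

import numpy as np
from audio_conv_utils import decode_predictions

preds = np.random.rand(2, 50)  # e.g. MusicTaggerCRNN.predict() output for two clips
for clip in decode_predictions(preds, top_n=3):
    print([(tag, round(float(score), 3)) for tag, score in clip])
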
def preprocess_input(x):
"""Preprocesses a numpy array encoding a batch of images.
This function applies the "Inception" preprocessing which converts
the RGB values from [0, 255] to [-1, 1]. Note that this preprocessing
function is different from `imagenet_utils.preprocess_input()`.
# Arguments
        x: a 4D numpy array consisting of RGB values within [0, 255].
# Returns
Preprocessed array.
"""
x /= 255.
x -= 0.5
x *= 2.
return x
|
Preprocesses a numpy array encoding a batch of images.
This function applies the "Inception" preprocessing which converts
the RGB values from [0, 255] to [-1, 1]. Note that this preprocessing
function is different from `imagenet_utils.preprocess_input()`.
# Arguments
    x: a 4D numpy array consisting of RGB values within [0, 255].
# Returns
Preprocessed array.
|
preprocess_input
|
python
|
fchollet/deep-learning-models
|
inception_resnet_v2.py
|
https://github.com/fchollet/deep-learning-models/blob/master/inception_resnet_v2.py
|
MIT
|
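A quick check of the scaling above; the array must be floating point, since x is modified in place (a sketch assuming preprocess_input is importable from inception_resnet_v2):

import numpy as np
from inception_resnet_v2 import preprocess_input

x = np.random.randint(0, 256, (1, 299, 299, 3)).astype('float32')
x = preprocess_input(x)
assert -1.0 <= x.min() and x.max() <= 1.0  # [0, 255] mapped onto [-1, 1]
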
def conv2d_bn(x,
filters,
kernel_size,
strides=1,
padding='same',
activation='relu',
use_bias=False,
name=None):
"""Utility function to apply conv + BN.
# Arguments
x: input tensor.
filters: filters in `Conv2D`.
kernel_size: kernel size as in `Conv2D`.
padding: padding mode in `Conv2D`.
activation: activation in `Conv2D`.
strides: strides in `Conv2D`.
name: name of the ops; will become `name + '_ac'` for the activation
and `name + '_bn'` for the batch norm layer.
# Returns
Output tensor after applying `Conv2D` and `BatchNormalization`.
"""
x = Conv2D(filters,
kernel_size,
strides=strides,
padding=padding,
use_bias=use_bias,
name=name)(x)
if not use_bias:
bn_axis = 1 if K.image_data_format() == 'channels_first' else 3
bn_name = None if name is None else name + '_bn'
x = BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)
if activation is not None:
ac_name = None if name is None else name + '_ac'
x = Activation(activation, name=ac_name)(x)
return x
|
Utility function to apply conv + BN.
# Arguments
x: input tensor.
filters: filters in `Conv2D`.
kernel_size: kernel size as in `Conv2D`.
padding: padding mode in `Conv2D`.
activation: activation in `Conv2D`.
strides: strides in `Conv2D`.
name: name of the ops; will become `name + '_ac'` for the activation
and `name + '_bn'` for the batch norm layer.
# Returns
Output tensor after applying `Conv2D` and `BatchNormalization`.
|
conv2d_bn
|
python
|
fchollet/deep-learning-models
|
inception_resnet_v2.py
|
https://github.com/fchollet/deep-learning-models/blob/master/inception_resnet_v2.py
|
MIT
|
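A small sketch of the naming scheme the docstring describes, assuming conv2d_bn and the Keras imports at the top of inception_resnet_v2.py are in scope:

from keras.layers import Input

inp = Input(shape=(299, 299, 3))
y = conv2d_bn(inp, 32, 3, strides=2, padding='valid', name='stem')
# Layers created: 'stem' (Conv2D), 'stem_bn' (BatchNormalization),
# and 'stem_ac' (Activation), matching the name + suffix scheme above.
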
def inception_resnet_block(x, scale, block_type, block_idx, activation='relu'):
"""Adds a Inception-ResNet block.
This function builds 3 types of Inception-ResNet blocks mentioned
in the paper, controlled by the `block_type` argument (which is the
block name used in the official TF-slim implementation):
- Inception-ResNet-A: `block_type='block35'`
- Inception-ResNet-B: `block_type='block17'`
- Inception-ResNet-C: `block_type='block8'`
# Arguments
x: input tensor.
scale: scaling factor to scale the residuals (i.e., the output of
passing `x` through an inception module) before adding them
to the shortcut branch. Let `r` be the output from the residual branch,
the output of this block will be `x + scale * r`.
block_type: `'block35'`, `'block17'` or `'block8'`, determines
the network structure in the residual branch.
block_idx: an `int` used for generating layer names. The Inception-ResNet blocks
are repeated many times in this network. We use `block_idx` to identify
each of the repetitions. For example, the first Inception-ResNet-A block
            will have `block_type='block35', block_idx=0`, and the layer names will have
            a common prefix `'block35_0'`.
activation: activation function to use at the end of the block
            (see the Keras activations documentation).
When `activation=None`, no activation is applied
(i.e., "linear" activation: `a(x) = x`).
# Returns
Output tensor for the block.
# Raises
ValueError: if `block_type` is not one of `'block35'`,
`'block17'` or `'block8'`.
"""
if block_type == 'block35':
branch_0 = conv2d_bn(x, 32, 1)
branch_1 = conv2d_bn(x, 32, 1)
branch_1 = conv2d_bn(branch_1, 32, 3)
branch_2 = conv2d_bn(x, 32, 1)
branch_2 = conv2d_bn(branch_2, 48, 3)
branch_2 = conv2d_bn(branch_2, 64, 3)
branches = [branch_0, branch_1, branch_2]
elif block_type == 'block17':
branch_0 = conv2d_bn(x, 192, 1)
branch_1 = conv2d_bn(x, 128, 1)
branch_1 = conv2d_bn(branch_1, 160, [1, 7])
branch_1 = conv2d_bn(branch_1, 192, [7, 1])
branches = [branch_0, branch_1]
elif block_type == 'block8':
branch_0 = conv2d_bn(x, 192, 1)
branch_1 = conv2d_bn(x, 192, 1)
branch_1 = conv2d_bn(branch_1, 224, [1, 3])
branch_1 = conv2d_bn(branch_1, 256, [3, 1])
branches = [branch_0, branch_1]
else:
raise ValueError('Unknown Inception-ResNet block type. '
'Expects "block35", "block17" or "block8", '
'but got: ' + str(block_type))
block_name = block_type + '_' + str(block_idx)
channel_axis = 1 if K.image_data_format() == 'channels_first' else 3
mixed = Concatenate(axis=channel_axis, name=block_name + '_mixed')(branches)
up = conv2d_bn(mixed,
K.int_shape(x)[channel_axis],
1,
activation=None,
use_bias=True,
name=block_name + '_conv')
x = Lambda(lambda inputs, scale: inputs[0] + inputs[1] * scale,
output_shape=K.int_shape(x)[1:],
arguments={'scale': scale},
name=block_name)([x, up])
if activation is not None:
x = Activation(activation, name=block_name + '_ac')(x)
return x
|
Adds an Inception-ResNet block.
This function builds 3 types of Inception-ResNet blocks mentioned
in the paper, controlled by the `block_type` argument (which is the
block name used in the official TF-slim implementation):
- Inception-ResNet-A: `block_type='block35'`
- Inception-ResNet-B: `block_type='block17'`
- Inception-ResNet-C: `block_type='block8'`
# Arguments
x: input tensor.
scale: scaling factor to scale the residuals (i.e., the output of
passing `x` through an inception module) before adding them
to the shortcut branch. Let `r` be the output from the residual branch,
the output of this block will be `x + scale * r`.
block_type: `'block35'`, `'block17'` or `'block8'`, determines
the network structure in the residual branch.
block_idx: an `int` used for generating layer names. The Inception-ResNet blocks
are repeated many times in this network. We use `block_idx` to identify
each of the repetitions. For example, the first Inception-ResNet-A block
        will have `block_type='block35', block_idx=0`, and the layer names will have
        a common prefix `'block35_0'`.
activation: activation function to use at the end of the block
        (see the Keras activations documentation).
When `activation=None`, no activation is applied
(i.e., "linear" activation: `a(x) = x`).
# Returns
Output tensor for the block.
# Raises
ValueError: if `block_type` is not one of `'block35'`,
`'block17'` or `'block8'`.
|
inception_resnet_block
|
python
|
fchollet/deep-learning-models
|
inception_resnet_v2.py
|
https://github.com/fchollet/deep-learning-models/blob/master/inception_resnet_v2.py
|
MIT
|
def InceptionResNetV2(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000):
"""Instantiates the Inception-ResNet v2 architecture.
Optionally loads weights pre-trained on ImageNet.
Note that when using TensorFlow, for best performance you should
set `"image_data_format": "channels_last"` in your Keras config
at `~/.keras/keras.json`.
The model and the weights are compatible with both TensorFlow and Theano
backends (but not CNTK). The data format convention used by the model is
the one specified in your Keras config file.
Note that the default input image size for this model is 299x299, instead
of 224x224 as in the VGG16 and ResNet models. Also, the input preprocessing
function is different (i.e., do not use `imagenet_utils.preprocess_input()`
with this model. Use `preprocess_input()` defined in this module instead).
# Arguments
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization)
or `'imagenet'` (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is `False` (otherwise the input shape
has to be `(299, 299, 3)` (with `'channels_last'` data format)
            or `(3, 299, 299)` (with `'channels_first'` data format)).
            It should have exactly 3 input channels,
            and width and height should be no smaller than 139.
E.g. `(150, 150, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the last convolutional layer.
- `'avg'` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `'max'` means that global max pooling will be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is `True`, and
if no `weights` argument is specified.
# Returns
A Keras `Model` instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
RuntimeError: If attempting to run this model with an unsupported backend.
"""
if K.backend() in {'cntk'}:
raise RuntimeError(K.backend() + ' backend is currently unsupported for this model.')
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as imagenet with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = _obtain_input_shape(
input_shape,
default_size=299,
min_size=139,
data_format=K.image_data_format(),
require_flatten=False,
weights=weights)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
# Stem block: 35 x 35 x 192
x = conv2d_bn(img_input, 32, 3, strides=2, padding='valid')
x = conv2d_bn(x, 32, 3, padding='valid')
x = conv2d_bn(x, 64, 3)
x = MaxPooling2D(3, strides=2)(x)
x = conv2d_bn(x, 80, 1, padding='valid')
x = conv2d_bn(x, 192, 3, padding='valid')
x = MaxPooling2D(3, strides=2)(x)
# Mixed 5b (Inception-A block): 35 x 35 x 320
branch_0 = conv2d_bn(x, 96, 1)
branch_1 = conv2d_bn(x, 48, 1)
branch_1 = conv2d_bn(branch_1, 64, 5)
branch_2 = conv2d_bn(x, 64, 1)
branch_2 = conv2d_bn(branch_2, 96, 3)
branch_2 = conv2d_bn(branch_2, 96, 3)
branch_pool = AveragePooling2D(3, strides=1, padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 64, 1)
branches = [branch_0, branch_1, branch_2, branch_pool]
channel_axis = 1 if K.image_data_format() == 'channels_first' else 3
x = Concatenate(axis=channel_axis, name='mixed_5b')(branches)
# 10x block35 (Inception-ResNet-A block): 35 x 35 x 320
for block_idx in range(1, 11):
x = inception_resnet_block(x,
scale=0.17,
block_type='block35',
block_idx=block_idx)
# Mixed 6a (Reduction-A block): 17 x 17 x 1088
branch_0 = conv2d_bn(x, 384, 3, strides=2, padding='valid')
branch_1 = conv2d_bn(x, 256, 1)
branch_1 = conv2d_bn(branch_1, 256, 3)
branch_1 = conv2d_bn(branch_1, 384, 3, strides=2, padding='valid')
branch_pool = MaxPooling2D(3, strides=2, padding='valid')(x)
branches = [branch_0, branch_1, branch_pool]
x = Concatenate(axis=channel_axis, name='mixed_6a')(branches)
# 20x block17 (Inception-ResNet-B block): 17 x 17 x 1088
for block_idx in range(1, 21):
x = inception_resnet_block(x,
scale=0.1,
block_type='block17',
block_idx=block_idx)
# Mixed 7a (Reduction-B block): 8 x 8 x 2080
branch_0 = conv2d_bn(x, 256, 1)
branch_0 = conv2d_bn(branch_0, 384, 3, strides=2, padding='valid')
branch_1 = conv2d_bn(x, 256, 1)
branch_1 = conv2d_bn(branch_1, 288, 3, strides=2, padding='valid')
branch_2 = conv2d_bn(x, 256, 1)
branch_2 = conv2d_bn(branch_2, 288, 3)
branch_2 = conv2d_bn(branch_2, 320, 3, strides=2, padding='valid')
branch_pool = MaxPooling2D(3, strides=2, padding='valid')(x)
branches = [branch_0, branch_1, branch_2, branch_pool]
x = Concatenate(axis=channel_axis, name='mixed_7a')(branches)
# 10x block8 (Inception-ResNet-C block): 8 x 8 x 2080
for block_idx in range(1, 10):
x = inception_resnet_block(x,
scale=0.2,
block_type='block8',
block_idx=block_idx)
x = inception_resnet_block(x,
scale=1.,
activation=None,
block_type='block8',
block_idx=10)
# Final convolution block: 8 x 8 x 1536
x = conv2d_bn(x, 1536, 1, name='conv_7b')
if include_top:
# Classification block
x = GlobalAveragePooling2D(name='avg_pool')(x)
x = Dense(classes, activation='softmax', name='predictions')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model
model = Model(inputs, x, name='inception_resnet_v2')
# Load weights
if weights == 'imagenet':
if K.image_data_format() == 'channels_first':
if K.backend() == 'tensorflow':
warnings.warn('You are using the TensorFlow backend, yet you '
'are using the Theano '
'image data format convention '
'(`image_data_format="channels_first"`). '
'For best performance, set '
'`image_data_format="channels_last"` in '
'your Keras config '
'at ~/.keras/keras.json.')
if include_top:
weights_filename = 'inception_resnet_v2_weights_tf_dim_ordering_tf_kernels.h5'
weights_path = get_file(weights_filename,
BASE_WEIGHT_URL + weights_filename,
cache_subdir='models',
md5_hash='e693bd0210a403b3192acc6073ad2e96')
else:
weights_filename = 'inception_resnet_v2_weights_tf_dim_ordering_tf_kernels_notop.h5'
weights_path = get_file(weights_filename,
BASE_WEIGHT_URL + weights_filename,
cache_subdir='models',
md5_hash='d19885ff4a710c122648d3b5c3b684e4')
model.load_weights(weights_path)
return model
|
Instantiates the Inception-ResNet v2 architecture.
Optionally loads weights pre-trained on ImageNet.
Note that when using TensorFlow, for best performance you should
set `"image_data_format": "channels_last"` in your Keras config
at `~/.keras/keras.json`.
The model and the weights are compatible with both TensorFlow and Theano
backends (but not CNTK). The data format convention used by the model is
the one specified in your Keras config file.
Note that the default input image size for this model is 299x299, instead
of 224x224 as in the VGG16 and ResNet models. Also, the input preprocessing
function is different (i.e., do not use `imagenet_utils.preprocess_input()`
with this model. Use `preprocess_input()` defined in this module instead).
# Arguments
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization)
or `'imagenet'` (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is `False` (otherwise the input shape
has to be `(299, 299, 3)` (with `'channels_last'` data format)
        or `(3, 299, 299)` (with `'channels_first'` data format)).
        It should have exactly 3 input channels,
        and width and height should be no smaller than 139.
E.g. `(150, 150, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the last convolutional layer.
- `'avg'` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `'max'` means that global max pooling will be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is `True`, and
if no `weights` argument is specified.
# Returns
A Keras `Model` instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
RuntimeError: If attempting to run this model with an unsupported backend.
|
InceptionResNetV2
|
python
|
fchollet/deep-learning-models
|
inception_resnet_v2.py
|
https://github.com/fchollet/deep-learning-models/blob/master/inception_resnet_v2.py
|
MIT
|
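A classification sketch for the constructor above ('elephant.jpg' is a placeholder path; assumes Keras and this module are importable, and uses the module's own preprocess_input rather than imagenet_utils', as the docstring requires):

import numpy as np
from keras.preprocessing import image
from inception_resnet_v2 import InceptionResNetV2, preprocess_input

model = InceptionResNetV2(weights='imagenet')
img = image.load_img('elephant.jpg', target_size=(299, 299))
x = np.expand_dims(image.img_to_array(img), axis=0)
x = preprocess_input(x)   # the [-1, 1] scaling defined in this module
preds = model.predict(x)  # shape (1, 1000) of ImageNet class scores
print(preds.argmax())
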
def conv2d_bn(x,
filters,
num_row,
num_col,
padding='same',
strides=(1, 1),
name=None):
"""Utility function to apply conv + BN.
Arguments:
x: input tensor.
filters: filters in `Conv2D`.
num_row: height of the convolution kernel.
num_col: width of the convolution kernel.
padding: padding mode in `Conv2D`.
strides: strides in `Conv2D`.
name: name of the ops; will become `name + '_conv'`
for the convolution and `name + '_bn'` for the
batch norm layer.
Returns:
Output tensor after applying `Conv2D` and `BatchNormalization`.
"""
if name is not None:
bn_name = name + '_bn'
conv_name = name + '_conv'
else:
bn_name = None
conv_name = None
if K.image_data_format() == 'channels_first':
bn_axis = 1
else:
bn_axis = 3
x = Conv2D(
filters, (num_row, num_col),
strides=strides,
padding=padding,
use_bias=False,
name=conv_name)(x)
x = BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)
x = Activation('relu', name=name)(x)
return x
|
Utility function to apply conv + BN.
Arguments:
x: input tensor.
filters: filters in `Conv2D`.
num_row: height of the convolution kernel.
num_col: width of the convolution kernel.
padding: padding mode in `Conv2D`.
strides: strides in `Conv2D`.
name: name of the ops; will become `name + '_conv'`
for the convolution and `name + '_bn'` for the
batch norm layer.
Returns:
Output tensor after applying `Conv2D` and `BatchNormalization`.
|
conv2d_bn
|
python
|
fchollet/deep-learning-models
|
inception_v3.py
|
https://github.com/fchollet/deep-learning-models/blob/master/inception_v3.py
|
MIT
|
def InceptionV3(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000):
"""Instantiates the Inception v3 architecture.
Optionally loads weights pre-trained
on ImageNet. Note that when using TensorFlow,
for best performance you should set
`image_data_format="channels_last"` in your Keras config
at ~/.keras/keras.json.
The model and the weights are compatible with both
TensorFlow and Theano. The data format
convention used by the model is the one
specified in your Keras config file.
Note that the default input image size for this model is 299x299.
Arguments:
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(299, 299, 3)` (with `channels_last` data format)
          or `(3, 299, 299)` (with `channels_first` data format)).
          It should have exactly 3 input channels,
          and width and height should be no smaller than 139.
E.g. `(150, 150, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
Returns:
A Keras model instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
"""
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as imagenet with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = _obtain_input_shape(
input_shape,
default_size=299,
min_size=139,
data_format=K.image_data_format(),
include_top=include_top)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
img_input = Input(tensor=input_tensor, shape=input_shape)
if K.image_data_format() == 'channels_first':
channel_axis = 1
else:
channel_axis = 3
x = conv2d_bn(img_input, 32, 3, 3, strides=(2, 2), padding='valid')
x = conv2d_bn(x, 32, 3, 3, padding='valid')
x = conv2d_bn(x, 64, 3, 3)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv2d_bn(x, 80, 1, 1, padding='valid')
x = conv2d_bn(x, 192, 3, 3, padding='valid')
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
    # mixed 0: 35 x 35 x 256
branch1x1 = conv2d_bn(x, 64, 1, 1)
branch5x5 = conv2d_bn(x, 48, 1, 1)
branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 32, 1, 1)
x = layers.concatenate(
[branch1x1, branch5x5, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed0')
# mixed 1: 35 x 35 x 256
branch1x1 = conv2d_bn(x, 64, 1, 1)
branch5x5 = conv2d_bn(x, 48, 1, 1)
branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
x = layers.concatenate(
[branch1x1, branch5x5, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed1')
# mixed 2: 35 x 35 x 256
branch1x1 = conv2d_bn(x, 64, 1, 1)
branch5x5 = conv2d_bn(x, 48, 1, 1)
branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
x = layers.concatenate(
[branch1x1, branch5x5, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed2')
# mixed 3: 17 x 17 x 768
branch3x3 = conv2d_bn(x, 384, 3, 3, strides=(2, 2), padding='valid')
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(
branch3x3dbl, 96, 3, 3, strides=(2, 2), padding='valid')
branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = layers.concatenate(
[branch3x3, branch3x3dbl, branch_pool], axis=channel_axis, name='mixed3')
# mixed 4: 17 x 17 x 768
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 128, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 128, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 128, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate(
[branch1x1, branch7x7, branch7x7dbl, branch_pool],
axis=channel_axis,
name='mixed4')
# mixed 5, 6: 17 x 17 x 768
for i in range(2):
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 160, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 160, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 160, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D(
(3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate(
[branch1x1, branch7x7, branch7x7dbl, branch_pool],
axis=channel_axis,
name='mixed' + str(5 + i))
# mixed 7: 17 x 17 x 768
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 192, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 192, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate(
[branch1x1, branch7x7, branch7x7dbl, branch_pool],
axis=channel_axis,
name='mixed7')
# mixed 8: 8 x 8 x 1280
branch3x3 = conv2d_bn(x, 192, 1, 1)
branch3x3 = conv2d_bn(branch3x3, 320, 3, 3,
strides=(2, 2), padding='valid')
branch7x7x3 = conv2d_bn(x, 192, 1, 1)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1)
branch7x7x3 = conv2d_bn(
branch7x7x3, 192, 3, 3, strides=(2, 2), padding='valid')
branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = layers.concatenate(
[branch3x3, branch7x7x3, branch_pool], axis=channel_axis, name='mixed8')
# mixed 9: 8 x 8 x 2048
for i in range(2):
branch1x1 = conv2d_bn(x, 320, 1, 1)
branch3x3 = conv2d_bn(x, 384, 1, 1)
branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3)
branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1)
branch3x3 = layers.concatenate(
[branch3x3_1, branch3x3_2], axis=channel_axis, name='mixed9_' + str(i))
branch3x3dbl = conv2d_bn(x, 448, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3)
branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3)
branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1)
branch3x3dbl = layers.concatenate(
[branch3x3dbl_1, branch3x3dbl_2], axis=channel_axis)
branch_pool = AveragePooling2D(
(3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate(
[branch1x1, branch3x3, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed' + str(9 + i))
if include_top:
# Classification block
x = GlobalAveragePooling2D(name='avg_pool')(x)
x = Dense(classes, activation='softmax', name='predictions')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name='inception_v3')
# load weights
if weights == 'imagenet':
if K.image_data_format() == 'channels_first':
if K.backend() == 'tensorflow':
warnings.warn('You are using the TensorFlow backend, yet you '
'are using the Theano '
'image data format convention '
'(`image_data_format="channels_first"`). '
'For best performance, set '
'`image_data_format="channels_last"` in '
'your Keras config '
'at ~/.keras/keras.json.')
if include_top:
weights_path = get_file(
'inception_v3_weights_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH,
cache_subdir='models',
md5_hash='9a0d58056eeedaa3f26cb7ebd46da564')
else:
weights_path = get_file(
'inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5',
WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
md5_hash='bcbd6486424b2319ff4ef7d526e38f63')
model.load_weights(weights_path)
if K.backend() == 'theano':
convert_all_kernels_in_model(model)
return model
|
Instantiates the Inception v3 architecture.
Optionally loads weights pre-trained
on ImageNet. Note that when using TensorFlow,
for best performance you should set
`image_data_format="channels_last"` in your Keras config
at ~/.keras/keras.json.
The model and the weights are compatible with both
TensorFlow and Theano. The data format
convention used by the model is the one
specified in your Keras config file.
Note that the default input image size for this model is 299x299.
Arguments:
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(299, 299, 3)` (with `channels_last` data format)
      or `(3, 299, 299)` (with `channels_first` data format)).
      It should have exactly 3 input channels,
      and width and height should be no smaller than 139.
E.g. `(150, 150, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
Returns:
A Keras model instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
|
InceptionV3
|
python
|
fchollet/deep-learning-models
|
inception_v3.py
|
https://github.com/fchollet/deep-learning-models/blob/master/inception_v3.py
|
MIT
|
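A similar sketch for InceptionV3 ('elephant.jpg' is a placeholder path; the [-1, 1] scaling is written out inline rather than assuming a module-level helper):

import numpy as np
from keras.preprocessing import image
from inception_v3 import InceptionV3

model = InceptionV3(weights='imagenet')
img = image.load_img('elephant.jpg', target_size=(299, 299))
x = np.expand_dims(image.img_to_array(img), axis=0)
x = x / 255. * 2. - 1.    # Inception-style scaling to [-1, 1]
preds = model.predict(x)  # shape (1, 1000)
print(preds.argmax())
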
def MobileNet(input_shape=None,
alpha=1.0,
depth_multiplier=1,
dropout=1e-3,
include_top=True,
weights='imagenet',
input_tensor=None,
pooling=None,
classes=1000):
"""Instantiates the MobileNet architecture.
Note that only TensorFlow is supported for now,
therefore it only works with the data format
`image_data_format='channels_last'` in your Keras config
at `~/.keras/keras.json`.
To load a MobileNet model via `load_model`, import the custom
objects `relu6` and `DepthwiseConv2D` and pass them to the
`custom_objects` parameter.
E.g.
model = load_model('mobilenet.h5', custom_objects={
'relu6': mobilenet.relu6,
'DepthwiseConv2D': mobilenet.DepthwiseConv2D})
# Arguments
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` data format)
            or `(3, 224, 224)` (with `channels_first` data format)).
            It should have exactly 3 input channels,
            and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
alpha: controls the width of the network.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
depth_multiplier: depth multiplier for depthwise convolution
(also called the resolution multiplier)
dropout: dropout rate
include_top: whether to include the fully-connected
layer at the top of the network.
weights: `None` (random initialization) or
`imagenet` (ImageNet weights)
input_tensor: optional Keras tensor (i.e. output of
`layers.Input()`)
to use as image input for the model.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
RuntimeError: If attempting to run this model with a
backend that does not support separable convolutions.
"""
if K.backend() != 'tensorflow':
raise RuntimeError('Only Tensorflow backend is currently supported, '
'as other backends do not support '
'depthwise convolution.')
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as ImageNet with `include_top` '
'as true, `classes` should be 1000')
# Determine proper input shape.
input_shape = _obtain_input_shape(input_shape,
default_size=224,
min_size=32,
data_format=K.image_data_format(),
include_top=include_top or weights)
if K.image_data_format() == 'channels_last':
row_axis, col_axis = (0, 1)
else:
row_axis, col_axis = (1, 2)
rows = input_shape[row_axis]
cols = input_shape[col_axis]
if weights == 'imagenet':
if depth_multiplier != 1:
raise ValueError('If imagenet weights are being loaded, '
'depth multiplier must be 1')
if alpha not in [0.25, 0.50, 0.75, 1.0]:
raise ValueError('If imagenet weights are being loaded, '
'alpha can be one of'
'`0.25`, `0.50`, `0.75` or `1.0` only.')
if rows != cols or rows not in [128, 160, 192, 224]:
raise ValueError('If imagenet weights are being loaded, '
'input must have a static square shape (one of '
'(128,128), (160,160), (192,192), or (224, 224)).'
' Input shape provided = %s' % (input_shape,))
if K.image_data_format() != 'channels_last':
warnings.warn('The MobileNet family of models is only available '
'for the input data format "channels_last" '
'(width, height, channels). '
'However your settings specify the default '
'data format "channels_first" (channels, width, height).'
' You should set `image_data_format="channels_last"` '
'in your Keras config located at ~/.keras/keras.json. '
'The model being returned right now will expect inputs '
'to follow the "channels_last" data format.')
K.set_image_data_format('channels_last')
old_data_format = 'channels_first'
else:
old_data_format = None
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
x = _conv_block(img_input, 32, alpha, strides=(2, 2))
x = _depthwise_conv_block(x, 64, alpha, depth_multiplier, block_id=1)
x = _depthwise_conv_block(x, 128, alpha, depth_multiplier,
strides=(2, 2), block_id=2)
x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=3)
x = _depthwise_conv_block(x, 256, alpha, depth_multiplier,
strides=(2, 2), block_id=4)
x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, block_id=5)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier,
strides=(2, 2), block_id=6)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=7)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=8)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=9)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=10)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=11)
x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier,
strides=(2, 2), block_id=12)
x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier, block_id=13)
if include_top:
if K.image_data_format() == 'channels_first':
shape = (int(1024 * alpha), 1, 1)
else:
shape = (1, 1, int(1024 * alpha))
x = GlobalAveragePooling2D()(x)
x = Reshape(shape, name='reshape_1')(x)
x = Dropout(dropout, name='dropout')(x)
x = Conv2D(classes, (1, 1),
padding='same', name='conv_preds')(x)
x = Activation('softmax', name='act_softmax')(x)
x = Reshape((classes,), name='reshape_2')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name='mobilenet_%0.2f_%s' % (alpha, rows))
# load weights
if weights == 'imagenet':
if K.image_data_format() == 'channels_first':
raise ValueError('Weights for "channels_last" format '
'are not available.')
if alpha == 1.0:
alpha_text = '1_0'
elif alpha == 0.75:
alpha_text = '7_5'
elif alpha == 0.50:
alpha_text = '5_0'
else:
alpha_text = '2_5'
if include_top:
            model_name = 'mobilenet_%s_%d_tf.h5' % (alpha_text, rows)
            weight_path = BASE_WEIGHT_PATH + model_name
            weights_path = get_file(model_name,
                                    weight_path,
                                    cache_subdir='models')
else:
            model_name = 'mobilenet_%s_%d_tf_no_top.h5' % (alpha_text, rows)
            weight_path = BASE_WEIGHT_PATH + model_name
            weights_path = get_file(model_name,
                                    weight_path,
                                    cache_subdir='models')
model.load_weights(weights_path)
if old_data_format:
K.set_image_data_format(old_data_format)
return model
|
Instantiates the MobileNet architecture.
Note that only TensorFlow is supported for now,
therefore it only works with the data format
`image_data_format='channels_last'` in your Keras config
at `~/.keras/keras.json`.
To load a MobileNet model via `load_model`, import the custom
objects `relu6` and `DepthwiseConv2D` and pass them to the
`custom_objects` parameter.
E.g.
model = load_model('mobilenet.h5', custom_objects={
'relu6': mobilenet.relu6,
'DepthwiseConv2D': mobilenet.DepthwiseConv2D})
# Arguments
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` data format)
        or `(3, 224, 224)` (with `channels_first` data format)).
        It should have exactly 3 input channels,
        and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
alpha: controls the width of the network.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
depth_multiplier: depth multiplier for depthwise convolution
(also called the resolution multiplier)
dropout: dropout rate
include_top: whether to include the fully-connected
layer at the top of the network.
weights: `None` (random initialization) or
`imagenet` (ImageNet weights)
input_tensor: optional Keras tensor (i.e. output of
`layers.Input()`)
to use as image input for the model.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
RuntimeError: If attempting to run this model with a
backend that does not support separable convolutions.
|
MobileNet
|
python
|
fchollet/deep-learning-models
|
mobilenet.py
|
https://github.com/fchollet/deep-learning-models/blob/master/mobilenet.py
|
MIT
|
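A save/reload sketch exercising the custom_objects requirement the docstring calls out ('mobilenet_0_5_160.h5' is a placeholder filename; relu6 and DepthwiseConv2D are the module-level objects the docstring names):

from keras.models import load_model
import mobilenet  # this module

model = mobilenet.MobileNet(weights='imagenet', alpha=0.5,
                            input_shape=(160, 160, 3))
model.save('mobilenet_0_5_160.h5')
model = load_model('mobilenet_0_5_160.h5', custom_objects={
    'relu6': mobilenet.relu6,
    'DepthwiseConv2D': mobilenet.DepthwiseConv2D})
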
def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)):
"""Adds an initial convolution layer (with batch normalization and relu6).
# Arguments
inputs: Input tensor of shape `(rows, cols, 3)`
(with `channels_last` data format) or
(3, rows, cols) (with `channels_first` data format).
            It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(224, 224, 3)` would be one valid value.
filters: Integer, the dimensionality of the output space
            (i.e. the number of output filters in the convolution).
alpha: controls the width of the network.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
kernel: An integer or tuple/list of 2 integers, specifying the
width and height of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
# Input shape
4D tensor with shape:
`(samples, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if data_format='channels_last'.
# Output shape
4D tensor with shape:
`(samples, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to stride.
# Returns
Output tensor of block.
"""
channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
filters = int(filters * alpha)
x = Conv2D(filters, kernel,
padding='same',
use_bias=False,
strides=strides,
name='conv1')(inputs)
x = BatchNormalization(axis=channel_axis, name='conv1_bn')(x)
return Activation(relu6, name='conv1_relu')(x)
|
Adds an initial convolution layer (with batch normalization and relu6).
# Arguments
inputs: Input tensor of shape `(rows, cols, 3)`
(with `channels_last` data format) or
(3, rows, cols) (with `channels_first` data format).
        It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(224, 224, 3)` would be one valid value.
filters: Integer, the dimensionality of the output space
        (i.e. the number of output filters in the convolution).
alpha: controls the width of the network.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
kernel: An integer or tuple/list of 2 integers, specifying the
width and height of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
# Input shape
4D tensor with shape:
`(samples, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if data_format='channels_last'.
# Output shape
4D tensor with shape:
`(samples, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to stride.
# Returns
Output tensor of block.
|
_conv_block
|
python
|
fchollet/deep-learning-models
|
mobilenet.py
|
https://github.com/fchollet/deep-learning-models/blob/master/mobilenet.py
|
MIT
|
def _depthwise_conv_block(inputs, pointwise_conv_filters, alpha,
depth_multiplier=1, strides=(1, 1), block_id=1):
"""Adds a depthwise convolution block.
A depthwise convolution block consists of a depthwise conv,
batch normalization, relu6, pointwise convolution,
batch normalization and relu6 activation.
# Arguments
inputs: Input tensor of shape `(rows, cols, channels)`
(with `channels_last` data format) or
(channels, rows, cols) (with `channels_first` data format).
pointwise_conv_filters: Integer, the dimensionality of the output space
            (i.e. the number of output filters in the pointwise convolution).
alpha: controls the width of the network.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
depth_multiplier: The number of depthwise convolution output channels
for each input channel.
The total number of depthwise convolution output
channels will be equal to `filters_in * depth_multiplier`.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
block_id: Integer, a unique identification designating the block number.
# Input shape
4D tensor with shape:
`(batch, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, rows, cols, channels)` if data_format='channels_last'.
# Output shape
4D tensor with shape:
`(batch, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to stride.
# Returns
Output tensor of block.
"""
channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
pointwise_conv_filters = int(pointwise_conv_filters * alpha)
x = DepthwiseConv2D((3, 3),
padding='same',
depth_multiplier=depth_multiplier,
strides=strides,
use_bias=False,
name='conv_dw_%d' % block_id)(inputs)
x = BatchNormalization(axis=channel_axis, name='conv_dw_%d_bn' % block_id)(x)
x = Activation(relu6, name='conv_dw_%d_relu' % block_id)(x)
x = Conv2D(pointwise_conv_filters, (1, 1),
padding='same',
use_bias=False,
strides=(1, 1),
name='conv_pw_%d' % block_id)(x)
x = BatchNormalization(axis=channel_axis, name='conv_pw_%d_bn' % block_id)(x)
return Activation(relu6, name='conv_pw_%d_relu' % block_id)(x)
|
Adds a depthwise convolution block.
A depthwise convolution block consists of a depthwise conv,
batch normalization, relu6, pointwise convolution,
batch normalization and relu6 activation.
# Arguments
inputs: Input tensor of shape `(rows, cols, channels)`
(with `channels_last` data format) or
(channels, rows, cols) (with `channels_first` data format).
pointwise_conv_filters: Integer, the dimensionality of the output space
        (i.e. the number of output filters in the pointwise convolution).
alpha: controls the width of the network.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
depth_multiplier: The number of depthwise convolution output channels
for each input channel.
The total number of depthwise convolution output
channels will be equal to `filters_in * depth_multiplier`.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
block_id: Integer, a unique identification designating the block number.
# Input shape
4D tensor with shape:
`(batch, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, rows, cols, channels)` if data_format='channels_last'.
# Output shape
4D tensor with shape:
`(batch, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to stride.
# Returns
Output tensor of block.
|
_depthwise_conv_block
|
python
|
fchollet/deep-learning-models
|
mobilenet.py
|
https://github.com/fchollet/deep-learning-models/blob/master/mobilenet.py
|
MIT
|
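The channel arithmetic implied by the two docstrings above, as a quick pure-Python check (the numbers are arbitrary examples, not values used by MobileNet itself):

filters_in = 64
depth_multiplier = 2
alpha = 0.75
pointwise_conv_filters = 128

depthwise_out = filters_in * depth_multiplier        # 128 channels after conv_dw
pointwise_out = int(pointwise_conv_filters * alpha)  # 96 channels after conv_pw
assert (depthwise_out, pointwise_out) == (128, 96)
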
def MusicTaggerCRNN(weights='msd', input_tensor=None,
include_top=True):
'''Instantiate the MusicTaggerCRNN architecture,
optionally loading weights pre-trained
    on the Million Song Dataset. Note that when using TensorFlow,
for best performance you should set
`image_dim_ordering="tf"` in your Keras config
at ~/.keras/keras.json.
The model and the weights are compatible with both
TensorFlow and Theano. The dimension ordering
convention used by the model is the one
specified in your Keras config file.
For preparing mel-spectrogram input, see
`audio_conv_utils.py` in [applications](https://github.com/fchollet/keras/tree/master/keras/applications).
You will need to install [Librosa](http://librosa.github.io/librosa/)
to use it.
# Arguments
weights: one of `None` (random initialization)
or "msd" (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
include_top: whether to include the 1 fully-connected
layer (output layer) at the top of the network.
If False, the network outputs 32-dim features.
# Returns
A Keras model instance.
'''
if weights not in {'msd', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `msd` '
'(pre-training on Million Song Dataset).')
# Determine proper input shape
if K.image_dim_ordering() == 'th':
input_shape = (1, 96, 1366)
else:
input_shape = (96, 1366, 1)
if input_tensor is None:
melgram_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
melgram_input = Input(tensor=input_tensor, shape=input_shape)
else:
melgram_input = input_tensor
# Determine input axis
if K.image_dim_ordering() == 'th':
channel_axis = 1
freq_axis = 2
time_axis = 3
else:
channel_axis = 3
freq_axis = 1
time_axis = 2
# Input block
x = ZeroPadding2D(padding=(0, 37))(melgram_input)
x = BatchNormalization(axis=time_axis, name='bn_0_freq')(x)
# Conv block 1
x = Convolution2D(64, 3, 3, border_mode='same', name='conv1')(x)
x = BatchNormalization(axis=channel_axis, mode=0, name='bn1')(x)
x = ELU()(x)
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool1')(x)
# Conv block 2
x = Convolution2D(128, 3, 3, border_mode='same', name='conv2')(x)
x = BatchNormalization(axis=channel_axis, mode=0, name='bn2')(x)
x = ELU()(x)
x = MaxPooling2D(pool_size=(3, 3), strides=(3, 3), name='pool2')(x)
# Conv block 3
x = Convolution2D(128, 3, 3, border_mode='same', name='conv3')(x)
x = BatchNormalization(axis=channel_axis, mode=0, name='bn3')(x)
x = ELU()(x)
x = MaxPooling2D(pool_size=(4, 4), strides=(4, 4), name='pool3')(x)
# Conv block 4
x = Convolution2D(128, 3, 3, border_mode='same', name='conv4')(x)
x = BatchNormalization(axis=channel_axis, mode=0, name='bn4')(x)
x = ELU()(x)
x = MaxPooling2D(pool_size=(4, 4), strides=(4, 4), name='pool4')(x)
# reshaping
if K.image_dim_ordering() == 'th':
x = Permute((3, 1, 2))(x)
x = Reshape((15, 128))(x)
# GRU block 1, 2, output
x = GRU(32, return_sequences=True, name='gru1')(x)
x = GRU(32, return_sequences=False, name='gru2')(x)
if include_top:
x = Dense(50, activation='sigmoid', name='output')(x)
# Create model
model = Model(melgram_input, x)
if weights is None:
return model
else:
# Load weights
if K.image_dim_ordering() == 'tf':
weights_path = get_file('music_tagger_crnn_weights_tf_kernels_tf_dim_ordering.h5',
TF_WEIGHTS_PATH,
cache_subdir='models')
else:
weights_path = get_file('music_tagger_crnn_weights_tf_kernels_th_dim_ordering.h5',
TH_WEIGHTS_PATH,
cache_subdir='models')
model.load_weights(weights_path, by_name=True)
if K.backend() == 'theano':
convert_all_kernels_in_model(model)
return model
|
Instantiate the MusicTaggerCRNN architecture,
optionally loading weights pre-trained
on the Million Song Dataset. Note that when using TensorFlow,
for best performance you should set
`image_dim_ordering="tf"` in your Keras config
at ~/.keras/keras.json.
The model and the weights are compatible with both
TensorFlow and Theano. The dimension ordering
convention used by the model is the one
specified in your Keras config file.
For preparing mel-spectrogram input, see
`audio_conv_utils.py` in [applications](https://github.com/fchollet/keras/tree/master/keras/applications).
You will need to install [Librosa](http://librosa.github.io/librosa/)
to use it.
# Arguments
weights: one of `None` (random initialization)
or "msd" (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
include_top: whether to include the 1 fully-connected
layer (output layer) at the top of the network.
If False, the network outputs 32-dim features.
# Returns
A Keras model instance.
|
MusicTaggerCRNN
|
python
|
fchollet/deep-learning-models
|
music_tagger_crnn.py
|
https://github.com/fchollet/deep-learning-models/blob/master/music_tagger_crnn.py
|
MIT
|
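An end-to-end sketch tying this model to the audio utilities earlier in this collection ('song.mp3' is a placeholder path; assumes 'tf' dim ordering and that both modules are importable):

import numpy as np
from audio_conv_utils import preprocess_input, decode_predictions
from music_tagger_crnn import MusicTaggerCRNN

model = MusicTaggerCRNN(weights='msd')
melgram = preprocess_input('song.mp3')      # (96, 1366, 1) with 'tf' ordering
preds = model.predict(np.array([melgram]))  # (1, 50) tag scores
print(decode_predictions(preds, top_n=5)[0])
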
def identity_block(input_tensor, kernel_size, filters, stage, block):
"""The identity block is the block that has no conv layer at shortcut.
# Arguments
input_tensor: input tensor
        kernel_size: default 3, the kernel size of the middle conv layer at the main path
        filters: list of integers, the filters of the 3 conv layers at the main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
# Returns
Output tensor for the block.
"""
filters1, filters2, filters3 = filters
if K.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Conv2D(filters1, (1, 1), name=conv_name_base + '2a')(input_tensor)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = Conv2D(filters2, kernel_size,
padding='same', name=conv_name_base + '2b')(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c')(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)
x = layers.add([x, input_tensor])
x = Activation('relu')(x)
return x
|
The identity block is the block that has no conv layer at shortcut.
# Arguments
input_tensor: input tensor
    kernel_size: default 3, the kernel size of the middle conv layer at the main path
    filters: list of integers, the filters of the 3 conv layers at the main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
# Returns
Output tensor for the block.
|
identity_block
|
python
|
fchollet/deep-learning-models
|
resnet50.py
|
https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py
|
MIT
|
def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
"""conv_block is the block that has a conv layer at shortcut
# Arguments
input_tensor: input tensor
        kernel_size: default 3, the kernel size of the middle conv layer at the main path
        filters: list of integers, the filters of the 3 conv layers at the main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
# Returns
Output tensor for the block.
    Note that from stage 3, the first conv layer at the main path has strides=(2, 2),
    and the shortcut should have strides=(2, 2) as well.
"""
filters1, filters2, filters3 = filters
if K.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Conv2D(filters1, (1, 1), strides=strides,
name=conv_name_base + '2a')(input_tensor)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = Conv2D(filters2, kernel_size, padding='same',
name=conv_name_base + '2b')(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c')(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)
shortcut = Conv2D(filters3, (1, 1), strides=strides,
name=conv_name_base + '1')(input_tensor)
shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1')(shortcut)
x = layers.add([x, shortcut])
x = Activation('relu')(x)
return x
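# --- Hedged usage sketch (editor addition, not part of the original file) ---
# Unlike identity_block, conv_block projects the shortcut to filters[2]
# channels, so it can change both the channel count and (via strides) the
# spatial size; with the default strides=(2, 2) it halves width and height.
def _demo_conv_block():
    from keras.layers import Input
    from keras import backend as K
    x_in = Input(shape=(56, 56, 64))
    x_out = conv_block(x_in, 3, [64, 64, 256], stage=9, block='z')
    assert K.int_shape(x_out) == (None, 28, 28, 256)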
|
conv_block is the block that has a conv layer at shortcut
# Arguments
input_tensor: input tensor
    kernel_size: default 3, the kernel size of middle conv layer at main path
    filters: list of integers, the filters of the 3 conv layers at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
# Returns
Output tensor for the block.
Note that from stage 3, the first conv layer at main path is with strides=(2,2)
And the shortcut should have strides=(2,2) as well
|
conv_block
|
python
|
fchollet/deep-learning-models
|
resnet50.py
|
https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py
|
MIT
|
def ResNet50(include_top=True, weights='imagenet',
input_tensor=None, input_shape=None,
pooling=None,
classes=1000):
"""Instantiates the ResNet50 architecture.
Optionally loads weights pre-trained
on ImageNet. Note that when using TensorFlow,
for best performance you should set
`image_data_format="channels_last"` in your Keras config
at ~/.keras/keras.json.
The model and the weights are compatible with both
TensorFlow and Theano. The data format
convention used by the model is the one
specified in your Keras config file.
# Arguments
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` data format)
            or `(3, 224, 224)` (with `channels_first` data format)).
            It should have exactly 3 input channels,
and width and height should be no smaller than 197.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
"""
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as imagenet with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = _obtain_input_shape(input_shape,
default_size=224,
min_size=197,
data_format=K.image_data_format(),
include_top=include_top)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
if K.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
x = ZeroPadding2D((3, 3))(img_input)
x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)
x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
x = Activation('relu')(x)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')
x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
x = AveragePooling2D((7, 7), name='avg_pool')(x)
if include_top:
x = Flatten()(x)
x = Dense(classes, activation='softmax', name='fc1000')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name='resnet50')
# load weights
if weights == 'imagenet':
if include_top:
weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH,
cache_subdir='models',
md5_hash='a7b3fe01876f51b976af0dea6bc144eb')
else:
weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
md5_hash='a268eb855778b3df3c7506639542a6af')
model.load_weights(weights_path)
if K.backend() == 'theano':
layer_utils.convert_all_kernels_in_model(model)
if K.image_data_format() == 'channels_first':
if include_top:
maxpool = model.get_layer(name='avg_pool')
shape = maxpool.output_shape[1:]
dense = model.get_layer(name='fc1000')
layer_utils.convert_dense_weights_data_format(dense, shape, 'channels_first')
if K.backend() == 'tensorflow':
warnings.warn('You are using the TensorFlow backend, yet you '
'are using the Theano '
'image data format convention '
'(`image_data_format="channels_first"`). '
'For best performance, set '
'`image_data_format="channels_last"` in '
'your Keras config '
'at ~/.keras/keras.json.')
return model
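# --- Hedged usage sketch (editor addition, not part of the original file) ---
# Classifying one image with the pretrained top. preprocess_input and
# decode_predictions come from this repository's companion imagenet_utils
# module; 'elephant.jpg' is an illustrative filename.
def _demo_resnet50():
    import numpy as np
    from keras.preprocessing import image
    from imagenet_utils import preprocess_input, decode_predictions
    model = ResNet50(include_top=True, weights='imagenet')
    img = image.load_img('elephant.jpg', target_size=(224, 224))
    x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
    print(decode_predictions(model.predict(x), top=3))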
|
Instantiates the ResNet50 architecture.
Optionally loads weights pre-trained
on ImageNet. Note that when using TensorFlow,
for best performance you should set
`image_data_format="channels_last"` in your Keras config
at ~/.keras/keras.json.
The model and the weights are compatible with both
TensorFlow and Theano. The data format
convention used by the model is the one
specified in your Keras config file.
# Arguments
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` data format)
        or `(3, 224, 224)` (with `channels_first` data format)).
        It should have exactly 3 input channels,
and width and height should be no smaller than 197.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
|
ResNet50
|
python
|
fchollet/deep-learning-models
|
resnet50.py
|
https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py
|
MIT
|
def VGG16(include_top=True, weights='imagenet',
input_tensor=None, input_shape=None,
pooling=None,
classes=1000):
"""Instantiates the VGG16 architecture.
Optionally loads weights pre-trained
on ImageNet. Note that when using TensorFlow,
for best performance you should set
`image_data_format="channels_last"` in your Keras config
at ~/.keras/keras.json.
The model and the weights are compatible with both
TensorFlow and Theano. The data format
convention used by the model is the one
specified in your Keras config file.
# Arguments
include_top: whether to include the 3 fully-connected
layers at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` data format)
            or `(3, 224, 224)` (with `channels_first` data format)).
            It should have exactly 3 input channels,
and width and height should be no smaller than 48.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
"""
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as imagenet with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = _obtain_input_shape(input_shape,
default_size=224,
min_size=48,
data_format=K.image_data_format(),
include_top=include_top)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
# Block 1
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
# Block 2
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
# Block 3
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
# Block 4
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
# Block 5
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
if include_top:
# Classification block
x = Flatten(name='flatten')(x)
x = Dense(4096, activation='relu', name='fc1')(x)
x = Dense(4096, activation='relu', name='fc2')(x)
x = Dense(classes, activation='softmax', name='predictions')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name='vgg16')
# load weights
if weights == 'imagenet':
if include_top:
weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH,
cache_subdir='models')
else:
weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
WEIGHTS_PATH_NO_TOP,
cache_subdir='models')
model.load_weights(weights_path)
if K.backend() == 'theano':
layer_utils.convert_all_kernels_in_model(model)
if K.image_data_format() == 'channels_first':
if include_top:
maxpool = model.get_layer(name='block5_pool')
shape = maxpool.output_shape[1:]
dense = model.get_layer(name='fc1')
layer_utils.convert_dense_weights_data_format(dense, shape, 'channels_first')
if K.backend() == 'tensorflow':
warnings.warn('You are using the TensorFlow backend, yet you '
'are using the Theano '
'image data format convention '
'(`image_data_format="channels_first"`). '
'For best performance, set '
'`image_data_format="channels_last"` in '
'your Keras config '
'at ~/.keras/keras.json.')
return model
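# --- Hedged usage sketch (editor addition, not part of the original file) ---
# Using the network as a fixed feature extractor: with include_top=False and
# pooling='avg', any input of at least 48x48 pixels maps to a 512-dimensional
# vector (512 is the channel count of block5).
def _demo_vgg16_features():
    import numpy as np
    model = VGG16(include_top=False, weights='imagenet', pooling='avg',
                  input_shape=(128, 128, 3))
    features = model.predict(np.zeros((1, 128, 128, 3)))
    assert features.shape == (1, 512)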
|
Instantiates the VGG16 architecture.
Optionally loads weights pre-trained
on ImageNet. Note that when using TensorFlow,
for best performance you should set
`image_data_format="channels_last"` in your Keras config
at ~/.keras/keras.json.
The model and the weights are compatible with both
TensorFlow and Theano. The data format
convention used by the model is the one
specified in your Keras config file.
# Arguments
include_top: whether to include the 3 fully-connected
layers at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` data format)
        or `(3, 224, 224)` (with `channels_first` data format)).
        It should have exactly 3 input channels,
and width and height should be no smaller than 48.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
|
VGG16
|
python
|
fchollet/deep-learning-models
|
vgg16.py
|
https://github.com/fchollet/deep-learning-models/blob/master/vgg16.py
|
MIT
|
def VGG19(include_top=True, weights='imagenet',
input_tensor=None, input_shape=None,
pooling=None,
classes=1000):
"""Instantiates the VGG19 architecture.
Optionally loads weights pre-trained
on ImageNet. Note that when using TensorFlow,
for best performance you should set
`image_data_format="channels_last"` in your Keras config
at ~/.keras/keras.json.
The model and the weights are compatible with both
TensorFlow and Theano. The data format
convention used by the model is the one
specified in your Keras config file.
# Arguments
include_top: whether to include the 3 fully-connected
layers at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` data format)
            or `(3, 224, 224)` (with `channels_first` data format)).
            It should have exactly 3 input channels,
and width and height should be no smaller than 48.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
"""
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as imagenet with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = _obtain_input_shape(input_shape,
default_size=224,
min_size=48,
data_format=K.image_data_format(),
include_top=include_top)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
# Block 1
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
# Block 2
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
# Block 3
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv4')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
# Block 4
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv4')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
# Block 5
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv4')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
if include_top:
# Classification block
x = Flatten(name='flatten')(x)
x = Dense(4096, activation='relu', name='fc1')(x)
x = Dense(4096, activation='relu', name='fc2')(x)
x = Dense(classes, activation='softmax', name='predictions')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name='vgg19')
# load weights
if weights == 'imagenet':
if include_top:
weights_path = get_file('vgg19_weights_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH,
cache_subdir='models')
else:
weights_path = get_file('vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5',
WEIGHTS_PATH_NO_TOP,
cache_subdir='models')
model.load_weights(weights_path)
if K.backend() == 'theano':
layer_utils.convert_all_kernels_in_model(model)
if K.image_data_format() == 'channels_first':
if include_top:
maxpool = model.get_layer(name='block5_pool')
shape = maxpool.output_shape[1:]
dense = model.get_layer(name='fc1')
layer_utils.convert_dense_weights_data_format(dense, shape, 'channels_first')
if K.backend() == 'tensorflow':
warnings.warn('You are using the TensorFlow backend, yet you '
'are using the Theano '
'image data format convention '
'(`image_data_format="channels_first"`). '
'For best performance, set '
'`image_data_format="channels_last"` in '
'your Keras config '
'at ~/.keras/keras.json.')
return model
|
Instantiates the VGG19 architecture.
Optionally loads weights pre-trained
on ImageNet. Note that when using TensorFlow,
for best performance you should set
`image_data_format="channels_last"` in your Keras config
at ~/.keras/keras.json.
The model and the weights are compatible with both
TensorFlow and Theano. The data format
convention used by the model is the one
specified in your Keras config file.
# Arguments
include_top: whether to include the 3 fully-connected
layers at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` data format)
        or `(3, 224, 224)` (with `channels_first` data format)).
        It should have exactly 3 input channels,
and width and height should be no smaller than 48.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
|
VGG19
|
python
|
fchollet/deep-learning-models
|
vgg19.py
|
https://github.com/fchollet/deep-learning-models/blob/master/vgg19.py
|
MIT
|
def Xception(include_top=True, weights='imagenet',
input_tensor=None, input_shape=None,
pooling=None,
classes=1000):
"""Instantiates the Xception architecture.
Optionally loads weights pre-trained
on ImageNet. This model is available for TensorFlow only,
and can only be used with inputs following the TensorFlow
data format `(width, height, channels)`.
You should set `image_data_format="channels_last"` in your Keras config
located at ~/.keras/keras.json.
Note that the default input image size for this model is 299x299.
# Arguments
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
            has to be `(299, 299, 3)`).
            It should have exactly 3 input channels,
and width and height should be no smaller than 71.
E.g. `(150, 150, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
RuntimeError: If attempting to run this model with a
backend that does not support separable convolutions.
"""
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as imagenet with `include_top`'
' as true, `classes` should be 1000')
if K.backend() != 'tensorflow':
raise RuntimeError('The Xception model is only available with '
'the TensorFlow backend.')
if K.image_data_format() != 'channels_last':
warnings.warn('The Xception model is only available for the '
'input data format "channels_last" '
'(width, height, channels). '
'However your settings specify the default '
'data format "channels_first" (channels, width, height). '
'You should set `image_data_format="channels_last"` in your Keras '
'config located at ~/.keras/keras.json. '
'The model being returned right now will expect inputs '
'to follow the "channels_last" data format.')
K.set_image_data_format('channels_last')
old_data_format = 'channels_first'
else:
old_data_format = None
# Determine proper input shape
input_shape = _obtain_input_shape(input_shape,
default_size=299,
min_size=71,
data_format=K.image_data_format(),
include_top=include_top)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
x = Conv2D(32, (3, 3), strides=(2, 2), use_bias=False, name='block1_conv1')(img_input)
x = BatchNormalization(name='block1_conv1_bn')(x)
x = Activation('relu', name='block1_conv1_act')(x)
x = Conv2D(64, (3, 3), use_bias=False, name='block1_conv2')(x)
x = BatchNormalization(name='block1_conv2_bn')(x)
x = Activation('relu', name='block1_conv2_act')(x)
residual = Conv2D(128, (1, 1), strides=(2, 2),
padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False, name='block2_sepconv1')(x)
x = BatchNormalization(name='block2_sepconv1_bn')(x)
x = Activation('relu', name='block2_sepconv2_act')(x)
x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False, name='block2_sepconv2')(x)
x = BatchNormalization(name='block2_sepconv2_bn')(x)
x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block2_pool')(x)
x = layers.add([x, residual])
residual = Conv2D(256, (1, 1), strides=(2, 2),
padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
x = Activation('relu', name='block3_sepconv1_act')(x)
x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False, name='block3_sepconv1')(x)
x = BatchNormalization(name='block3_sepconv1_bn')(x)
x = Activation('relu', name='block3_sepconv2_act')(x)
x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False, name='block3_sepconv2')(x)
x = BatchNormalization(name='block3_sepconv2_bn')(x)
x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block3_pool')(x)
x = layers.add([x, residual])
residual = Conv2D(728, (1, 1), strides=(2, 2),
padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
x = Activation('relu', name='block4_sepconv1_act')(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name='block4_sepconv1')(x)
x = BatchNormalization(name='block4_sepconv1_bn')(x)
x = Activation('relu', name='block4_sepconv2_act')(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name='block4_sepconv2')(x)
x = BatchNormalization(name='block4_sepconv2_bn')(x)
x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block4_pool')(x)
x = layers.add([x, residual])
for i in range(8):
residual = x
prefix = 'block' + str(i + 5)
x = Activation('relu', name=prefix + '_sepconv1_act')(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name=prefix + '_sepconv1')(x)
x = BatchNormalization(name=prefix + '_sepconv1_bn')(x)
x = Activation('relu', name=prefix + '_sepconv2_act')(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name=prefix + '_sepconv2')(x)
x = BatchNormalization(name=prefix + '_sepconv2_bn')(x)
x = Activation('relu', name=prefix + '_sepconv3_act')(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name=prefix + '_sepconv3')(x)
x = BatchNormalization(name=prefix + '_sepconv3_bn')(x)
x = layers.add([x, residual])
residual = Conv2D(1024, (1, 1), strides=(2, 2),
padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
x = Activation('relu', name='block13_sepconv1_act')(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name='block13_sepconv1')(x)
x = BatchNormalization(name='block13_sepconv1_bn')(x)
x = Activation('relu', name='block13_sepconv2_act')(x)
x = SeparableConv2D(1024, (3, 3), padding='same', use_bias=False, name='block13_sepconv2')(x)
x = BatchNormalization(name='block13_sepconv2_bn')(x)
x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block13_pool')(x)
x = layers.add([x, residual])
x = SeparableConv2D(1536, (3, 3), padding='same', use_bias=False, name='block14_sepconv1')(x)
x = BatchNormalization(name='block14_sepconv1_bn')(x)
x = Activation('relu', name='block14_sepconv1_act')(x)
x = SeparableConv2D(2048, (3, 3), padding='same', use_bias=False, name='block14_sepconv2')(x)
x = BatchNormalization(name='block14_sepconv2_bn')(x)
x = Activation('relu', name='block14_sepconv2_act')(x)
if include_top:
x = GlobalAveragePooling2D(name='avg_pool')(x)
x = Dense(classes, activation='softmax', name='predictions')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name='xception')
# load weights
if weights == 'imagenet':
if include_top:
weights_path = get_file('xception_weights_tf_dim_ordering_tf_kernels.h5',
TF_WEIGHTS_PATH,
cache_subdir='models')
else:
weights_path = get_file('xception_weights_tf_dim_ordering_tf_kernels_notop.h5',
TF_WEIGHTS_PATH_NO_TOP,
cache_subdir='models')
model.load_weights(weights_path)
if old_data_format:
K.set_image_data_format(old_data_format)
return model
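# --- Hedged usage sketch (editor addition, not part of the original file) ---
# Building a randomly initialized Xception on smaller inputs; any size down
# to the documented 71x71 minimum is accepted, and with pooling='max' the
# output is a 2D tensor over block14's 2048 channels.
def _demo_xception_custom_shape():
    model = Xception(include_top=False, weights=None,
                     input_shape=(150, 150, 3), pooling='max')
    assert model.output_shape == (None, 2048)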
|
Instantiates the Xception architecture.
Optionally loads weights pre-trained
on ImageNet. This model is available for TensorFlow only,
and can only be used with inputs following the TensorFlow
data format `(width, height, channels)`.
You should set `image_data_format="channels_last"` in your Keras config
located at ~/.keras/keras.json.
Note that the default input image size for this model is 299x299.
# Arguments
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
        has to be `(299, 299, 3)`).
        It should have exactly 3 input channels,
and width and height should be no smaller than 71.
E.g. `(150, 150, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
RuntimeError: If attempting to run this model with a
backend that does not support separable convolutions.
|
Xception
|
python
|
fchollet/deep-learning-models
|
xception.py
|
https://github.com/fchollet/deep-learning-models/blob/master/xception.py
|
MIT
|
def beam_search_generator(sess, net, initial_state, initial_sample,
early_term_token, beam_width, forward_model_fn, forward_args):
'''Run beam search! Yield consensus tokens sequentially, as a generator;
return when reaching early_term_token (newline).
Args:
sess: tensorflow session reference
        net: tensorflow net graph (must be compatible with the forward_model_fn function)
initial_state: initial hidden state of the net
initial_sample: single token (excluding any seed/priming material)
to start the generation
early_term_token: stop when the beam reaches consensus on this token
(but do not return this token).
beam_width: how many beams to track
forward_model_fn: function to forward the model, must be of the form:
probability_output, beam_state =
forward_model_fn(sess, net, beam_state, beam_sample, forward_args)
(Note: probability_output has to be a valid probability distribution!)
        forward_args: extra arguments passed through unchanged to
            forward_model_fn on every call.
Returns: a generator to yield a sequence of beam-sampled tokens.'''
# Store state, outputs and probabilities for up to args.beam_width beams.
# Initialize with just the one starting entry; it will branch to fill the beam
# in the first step.
beam_states = [initial_state] # Stores the best activation states
beam_outputs = [[initial_sample]] # Stores the best generated output sequences so far.
beam_probs = [1.] # Stores the cumulative normalized probabilities of the beams so far.
while True:
# Keep a running list of the best beam branches for next step.
# Don't actually copy any big data structures yet, just keep references
# to existing beam state entries, and then clone them as necessary
# at the end of the generation step.
new_beam_indices = []
new_beam_probs = []
new_beam_samples = []
# Iterate through the beam entries.
for beam_index, beam_state in enumerate(beam_states):
beam_prob = beam_probs[beam_index]
beam_sample = beam_outputs[beam_index][-1]
# Forward the model.
prediction, beam_states[beam_index] = forward_model_fn(
sess, net, beam_state, beam_sample, forward_args)
            # Sample best_tokens from the probability distribution.
            # Draw beam_width choices from the distribution
            # (but no more than the number of positive probabilities in prediction).
count = min(beam_width, sum(1 if p > 0. else 0 for p in prediction))
best_tokens = np.random.choice(len(prediction), size=count,
replace=False, p=prediction)
for token in best_tokens:
prob = prediction[token] * beam_prob
if len(new_beam_indices) < beam_width:
# If we don't have enough new_beam_indices, we automatically qualify.
new_beam_indices.append(beam_index)
new_beam_probs.append(prob)
new_beam_samples.append(token)
else:
# Sample a low-probability beam to possibly replace.
np_new_beam_probs = np.array(new_beam_probs)
inverse_probs = -np_new_beam_probs + max(np_new_beam_probs) + min(np_new_beam_probs)
inverse_probs = inverse_probs / sum(inverse_probs)
sampled_beam_index = np.random.choice(beam_width, p=inverse_probs)
if new_beam_probs[sampled_beam_index] <= prob:
# Replace it.
new_beam_indices[sampled_beam_index] = beam_index
new_beam_probs[sampled_beam_index] = prob
new_beam_samples[sampled_beam_index] = token
# Replace the old states with the new states, first by referencing and then by copying.
already_referenced = [False] * beam_width
new_beam_states = []
new_beam_outputs = []
for i, new_index in enumerate(new_beam_indices):
if already_referenced[new_index]:
new_beam = copy.deepcopy(beam_states[new_index])
else:
new_beam = beam_states[new_index]
already_referenced[new_index] = True
new_beam_states.append(new_beam)
new_beam_outputs.append(beam_outputs[new_index] + [new_beam_samples[i]])
# Normalize the beam probabilities so they don't drop to zero
beam_probs = new_beam_probs / sum(new_beam_probs)
beam_states = new_beam_states
beam_outputs = new_beam_outputs
# Prune the agreed portions of the outputs
# and yield the tokens on which the beam has reached consensus.
l, early_term = consensus_length(beam_outputs, early_term_token)
if l > 0:
            for token in beam_outputs[0][:l]:
                yield token
beam_outputs = [output[l:] for output in beam_outputs]
if early_term: return
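# --- Hedged sketch (editor addition, not part of the original file) ---
# consensus_length() is called above but not shown in this snippet. A
# plausible implementation consistent with that call site: return the length
# of the prefix shared by all beams, plus a flag for whether the consensus
# reached early_term_token (which itself is never yielded).
def consensus_length(beam_outputs, early_term_token):
    for l in range(len(beam_outputs[0])):
        if l > 0 and beam_outputs[0][l - 1] == early_term_token:
            return l - 1, True  # stop just before the terminator token
        for output in beam_outputs[1:]:
            if l >= len(output) or beam_outputs[0][l] != output[l]:
                return l, False  # beams disagree from position l onward
    return len(beam_outputs[0]), False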
|
Run beam search! Yield consensus tokens sequentially, as a generator;
return when reaching early_term_token (newline).
Args:
sess: tensorflow session reference
    net: tensorflow net graph (must be compatible with the forward_model_fn function)
initial_state: initial hidden state of the net
initial_sample: single token (excluding any seed/priming material)
to start the generation
early_term_token: stop when the beam reaches consensus on this token
(but do not return this token).
beam_width: how many beams to track
forward_model_fn: function to forward the model, must be of the form:
probability_output, beam_state =
forward_model_fn(sess, net, beam_state, beam_sample, forward_args)
(Note: probability_output has to be a valid probability distribution!)
    forward_args: extra arguments passed through unchanged to
        forward_model_fn on every call.
Returns: a generator to yield a sequence of beam-sampled tokens.
|
beam_search_generator
|
python
|
pender/chatbot-rnn
|
chatbot.py
|
https://github.com/pender/chatbot-rnn/blob/master/chatbot.py
|
MIT
|
def __init__(self, cell_fn, partition_size=128, partitions=1, layers=2):
"""Create a RNN cell composed sequentially of a number of RNNCells.
Args:
cell_fn: reference to RNNCell function to create each partition in each layer.
partition_size: how many horizontal cells to include in each partition.
partitions: how many horizontal partitions to include in each layer.
layers: how many layers to include in the net.
"""
super(PartitionedMultiRNNCell, self).__init__()
self._cells = []
for i in range(layers):
self._cells.append([cell_fn(partition_size) for _ in range(partitions)])
self._partitions = partitions
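# --- Hedged usage sketch (editor addition, not part of the original file) ---
# Constructing a 3-layer net of GRU partitions; cell_fn can be any RNNCell
# factory taking a size argument (tf.contrib matches this snippet's TF 1.x
# API).
def _demo_partitioned_cell():
    import tensorflow as tf
    return PartitionedMultiRNNCell(tf.contrib.rnn.GRUCell,
                                   partition_size=128, partitions=4, layers=3)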
|
Create an RNN cell composed sequentially of a number of RNNCells.
Args:
cell_fn: reference to RNNCell function to create each partition in each layer.
partition_size: how many horizontal cells to include in each partition.
partitions: how many horizontal partitions to include in each layer.
layers: how many layers to include in the net.
|
__init__
|
python
|
pender/chatbot-rnn
|
model.py
|
https://github.com/pender/chatbot-rnn/blob/master/model.py
|
MIT
|
def _rnn_state_placeholders(state):
"""Convert RNN state tensors to placeholders, reflecting the same nested tuple structure."""
# Adapted from @carlthome's comment:
# https://github.com/tensorflow/tensorflow/issues/2838#issuecomment-302019188
if isinstance(state, tf.contrib.rnn.LSTMStateTuple):
c, h = state
c = tf.placeholder(c.dtype, c.shape, c.op.name)
h = tf.placeholder(h.dtype, h.shape, h.op.name)
return tf.contrib.rnn.LSTMStateTuple(c, h)
elif isinstance(state, tf.Tensor):
h = state
h = tf.placeholder(h.dtype, h.shape, h.op.name)
return h
else:
structure = [_rnn_state_placeholders(x) for x in state]
return tuple(structure)
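# --- Hedged usage sketch (editor addition, not part of the original file) ---
# Typical use: mirror a cell's zero state as feedable placeholders so the
# recurrent state can be carried across session.run() calls (TF 1.x API):
def _demo_state_placeholders(cell, batch_size=1):
    zero_state = cell.zero_state(batch_size, tf.float32)
    # Same nested tuple structure as zero_state, but every tensor is now a
    # placeholder that can appear in a feed_dict.
    return _rnn_state_placeholders(zero_state)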
|
Convert RNN state tensors to placeholders, reflecting the same nested tuple structure.
|
_rnn_state_placeholders
|
python
|
pender/chatbot-rnn
|
model.py
|
https://github.com/pender/chatbot-rnn/blob/master/model.py
|
MIT
|
def forward_model(self, sess, state, input_sample):
'''Run a forward pass. Return the updated hidden state and the output probabilities.'''
shaped_input = np.array([[input_sample]], np.float32)
inputs = {self.input_data: shaped_input}
self.add_state_to_feed_dict(inputs, state)
[probs, state] = sess.run([self.probs, self.final_state], feed_dict=inputs)
return probs[0], state
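# --- Hedged sketch (editor addition, not part of the original file) ---
# This method returns (probability_output, state), which is (almost) the
# shape beam_search_generator's forward_model_fn expects; a thin adapter that
# ignores forward_args bridges the two signatures:
def beam_forward_fn(sess, net, state, sample, forward_args):
    return net.forward_model(sess, state, sample)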
|
Run a forward pass. Return the updated hidden state and the output probabilities.
|
forward_model
|
python
|
pender/chatbot-rnn
|
model.py
|
https://github.com/pender/chatbot-rnn/blob/master/model.py
|
MIT
|
def check_container_exec_instances(context, num):
"""Modern docker versions remove ExecIDs after they finished, but older
docker versions leave ExecIDs behind. This test is for asserting that
the ExecIDs are cleaned up one way or another"""
container_info = context.docker_client.inspect_container(
context.running_container_id
)
if container_info["ExecIDs"] is None:
execs = []
else:
execs = container_info["ExecIDs"]
print("Container info:\n%s" % container_info)
assert len(execs) <= int(num)
|
Modern docker versions remove ExecIDs after they finish, but older
    docker versions leave ExecIDs behind. This test asserts that
    the ExecIDs are cleaned up one way or another.
|
check_container_exec_instances
|
python
|
Yelp/paasta
|
general_itests/steps/paasta_execute_docker_command.py
|
https://github.com/Yelp/paasta/blob/master/general_itests/steps/paasta_execute_docker_command.py
|
Apache-2.0
|
def tail_paasta_logs_let_threads_be_threads(context):
"""This test lets tail_paasta_logs() fire off processes to do work. We
verify that the work was done, basically irrespective of how it was done.
"""
service = "fake_service"
context.levels = ["fake_level1", "fake_level2"]
context.components = ["deploy", "monitoring"]
context.clusters = ["fake_cluster1"]
context.instances = ["fake_instance"]
context.pods = ["fake_pod"]
with mock.patch(
"paasta_tools.cli.cmds.logs.ScribeLogReader.determine_scribereader_envs",
autospec=True,
) as context.determine_scribereader_envs_patch, mock.patch(
"paasta_tools.cli.cmds.logs.ScribeLogReader.scribe_tail", autospec=True
) as scribe_tail_patch, mock.patch(
"paasta_tools.cli.cmds.logs.log", autospec=True
), mock.patch(
"paasta_tools.cli.cmds.logs.print_log", autospec=True
) as context.print_log_patch, mock.patch(
"paasta_tools.cli.cmds.logs.scribereader", autospec=True
):
context.determine_scribereader_envs_patch.return_value = ["env1", "env2"]
def scribe_tail_side_effect(
self,
scribe_env,
stream_name,
service,
levels,
components,
clusters,
instances,
pods,
queue,
filter_fn,
parse_fn=None,
):
# The print here is just for debugging
print("fake log line added for %s" % scribe_env)
queue.put("fake log line added for %s" % scribe_env)
# This sleep() was the straw that broke the camel's back
# and forced me to move this test into the integration
# suite. The test is flaky without the sleep, and the
            # sleep makes it a lousy unit test.
time.sleep(0.05)
scribe_tail_patch.side_effect = scribe_tail_side_effect
context.scribe_log_reader = logs.ScribeLogReader(
cluster_map={"env1": "env1", "env2": "env2"}
)
context.scribe_log_reader.tail_logs(
service,
context.levels,
context.components,
context.clusters,
context.instances,
context.pods,
)
|
This test lets tail_paasta_logs() fire off processes to do work. We
verify that the work was done, basically irrespective of how it was done.
|
tail_paasta_logs_let_threads_be_threads
|
python
|
Yelp/paasta
|
general_itests/steps/tail_paasta_logs.py
|
https://github.com/Yelp/paasta/blob/master/general_itests/steps/tail_paasta_logs.py
|
Apache-2.0
|
def register_bounce_method(name: str) -> Callable[[BounceMethod], BounceMethod]:
"""Returns a decorator that registers that bounce function at a given name
so get_bounce_method_func can find it."""
def outer(bounce_func: BounceMethod):
_bounce_method_funcs[name] = bounce_func
return bounce_func
return outer
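# --- Hedged usage sketch (editor addition; the "noop" method is illustrative) ---
@register_bounce_method("noop")
def noop_bounce(new_config, new_app_running, happy_new_tasks,
                old_non_draining_tasks, margin_factor=1.0):
    """Registered under "noop"; get_bounce_method_func("noop") would return it."""
    return {"create_app": False, "tasks_to_drain": set()}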
|
Returns a decorator that registers that bounce function at a given name
so get_bounce_method_func can find it.
|
register_bounce_method
|
python
|
Yelp/paasta
|
paasta_tools/bounce_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/bounce_lib.py
|
Apache-2.0
|
def brutal_bounce(
new_config: BounceMethodConfigDict,
new_app_running: bool,
happy_new_tasks: Collection,
old_non_draining_tasks: Sequence,
margin_factor=1.0,
) -> BounceMethodResult:
"""Pays no regard to safety. Starts the new app if necessary, and kills any
old ones. Mostly meant as an example of the simplest working bounce method,
but might be tolerable for some services.
:param new_config: The configuration dictionary representing the desired new app.
:param new_app_running: Whether there is an app in Marathon with the same ID as the new config.
:param happy_new_tasks: Set of MarathonTasks belonging to the new application that are considered healthy and up.
:param old_non_draining_tasks: A sequence of tasks not belonging to the new version. Tasks should be ordered from
most desirable to least desirable.
:param margin_factor: the multiplication factor used to calculate the number of instances to be drained
when the crossover method is used.
:return: A dictionary representing the desired bounce actions and containing the following keys:
- create_app: True if we should start the new Marathon app, False otherwise.
- tasks_to_drain: a set of task objects which should be drained and killed. May be empty.
"""
return {
"create_app": not new_app_running,
"tasks_to_drain": set(old_non_draining_tasks),
}
|
Pays no regard to safety. Starts the new app if necessary, and kills any
old ones. Mostly meant as an example of the simplest working bounce method,
but might be tolerable for some services.
:param new_config: The configuration dictionary representing the desired new app.
:param new_app_running: Whether there is an app in Marathon with the same ID as the new config.
:param happy_new_tasks: Set of MarathonTasks belonging to the new application that are considered healthy and up.
:param old_non_draining_tasks: A sequence of tasks not belonging to the new version. Tasks should be ordered from
most desirable to least desirable.
:param margin_factor: the multiplication factor used to calculate the number of instances to be drained
when the crossover method is used.
:return: A dictionary representing the desired bounce actions and containing the following keys:
- create_app: True if we should start the new Marathon app, False otherwise.
- tasks_to_drain: a set of task objects which should be drained and killed. May be empty.
|
brutal_bounce
|
python
|
Yelp/paasta
|
paasta_tools/bounce_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/bounce_lib.py
|
Apache-2.0
|
def upthendown_bounce(
new_config: BounceMethodConfigDict,
new_app_running: bool,
happy_new_tasks: Collection,
old_non_draining_tasks: Sequence,
margin_factor=1.0,
) -> BounceMethodResult:
"""Starts a new app if necessary; only kills old apps once all the requested tasks for the new version are running.
See the docstring for brutal_bounce() for parameters and return value.
"""
if new_app_running and len(happy_new_tasks) == new_config["instances"]:
return {"create_app": False, "tasks_to_drain": set(old_non_draining_tasks)}
else:
return {"create_app": not new_app_running, "tasks_to_drain": set()}
|
Starts a new app if necessary; only kills old apps once all the requested tasks for the new version are running.
See the docstring for brutal_bounce() for parameters and return value.
|
upthendown_bounce
|
python
|
Yelp/paasta
|
paasta_tools/bounce_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/bounce_lib.py
|
Apache-2.0
|
def crossover_bounce(
new_config: BounceMethodConfigDict,
new_app_running: bool,
happy_new_tasks: Collection,
old_non_draining_tasks: Sequence,
margin_factor=1.0,
) -> BounceMethodResult:
"""Starts a new app if necessary; slowly kills old apps as instances of the new app become happy.
See the docstring for brutal_bounce() for parameters and return value.
"""
assert margin_factor > 0
assert margin_factor <= 1
needed_count = max(
int(math.ceil(new_config["instances"] * margin_factor)) - len(happy_new_tasks),
0,
)
return {
"create_app": not new_app_running,
"tasks_to_drain": set(old_non_draining_tasks[needed_count:]),
}
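# --- Hedged worked example (editor addition; values are illustrative) ---
# With 10 desired instances, margin_factor=1.0 and 4 happy new tasks,
# needed_count = max(ceil(10 * 1.0) - 4, 0) = 6, so the 6 most desirable old
# tasks are kept and the rest are drained.
def _demo_crossover_bounce():
    actions = crossover_bounce(
        new_config={"instances": 10},
        new_app_running=True,
        happy_new_tasks=["new%d" % i for i in range(4)],
        old_non_draining_tasks=["old%d" % i for i in range(10)],
    )
    assert actions == {
        "create_app": False,
        "tasks_to_drain": {"old6", "old7", "old8", "old9"},
    }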
|
Starts a new app if necessary; slowly kills old apps as instances of the new app become happy.
See the docstring for brutal_bounce() for parameters and return value.
|
crossover_bounce
|
python
|
Yelp/paasta
|
paasta_tools/bounce_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/bounce_lib.py
|
Apache-2.0
|
def downthenup_bounce(
new_config: BounceMethodConfigDict,
new_app_running: bool,
happy_new_tasks: Collection,
old_non_draining_tasks: Sequence,
margin_factor=1.0,
) -> BounceMethodResult:
"""Stops any old apps and waits for them to die before starting a new one.
See the docstring for brutal_bounce() for parameters and return value.
"""
return {
"create_app": not old_non_draining_tasks and not new_app_running,
"tasks_to_drain": set(old_non_draining_tasks),
}
|
Stops any old apps and waits for them to die before starting a new one.
See the docstring for brutal_bounce() for parameters and return value.
|
downthenup_bounce
|
python
|
Yelp/paasta
|
paasta_tools/bounce_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/bounce_lib.py
|
Apache-2.0
|