desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
@staticmethod
def str2id(string):
    """Return a unique identifier built from `string`.

    The identifier type is backend-specific; this is implemented in
    the backend-specific subclasses.
    """
    raise NotImplementedError
'This method opens a scan result, and calls the appropriate store_scan_* method to parse (and store) the scan result.'
def store_scan(self, fname, **kargs):
scanid = utils.hash_file(fname, hashtype='sha256') if self.is_scan_present(scanid): utils.LOGGER.debug('Scan already present in Database (%r).', fname) return False with utils.open_file(fname) as fdesc: fchar = fdesc.read(1) try: store_scan_function = {'<':...
'This method parses an XML scan result, displays a JSON version of the result, and return True if everything went fine, False otherwise. In backend-specific subclasses, this method stores the result instead of displaying it, thanks to the `content_handler` attribute.'
def store_scan_xml(self, fname, **kargs):
parser = xml.sax.make_parser() self.start_store_hosts() try: content_handler = self.content_handler(fname, **kargs) except Exception: utils.LOGGER.warning('Exception (file %r)', fname, exc_info=True) else: parser.setContentHandler(content_handler) parser.setEnti...
def merge_host(self, host):
    """Attempt to merge `host` with an existing record.

    Return True when a record for the same address (and the same
    source, when `host['source']` exists) was found, merged, and the
    merged document stored; False otherwise (in which case the caller
    is responsible for inserting `host` itself).
    """
    try:
        flt = self.searchhost(host['addr'])
        source = host.get('source')
        if source:
            flt = self.flt_and(flt, self.searchsource(source))
        existing = self.get(flt)[0]
    except IndexError:
        # No matching record found.
        return False
    self.store_host(self.merge_host_docs(existing, host))
    self.remove(existing)
    return True
def start_store_hosts(self):
    """Hook called before storing hosts: backend-specific subclasses
    may create bulk-insert structures here. No-op by default."""
def stop_store_hosts(self):
    """Hook called after storing hosts: backend-specific subclasses
    may commit bulk-insert structures here. No-op by default."""
'This method parses a JSON scan result as exported using `ivre scancli --json > file`, displays the parsing result, and return True if everything went fine, False otherwise. In backend-specific subclasses, this method stores the result instead of displaying it, thanks to the `store_host` method.'
def store_scan_json(self, fname, filehash=None, needports=False, needopenports=False, categories=None, source=None, gettoarchive=None, add_addr_infos=True, force_info=False, merge=False, **_):
if (categories is None): categories = [] scan_doc_saved = False self.start_store_hosts() with utils.open_file(fname) as fdesc: for line in fdesc: host = self.json2dbrec(json.loads(line.decode())) for fname in ['_id']: if (fname in host): ...
@staticmethod
def getscreenshot(port):
    """Return the content of a port's screenshot, or None when the port
    has no inline screenshot data."""
    location = port.get('screenshot')
    if location == 'field':
        # The screenshot is stored inline in the record.
        return port.get('screendata')
    # No screenshot, or stored elsewhere than the record itself.
    return None
def migrate_schema(self, archive, version):
    """Process schema migrations; implemented in backend-specific
    classes. The generic implementation is a no-op."""
'Converts a record from version 0 (no "schema_version" key in the document) to version 1 (`doc["schema_version"] == 1`). Version 1 adds an "openports" nested document to ease open ports based researches.'
@classmethod def __migrate_schema_hosts_0_1(cls, doc):
assert ('schema_version' not in doc) assert ('openports' not in doc) doc['schema_version'] = 1 openports = {'count': 0} for port in doc.get('ports', []): if (port.get('state_state') == 'open'): openports.setdefault(port['protocol'], {}).setdefault('ports', []).append(port['port']...
@staticmethod
def __migrate_schema_hosts_1_2(doc):
    """Convert a record from version 1 to version 2.

    Version 2 discards service names when they were only guessed from
    the nmap-services file (service_method == "table").
    """
    assert doc['schema_version'] == 1
    doc['schema_version'] = 2
    for port in doc.get('ports', []):
        if port.get('service_method') != 'table':
            continue
        # Drop every service_* key, including service_method itself.
        for key in [k for k in port if k.startswith('service_')]:
            del port[key]
'Converts a record from version 2 to version 3. Version 3 uses new Nmap structured data for scripts using the ls library.'
@staticmethod def __migrate_schema_hosts_2_3(doc):
assert (doc['schema_version'] == 2) doc['schema_version'] = 3 migrate_scripts = set(['afp-ls', 'nfs-ls', 'smb-ls', 'ftp-anon', 'http-ls']) for port in doc.get('ports', []): for script in port.get('scripts', []): if (script['id'] in migrate_scripts): if (script['id'] i...
@staticmethod
def __migrate_schema_hosts_3_4(doc):
    """Convert a record from version 3 to version 4.

    Version 4 moves host scripts into a "fake" port entry
    (port == 'host').
    """
    assert doc['schema_version'] == 3
    doc['schema_version'] = 4
    if 'scripts' in doc:
        host_scripts = doc.pop('scripts')
        doc.setdefault('ports', []).append({'port': 'host',
                                            'scripts': host_scripts})
'Converts a record from version 4 to version 5. Version 5 uses the magic value -1 instead of "host" for "port" in the "fake" port entry used to store host scripts (see `migrate_schema_hosts_3_4()`). Moreover, it changes the structure of the values of "extraports" from [totalcount, {"state": count}] to {"total": totalco...
@staticmethod def __migrate_schema_hosts_4_5(doc):
assert (doc['schema_version'] == 4) doc['schema_version'] = 5 for port in doc.get('ports', []): if (port['port'] == 'host'): port['port'] = (-1) for (state, (total, counts)) in list(viewitems(doc.get('extraports', {}))): doc['extraports'][state] = {'total': total, 'reasons': ...
'Converts a record from version 5 to version 6. Version 6 uses Nmap structured data for scripts using the vulns NSE library.'
@staticmethod def __migrate_schema_hosts_5_6(doc):
assert (doc['schema_version'] == 5) doc['schema_version'] = 6 migrate_scripts = set((script for (script, alias) in viewitems(xmlnmap.ALIASES_TABLE_ELEMS) if (alias == 'vulns'))) for port in doc.get('ports', []): for script in port.get('scripts', []): if (script['id'] in migrate_scrip...
'Converts a record from version 6 to version 7. Version 7 creates a structured output for mongodb-databases script.'
@staticmethod def __migrate_schema_hosts_6_7(doc):
assert (doc['schema_version'] == 6) doc['schema_version'] = 7 for port in doc.get('ports', []): for script in port.get('scripts', []): if (script['id'] == 'mongodb-databases'): if ('mongodb-databases' not in script): data = xmlnmap.add_mongodb_database...
'Converts a record from version 7 to version 8. Version 8 fixes the structured output for scripts using the vulns NSE library.'
@staticmethod def __migrate_schema_hosts_7_8(doc):
assert (doc['schema_version'] == 7) doc['schema_version'] = 8 for port in doc.get('ports', []): for script in port.get('scripts', []): if ('vulns' in script): if any(((elt in script['vulns']) for elt in ['ids', 'refs', 'description', 'state', 'title'])): ...
def get_mean_open_ports(self, flt, archive=False):
    """For each record matched by `flt`, return a dict with keys `id`
    and `mean`.

    `id` is a backend-dependent value uniquely identifying the record;
    `mean` is (number of open ports) * sum(open port numbers).

    This generic implementation iterates over the results in Python;
    backend-specific subclasses may override it with a DB-side
    aggregation.
    """
    results = []
    for host in self.get(flt, archive=archive, fields=['ports']):
        # Use .get() so ports lacking a 'state_state' key (e.g.
        # summarized entries) are ignored instead of raising KeyError.
        open_ports = [port['port']
                      for port in host.get('ports', [])
                      if port.get('state_state') == 'open']
        results.append({'id': self.getid(host),
                        'mean': len(open_ports) * sum(open_ports)})
    return results
'Search SSH host keys'
def searchsshkey(self, fingerprint=None, key=None, keytype=None, bits=None, output=None):
params = {'name': 'ssh-hostkey'} if (fingerprint is not None): if (not isinstance(fingerprint, utils.REGEXP_T)): fingerprint = fingerprint.replace(':', '').lower() params.setdefault('values', {})['fingerprint'] = fingerprint if (key is not None): params.setdefault('values...
@classmethod
def searchsmb(cls, **args):
    """Search particular results from the smb-os-discovery host script.

    Example: .searchsmb(os="Windows 5.1", workgroup="WORKGROUP\x00")
    """
    # Translate user-friendly argument names to script value names.
    for alias, real in [('dnsdomain', 'domain_dns'),
                        ('forest', 'forest_dns')]:
        if alias in args:
            args[real] = args.pop(alias)
    return cls.searchscript(name='smb-os-discovery', values=args)
def insert_or_update_bulk(self, specs, getinfos=None):
    """Like `.insert_or_update()`, with `specs` an iterable of
    (timestamp, spec) pairs.

    This generic implementation does not use the bulk capacity of the
    underlying DB; it simply calls `.insert_or_update()` per item.
    """
    for tstamp, document in specs:
        self.insert_or_update(tstamp, document, getinfos=getinfos)
'Prepares an agent and adds it to the DB using `self._add_agent()`'
def add_agent(self, masterid, host, remotepath, rsync=None, source=None, maxwaiting=60):
if (rsync is None): rsync = ['rsync'] if (not remotepath.endswith('/')): remotepath += '/' if (source is None): source = (remotepath if (host is None) else ('%s:%s' % (host, remotepath))) master = self.get_master(masterid) localpath = tempfile.mkdtemp(prefix='', dir=master['p...
'Adds an agent from a description string of the form [tor:][hostname:]path.'
def add_agent_from_string(self, masterid, string, source=None, maxwaiting=60):
string = string.split(':', 1) if (string[0].lower() == 'tor'): string = string[1].split(':', 1) rsync = ['torify', 'rsync'] else: rsync = None if (len(string) == 1): return self.add_agent(masterid, None, string[0], rsync=rsync, source=source, maxwaiting=maxwaiting) re...
def may_receive(self, agentid):
    """Return how many targets can still be sent to agent `agentid`
    without exceeding its `maxwaiting` limit (never negative)."""
    agent = self.get_agent(agentid)
    remaining = agent['maxwaiting'] - self.count_waiting_targets(agentid)
    return remaining if remaining > 0 else 0
def count_waiting_targets(self, agentid):
    """Return the number of waiting targets agent `agentid` has
    (files under its local and remote "input" directories)."""
    agent = self.get_agent(agentid)
    total = 0
    for subdir in ['input', os.path.join('remote', 'input')]:
        total += len(os.listdir(self.get_local_path(agent, subdir)))
    return total
def count_current_targets(self, agentid):
    """Return the number of targets agent `agentid` is currently
    scanning (one .xml file per target in the remote "cur" dir)."""
    agent = self.get_agent(agentid)
    curdir = self.get_local_path(agent, os.path.join('remote', 'cur'))
    return sum(1 for fname in os.listdir(curdir)
               if fname.endswith('.xml'))
def _add_agent(self, agent):
    """Add `agent` and return its (backend-specific) unique identifier.

    Implemented in the backend-specific class.
    """
    raise NotImplementedError
def get_agent(self, agentid):
    """Return the agent record for the (backend-specific) unique
    identifier `agentid`. Implemented in the backend-specific class."""
    raise NotImplementedError
'Removes an agent from its (backend-specific) unique identifier.'
def del_agent(self, agentid, wait_results=True):
agent = self.get_agent(agentid) master = self.get_master(agent['master']) self.unassign_agent(agentid, dont_reuse=True) path = self.get_local_path(agent, 'input') dstdir = os.path.join(master['path'], 'onhold') for fname in os.listdir(path): shutil.move(os.path.join(path, fname), dstdir)...
def _del_agent(self, agentid):
    """Remove an agent's database entry from its (backend-specific)
    unique identifier. Implemented in the backend-specific class."""
    raise NotImplementedError
def add_master(self, hostname, path):
    """Build a master record and register it via `self._add_master()`;
    return its backend-specific unique identifier."""
    return self._add_master({'hostname': hostname, 'path': path})
def _add_master(self, master):
    """Add `master` and return its (backend-specific) unique
    identifier. Implemented in the backend-specific class."""
    raise NotImplementedError
def get_hint(self, spec):
    """Given a query `spec`, return an index suitable to be passed to
    Cursor.hint(), or None when no hinted field appears in `spec`."""
    for fieldname, hint in viewitems(self.hint_indexes):
        if fieldname in spec:
            return hint
    return None
@property
def db_client(self):
    """The pymongo client connection, created on first access and
    cached on the instance."""
    if not hasattr(self, '_db_client'):
        self._db_client = pymongo.MongoClient(
            host=self.host,
            read_preference=pymongo.ReadPreference.SECONDARY_PREFERRED,
        )
    return self._db_client
'The DB.'
@property def db(self):
try: return self._db except AttributeError: self._db = self.db_client[self.dbname] if (self.username is not None): if (self.password is not None): self.db.authenticate(self.username, self.password) elif (self.mechanism is not None): ...
@property
def server_info(self):
    """Information about the DB server, fetched once and cached on
    the instance."""
    if not hasattr(self, '_server_info'):
        self._server_info = self.db_client.server_info()
    return self._server_info
'Wrapper around column .find() method, depending on pymongo version.'
@property def find(self):
try: return self._find except AttributeError: if (pymongo.version_tuple[0] > 2): def _find(colname, *args, **kargs): if ('spec' in kargs): kargs['filter'] = kargs.pop('spec') if ('fields' in kargs): kargs['projec...
'Wrapper around collection .find_one() method, depending on pymongo version.'
@property def find_one(self):
try: return self._find_one except AttributeError: if (pymongo.version_tuple[0] > 2): def _find_one(colname, *args, **kargs): if ('spec_or_id' in kargs): kargs['filter_or_id'] = kargs.pop('spec_or_id') if ('fields' in kargs): ...
'Process to schema migrations in column `colname` starting from `version`.'
def migrate_schema(self, colname, version):
failed = 0 while (version in self.schema_migrations[colname]): updated = False (new_version, migration_function) = self.schema_migrations[colname][version] utils.LOGGER.info('Migrating column %s from version %r to %r', colname, version, new_version) for recor...
def cmp_schema_version(self, colname, document):
    """Compare `document`'s schema version to the code's current
    version for column `colname`.

    Return 0 on a match, -1 when the document is more recent than the
    code (update IVRE), 1 when it is older (run .migrate_schema()).
    """
    code_version = self.schema_latest_versions.get(colname, 0)
    doc_version = document.get('schema_version', 0)
    if code_version == doc_version:
        return 0
    return 1 if code_version > doc_version else -1
'This method makes use of the aggregation framework to produce top values for a given field.'
def _topvalues(self, field, flt=None, topnbr=10, sort=None, limit=None, skip=None, least=False, aggrflt=None, specialproj=None, specialflt=None, countfield=None):
if (flt is None): flt = self.flt_empty if (aggrflt is None): aggrflt = self.flt_empty if (specialflt is None): specialflt = [] pipeline = [] if flt: pipeline += [{'$match': flt}] if ((sort is not None) and ((limit is not None) or (skip is not None))): pipe...
'This method makes use of the aggregation framework to produce distinct values for a given field.'
def _distinct(self, field, flt=None, sort=None, limit=None, skip=None):
pipeline = [] if flt: pipeline.append({'$match': flt}) if sort: pipeline.append({'$sort': OrderedDict(sort)}) if (skip is not None): pipeline += [{'$skip': skip}] if (limit is not None): pipeline += [{'$limit': limit}] for i in range(field.count('.'), (-1), (-1)):...
'Returns a filter which will accept results if and only if they are accepted by both cond1 and cond2.'
@staticmethod def _flt_and(cond1, cond2):
cond1k = set(cond1) cond2k = set(cond2) cond = {} if ('$and' in cond1): cond1k.remove('$and') cond['$and'] = cond1['$and'] if ('$and' in cond2): cond2k.remove('$and') cond['$and'] = (cond.get('$and', []) + cond2['$and']) for k in cond1k.difference(cond2k): ...
@staticmethod
def searchobjectid(oid, neg=False):
    """Filter records by ObjectID.

    `oid` can be a single object ID or an iterable of object IDs,
    given as strings or `ObjectId` instances.
    """
    if isinstance(oid, (basestring, bson.objectid.ObjectId)):
        oids = [bson.objectid.ObjectId(oid)]
    else:
        oids = [bson.objectid.ObjectId(elt) for elt in oid]
    if len(oids) == 1:
        return {'_id': {'$ne': oids[0]} if neg else oids[0]}
    return {'_id': {'$nin' if neg else '$in': oids}}
@staticmethod
def searchversion(version):
    """Filter documents by schema version; `None` matches documents
    without a "schema_version" key."""
    if version is None:
        return {'schema_version': {'$exists': False}}
    return {'schema_version': version}
@staticmethod
def searchhost(addr, neg=False):
    """Filter (filter out, when `neg`) one particular host by its IP
    address."""
    try:
        addr = utils.ip2int(addr)
    except (TypeError, utils.socket.error):
        # Already an integer, or not convertible: keep as given.
        pass
    if neg:
        return {'addr': {'$ne': addr}}
    return {'addr': addr}
'Filters (if `neg` == True, filters out) one particular IP address range.'
@staticmethod def searchrange(start, stop, neg=False):
try: start = utils.ip2int(start) except (TypeError, utils.socket.error): pass try: stop = utils.ip2int(stop) except (TypeError, utils.socket.error): pass if neg: return {'$or': [{'addr': {'$lt': start}}, {'addr': {'$gt': stop}}]} return {'addr': {'$gte': s...
def init(self):
    """Reinitialize the "active" columns: drop them, then recreate
    the default indexes."""
    for colname in [self.colname_scans, self.colname_hosts,
                    self.colname_oldscans, self.colname_oldhosts]:
        self.db[colname].drop()
    self.create_indexes()
def cmp_schema_version_host(self, host):
    """Compare `host`'s schema version with the code's current version
    (see `cmp_schema_version()`): 0 = match, -1 = document newer,
    1 = document older."""
    return self.cmp_schema_version(self.colname_hosts, host)
def cmp_schema_version_scan(self, scan):
    """Compare `scan`'s schema version with the code's current version
    (see `cmp_schema_version()`): 0 = match, -1 = document newer,
    1 = document older."""
    return self.cmp_schema_version(self.colname_scans, scan)
def migrate_schema(self, archive, version):
    """Run schema migrations on the hosts column (`colname_oldhosts`
    when `archive` is set, `colname_hosts` otherwise), starting from
    `version`."""
    colname = self.colname_oldhosts if archive else self.colname_hosts
    MongoDB.migrate_schema(self, colname, version)
'Converts a record from version 0 (no "schema_version" key in the document) to version 1 (`doc["schema_version"] == 1`). Version 1 adds an "openports" nested document to ease open ports based researches.'
def migrate_schema_hosts_0_1(self, doc):
assert ('schema_version' not in doc) assert ('openports' not in doc) update = {'$set': {'schema_version': 1}} updated_ports = False openports = {} for port in doc.get('ports', []): if (port.get('state_state') == 'open'): openports.setdefault(port['protocol'], {}).setdefault('...
'Converts a record from version 1 to version 2. Version 2 discards service names when they have been found from nmap-services file.'
@staticmethod def migrate_schema_hosts_1_2(doc):
assert (doc['schema_version'] == 1) update = {'$set': {'schema_version': 2}} update_ports = False for port in doc.get('ports', []): if (port.get('service_method') == 'table'): update_ports = True for key in list(port): if key.startswith('service_'): ...
'Converts a record from version 2 to version 3. Version 3 uses new Nmap structured data for scripts using the ls library.'
@staticmethod def migrate_schema_hosts_2_3(doc):
assert (doc['schema_version'] == 2) update = {'$set': {'schema_version': 3}} updated_ports = False updated_scripts = False migrate_scripts = set(['afp-ls', 'nfs-ls', 'smb-ls', 'ftp-anon', 'http-ls']) for port in doc.get('ports', []): for script in port.get('scripts', []): if ...
@staticmethod
def migrate_schema_hosts_3_4(doc):
    """Convert a record from version 3 to version 4, returning the
    MongoDB update document.

    Version 4 moves host scripts into a "fake" port entry
    (port == 'host').
    """
    assert doc['schema_version'] == 3
    changes = {'schema_version': 4}
    update = {'$set': changes}
    if 'scripts' in doc:
        doc.setdefault('ports', []).append(
            {'port': 'host', 'scripts': doc.pop('scripts')})
        changes['ports'] = doc['ports']
        update['$unset'] = {'scripts': True}
    return update
'Converts a record from version 4 to version 5. Version 5 uses the magic value -1 instead of "host" for "port" in the "fake" port entry used to store host scripts (see `migrate_schema_hosts_3_4()`). Moreover, it changes the structure of the values of "extraports" from [totalcount, {"state": count}] to {"total": totalco...
@staticmethod def migrate_schema_hosts_4_5(doc):
assert (doc['schema_version'] == 4) update = {'$set': {'schema_version': 5}} updated_ports = False updated_extraports = False for port in doc.get('ports', []): if (port['port'] == 'host'): port['port'] = (-1) updated_ports = True if updated_ports: update['...
'Converts a record from version 5 to version 6. Version 6 uses Nmap structured data for scripts using the vulns NSE library.'
@staticmethod def migrate_schema_hosts_5_6(doc):
assert (doc['schema_version'] == 5) update = {'$set': {'schema_version': 6}} updated = False migrate_scripts = set((script for (script, alias) in viewitems(xmlnmap.ALIASES_TABLE_ELEMS) if (alias == 'vulns'))) for port in doc.get('ports', []): for script in port.get('scripts', []): ...
'Converts a record from version 6 to version 7. Version 7 creates a structured output for mongodb-databases script.'
@staticmethod def migrate_schema_hosts_6_7(doc):
assert (doc['schema_version'] == 6) update = {'$set': {'schema_version': 7}} updated = False for port in doc.get('ports', []): for script in port.get('scripts', []): if (script['id'] == 'mongodb-databases'): if ('mongodb-databases' not in script): ...
'Converts a record from version 7 to version 8. Version 8 fixes the structured output for scripts using the vulns NSE library.'
@staticmethod def migrate_schema_hosts_7_8(doc):
assert (doc['schema_version'] == 7) update = {'$set': {'schema_version': 8}} updated = False for port in doc.get('ports', []): for script in port.get('scripts', []): if ('vulns' in script): if any(((elt in script['vulns']) for elt in ['ids', 'refs', 'description', 'st...
def get(self, flt, archive=False, **kargs):
    """Query the active column (the archived one when `archive` is
    set) with filter `flt` and return a MongoDB cursor.

    This is fast since no DB operation is performed here; subsequent
    operations on the cursor (.count(), iteration, ...) may be slow.
    """
    colname = self.colname_oldhosts if archive else self.colname_hosts
    return self.set_limits(self.find(colname, flt, **kargs))
'Sets the content of a port\'s screenshot.'
def setscreenshot(self, host, port, data, protocol='tcp', archive=False, overwrite=False):
try: port = [p for p in host.get('ports', []) if ((p['port'] == port) and (p['protocol'] == protocol))][0] except IndexError: raise KeyError(('Port %s/%d does not exist' % (protocol, port))) if (('screenshot' in port) and (not overwrite)): return port['screenshot'] = ...
'Sets the `screenwords` attribute based on the screenshot data.'
def setscreenwords(self, host, port=None, protocol='tcp', archive=False, overwrite=False):
if (port is None): if overwrite: flt_cond = (lambda p: ('screenshot' in p)) else: flt_cond = (lambda p: (('screenshot' in p) and ('screenwords' not in p))) elif overwrite: flt_cond = (lambda p: (('screenshot' in p) and (p.get('port') == port) and (p.get('protocol'...
'Removes screenshots'
def removescreenshot(self, host, port=None, protocol='tcp', archive=False):
changed = False for p in host.get('ports', []): if ((port is None) or ((p['port'] == port) and (p.get('protocol') == protocol))): if ('screenshot' in p): if (p['screenshot'] == 'field'): if ('screendata' in p): del p['screendata'] ...
'Merge two host records and return the result. Unmergeable / hard-to-merge fields are lost (e.g., extraports).'
def merge_host_docs(self, rec1, rec2):
if (rec1.get('schema_version') != rec2.get('schema_version')): raise ValueError(('Cannot merge host documents. Schema versions differ (%r != %r)' % (rec1.get('schema_version'), rec2.get('schema_version')))) rec = {} if ('schema_version' in rec1): rec['schema_versio...
'Removes the host "host" from the active (the old one if "archive" is set to True) column. "host" must be the host record as returned by MongoDB. If "host" has a "scanid" attribute, and if it refers to a scan that have no more host record after the deletion of "host", then the scan record is also removed.'
def remove(self, host, archive=False):
if archive: colname_hosts = self.colname_oldhosts colname_scans = self.colname_oldscans else: colname_hosts = self.colname_hosts colname_scans = self.colname_scans self.db[colname_hosts].remove(spec_or_id=host['_id']) for scanid in self.getscanids(host): if (self....
'Archives (when `unarchive` is True, unarchives) a given host record. Also (un)archives the corresponding scan and removes the scan from the "not archived" (or "archived") scan collection if not there is no host left in the "not archived" (or "archived") host collumn.'
def archive(self, host, unarchive=False):
(col_from_hosts, col_from_scans, col_to_hosts, col_to_scans) = ((self.colname_oldhosts, self.colname_oldscans, self.colname_hosts, self.colname_scans) if unarchive else (self.colname_hosts, self.colname_scans, self.colname_oldhosts, self.colname_oldscans)) if (self.find_one(col_from_hosts, {'_id': host['_id']})...
'This method returns for a specific query `flt` a list of dictionary objects whose keys are `id` and `mean`; the value for `id` is a backend-dependant and uniquely identifies a record, and the value for `mean` is given by: (number of open ports) * sum(port number for each open port) This MongoDB specific implementation...
def get_mean_open_ports(self, flt, archive=False):
aggr = [] if flt: aggr += [{'$match': flt}] aggr += [{'$project': {'ports.port': 1, 'ports.state_state': 1}}, {'$project': {'ports': {'$ifNull': ['$ports', []]}}}, {'$redact': {'$cond': {'if': {'$eq': [{'$ifNull': ['$ports', None]}, None]}, 'then': {'$cond': {'if': {'$eq': ['$state_state', 'open']},...
'Work-in-progress function to get scan results grouped by common open ports'
def group_by_port(self, flt, archive=False):
aggr = [] if flt: aggr += [{'$match': flt}] aggr += [{'$project': {'ports.port': 1, 'ports.state_state': 1}}, {'$project': {'ports': {'$ifNull': ['$ports', []]}}}, {'$redact': {'$cond': {'if': {'$eq': [{'$ifNull': ['$ports', None]}, None]}, 'then': {'$cond': {'if': {'$eq': ['$state_state', 'open']},...
'Filters (if `neg` == True, filters out) one particular category (records may have zero, one or more categories).'
@staticmethod def searchcategory(cat, neg=False):
if neg: if isinstance(cat, utils.REGEXP_T): return {'categories': {'$not': cat}} if isinstance(cat, list): if (len(cat) == 1): cat = cat[0] else: return {'categories': {'$nin': cat}} return {'categories': {'$ne': cat}} i...
@staticmethod
def searchcountry(country, neg=False):
    """Filter (filter out, when `neg`) one country or a list of
    countries."""
    country = utils.country_unalias(country)
    if isinstance(country, list):
        operator = '$nin' if neg else '$in'
        return {'infos.country_code': {operator: country}}
    if neg:
        return {'infos.country_code': {'$ne': country}}
    return {'infos.country_code': country}
@staticmethod
def searchcity(city, neg=False):
    """Filter (filter out, when `neg`) one particular city."""
    if not neg:
        return {'infos.city': city}
    if isinstance(city, utils.REGEXP_T):
        return {'infos.city': {'$not': city}}
    return {'infos.city': {'$ne': city}}
@staticmethod
def searchasnum(asnum, neg=False):
    """Filter (filter out, when `neg`) one or more AS numbers."""
    if not isinstance(asnum, basestring) and hasattr(asnum, '__iter__'):
        values = [int(val) for val in asnum]
        return {'infos.as_num': {'$nin' if neg else '$in': values}}
    asnum = int(asnum)
    if neg:
        return {'infos.as_num': {'$ne': asnum}}
    return {'infos.as_num': asnum}
@staticmethod
def searchasname(asname, neg=False):
    """Filter (filter out, when `neg`) one or more AS by name."""
    if not neg:
        return {'infos.as_name': asname}
    if isinstance(asname, utils.REGEXP_T):
        return {'infos.as_name': {'$not': asname}}
    return {'infos.as_name': {'$ne': asname}}
@staticmethod
def searchsource(src, neg=False):
    """Filter (filter out, when `neg`) one particular source."""
    if not neg:
        return {'source': src}
    if isinstance(src, utils.REGEXP_T):
        return {'source': {'$not': src}}
    return {'source': {'$ne': src}}
'Filters (if `neg` == True, filters out) records with specified protocol/port at required state. Be aware that when a host has a lot of ports filtered or closed, it will not report all of them, but only a summary, and thus the filter might not work as expected. This filter will always work to find open ports.'
@staticmethod def searchport(port, protocol='tcp', state='open', neg=False):
if (port == 'host'): return {'ports.port': ({'$gte': 0} if neg else (-1))} if (state == 'open'): return {('openports.%s.ports' % protocol): ({'$ne': port} if neg else port)} if neg: return {'$or': [{'ports': {'$elemMatch': {'port': port, 'protocol': protocol, 'state_state': {'$ne': s...
def searchportsother(self, ports, protocol='tcp', state='open'):
    """Filter records having at least one port in state `state` other
    than those listed in `ports`."""
    if state == 'open':
        spec = {'$elemMatch': {'$nin': ports}}
    else:
        spec = {'$nin': ports}
    return self.searchport(spec, protocol=protocol, state=state)
'Filters records with open port number between minn and maxn'
@staticmethod def searchcountopenports(minn=None, maxn=None, neg=False):
assert ((minn is not None) or (maxn is not None)) flt = [] if (minn == maxn): return {'openports.count': ({'$ne': minn} if neg else minn)} if (minn is not None): flt.append({('$lt' if neg else '$gte'): minn}) if (maxn is not None): flt.append({('$gt' if neg else '$lte'): maxn...
@staticmethod
def searchopenport(neg=False):
    """Filter records with at least one open port (none, when
    `neg`)."""
    if neg:
        return {'ports.state_state': {'$nin': ['open']}}
    return {'ports.state_state': 'open'}
@staticmethod
def searchservice(srv, port=None, protocol=None):
    """Search an open port running a particular service."""
    flt = {'service_name': srv}
    flt.update((key, val)
               for key, val in [('port', port), ('protocol', protocol)]
               if val is not None)
    if len(flt) == 1:
        # Only the service name: use the (indexed) dotted form.
        return {'ports.service_name': srv}
    return {'ports': {'$elemMatch': flt}}
'Search a port with a particular `product`. It is (much) better to provide the `service` name and/or `port` number since those fields are indexed.'
@staticmethod def searchproduct(product, version=None, service=None, port=None, protocol=None):
flt = {'service_product': product} if (version is not None): flt['service_version'] = version if (service is not None): flt['service_name'] = service if (port is not None): flt['port'] = port if (protocol is not None): flt['protocol'] = protocol if (len(flt) == 1)...
'Search a particular content in the scripts results.'
@staticmethod def searchscript(name=None, output=None, values=None):
req = {} if (name is not None): req['id'] = name if (output is not None): req['output'] = output if (values is not None): if (name is None): raise TypeError('.searchscript() needs a `name` arg when using a `values` arg') for (field, ...
'Search shared files from a file name (either a string or a regexp), only from scripts using the "ls" NSE module.'
def searchfile(self, fname=None, scripts=None):
if (fname is None): fname = {'$exists': True} if (scripts is None): return {'ports.scripts.ls.volumes.files.filename': fname} if isinstance(scripts, basestring): scripts = [scripts] return {'ports.scripts': {'$elemMatch': {'id': (scripts.pop() if (len(scripts) == 1) else {'$in': ...
'Filter SMB shares with given `access` (default: either read or write, accepted values \'r\', \'w\', \'rw\'). If `hidden` is set to `True`, look for hidden shares, for non-hidden if set to `False` and for both if set to `None` (this is the default).'
def searchsmbshares(self, access='', hidden=None):
access = {'': re.compile('^(READ|WRITE)'), 'r': re.compile('^READ(/|$)'), 'w': re.compile('(^|/)WRITE$'), 'rw': 'READ/WRITE', 'wr': 'READ/WRITE'}[access.lower()] share_type = {None: {'$nin': ['STYPE_IPC_HIDDEN', 'Not a file share', 'STYPE_IPC', 'STYPE_PRINTQ']}, True: 'STYPE_DISKTREE_HIDDEN', False: 'S...
'Filter results with (without, when `neg == True`) a screenshot (on a specific `port` if specified). `words` can be specified as a string, a regular expression, a boolean, or as a list and is/are matched against the OCR results. When `words` is specified and `neg == True`, the result will filter results **with** a scre...
# Static filter builder for hosts with (or without, when `neg` is True) a
# port screenshot; with no `port`/`service`/`words` constraint it reduces
# to a simple existence test on "ports.screenshot".
# NOTE(review): both the docstring above and the body line below are
# truncated by extraction -- the `words`/`port`/`service` handling is only
# partially visible.
@staticmethod def searchscreenshot(port=None, protocol='tcp', service=None, words=None, neg=False):
result = {'ports': {'$elemMatch': {}}} if (words is None): if ((port is None) and (service is None)): return {'ports.screenshot': {'$exists': (not neg)}} result['ports']['$elemMatch']['screenshot'] = {'$exists': (not neg)} else: result['ports']['$elemMatch']['screenshot']...
'Look for a CPE by type (a, o or h), vendor, product or version (the part after the column following the product). No argument will just check for cpe existence.'
# Static filter builder on the "cpes" field. With no argument it is an
# existence test; with exactly one argument it can use a flat dotted-field
# filter (single-element branch visible below).
# NOTE(review): the body line below is collapsed and truncated by
# extraction -- the multi-field ($elemMatch, presumably) branch is not
# visible here.
@staticmethod def searchcpe(cpe_type=None, vendor=None, product=None, version=None):
fields = [('type', cpe_type), ('vendor', vendor), ('product', product), ('version', version)] flt = dict(((field, value) for (field, value) in fields if (value is not None))) nflt = len(flt) if (nflt == 0): return {'cpes': {'$exists': True}} elif (nflt == 1): (field, value) = flt.pop...
'This method makes use of the aggregation framework to produce top values for a given field or pseudo-field. Pseudo-fields are: - category / asnum / country / net[:mask] - port - port:open / :closed / :filtered / :<servicename> - portlist:open / :closed / :filtered - countports:open / :closed / :filtered - service / se...
# Top-N values for a (pseudo-)field over scan results, built on the
# aggregation framework. The visible part of the body normalizes default
# arguments and rewrites pseudo-field names (e.g. "category" ->
# "categories") before building the pipeline.
# NOTE(review): both the docstring above and the body line below are
# truncated by extraction -- most pseudo-field handling and the pipeline
# construction are not visible here.
def topvalues(self, field, flt=None, topnbr=10, sort=None, limit=None, skip=None, least=False, archive=False, aggrflt=None, specialproj=None, specialflt=None):
null_if_empty = (lambda val: (val if val else None)) outputproc = None if (flt is None): flt = self.flt_empty if (aggrflt is None): aggrflt = self.flt_empty if (specialflt is None): specialflt = [] if (field == 'category'): field = 'categories' elif (field == ...
def distinct(self, field, flt=None, sort=None, limit=None, skip=None, archive=False):
    """Return the distinct values of `field`, using the MongoDB
    aggregation framework.

    The `archive` flag selects the archived hosts collection instead
    of the current one. Returns a generator of values.

    """
    colname = self.colname_oldhosts if archive else self.colname_hosts
    pipeline = self._distinct(field, flt=flt, sort=sort, limit=limit,
                              skip=skip)
    cursor = self.set_limits(
        self.db[colname].aggregate(pipeline, cursor={})
    )
    # Build the generator eagerly-wrapped: the aggregation is started
    # here, only the iteration over results is lazy.
    return (doc['_id'] for doc in cursor)
'`category1` and `category2` must be categories (provided as str or unicode objects) Returns a generator of tuples: ({\'addr\': address, \'proto\': protocol, \'port\': port}, value) Where `address` is an integer (use `utils.int2ip` to get the corresponding string), and value is: - -1 if the port is open in category1 a...
# Compares open ports between two categories via an aggregation pipeline:
# match hosts in either category, unwind categories and ports, keep open
# ports only, then project. NOTE(review): both the docstring above and the
# body line below are truncated by extraction -- the rest of the pipeline
# and the value computation (-1/0/1 semantics) are not visible here.
def diff_categories(self, category1, category2, flt=None, archive=False, include_both_open=True):
category_filter = self.searchcategory([category1, category2]) pipeline = [{'$match': (category_filter if (flt is None) else self.flt_and(flt, category_filter))}, {'$unwind': '$categories'}, {'$match': category_filter}, {'$unwind': '$ports'}, {'$match': {'ports.state_state': 'open'}}, {'$project': {'_id': 0, 'ad...
def update_country(self, start, stop, code, create=False):
    """Update country info on existing Nmap scan result documents.

    Every host whose address is in the `start`-`stop` range, in both
    the current and the archived collections, gets its country code
    and name set from `code`. `create` is accepted for API
    compatibility but unused here.

    """
    # Resolve the country name once, outside the per-collection loop.
    updatespec = {
        'infos.country_code': code,
        'infos.country_name': self.globaldb.data.country_name_by_code(code),
    }
    for colname in (self.colname_hosts, self.colname_oldhosts):
        self.db[colname].update(
            self.searchrange(start, stop),
            {'$set': updatespec},
            multi=True,
        )
'Update city/location info on existing Nmap scan result documents'
# Builds a "$set" spec from the location record for `locid` (keys prefixed
# with "infos."), and derives the country name whenever a country code is
# part of that record. NOTE(review): the body line below is collapsed and
# truncated by extraction -- the per-collection update call is not fully
# visible, but presumably mirrors update_country()/update_as(); confirm.
def update_city(self, start, stop, locid, create=False):
updatespec = dict(((('infos.%s' % key), value) for (key, value) in viewitems(self.globaldb.data.location_byid(locid)))) if ('infos.country_code' in updatespec): updatespec['infos.country_name'] = self.globaldb.data.country_name_by_code(updatespec['infos.country_code']) for colname in [self.colname_h...
def update_as(self, start, stop, asnum, asname, create=False):
    """Update AS info on existing Nmap scan result documents.

    Hosts in the `start`-`stop` address range, in both the current
    and archived collections, get their AS number (and name, when
    `asname` is provided) set. `create` is accepted for API
    compatibility but unused here.

    """
    # Start from the mandatory field and only add the name when known,
    # instead of duplicating the dict literal in two branches.
    updatespec = {'infos.as_num': asnum}
    if asname is not None:
        updatespec['infos.as_name'] = asname
    for colname in (self.colname_hosts, self.colname_oldhosts):
        self.db[colname].update(
            self.searchrange(start, stop),
            {'$set': updatespec},
            multi=True,
        )
def init(self):
    """Initialize the "passive" purpose: drop the passive and ipdata
    collections, then (re)create the default indexes.

    WARNING: this destroys any existing data in those collections.

    """
    for colname in (self.colname_passive, self.colname_ipdata):
        self.db[colname].drop()
    self.create_indexes()
def get(self, spec, **kargs):
    """Query the passive column with the filter `spec` and return a
    MongoDB cursor.

    Building the cursor is cheap; subsequent operations (.count(),
    iteration, ...) may be slow depending on the filter and indexes.
    Keyword arguments are passed on to the underlying .find() call.

    """
    cursor = self.find(self.colname_passive, spec, **kargs)
    return self.set_limits(cursor)
def get_one(self, spec, **kargs):
    """Like get(), but based on .find_one() rather than .find(): the
    first record matching `spec` is returned, or None when nothing
    matches.

    Unlike get(), this call may take a long time, depending on `spec`
    and the indexes set on the passive column.

    """
    return self.find_one(self.colname_passive, spec, **kargs)
def update(self, spec, **kargs):
    """Update the first record matching `spec` in the passive column,
    setting the fields given as keyword arguments.

    """
    updatespec = {'$set': kargs}
    self.db[self.colname_passive].update(spec, updatespec)
def insert(self, spec, getinfos=None):
    """Insert the record `spec` into the passive column.

    When `getinfos` is provided, its result on `spec` is merged into
    the record (in place) before insertion. When the record carries an
    "addr" field, the corresponding IP data is updated afterwards.

    """
    if getinfos is not None:
        spec.update(getinfos(spec))
    self.db[self.colname_passive].insert(spec)
    try:
        addr = spec['addr']
    except KeyError:
        pass
    else:
        self.set_data(addr)
'Like `.insert_or_update()`, but `specs` parameter has to be an iterable of (timestamp, spec) values. This will perform bulk MongoDB inserts with the major drawback that the `getinfos` parameter will be called (if it is not `None`) for each spec, even when the spec already exists in the database and the call was hence ...
# Bulk upsert path: opens an unordered bulk operation and, for each
# (timestamp, spec), builds an update spec incrementing "count" and
# widening the firstseen/lastseen window with $min/$max.
# NOTE(review): both the docstring above and the body line below are
# truncated by extraction -- the getinfos handling, bulk execution and
# flush threshold are not visible here.
def insert_or_update_bulk(self, specs, getinfos=None):
bulk = self.db[self.colname_passive].initialize_unordered_bulk_op() count = 0 try: for (timestamp, spec) in specs: if (spec is not None): updatespec = {'$inc': {'count': 1}, '$min': {'firstseen': timestamp}, '$max': {'lastseen': timestamp}} if (getinfos is...
'Updates the first record matching "spec" (without "firstseen", "lastseen" and "count") by mixing "firstseen", "lastseen" and "count" from "spec" and from the database. This is usefull to mix records from different databases.'
# Pops "firstseen"/"lastseen"/"count" out of `spec` and turns them into
# $min/$max/$inc operators so the stored record keeps the widest time
# window and the summed count. NOTE(review): the body line below is
# collapsed and truncated by extraction -- the actual upsert call and the
# getinfos handling are not visible here.
def insert_or_update_mix(self, spec, getinfos=None):
updatespec = {} if ('firstseen' in spec): updatespec['$min'] = {'firstseen': spec['firstseen']} del spec['firstseen'] if ('lastseen' in spec): updatespec['$max'] = {'lastseen': spec['lastseen']} del spec['lastseen'] if ('count' in spec): updatespec['$inc'] = {'cou...
def topvalues(self, field, distinct=True, **kargs):
    """Produce the top values for `field` in the passive column, using
    the aggregation framework.

    With `distinct=True` (the default) top values are counted by
    distinct events; otherwise they are weighted by the stored
    "count" field. Remaining keyword arguments are forwarded to the
    pipeline builder.

    """
    if not distinct:
        kargs['countfield'] = 'count'
    return self.set_limits(
        self.db[self.colname_passive].aggregate(
            self._topvalues(field, **kargs),
            cursor={},
        )
    )
def distinct(self, field, flt=None, sort=None, limit=None, skip=None):
    """Return the distinct values of `field` in the passive column,
    using the MongoDB aggregation framework. Returns a generator of
    values.

    """
    pipeline = self._distinct(field, flt=flt, sort=sort, limit=limit,
                              skip=skip)
    cursor = self.set_limits(
        self.db[self.colname_passive].aggregate(pipeline, cursor={})
    )
    # The aggregation is started here; only result iteration is lazy.
    return (doc['_id'] for doc in cursor)