desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
def setup(self):
    """Delete the output file if it already exists, so each run starts fresh.

    Fix: use try/except instead of an exists()-then-remove() pair, which
    raced if the file disappeared between the two calls.
    """
    try:
        os.remove(__OUTPUT__)
    except FileNotFoundError:
        pass  # already absent -- nothing to do
def open_pep_index(self):
    """Return the raw bytes of the downloaded PEP 0 HTML page.

    NOTE(review): the code reads 'download/pep0.html' (singular) while the
    original description said 'downloads/' -- confirm which directory is right.
    """
    with open('download/pep0.html', 'rb') as handle:
        return handle.read()
def get_pep_urls(self):
    """Collect every anchor whose text is a PEP number into self.links.

    A set is used so duplicate anchors collapse automatically.
    """
    markup = self.open_pep_index()
    soup = BeautifulSoup(markup, 'html.parser', from_encoding='UTF-8')
    numeric = re.compile('^[0-9]*$')
    for anchor in soup.findAll('a', href=True, text=numeric):
        self.links.add(anchor)
def get_pep_contents(self, link):
    """Download the PEP page behind *link* and parse it into self.document."""
    self.url = __PYBASE__ + link['href']
    response = request.urlopen(self.url)
    raw = response.read()
    self.set_pep_document(raw.decode('utf-8'))
def set_pep_document(self, doc):
    """Parse the raw HTML string *doc* and store the soup on self.document."""
    parsed = BeautifulSoup(doc, 'html.parser')
    self.document = parsed
def set_pep_title(self):
    """Derive the long and short titles for the current PEP.

    The page title's '--' separator is collapsed to '-', the short title
    becomes 'PEP <number>', and the short title is recorded in self.pep_db.
    """
    heading = self.document.find('h1', {'class': 'page-title'})
    raw_title = heading.get_text()
    self.long_title = re.sub('--', '-', raw_title)
    self.title = 'PEP ' + self.pep
    self.pep_db.add(self.title)
def set_pep_number(self):
    """Extract the PEP number from self.url, dropping leading zeros.

    'pep-0008' yields '8'; 'pep-0143' yields '143'.
    """
    digits = re.findall(r'\d+', self.url)
    self.pep = digits[0].lstrip('0')
'Parses the raw document, cleans the text'
def set_pep_body(self):
try: html = self.document.find('div', {'id': ['abstract', 'introduction', 'rationale', 'motivation', 'what-is-a-pep', 'overview', 'improving-python-zip-application-support', 'scope', 'abstract-and-rationale', 'rationale-and-goals', 'specification']}) if (html is None): raise AttributeErr...
'Sets the structure for the output.txt file'
def set_structure(self):
if ((int(self.pep) not in [205, 210, 308]) and (self.body != '')): entry = OUTPUT_TEMPLATE.format(title=self.title, entry_type='A', redirect_title='', empty_field='', categories='', related_topics='', external_links='', disambiguation='', image='', abstract=self.body, url=self.url) self.collection.a...
def __init__(self, file):
    """Remember the path of *file* and immediately load its contents."""
    self.FILE = file
    self.HTML = ''
    self.load_data()
def load_data(self):
    """Read the HTML file at self.FILE into self.HTML as text."""
    with open(self.FILE, 'r') as handle:
        self.HTML = handle.read()
def get_raw_data(self):
    'Returns the Plain HTML'
    # Plain accessor for the text loaded by load_data(); no parsing is done.
    return self.HTML
def get_file(self):
    """Return the path of the HTML file this object was created from.

    Bug fix: the attribute is stored as ``self.FILE`` in ``__init__``; the
    old body returned ``self.file``, which always raised AttributeError.
    """
    return self.FILE
def __init__(self, data):
    """Store the API-reference soup extracted from *data*; results start empty."""
    self.parsed_data = []
    self.data = self.get_api_reference_html(data)
def get_api_reference_html(self, data):
    """Return the soup subtree holding the API reference, or None if absent.

    The reference section is located via the <a name="reference"> anchor and
    its grandparent element is returned.
    """
    soup = BeautifulSoup(data.get_raw_data(), 'html.parser')
    anchor = soup.find('a', attrs={'name': 'reference'})
    if anchor:
        return anchor.find_parent().find_parent()
    return None
'Parses Individual API and extracts Title , Link , Content and Example Code'
def parse_data(self, file_data):
if self.data: all_api_reference = self.data.findAll(['h4', 'h3']) for api in all_api_reference: title = api.text.replace(' #', '') href = self.parse_link(file_data, api) content = self.parse_content(api) example = self.parse_example(api) ...
'Extract the Example Code Snippet'
def parse_example(self, api):
example_code = '' for tag in api.next_siblings: if (not isinstance(tag, element.Tag)): continue if (tag.name == 'div'): code = str(tag.find('pre')) if (code != 'None'): example_code = tag.text.strip() example_code += '\n' return example...
'Extracts the Abstract from API Docs'
def parse_content(self, api):
abstract = '' for tag in api.next_siblings: if (not isinstance(tag, element.Tag)): continue if ((tag.name == 'hr') or (tag.name == 'blockquote')): break elif (tag.name == 'div'): continue else: abstract += str(tag) abstract = se...
def parse_link(self, data, api):
    """Build the absolute docs URL for *api* from the page filename plus hash link."""
    page = data.FILE.split('/')[1]
    fragment = api.find('a', attrs={'class': 'hash-link'}).attrs['href']
    return REACT_API_DOCS_URL + page + fragment
def get_data(self):
    'Returns the API List'
    # Accessor for the list built by parse_data(); empty until that runs.
    return self.parsed_data
def __init__(self, api_data, output_file):
    """Keep the parsed API entries and the destination file name."""
    self.output_file = output_file
    self.data = api_data
'Create the output file using the parsed data'
def create_file(self):
for data_element in self.data: title = data_element['title'] anchor = data_element['href'] example = data_element['example'] content = data_element['content'] if example: example = ('<pre><code>%s</code></pre>' % example) abstract = '<section class=...
''
def get_packages(self):
self.packages = [] table = self.soup.find('table') for row in table.find_all('tr')[1::None]: data = row.find_all('td') name = data[2].a.getText() reference = (self.ARCHLINUX_URL + data[2].a['href']) info = data[4].getText() arch = data[0].getText() package = P...
'Update line with last point and current coordinates.'
def mouseMoveEvent(self, ev):
pos = self.transformPos(ev.pos()) self.restoreCursor() if self.drawing(): self.overrideCursor(CURSOR_DRAW) if self.current: color = self.lineColor if self.outOfPixmap(pos): pos = self.intersectionPoint(self.current[(-1)], pos) elif ((len(se...
'Select the first shape created which contains this point.'
def selectShapePoint(self, point):
self.deSelectShape() if self.selectedVertex(): (index, shape) = (self.hVertex, self.hShape) shape.highlightVertex(index, shape.MOVE_VERTEX) return for shape in reversed(self.shapes): if (self.isVisible(shape) and shape.containsPoint(point)): shape.selected = True ...
def transformPos(self, point):
    """Convert widget-logical coordinates to painter-logical coordinates."""
    scaled = point / self.scale
    return scaled - self.offsetToCenter()
'For each edge formed by `points\', yield the intersection with the line segment `(x1,y1) - (x2,y2)`, if it exists. Also return the distance of `(x2,y2)\' to the middle of the edge along with its index, so that the one closest can be chosen.'
def intersectingEdges(self, x1y1, x2y2, points):
(x1, y1) = x1y1 (x2, y2) = x2y2 for i in range(4): (x3, y3) = points[i] (x4, y4) = points[((i + 1) % 4)] denom = (((y4 - y3) * (x2 - x1)) - ((x4 - x3) * (y2 - y1))) nua = (((x4 - x3) * (y1 - y3)) - ((y4 - y3) * (x1 - x3))) nub = (((x2 - x1) * (y1 - y3)) - ((y2 - y1) *...
'Return a pretty-printed XML string for the Element.'
def prettify(self, elem):
rough_string = ElementTree.tostring(elem, 'utf8') root = etree.fromstring(rough_string) return etree.tostring(root, pretty_print=True, encoding=ENCODE_METHOD).replace(' '.encode(), ' DCTB '.encode()) 'reparsed = minidom.parseString(rough_string)\n return re...
'Return XML root'
def genXML(self):
if ((self.filename is None) or (self.foldername is None) or (self.imgSize is None)): return None top = Element('annotation') if self.verified: top.set('verified', 'yes') folder = SubElement(top, 'folder') folder.text = self.foldername filename = SubElement(top, 'filename') fi...
def toggleActions(self, value=True):
    """Enable or disable every widget that requires an opened image."""
    for widget in self.actions.zoomActions:
        widget.setEnabled(value)
    for widget in self.actions.onLoadActive:
        widget.setEnabled(value)
def toggleDrawingSensitive(self, drawing=True):
    """Disable mode toggling while a shape is being drawn.

    When drawing stops and the user is in beginner mode, the in-progress
    creation is cancelled and the canvas returns to edit mode.

    Fix: use the print() function instead of the Python 2 print statement so
    the module parses under Python 3 (output is identical on Python 2).
    """
    self.actions.editMode.setEnabled(not drawing)
    if not drawing and self.beginner():
        print('Cancel creation.')
        self.canvas.setEditing(True)
        self.canvas.restoreCursor()
        self.actions.create.setEnabled(True)
'Function to handle difficult examples Update on each object'
def btnstate(self, item=None):
if (not self.canvas.editing()): return item = self.currentItem() if (not item): item = self.labelList.item((self.labelList.count() - 1)) difficult = self.diffcButton.isChecked() try: shape = self.itemsToShapes[item] except: pass try: if (difficult != s...
'Pop-up and give focus to the label editor. position MUST be in global coordinates.'
def newShape(self):
if ((not self.useDefaultLabelCheckbox.isChecked()) or (not self.defaultLabelTextLine.text())): if (len(self.labelHist) > 0): self.labelDialog = LabelDialog(parent=self, listItem=self.labelHist) if (self.singleClassMode.isChecked() and self.lastLabel): text = self.lastLabel ...
'Load the specified file, or the last opened file if None.'
def loadFile(self, filePath=None):
self.resetState() self.canvas.setEnabled(False) if (filePath is None): filePath = self.settings.get(SETTING_FILENAME) unicodeFilePath = ustr(filePath) if (unicodeFilePath and (self.fileListWidget.count() > 0)): index = self.mImgList.index(unicodeFilePath) fileWidgetItem = sel...
def scaleFitWindow(self):
    """Return the zoom factor that makes the pixmap fit the main widget."""
    epsilon = 2.0  # small slack so no scrollbars are triggered
    avail_w = self.centralWidget().width() - epsilon
    avail_h = self.centralWidget().height() - epsilon
    avail_aspect = avail_w / avail_h
    img_w = self.canvas.pixmap.width() - 0.0
    img_h = self.canvas.pixmap.height() - 0.0
    img_aspect = img_w / img_h
    # The wider shape (relative to the window) constrains the scale.
    if img_aspect >= avail_aspect:
        return avail_w / img_w
    return avail_h / img_h
def create_statefile(self):
    """Create a statefile recording the offset of the end of the log file.

    Draining ireadlines() makes the tailer persist the final offset when the
    generator is exhausted. Override if a cheaper implementation exists.
    """
    for _line in self.ireadlines():
        pass
def ireadlines(self):
    'Return a generator over lines in the logfile, updating the statefile when the generator is exhausted'
    # Abstract: concrete tailer implementations must override this.
    raise NotImplementedError()
def __init__(self, key, secret_key, metric):
    """Store AWS credentials, the metric to publish, and the endpoint host."""
    self.key = key
    self.secret_key = secret_key
    self.metric = metric
    # Endpoint is hard-wired to the ap-northeast-1 (Tokyo) monitoring host.
    self.base_url = 'monitoring.ap-northeast-1.amazonaws.com'
'get instance id from amazon meta data server'
def get_instance_id(self, instance_id=None):
self.instance_id = instance_id if (self.instance_id is None): try: conn = HTTPConnection('169.254.169.254') conn.request('GET', '/latest/meta-data/instance-id') except Exception: raise CloudWatchException("Can't connect Amazon meta data server ...
'build signed parameters following http://docs.amazonwebservices.com/AmazonCloudWatch/latest/APIReference/API_PutMetricData.html'
def get_signed_url(self):
keys = sorted(self.url_params) values = map(self.url_params.get, keys) url_string = urlencode(list(zip(keys, values))) string_to_sign = ('GET\n%s\n/\n%s' % (self.base_url, url_string)) try: if (sys.version_info[:2] == (2, 5)): signature = hmac.new(key=self.secret_key, msg=string_...
def parse_line(self, line):
    'Take a line and do any parsing we need to do. Required for parsers'
    # Abstract hook: subclasses must implement per-line parsing.
    raise RuntimeError('Implement me!')
def get_state(self, duration):
    'Run any calculations needed and return list of metric objects'
    # Abstract hook: subclasses must compute and return their metrics.
    raise RuntimeError('Implement me!')
def get_metric_name(self, metric, separator='.'):
    """Build the final metric name, applying any configured prefix/suffix.

    Prefix and suffix come from self.options and are joined with *separator*.
    """
    name = metric.name
    prefix = self.options.metric_prefix
    suffix = self.options.metric_suffix
    if prefix:
        name = prefix + separator + name
    if suffix:
        name = name + separator + suffix
    return name
def submit(self, metrics):
    'Send metrics to the specific output'
    # Abstract hook: output backends must implement delivery of the metrics.
    raise RuntimeError('Implement me!')
def __init__(self, option_string=None):
    """Set up per-level counters and the regex that extracts the log level."""
    for counter in ('notice', 'warn', 'error', 'crit', 'other'):
        setattr(self, counter, 0)
    # Matches '[date] [level] message' lines and captures the level word.
    self.reg = re.compile(r'^\[[^]]+\] \[(?P<loglevel>\w+)\] .*')
'This function should digest the contents of one line at a time, updating object\'s state variables. Takes a single argument, the line to be parsed.'
def parse_line(self, line):
try: regMatch = self.reg.match(line) if regMatch: linebits = regMatch.groupdict() level = linebits['loglevel'] if (level == 'notice'): self.notice += 1 elif (level == 'warn'): self.warn += 1 elif (level == 'e...
'Run any necessary calculations on the data collected from the logs and return a list of metric objects.'
def get_state(self, duration):
self.duration = (duration / 10.0) return [MetricObject('notice', (self.notice / self.duration), 'Logs per 10 sec'), MetricObject('warn', (self.warn / self.duration), 'Logs per 10 sec'), MetricObject('error', (self.error / self.duration), 'Logs per 10 sec'), MetricObject('crit', (self....
def __init__(self, option_string=None):
    """Initialise counters for sent/deferred/bounced mail and delay totals."""
    for counter in ('numSent', 'numDeferred', 'numBounced', 'totalDelay', 'numRbl'):
        setattr(self, counter, 0)
    # Captures the delivery delay and the final delivery status keyword.
    self.reg = re.compile(r'.*delay=(?P<send_delay>[^,]+),.*status=(?P<status>(sent|deferred|bounced))')
'This function should digest the contents of one line at a time, updating object\'s state variables. Takes a single argument, the line to be parsed.'
def parse_line(self, line):
try: regMatch = self.reg.match(line) if regMatch: linebits = regMatch.groupdict() if (linebits['status'] == 'sent'): self.totalDelay += float(linebits['send_delay']) self.numSent += 1 elif (linebits['status'] == 'deferred'): ...
'Run any necessary calculations on the data collected from the logs and return a list of metric objects.'
def get_state(self, duration):
self.duration = float(duration) totalTxns = ((self.numSent + self.numBounced) + self.numDeferred) pctDeferred = 0.0 pctSent = 0.0 pctBounced = 0.0 avgDelay = 0 mailTxnsSec = 0 mailSentSec = 0 if (totalTxns > 0): pctDeferred = ((float(self.numDeferred) / totalTxns) * 100) ...
'Initialize any data structures or variables needed for keeping track of the tasty bits we find in the log we are parsing.'
def __init__(self, option_string=None):
self.metrics = {} if option_string: options = option_string.split(' ') else: options = [] optparser = optparse.OptionParser() optparser.add_option('--key-separator', '-k', dest='key_separator', default='.', help="Key separator for flattened json object key nam...
def key_filter(self, key):
    'Default key_filter method. Override and implement this method if you want to do any filtering or transforming on specific keys in your JSON object.'
    # Identity by default: every key passes through unchanged.
    return key
'Recurses through dicts and/or lists and flattens them into a single level dict of key: value pairs. Each key consists of all of the recursed keys joined by separator. If key_filter_callback is callable, it will be called with each key. It should return either a new key which will be used in the final full key strin...
def flatten_object(self, node, separator='.', key_filter_callback=None, parent_keys=[]):
items = {} try: if (sys.version_info >= (3, 0)): iterator = iter(node.items()) else: iterator = node.iteritems() except AttributeError: iterator = enumerate(node) for (key, item) in iterator: if callable(key_filter_callback): key = key_...
def parse_line(self, line):
    """Parse one JSON log line and store its flattened form on self.metrics.

    Raises LogsterParsingException when the line is not valid JSON.

    Fix: reuse the already-decoded object instead of calling json.loads()
    twice on the same line (once for validation, once for flattening).
    """
    try:
        json_data = json.loads(line)
    except Exception as e:
        raise LogsterParsingException('{0} - {1}'.format(type(e), e))
    self.metrics = self.flatten_object(json_data, self.key_separator, self.key_filter)
'Run any necessary calculations on the data collected from the logs and return a list of metric objects.'
def get_state(self, duration):
self.duration = duration metric_objects = [] for (metric_name, metric_value) in self.metrics.items(): if (type(metric_value) == float): metric_type = 'float' elif ((type(metric_value) == int) or (type(metric_value) == long)): metric_type = 'int32' else: ...
'Initialize any data structures or variables needed for keeping track of the tasty bits we find in the log we are parsing.'
def __init__(self, option_string=None):
if option_string: options = option_string.split(' ') else: options = [] optparser = optparse.OptionParser() optparser.add_option('--log-levels', '-l', dest='levels', default='WARN,ERROR,FATAL', help='Comma-separated list of log levels to track: (default: "WARN,...
'This function should digest the contents of one line at a time, updating object\'s state variables. Takes a single argument, the line to be parsed.'
def parse_line(self, line):
try: regMatch = self.reg.match(line) if regMatch: linebits = regMatch.groupdict() log_level = linebits['log_level'] if (log_level in self.levels): current_val = getattr(self, log_level) setattr(self, log_level, (current_val + 1)) ...
def get_state(self, duration):
    """Return one MetricObject per tracked level, as counts per second."""
    self.duration = float(duration)
    return [MetricObject(level, getattr(self, level) / self.duration)
            for level in self.levels]
'Initialize any data structures or variables needed for keeping track of the tasty bits we find in the log we are parsing.'
def __init__(self, option_string=None):
self.size_transferred = 0 self.squid_codes = {'TCP_MISS': 0, 'TCP_DENIED': 0, 'TCP_HIT': 0, 'TCP_MEM_HIT': 0, 'OTHER': 0} self.http_1xx = 0 self.http_2xx = 0 self.http_3xx = 0 self.http_4xx = 0 self.http_5xx = 0 self.reg = re.compile('^[0-9.]+ +(?P<size>[0-9]+) .*(?P<squid_code>(TC...
'This function should digest the contents of one line at a time, updating object\'s state variables. Takes a single argument, the line to be parsed.'
def parse_line(self, line):
try: regMatch = self.reg.match(line) if regMatch: linebits = regMatch.groupdict() status = int(linebits['http_status_code']) squid_code = linebits['squid_code'] size = int(linebits['size']) if (status < 200): self.http_1xx +...
'Run any necessary calculations on the data collected from the logs and return a list of metric objects.'
def get_state(self, duration):
self.duration = float(duration) return_array = [MetricObject('http_1xx', (self.http_1xx / self.duration), 'Responses per sec'), MetricObject('http_2xx', (self.http_2xx / self.duration), 'Responses per sec'), MetricObject('http_3xx', (self.http_3xx / self.duration), 'Responses per sec'), Metric...
def __init__(self, option_string=None):
    """Zero the per-class HTTP status counters and compile the status regex."""
    for bucket in ('http_1xx', 'http_2xx', 'http_3xx', 'http_4xx', 'http_5xx'):
        setattr(self, bucket, 0)
    # Pulls the 3-digit status code out of a common-log-format request line.
    self.reg = re.compile(r'.*HTTP/1.\d" (?P<http_status_code>\d{3}) .*')
'This function should digest the contents of one line at a time, updating object\'s state variables. Takes a single argument, the line to be parsed.'
def parse_line(self, line):
try: regMatch = self.reg.match(line) if regMatch: linebits = regMatch.groupdict() status = int(linebits['http_status_code']) if (status < 200): self.http_1xx += 1 elif (status < 300): self.http_2xx += 1 elif ...
'Run any necessary calculations on the data collected from the logs and return a list of metric objects.'
def get_state(self, duration):
self.duration = float(duration) return [MetricObject('http_1xx', (self.http_1xx / self.duration), 'Responses per sec'), MetricObject('http_2xx', (self.http_2xx / self.duration), 'Responses per sec'), MetricObject('http_3xx', (self.http_3xx / self.duration), 'Responses per sec'), MetricObject('...
'Initialize any data structures or variables needed for keeping track of the tasty bits we find in the log we are parsing.'
def __init__(self, option_string=None):
self.counts = {} self.times = {} if option_string: options = option_string.split(' ') else: options = [] optparser = optparse.OptionParser() optparser.add_option('--percentiles', '-p', dest='percentiles', default='90', help='Comma-separated list of integer percenti...
'This function should digest the contents of one line at a time, updating object\'s state variables. Takes a single argument, the line to be parsed.'
def parse_line(self, line):
count_match = self.count_reg.match(line) if count_match: countbits = count_match.groupdict() count_name = countbits['count_name'] if (count_name not in self.counts): self.counts[count_name] = 0.0 self.counts[count_name] += float(countbits['count_value']) time_matc...
'Run any necessary calculations on the data collected from the logs and return a list of metric objects.'
def get_state(self, duration):
duration = float(duration) metrics = [] if (duration > 0): metrics += [MetricObject(counter, (self.counts[counter] / duration)) for counter in self.counts] for time_name in self.times: values = self.times[time_name]['values'] unit = self.times[time_name]['unit'] metrics.a...
def data(self, ts):
    """Mostly ones (1), with a final value of 1000.

    Returns (ts, timeseries) where timeseries covers the 86401 seconds up to
    and including int(ts); each point is a [timestamp, value] pair.

    Fix: wrap map()/zip() in list() so the result is subscriptable on
    Python 3, where map returns a lazy iterator (identical on Python 2).
    """
    timestamps = map(float, range(int(ts) - 86400, int(ts) + 1))
    timeseries = list(map(list, zip(timestamps, [1] * 86401)))
    timeseries[-1][1] = 1000
    timeseries[-2][1] = 1
    timeseries[-3][1] = 1
    return (ts, timeseries)
'Assert that a user can add their own custom algorithm. This mocks out settings.ALGORITHMS and settings.CONSENSUS to use only a single custom-defined function (alwaysTrue)'
@unittest.skip('Fails inexplicable in certain environments.') @patch.object(algorithms, 'CONSENSUS') @patch.object(algorithms, 'ALGORITHMS') @patch.object(algorithms, 'time') def test_run_selected_algorithm_runs_novel_algorithm(self, timeMock, algorithmsListMock, consensusMock):
algorithmsListMock.__iter__.return_value = ['alwaysTrue'] consensusMock = 1 (timeMock.return_value, timeseries) = self.data(time()) alwaysTrue = Mock(return_value=True) with patch.dict(algorithms.__dict__, {'alwaysTrue': alwaysTrue}): (result, ensemble, tail_avg) = algorithms.run_selected_al...
def gen_unpickle(self, infile):
    """Yield the object unpickled from *infile*; yield nothing on EOF."""
    try:
        payload = self.unpickler.loads(infile)
        yield payload
    except EOFError:
        return
def read_all(self, sock, n):
    """Read exactly *n* bytes from *sock* and return them as bytes.

    Fixes two defects in the original:
    * accumulate ``bytes`` -- the old ``data = ''`` mixed str with the bytes
      returned by ``sock.recv`` and failed on Python 3;
    * a closed socket makes ``recv`` return ``b''`` forever, so the old loop
      never terminated; raise EOFError instead of spinning.
    """
    chunks = []
    remaining = n
    while remaining > 0:
        buf = sock.recv(remaining)
        if not buf:
            raise EOFError('socket closed with %d bytes still expected' % remaining)
        remaining -= len(buf)
        chunks.append(buf)
    return b''.join(chunks)
def check_if_parent_is_alive(self):
    """Exit quietly if this process or its parent has died.

    kill(pid, 0) sends no signal; it only checks that the pid exists.

    Fix: catch OSError only -- the original bare ``except`` also swallowed
    SystemExit/KeyboardInterrupt and converted them into a silent exit.
    """
    try:
        kill(self.current_pid, 0)
        kill(self.parent_pid, 0)
    except OSError:
        exit(0)
'Listen for pickles over tcp'
def listen_pickle(self):
while 1: try: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) s.bind((self.ip, self.port)) s.setblocking(1) s.listen(5) logger.info(('listening over tcp for pickl...
'Listen over udp for MessagePack strings'
def listen_udp(self):
while 1: try: s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.bind((self.ip, self.port)) logger.info(('listening over udp for messagepack on %s' % self.port)) chunk = [] while 1: self.check_if_parent_is_aliv...
def run(self):
    """Dispatch to the configured listener loop when the process starts.

    Fix: report the unknown-format error through the module-level ``logger``
    used on the first line, instead of the root logger via ``logging.error``
    (the mix meant the message bypassed the listener's log configuration).
    """
    logger.info('started listener')
    if self.type == 'pickle':
        self.listen_pickle()
    elif self.type == 'udp':
        self.listen_udp()
    else:
        logger.error('unknown listener format')
def check_if_parent_is_alive(self):
    """Exit quietly if the parent process has died.

    Fix: narrow the bare ``except`` to OSError so real errors (and
    KeyboardInterrupt/SystemExit) are not masked as a silent exit.
    """
    try:
        kill(self.parent_pid, 0)
    except OSError:
        exit(0)
def in_skip_list(self, metric_name):
    """Return True if *metric_name* contains any substring from settings.SKIP_LIST."""
    return any(token in metric_name for token in settings.SKIP_LIST)
'Called when the process intializes.'
def run(self):
logger.info('started worker') FULL_NAMESPACE = settings.FULL_NAMESPACE MINI_NAMESPACE = settings.MINI_NAMESPACE MAX_RESOLUTION = settings.MAX_RESOLUTION full_uniques = (FULL_NAMESPACE + 'unique_metrics') mini_uniques = (MINI_NAMESPACE + 'unique_metrics') pipe = self.redis_conn.pipeline() ...
def check_if_parent_is_alive(self):
    """Exit quietly if the parent process has died.

    Fix: narrow the bare ``except`` to OSError so real errors (and
    KeyboardInterrupt/SystemExit) are not masked as a silent exit.
    """
    try:
        kill(self.parent_pid, 0)
    except OSError:
        exit(0)
'Trim metrics that are older than settings.FULL_DURATION and purge old metrics.'
def vacuum(self, i, namespace, duration):
begin = time() unique_metrics = list(self.redis_conn.smembers((namespace + 'unique_metrics'))) keys_per_processor = (len(unique_metrics) / settings.ROOMBA_PROCESSES) assigned_max = (i * keys_per_processor) assigned_min = (assigned_max - keys_per_processor) assigned_keys = range(assigned_min, ass...
'Called when process initializes.'
def run(self):
logger.info('started roomba') while 1: now = time() try: self.redis_conn.ping() except: logger.error(("roomba can't connect to redis at socket path %s" % settings.REDIS_SOCKET_PATH)) sleep(10) self.redis_conn = St...
'Initialize the Analyzer'
def __init__(self, parent_pid):
super(Analyzer, self).__init__() self.redis_conn = StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH) self.daemon = True self.parent_pid = parent_pid self.current_pid = getpid() self.anomalous_metrics = Manager().list() self.exceptions_q = Queue() self.anomaly_breakdown_q = Queue()...
def check_if_parent_is_alive(self):
    """Exit quietly if this process or its parent has died.

    kill(pid, 0) sends no signal; it only checks that the pid exists.

    Fix: catch OSError only -- the original bare ``except`` also swallowed
    SystemExit/KeyboardInterrupt and converted them into a silent exit.
    """
    try:
        kill(self.current_pid, 0)
        kill(self.parent_pid, 0)
    except OSError:
        exit(0)
'Assign a bunch of metrics for a process to analyze.'
def spin_process(self, i, unique_metrics):
keys_per_processor = int(ceil((float(len(unique_metrics)) / float(settings.ANALYZER_PROCESSES)))) if (i == settings.ANALYZER_PROCESSES): assigned_max = len(unique_metrics) else: assigned_max = (i * keys_per_processor) assigned_min = (assigned_max - keys_per_processor) assigned_keys =...
'Called when the process intializes.'
def run(self):
while 1: now = time() try: self.redis_conn.ping() except: logger.error(("skyline can't connect to redis at socket path %s" % settings.REDIS_SOCKET_PATH)) sleep(10) self.redis_conn = StrictRedis(unix_socket_path=settings....
'Create a new instance of :class:`GridFS`. Raises :class:`TypeError` if `database` is not an instance of :class:`~pymongo.database.Database`. :Parameters: - `database`: database to use - `collection` (optional): root collection to use .. versionchanged:: 3.1 Indexes are only ensured on the first write to the DB. .. ver...
def __init__(self, database, collection='fs'):
if (not isinstance(database, Database)): raise TypeError('database must be an instance of Database') if (not database.write_concern.acknowledged): raise ConfigurationError('database must use acknowledged write_concern') self.__database = database self.__coll...
def new_file(self, **kwargs):
    'Create a new file in GridFS. Returns a new :class:`~gridfs.grid_file.GridIn` instance to which data can be written. Any keyword arguments will be passed through to :meth:`~gridfs.grid_file.GridIn`. If the ``"_id"`` of the file is manually specified, it must not already exist in GridFS.'
    # Thin factory: all options (filename, _id, chunk_size, ...) go to GridIn.
    return GridIn(self.__collection, **kwargs)
def put(self, data, **kwargs):
    """Write *data* into a brand-new GridFS file and return its ``_id``.

    Equivalent to opening a GridIn, writing, and closing it; the file is
    always closed, even when write() raises.
    """
    gin = GridIn(self.__collection, **kwargs)
    try:
        gin.write(data)
    finally:
        gin.close()
    return gin._id
def get(self, file_id):
    """Return a GridOut for the file whose ``_id`` equals *file_id*.

    ``_ensure_file()`` is called eagerly so a missing file raises here rather
    than on the first read.
    """
    out = GridOut(self.__collection, file_id)
    out._ensure_file()
    return out
'Get a file from GridFS by ``"filename"`` or metadata fields. Returns a version of the file in GridFS whose filename matches `filename` and whose metadata fields match the supplied keyword arguments, as an instance of :class:`~gridfs.grid_file.GridOut`. Version numbering is a convenience atop the GridFS API provided by...
def get_version(self, filename=None, version=(-1), **kwargs):
query = kwargs if (filename is not None): query['filename'] = filename cursor = self.__files.find(query) if (version < 0): skip = (abs(version) - 1) cursor.limit((-1)).skip(skip).sort('uploadDate', DESCENDING) else: cursor.limit((-1)).skip(version).sort('uploadDate', ...
def get_last_version(self, filename=None, **kwargs):
    'Get the most recent version of a file in GridFS by ``"filename"`` or metadata fields. Equivalent to calling :meth:`get_version` with the default `version` (``-1``). :Parameters: - `filename`: ``"filename"`` of the file to get, or `None` - `**kwargs` (optional): find files by custom metadata.'
    # Delegates entirely to get_version, which defaults to version=-1 (latest).
    return self.get_version(filename=filename, **kwargs)
def delete(self, file_id):
    """Delete the file with ``_id`` *file_id*: its files document and all chunks.

    Concurrent readers of this file may observe an invalid/corrupt file while
    the delete runs.

    NOTE(review): the files document is removed before the chunks, so readers
    fail fast instead of seeing a truncated file -- order appears deliberate;
    confirm before swapping these two calls.
    """
    self.__files.delete_one({'_id': file_id})
    self.__chunks.delete_many({'files_id': file_id})
def list(self):
    """Return the distinct non-null filenames stored in this GridFS instance."""
    names = self.__files.distinct('filename')
    return [name for name in names if name is not None]
def find_one(self, filter=None, *args, **kwargs):
    """Return a single matching GridOut, or None when nothing matches.

    A non-mapping *filter* is treated as an ``_id`` lookup. All other
    arguments are forwarded to :meth:`find`; any ``limit`` is ignored.
    """
    query = filter
    if query is not None and not isinstance(query, Mapping):
        query = {'_id': query}
    for grid_out in self.find(query, *args, **kwargs):
        return grid_out
    return None
def find(self, *args, **kwargs):
    'Query GridFS for files. Returns a cursor that iterates across files matching arbitrary queries on the files collection. Can be combined with other modifiers for additional control.'
    # All arguments are forwarded verbatim to the GridOutCursor constructor.
    return GridOutCursor(self.__collection, *args, **kwargs)
def exists(self, document_or_id=None, **kwargs):
    """Return True if a matching file document exists in this GridFS.

    Keyword arguments take precedence as the query document; otherwise
    *document_or_id* is used (either a query dict or a bare ``_id`` value).
    """
    query = kwargs if kwargs else document_or_id
    return self.__files.find_one(query, ['_id']) is not None
'Create a new instance of :class:`GridFSBucket`. Raises :exc:`TypeError` if `database` is not an instance of :class:`~pymongo.database.Database`. Raises :exc:`~pymongo.errors.ConfigurationError` if `write_concern` is not acknowledged. :Parameters: - `database`: database to use. - `bucket_name` (optional): The name of t...
def __init__(self, db, bucket_name='fs', chunk_size_bytes=DEFAULT_CHUNK_SIZE, write_concern=None, read_preference=None):
if (not isinstance(db, Database)): raise TypeError('database must be an instance of Database') wtc = (write_concern if (write_concern is not None) else db.write_concern) if (not wtc.acknowledged): raise ConfigurationError('write concern must be acknowledged') ...
def open_upload_stream(self, filename, chunk_size_bytes=None, metadata=None):
    """Open a GridIn stream for writing a new file named *filename*.

    *chunk_size_bytes* defaults to the bucket's configured chunk size;
    *metadata*, when given, is stored on the file document.
    """
    validate_string('filename', filename)
    chunk_size = chunk_size_bytes
    if chunk_size is None:
        chunk_size = self._chunk_size_bytes
    opts = {'filename': filename, 'chunk_size': chunk_size}
    if metadata is not None:
        opts['metadata'] = metadata
    return GridIn(self._collection, **opts)
def open_upload_stream_with_id(self, file_id, filename, chunk_size_bytes=None, metadata=None):
    """Open a GridIn stream for a new file with a caller-chosen *file_id*.

    *chunk_size_bytes* defaults to the bucket's configured chunk size;
    *metadata*, when given, is stored on the file document.
    """
    validate_string('filename', filename)
    chunk_size = chunk_size_bytes
    if chunk_size is None:
        chunk_size = self._chunk_size_bytes
    opts = {'_id': file_id, 'filename': filename, 'chunk_size': chunk_size}
    if metadata is not None:
        opts['metadata'] = metadata
    return GridIn(self._collection, **opts)
def upload_from_stream(self, filename, source, chunk_size_bytes=None, metadata=None):
    """Write *source* (bytes or a file-like object) to a new GridFS file.

    Returns the ``_id`` of the uploaded file; the stream is closed on exit
    from the ``with`` block.
    """
    stream = self.open_upload_stream(filename, chunk_size_bytes, metadata)
    with stream as gin:
        gin.write(source)
    return gin._id