code
stringlengths
4
4.48k
docstring
stringlengths
1
6.45k
_id
stringlengths
24
24
def detectBlobs(self, col, row, globalize=False): <NEW_LINE> <INDENT> padded_tile = self.getPaddedTile(col, row) <NEW_LINE> blobs = self.detection_method(padded_tile) <NEW_LINE> pad = self.default_padding <NEW_LINE> outside = [] <NEW_LINE> for i, blob in enumerate(blobs): <NEW_LINE> <INDENT> if min(blob[0:2]) < pad or blob[0] >= (self.tileh+pad) or blob[1] >= (self.tilew+pad): <NEW_LINE> <INDENT> outside.append(i) <NEW_LINE> <DEDENT> <DEDENT> blobs = np.delete(blobs, outside, 0) <NEW_LINE> blobs[:, 0:2] -= pad <NEW_LINE> if globalize: <NEW_LINE> <INDENT> blobs[:, 1] += col * self.tilew <NEW_LINE> blobs[:, 0] += row * self.tileh <NEW_LINE> <DEDENT> print('Blobs found:', blobs.shape[0]) <NEW_LINE> return blobs
Run detection and return array of detected blobs for the specified tile :param col: :param row: :param globalize: if true, blobs will be returned with global coordinates :return:
625941bf29b78933be1e55e7
def get_absolute_url(self): <NEW_LINE> <INDENT> return reverse('resource_detail', args=(self.slug, ))
Get detail url.
625941bf462c4b4f79d1d607
def prepare_batch_input(insts, src_pad_idx, trg_pad_idx, n_head): <NEW_LINE> <INDENT> def __pad_batch_data(insts, pad_idx, is_target=False, return_pos=True, return_attn_bias=True, return_max_len=True): <NEW_LINE> <INDENT> return_list = [] <NEW_LINE> max_len = max(len(inst) for inst in insts) <NEW_LINE> inst_data = np.array( [inst + [pad_idx] * (max_len - len(inst)) for inst in insts]) <NEW_LINE> return_list += [inst_data.astype("int64").reshape([-1, 1])] <NEW_LINE> if return_pos: <NEW_LINE> <INDENT> inst_pos = np.array([[ pos_i + 1 if w_i != pad_idx else 0 for pos_i, w_i in enumerate(inst) ] for inst in inst_data]) <NEW_LINE> return_list += [inst_pos.astype("int64").reshape([-1, 1])] <NEW_LINE> <DEDENT> if return_attn_bias: <NEW_LINE> <INDENT> if is_target: <NEW_LINE> <INDENT> slf_attn_bias_data = np.ones((inst_data.shape[0], max_len, max_len)) <NEW_LINE> slf_attn_bias_data = np.triu(slf_attn_bias_data, 1).reshape( [-1, 1, max_len, max_len]) <NEW_LINE> slf_attn_bias_data = np.tile(slf_attn_bias_data, [1, n_head, 1, 1]) * [-1e9] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> slf_attn_bias_data = np.array([[0] * len(inst) + [-1e9] * (max_len - len(inst)) for inst in insts]) <NEW_LINE> slf_attn_bias_data = np.tile( slf_attn_bias_data.reshape([-1, 1, 1, max_len]), [1, n_head, max_len, 1]) <NEW_LINE> <DEDENT> return_list += [slf_attn_bias_data.astype("float32")] <NEW_LINE> <DEDENT> if return_max_len: <NEW_LINE> <INDENT> return_list += [max_len] <NEW_LINE> <DEDENT> return return_list if len(return_list) > 1 else return_list[0] <NEW_LINE> <DEDENT> src_word, src_pos, src_slf_attn_bias, src_max_len = __pad_batch_data( [inst[0] for inst in insts], src_pad_idx, is_target=False) <NEW_LINE> trg_word, trg_pos, trg_slf_attn_bias, trg_max_len = __pad_batch_data( [inst[1] for inst in insts], trg_pad_idx, is_target=True) <NEW_LINE> trg_src_attn_bias = np.tile(src_slf_attn_bias[:, :, ::src_max_len, :], [1, 1, trg_max_len, 1]).astype("float32") <NEW_LINE> lbl_word = 
__pad_batch_data([inst[2] for inst in insts], trg_pad_idx, False, False, False, False) <NEW_LINE> lbl_weight = (lbl_word != trg_pad_idx).astype("float32").reshape([-1, 1]) <NEW_LINE> return [ src_word, src_pos, trg_word, trg_pos, src_slf_attn_bias, trg_slf_attn_bias, trg_src_attn_bias, lbl_word, lbl_weight ]
Pad the instances to the max sequence length in batch, and generate the corresponding position data and attention bias. Then, convert the numpy data to tensors and return a dict mapping names to tensors.
625941bfa934411ee37515ca
def get_applied_coupons(self, code, **kwargs): <NEW_LINE> <INDENT> kwargs['_return_http_data_only'] = True <NEW_LINE> if kwargs.get('callback'): <NEW_LINE> <INDENT> return self.get_applied_coupons_with_http_info(code, **kwargs) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> (data) = self.get_applied_coupons_with_http_info(code, **kwargs) <NEW_LINE> return data
Returns a list of unique coupons which have been applied. { "nickname":"Retrieve used unique coupons","response":"getAppliedCodesForCode.html"} This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_applied_coupons(code, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str code: The base code to use in the generation of the unique codes. (required) :param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls. :param int offset: The offset from the first subscription to return. :param int records: The maximum number of subscriptions to return. :param str order_by: Specify a field used to order the result set. :param str order: Ihe direction of any ordering, either ASC or DESC. :param bool include_retired: Whether retired subscriptions should be returned. :return: CouponPagedMetadata If the method is called asynchronously, returns the request thread.
625941bf283ffb24f3c5583b
def test_modify_job_pillar(self): <NEW_LINE> <INDENT> schedule = {'foo': 'bar'} <NEW_LINE> ret = copy.deepcopy(self.schedule.opts) <NEW_LINE> ret.update({'pillar': {'schedule': {'name': {'foo': 'bar'}}}}) <NEW_LINE> self.schedule.opts.update({'pillar': {'schedule': {'name': {'foo': 'bar'}}}}) <NEW_LINE> Schedule.modify_job(self.schedule, 'name', schedule, persist=False, where='pillar') <NEW_LINE> self.assertEqual(self.schedule.opts, ret)
Tests modifying a job in the scheduler in pillar
625941bf6e29344779a6254b
def __init__(self): <NEW_LINE> <INDENT> self.tlock = threading.Lock()
Return a new Lock instance.
625941bf8a349b6b435e80aa
def new(self, item, count, customer, deadline, destination, comment): <NEW_LINE> <INDENT> time = dt.datetime.now(tz=MSK_TZ).strftime("%d.%m.%Y %H:%M:%S") <NEW_LINE> self.last_id += 1 <NEW_LINE> order_id = "{:05}".format(self.last_id) <NEW_LINE> body = { 'values': [[order_id, item[0], item[1], count, customer, time, '', '', '', '', deadline, destination, comment]] } <NEW_LINE> query = self.service.spreadsheets().values().append(spreadsheetId=self.spreadsheet_id, range=self.range, body=body, valueInputOption="RAW") <NEW_LINE> result = query.execute() <NEW_LINE> return order_id
:param item: Item code :param count: Items count :param customer: Customer name :param deadline: Deadline date :param destination: Destination school :param comment: Purpose for order :return:
625941bfc432627299f04b7b
def mock_method(name): <NEW_LINE> <INDENT> def decorator(func): <NEW_LINE> <INDENT> setattr(func, mock_method.attr_name, name) <NEW_LINE> return func <NEW_LINE> <DEDENT> setattr(decorator, mock_method.decorator_attr_name, True) <NEW_LINE> return decorator
Decorator to "register" a mock method. `name` is required. Unlike `remote_method`, and `Mock.method`, this doesn't immediately add functions to a `Registry` - instead, it sets an attribute on the function, allowing such functions to be recognised when the test case is instantiated. Example: class MyTestCase(ZorpTestCase): @mock_method("ping") def ping(self): return "pong" print MyTestCase()._get_zorp_mock_methods()
625941bf379a373c97cfaa7a
def unload(self): <NEW_LINE> <INDENT> for action in self.actions: <NEW_LINE> <INDENT> self.iface.removePluginWebMenu( self.tr(u'&OSM Data'), action) <NEW_LINE> self.iface.removeToolBarIcon(action) <NEW_LINE> <DEDENT> del self.toolbar
Removes the plugin menu item and icon from QGIS GUI.
625941bf3eb6a72ae02ec40d
def OriginalOwnersFiles(self): <NEW_LINE> <INDENT> def owners_file_filter(f): <NEW_LINE> <INDENT> return 'OWNERS' in os.path.split(f.LocalPath())[1] <NEW_LINE> <DEDENT> files = self.AffectedFiles(file_filter=owners_file_filter) <NEW_LINE> return dict([(f.LocalPath(), f.OldContents()) for f in files])
A map from path names of affected OWNERS files to their old content.
625941bfd486a94d0b98e07c
def context_switches(self): <NEW_LINE> <INDENT> return self._css
Returns a dictionary of context switches.
625941bf50812a4eaa59c25b
def set_checkpoint_image(self, ckptin): <NEW_LINE> <INDENT> self.add_input_file(ckptin) <NEW_LINE> self.add_var_opt('_condor_restart', ckptin, short=True)
Adds the argument -_condor_restart for the cases in which we want to checkpoint and grabs the checkpoint image name from get_checkpoint_image in the InspiralAnalysisCode section.
625941bf6aa9bd52df036cda
def get_protein_sequence(self, seq_dict): <NEW_LINE> <INDENT> offset = find_offset(self.exon_frames, self.strand) <NEW_LINE> cds = self.get_cds(seq_dict) <NEW_LINE> if len(cds) < 3: <NEW_LINE> <INDENT> return "" <NEW_LINE> <DEDENT> return translate_sequence(cds[offset:].upper())
Returns the translated protein sequence for this transcript in single character space. Overrides this function in the Transcript class to make use of frame information.
625941bf07d97122c41787bd
def set_background(self, background): <NEW_LINE> <INDENT> self.background = background
Set image to use for background subtraction
625941bf5fc7496912cc38b5
def check_sched_params(*args): <NEW_LINE> <INDENT> hypervisor, dicts, guestname, domobj = args <NEW_LINE> sched_dict = {} <NEW_LINE> if hypervisor == "xen": <NEW_LINE> <INDENT> cmd = "xm sched-credit -d %s" % guestname <NEW_LINE> out = process.system_output(cmd, shell=True, ignore_status=True) <NEW_LINE> sched_dict = eval(out) <NEW_LINE> if sched_dict['weight'] == dicts['weight'] and sched_dict['cap'] == dicts['cap']: <NEW_LINE> <INDENT> return 0 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return 1 <NEW_LINE> <DEDENT> <DEDENT> if hypervisor == "kvm": <NEW_LINE> <INDENT> sched_dict = domobj.schedulerParameters() <NEW_LINE> if sched_dict['cpu_shares'] == dicts['cpu_shares']: <NEW_LINE> <INDENT> return 0 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return 1
Check scheduler parameters validity after setting
625941bf16aa5153ce3623af
def letterCombinations(self, digits): <NEW_LINE> <INDENT> if digits == "": <NEW_LINE> <INDENT> return [] <NEW_LINE> <DEDENT> mapping = {"1" : [], "2" : ["a", "b", "c"], "3" : ["d", "e", "f"], "4" : ["g", "h", "i"], "5" : ["j", "k", "l"], "6" : ["m", "n", "o"], "7" : ["p", "q", "r", "s"], "8" : ["t", "u", "v"], "9" : ["w", "x", "y", "z"], } <NEW_LINE> lst = [""] <NEW_LINE> for num in digits: <NEW_LINE> <INDENT> new = [] <NEW_LINE> for comb in lst: <NEW_LINE> <INDENT> for letter in mapping[num]: <NEW_LINE> <INDENT> new.append(comb + letter) <NEW_LINE> <DEDENT> <DEDENT> lst = new <NEW_LINE> <DEDENT> return lst
:type digits: str :rtype: List[str]
625941bfdc8b845886cb546b
def read(self, name): <NEW_LINE> <INDENT> raise NotImplementedError
Read the contents of a file. If a file with the given `name` does not exist then a :exc:`storage.exc.FileNotFoundError` exception is raised. :argument name: the name of the file to retrieve :returns: a string representing the file contents
625941bff8510a7c17cf9632
@user_blueprint.route("/follow_user", methods=["POST"]) <NEW_LINE> @jwt_required() <NEW_LINE> def follow_user(): <NEW_LINE> <INDENT> current_user = get_jwt_identity() <NEW_LINE> data = request.get_json(force=True) <NEW_LINE> unfollow = False if data["action"] == "follow" else True <NEW_LINE> status = database_client.follow_user( current_user["user_id"], data["user_id"], unfollow ) <NEW_LINE> if status == 200: <NEW_LINE> <INDENT> return {"result": "success"}, 200 <NEW_LINE> <DEDENT> return {"result": "err"}, 430
User that is signed in will follow/unfollow other user
625941bf3d592f4c4ed1cfab
def test_redirect_when_account_created_to_homepage(self): <NEW_LINE> <INDENT> response = self.client.post('/register/', {'username': 'seiph', 'first_name': 'Jean', 'last_name': 'Robert', 'email': 'jbr@aol.com', 'password1': 'kevin1234', 'password2': 'kevin1234'}) <NEW_LINE> self.assertEqual(response.status_code, 302) <NEW_LINE> self.assertTemplateUsed('welcome/index.html')
test redirection when the user create his account
625941bf66656f66f7cbc0e1
def snap( self ): <NEW_LINE> <INDENT> if self.fileset == '': <NEW_LINE> <INDENT> snapname = time.strftime("%Y%m%d") + self.snap_name_separator + time.strftime("%H%M") <NEW_LINE> ( rc, cmd_out, cmd_err ) = execute_command( "/usr/lpp/mmfs/bin/mmcrsnapshot {0} {1}".format( self.gpfsdev, snapname ) ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> snapname = self.fileset + self.snap_name_separator + time.strftime("%Y%m%d") + self.snap_name_separator + time.strftime("%H%M") <NEW_LINE> ( rc, cmd_out, cmd_err ) = execute_command( "/usr/lpp/mmfs/bin/mmcrsnapshot {0} {1} -j {2}".format( self.gpfsdev, snapname, self.fileset ) ) <NEW_LINE> <DEDENT> return ( rc, cmd_out, cmd_err )
This code will create a snapshot of the specified filesystem or fileset. NOTE: You can NOT mix filesystem and fileset snapshots on the same GPFS device. Filesystem snapshots are named: CCYYMMDD==HHMM for easy sorting / processing. Filesystem snapshots are named: <Fileset>==CCYYMMDD==HHMM for easy processing again.
625941bf63d6d428bbe44426
def _verify_signal_packet(self, signal_packet): <NEW_LINE> <INDENT> errors.check_type(signal_packet, dict) <NEW_LINE> if len(signal_packet.keys()) > 1: <NEW_LINE> <INDENT> raise ValueError('signal_packet base-level should contain only' + ' the pipe hash identifier as key') <NEW_LINE> <DEDENT> hkey = signal_packet.keys()[0] <NEW_LINE> errors.check_has_key(signal_packet[hkey], 'data') <NEW_LINE> errors.check_has_key(signal_packet[hkey], 'meta') <NEW_LINE> errors.check_has_key(signal_packet[hkey]['meta'], 'ax_0') <NEW_LINE> errors.check_has_key(signal_packet[hkey]['meta'], 'ax_1') <NEW_LINE> errors.check_has_key(signal_packet[hkey]['meta']['ax_0'], 'label') <NEW_LINE> errors.check_has_key(signal_packet[hkey]['meta']['ax_0'], 'index') <NEW_LINE> errors.check_has_key(signal_packet[hkey]['meta']['ax_1'], 'label') <NEW_LINE> errors.check_has_key(signal_packet[hkey]['meta']['ax_1'], 'index') <NEW_LINE> errors.check_type(signal_packet[hkey]['data'], np.ndarray) <NEW_LINE> errors.check_type(signal_packet[hkey]['meta']['ax_0']['label'], str) <NEW_LINE> errors.check_type(signal_packet[hkey]['meta']['ax_0']['index'], np.ndarray) <NEW_LINE> errors.check_type(signal_packet[hkey]['meta']['ax_1']['label'], str) <NEW_LINE> errors.check_type(signal_packet[hkey]['meta']['ax_1']['index'], np.ndarray)
Ensure signal packet is organized properly
625941bf3cc13d1c6d3c72b2
def parse_xml(xml_path): <NEW_LINE> <INDENT> with open(xml_path, 'rb') as f: <NEW_LINE> <INDENT> xml_dict = xmltodict.parse(f) <NEW_LINE> bndboxs = list() <NEW_LINE> objects = xml_dict['annotation']['object'] <NEW_LINE> if isinstance(objects, list): <NEW_LINE> <INDENT> for obj in objects: <NEW_LINE> <INDENT> obj_name = obj['name'] <NEW_LINE> difficult = int(obj['difficult']) <NEW_LINE> if 'car'.__eq__(obj_name) and difficult != 1: <NEW_LINE> <INDENT> bndbox = obj['bndbox'] <NEW_LINE> bndboxs.append((int(bndbox['xmin']), int(bndbox['ymin']), int(bndbox['xmax']), int(bndbox['ymax']))) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> elif isinstance(objects, dict): <NEW_LINE> <INDENT> obj_name = objects['name'] <NEW_LINE> difficult = int(objects['difficult']) <NEW_LINE> if 'car'.__eq__(obj_name) and difficult != 1: <NEW_LINE> <INDENT> bndbox = objects['bndbox'] <NEW_LINE> bndboxs.append((int(bndbox['xmin']), int(bndbox['ymin']), int(bndbox['xmax']), int(bndbox['ymax']))) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> return np.array(bndboxs)[0]
解析xml文件,返回标注边界框坐标
625941bf4e4d5625662d4312
def setMidiText(ln): <NEW_LINE> <INDENT> if not ln: <NEW_LINE> <INDENT> error("Use: MidiText text") <NEW_LINE> <DEDENT> gbl.mtrks[0].addText(gbl.tickOffset, ' '.join(ln))
Set midi text into meta track.
625941bfd8ef3951e3243474
def test_generate_tablename_numeric(self): <NEW_LINE> <INDENT> foo = ANumericEnded1(foo="test") <NEW_LINE> self.assertEqual(foo.tablename, "anumericended1")
Check if table name ended by a number is not changed.
625941bfd99f1b3c44c674cc
def add_variable(self, name, shape, dtype=None, initializer=None, regularizer=None, trainable=True): <NEW_LINE> <INDENT> if dtype is None: <NEW_LINE> <INDENT> dtype = self.dtype <NEW_LINE> <DEDENT> existing_variables = set(tf_variables.global_variables()) <NEW_LINE> self._set_scope(None) <NEW_LINE> with vs.variable_scope(self._scope, reuse=self.built or self._reuse) as scope: <NEW_LINE> <INDENT> with ops.name_scope(scope.original_name_scope): <NEW_LINE> <INDENT> variable = vs.get_variable(name, shape=shape, initializer=initializer, dtype=dtypes.as_dtype(dtype), trainable=trainable and self.trainable) <NEW_LINE> if variable in existing_variables: <NEW_LINE> <INDENT> return variable <NEW_LINE> <DEDENT> if regularizer: <NEW_LINE> <INDENT> if isinstance(variable, tf_variables.PartitionedVariable): <NEW_LINE> <INDENT> for v in variable: <NEW_LINE> <INDENT> with ops.colocate_with(v.op): <NEW_LINE> <INDENT> with ops.name_scope(name + '/Regularizer'): <NEW_LINE> <INDENT> regularization = regularizer(v) <NEW_LINE> <DEDENT> <DEDENT> if regularization is not None: <NEW_LINE> <INDENT> self.add_loss(regularization) <NEW_LINE> _add_elements_to_collection( regularization, ops.GraphKeys.REGULARIZATION_LOSSES) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> with ops.colocate_with(variable.op): <NEW_LINE> <INDENT> with ops.name_scope(name + '/Regularizer'): <NEW_LINE> <INDENT> regularization = regularizer(variable) <NEW_LINE> <DEDENT> <DEDENT> if regularization is not None: <NEW_LINE> <INDENT> self.add_loss(regularization) <NEW_LINE> _add_elements_to_collection( regularization, ops.GraphKeys.REGULARIZATION_LOSSES) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> <DEDENT> if trainable: <NEW_LINE> <INDENT> self._trainable_weights.append(variable) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self._non_trainable_weights.append(variable) <NEW_LINE> <DEDENT> return variable
Adds a new variable to the layer. Arguments: name: variable name. shape: variable shape. dtype: The type of the variable. Defaults to `self.dtype`. initializer: initializer instance (callable). regularizer: regularizer instance (callable). trainable: whether the variable should be part of the layer's "trainable_variables" (e.g. variables, biases) or "non_trainable_variables" (e.g. BatchNorm mean, stddev). Returns: The created variable.
625941bf26238365f5f0eda2
def run(file, *args): <NEW_LINE> <INDENT> start_hint = "" <NEW_LINE> if args: <NEW_LINE> <INDENT> start_hint = "I started it as:\n\n" + code( file + " " + " ".join(shlex.quote(a) for a in args) ) <NEW_LINE> args = ["--"] + list(args) <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> proc = subprocess.run( [ "python3", "-m", "friendly", "--formatter", "correction_helper.friendly_traceback_markdown", file, *args, ], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.DEVNULL, universal_newlines=True, check=True, ) <NEW_LINE> <DEDENT> except subprocess.CalledProcessError as err: <NEW_LINE> <INDENT> stdout = stderr = "" <NEW_LINE> if err.stdout: <NEW_LINE> <INDENT> if len(err.stdout) > 1_000: <NEW_LINE> <INDENT> stdout = ( f"Your code printed {len(err.stdout)} " "characters before being interrupted:\n\n" + code(err.stdout[:256] + "\n…truncated…\n" + err.stdout[-256:]) ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> stdout = "Your code printed:\n\n" + code(err.stdout) <NEW_LINE> <DEDENT> <DEDENT> if err.stderr: <NEW_LINE> <INDENT> if len(err.stderr) > 1_000: <NEW_LINE> <INDENT> stderr = ( f"Your code printed {len(err.stderr)} " "characters on stderr before being interrupted:\n\n" + code(err.stderr[:256] + "\n…truncated…\n" + err.stderr[-256:]) ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> stderr = "Found this on stderr:\n\n" + code(err.stderr) <NEW_LINE> <DEDENT> <DEDENT> if err.returncode == -9: <NEW_LINE> <INDENT> fail( "I had to halt your program, sorry...", "It were either too slow, or consuming too much resources.", "Check for an infinite loop maybe?", start_hint, stdout, stderr, ) <NEW_LINE> <DEDENT> fail( f"Your program exited with the error code: {err.returncode}.", start_hint, stdout, stderr, ) <NEW_LINE> <DEDENT> except MemoryError: <NEW_LINE> <INDENT> fail( "Your program is eating up all the memory! 
Check for infinite loops maybe?", start_hint, ) <NEW_LINE> <DEDENT> if proc.stderr: <NEW_LINE> <INDENT> if "EOF when reading a line" in proc.stderr and "input" in Path(file).read_text( encoding="UTF-8" ): <NEW_LINE> <INDENT> fail( "Don't use the `input` builtin, there's no human to interact with here." ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> fail(proc.stderr) <NEW_LINE> <DEDENT> <DEDENT> return proc.stdout.rstrip()
subprocess.run wrapper specialized to run Python with friendly.
625941bf4d74a7450ccd40fa
def prime_factors(num: int) -> Dict[Prime, int]: <NEW_LINE> <INDENT> n: Prime = 2 <NEW_LINE> result: Dict[Prime, int] = {} <NEW_LINE> while n <= num: <NEW_LINE> <INDENT> if num % n == 0: <NEW_LINE> <INDENT> num = num // n <NEW_LINE> if n in result: <NEW_LINE> <INDENT> result[n] += 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> result.update({n: 1}) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> n += 1 <NEW_LINE> <DEDENT> <DEDENT> return result
Creates a dictionary of prime factors for a given number. params num: Number to be factored return: Dictionary of where the keys are prime factors and the values are their respective powers. Example: 36 = 2^2 * 3^2 >>> prime_factors(36) {2: 2, 3: 2,}
625941bf596a8972360899fa
def _did_create(self, conn, resp): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> if resp.getType() == "result": <NEW_LINE> <INDENT> log.info("PUBSUB: pubsub node %s has been created" % self.nodename) <NEW_LINE> return self.recover(wait=True) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> log.error("PUBSUB: can't create pubsub: %s" % str(resp)) <NEW_LINE> return False <NEW_LINE> <DEDENT> <DEDENT> except Exception as ex: <NEW_LINE> <INDENT> log.error("PUBSUB: unable to create pubsub node: %s" % str(ex))
called after pubsub creation
625941bf8e05c05ec3eea2a9
def getpath_event(): <NEW_LINE> <INDENT> ici = os.getcwd() <NEW_LINE> path_event = check_slash(os.path.realpath(ici)) <NEW_LINE> return path_event
Return the path of the event directory. :return: path of event directory :rtype: string
625941bf4c3428357757c261
def init_handle(self): <NEW_LINE> <INDENT> self.c.setopt(pycurl.FOLLOWLOCATION, 1) <NEW_LINE> self.c.setopt(pycurl.MAXREDIRS, 10) <NEW_LINE> self.c.setopt(pycurl.CONNECTTIMEOUT, 30) <NEW_LINE> self.c.setopt(pycurl.NOSIGNAL, 1) <NEW_LINE> self.c.setopt(pycurl.NOPROGRESS, 1) <NEW_LINE> if hasattr(pycurl, "AUTOREFERER"): <NEW_LINE> <INDENT> self.c.setopt(pycurl.AUTOREFERER, 1) <NEW_LINE> <DEDENT> self.c.setopt(pycurl.SSL_VERIFYPEER, 0) <NEW_LINE> self.c.setopt(pycurl.LOW_SPEED_TIME, 60) <NEW_LINE> self.c.setopt(pycurl.LOW_SPEED_LIMIT, 5) <NEW_LINE> if hasattr(pycurl, "USE_SSL"): <NEW_LINE> <INDENT> self.c.setopt(pycurl.USE_SSL, pycurl.USESSL_TRY) <NEW_LINE> <DEDENT> self.c.setopt( pycurl.USERAGENT, b"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36", ) <NEW_LINE> if pycurl.version_info()[7]: <NEW_LINE> <INDENT> self.c.setopt(pycurl.ENCODING, b"gzip, deflate") <NEW_LINE> <DEDENT> self.c.setopt( pycurl.HTTPHEADER, [ b"Accept: */*", b"Accept-Language: en-US,en", b"Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7", b"Connection: keep-alive", b"Keep-Alive: 300", b"Expect:", ], )
sets common options to curl handle.
625941bf23849d37ff7b2fc8
def build(): <NEW_LINE> <INDENT> includes = ['handlers', 'resources', 'templates', '*.py'] <NEW_LINE> excludes = ['test', '.*', '*.pyc', '*.pyo'] <NEW_LINE> local('rm -f dist/%s' % _TAR_FILE) <NEW_LINE> with lcd(os.path.join(_current_path(), 'www')): <NEW_LINE> <INDENT> cmd = ['tar', '--dereference', '-czvf', '../dist/%s' % _TAR_FILE] <NEW_LINE> cmd.extend(['--exclude=\'%s\'' % ex for ex in excludes]) <NEW_LINE> cmd.extend(includes) <NEW_LINE> local(' '.join(cmd))
Build dist package.
625941bf287bf620b61d399d
def on_update_folder(self, change): <NEW_LINE> <INDENT> folder_list = list(self.folder_sel.cur_dir.glob("*")) <NEW_LINE> folder_list = [x for x in folder_list if x.is_dir()] <NEW_LINE> folder_list = [x for x in folder_list if not re.search("\..*", x.stem)] <NEW_LINE> experiment = pd.concat( [ pd.DataFrame( pd.read_pickle(x.joinpath("Result/results.pkl").as_posix()) ).T for x in folder_list ] ) <NEW_LINE> experiment = experiment.reset_index() <NEW_LINE> experiment = experiment.rename(columns={"index": "filename"}) <NEW_LINE> experiment["exp_name"] = experiment.filename.apply( lambda x: re.findall(".*?_(.*)?-\d_d\d.*", x)[0] ) <NEW_LINE> experiment["repl_ind"] = experiment.filename.apply( lambda x: int(re.findall(".*-(\d)_d\d.*", x)[0]) ) <NEW_LINE> experiment["day"] = experiment.filename.apply( lambda x: int(re.findall(".*-\d_d(\d).*", x)[0]) ) <NEW_LINE> self.experiment = experiment <NEW_LINE> self.exp_select.options = np.unique(experiment["exp_name"])
Load analysis files for all folders present in current folder
625941bfcc0a2c11143dcdc8
def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01): <NEW_LINE> <INDENT> self.params = {} <NEW_LINE> self.params['W1'] = weight_init_std * np.random.randn(input_size, hidden_size) <NEW_LINE> self.params['b1'] = np.zeros(hidden_size) <NEW_LINE> self.params['W2'] = weight_init_std * np.random.randn(hidden_size, output_size) <NEW_LINE> self.params['b2'] = np.zeros(output_size)
初期化を行う input_size: 入力層のニューロンの数 hidden_size: 隠れ層のニューロンの数 output_size: 出力層のニューロンの数 変数 params: ニューラルネットワークのパラメータを保持するディクショナリ変数 grads: 勾配を保持するディクショナリ変数
625941bf0c0af96317bb8120
def get_transcripts_from_youtube(youtube_id): <NEW_LINE> <INDENT> utf8_parser = etree.XMLParser(encoding='utf-8') <NEW_LINE> youtube_api = copy.deepcopy(settings.YOUTUBE_API) <NEW_LINE> youtube_api['params']['v'] = youtube_id <NEW_LINE> data = requests.get(youtube_api['url'], params=youtube_api['params']) <NEW_LINE> if data.status_code != 200 or not data.text: <NEW_LINE> <INDENT> msg = _("Can't receive transcripts from Youtube for {youtube_id}. Status code: {statuc_code}.").format( youtube_id=youtube_id, statuc_code=data.status_code ) <NEW_LINE> raise GetTranscriptsFromYouTubeException(msg) <NEW_LINE> <DEDENT> sub_starts, sub_ends, sub_texts = [], [], [] <NEW_LINE> xmltree = etree.fromstring(data.content, parser=utf8_parser) <NEW_LINE> for element in xmltree: <NEW_LINE> <INDENT> if element.tag == "text": <NEW_LINE> <INDENT> start = float(element.get("start")) <NEW_LINE> duration = float(element.get("dur", 0)) <NEW_LINE> text = element.text <NEW_LINE> end = start + duration <NEW_LINE> if text: <NEW_LINE> <INDENT> sub_starts.append(int(start * 1000)) <NEW_LINE> sub_ends.append(int((end + 0.0001) * 1000)) <NEW_LINE> sub_texts.append(text.replace('\n', ' ')) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return {'start': sub_starts, 'end': sub_ends, 'text': sub_texts}
Gets transcripts from youtube for youtube_id. Parses only utf-8 encoded transcripts. Other encodings are not supported at the moment. Returns (status, transcripts): bool, dict.
625941bf55399d3f055885eb
@app.route('/story-form-1') <NEW_LINE> def go_form_1(): <NEW_LINE> <INDENT> return render_template("story-1-form.html")
Navigate to story 1 entry form
625941bf50812a4eaa59c25c
def update(self, chart=None): <NEW_LINE> <INDENT> if chart is not None: <NEW_LINE> <INDENT> self._chart = chart <NEW_LINE> self._edgelevels = [] <NEW_LINE> self._marks = {} <NEW_LINE> self._analyze() <NEW_LINE> self._grow() <NEW_LINE> self.draw() <NEW_LINE> self.erase_tree() <NEW_LINE> self._resize() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> for edge in self._chart: <NEW_LINE> <INDENT> if edge not in self._edgetags: <NEW_LINE> <INDENT> self._add_edge(edge) <NEW_LINE> <DEDENT> <DEDENT> self._resize()
Draw any edges that have not been drawn. This is typically called when a after modifies the canvas that a CanvasView is displaying. C{update} will cause any edges that have been added to the chart to be drawn. If update is given a C{chart} argument, then it will replace the current chart with the given chart.
625941bfbde94217f3682d2b
def flatten(self, root): <NEW_LINE> <INDENT> if not root: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> if root.right: <NEW_LINE> <INDENT> self.flatten(root.right) <NEW_LINE> <DEDENT> if root.left: <NEW_LINE> <INDENT> save = root.right <NEW_LINE> self.flatten(root.left) <NEW_LINE> p = root.left <NEW_LINE> while p.right: <NEW_LINE> <INDENT> p = p.right <NEW_LINE> <DEDENT> root.right = root.left <NEW_LINE> root.left = None <NEW_LINE> p.right = save
:type root: TreeNode :rtype: void Do not return anything, modify root in-place instead.
625941bf3c8af77a43ae36d6
def calculate_precision_recall_all_images( all_prediction_boxes, all_gt_boxes, iou_threshold): <NEW_LINE> <INDENT> num_tp = 0 <NEW_LINE> num_fp = 0 <NEW_LINE> num_fn = 0 <NEW_LINE> all_boxes = zip(all_prediction_boxes, all_gt_boxes) <NEW_LINE> for p, gt in all_boxes: <NEW_LINE> <INDENT> image_result = calculate_individual_image_result(p, gt, iou_threshold) <NEW_LINE> num_tp += image_result['true_pos'] <NEW_LINE> num_fp += image_result['false_pos'] <NEW_LINE> num_fn += image_result['false_neg'] <NEW_LINE> <DEDENT> precision = calculate_precision(num_tp, num_fp, num_fn) <NEW_LINE> recall = calculate_recall(num_tp, num_fp, num_fn) <NEW_LINE> return (precision, recall)
Given a set of prediction boxes and ground truth boxes for all images, calculates recall and precision over all images. NB: all_prediction_boxes and all_gt_boxes are not matched! Args: all_prediction_boxes: (list of np.array of floats): each element in the list is a np.array containing all predicted bounding boxes for the given image with shape: [number of predicted boxes, 4]. Each row includes [xmin, xmax, ymin, ymax] all_gt_boxes: (list of np.array of floats): each element in the list is a np.array containing all ground truth bounding boxes for the given image objects with shape: [number of ground truth boxes, 4]. Each row includes [xmin, xmax, ymin, ymax] Returns: tuple: (precision, recall). Both float.
625941bf0a366e3fb873e750
def plot_chart_ventilation_duration_distribution( values=None, path_directory=None ): <NEW_LINE> <INDENT> fonts = define_font_properties() <NEW_LINE> colors = define_color_properties() <NEW_LINE> path_file = os.path.join( path_directory, "ventilation_duration.svg" ) <NEW_LINE> figure = plot_distribution_histogram( series=values, name="", bin_method="count", bin_count=70, label_bins="duration on ventilation (hours)", label_counts="counts of persons per bin", fonts=fonts, colors=colors, line=False, position=1, text="", ) <NEW_LINE> write_figure( path=path_file, figure=figure ) <NEW_LINE> pass
Plots charts from the analysis process. arguments: values (list<float>): values threshold (float): value of threshold for which to draw line path_directory (str): path to directory raises: returns:
625941bff548e778e58cd4b4
def dot_product(x, kernel): <NEW_LINE> <INDENT> return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)
Wrapper for dot product operation Args: x (): input kernel (): weights Returns:
625941bf656771135c3eb7a4
def _ip_2_bin_ip(self, ip=None): <NEW_LINE> <INDENT> if ip is None: <NEW_LINE> <INDENT> ip = self.ip <NEW_LINE> <DEDENT> bin_ip = ip_2_bin_ip(ip) <NEW_LINE> return bin_ip
convert IP from decimal IP to binary string :param ip: decimal IP in a numpy array :type ip: np.array :return: a binary string of the IP address :rtype: string
625941bf97e22403b379ced1
def _initialize_sensors(client): <NEW_LINE> <INDENT> logging.info("Initializing current sensors") <NEW_LINE> current_sensors = [] <NEW_LINE> for address, name in zip([0x40, 0x41, 0x42, 0x42], ["left_wheel_current", "right_wheel_current", "left_flipper_current", "right_flipper_current"]): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> sensor = CurrentSensor(address=address, name=name) <NEW_LINE> <DEDENT> except I2CSlotEmptyError as e: <NEW_LINE> <INDENT> logging.warning(e) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> current_sensors.append(sensor) <NEW_LINE> <DEDENT> <DEDENT> logging.info("Initializing IMUs") <NEW_LINE> imus = [] <NEW_LINE> for address, name in zip([0x68, 0x69], ["rear_imu", "front_imu"]): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> imu = IMU(address=address, name=name) <NEW_LINE> <DEDENT> except I2CSlotEmptyError as e: <NEW_LINE> <INDENT> logging.warning(e) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> imus.append(imu) <NEW_LINE> <DEDENT> <DEDENT> logging.debug("Registering sensors to client") <NEW_LINE> for sensor in current_sensors: <NEW_LINE> <INDENT> client.add_current_sensor(sensor) <NEW_LINE> <DEDENT> for imu in imus: <NEW_LINE> <INDENT> client.add_imu(imu)
Initialize the sensors.
625941bfd18da76e2353240b
def cal_linear_value(color_band, distances): <NEW_LINE> <INDENT> min_value = min(distances, key=distances.get) <NEW_LINE> max_value = max(distances, key=distances.get) <NEW_LINE> closest_values = sorted(distances, key=distances.get) <NEW_LINE> closest_color = color_band[closest_values[0]] <NEW_LINE> cloest_color_distance = distances[closest_values[0]] <NEW_LINE> second_cloest_color = color_band[closest_values[1]] <NEW_LINE> second_cloest_color_distance = distances[closest_values[1]] <NEW_LINE> direct_distance = colour.delta_E(closest_color, second_cloest_color, method='CIE 2000') <NEW_LINE> if second_cloest_color_distance > direct_distance: <NEW_LINE> <INDENT> return closest_values[0] <NEW_LINE> <DEDENT> print(distances) <NEW_LINE> percent = cloest_color_distance / (cloest_color_distance + second_cloest_color_distance) <NEW_LINE> if closest_values[1] > closest_values[0]: <NEW_LINE> <INDENT> return closest_values[0] + abs(closest_values[1] - closest_values[0]) * percent <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return closest_values[0] - abs(closest_values[1] - closest_values[0]) * percent
calculate linear value from color band
625941bfcad5886f8bd26f12
def write(self, filename, format_='pickle'): <NEW_LINE> <INDENT> if format_ == 'pickle': <NEW_LINE> <INDENT> io.write(self, filename) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise KeyError("Format not supported.")
Exports Simulator object. Parameters ---------- filename : str Name of the Simulator object to be created. format_ : str Available options are 'pickle' and 'hdf5'.
625941bff9cc0f698b140536
def get_all_active_memories(self, memory_list): <NEW_LINE> <INDENT> available_memories = {} <NEW_LINE> active_memory_counter = 0 <NEW_LINE> active_memory = 'ROM' if any('ROM' in mem_list for mem_list in memory_list) else 'RAM' <NEW_LINE> try: <NEW_LINE> <INDENT> cmsis_part = self._get_cmsis_part() <NEW_LINE> <DEDENT> except ConfigException: <NEW_LINE> <INDENT> if getattr(self.target, "mbed_ram_start") and getattr(self.target, "mbed_rom_start"): <NEW_LINE> <INDENT> mem_start = int(getattr(self.target, "mbed_" + active_memory.lower() + "_start"), 0) <NEW_LINE> mem_size = int(getattr(self.target, "mbed_" + active_memory.lower() + "_size"), 0) <NEW_LINE> available_memories[active_memory] = [mem_start, mem_size] <NEW_LINE> return available_memories <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ConfigException("Bootloader not supported on this target. " "ram/rom start/size not found in " "targets.json.") <NEW_LINE> <DEDENT> <DEDENT> present_memories = set(cmsis_part['memory'].keys()) <NEW_LINE> valid_memories = set(memory_list).intersection(present_memories) <NEW_LINE> for memory in valid_memories: <NEW_LINE> <INDENT> mem_start, mem_size = self._get_mem_specs( [memory], cmsis_part, "Not enough information in CMSIS packs to build a bootloader " "project" ) <NEW_LINE> if memory=='IROM1' or memory=='PROGRAM_FLASH': <NEW_LINE> <INDENT> mem_start = getattr(self.target, "mbed_rom_start", False) or mem_start <NEW_LINE> mem_size = getattr(self.target, "mbed_rom_size", False) or mem_size <NEW_LINE> memory = 'ROM' <NEW_LINE> <DEDENT> elif memory == 'IRAM1' or memory == 'SRAM_OC' or memory == 'SRAM_UPPER' or memory == 'SRAM': <NEW_LINE> <INDENT> if (self.has_ram_regions): <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> mem_start = getattr(self.target, "mbed_ram_start", False) or mem_start <NEW_LINE> mem_size = getattr(self.target, "mbed_ram_size", False) or mem_size <NEW_LINE> memory = 'RAM' <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> active_memory_counter += 1 <NEW_LINE> 
memory = active_memory + str(active_memory_counter) <NEW_LINE> <DEDENT> mem_start = int(mem_start, 0) <NEW_LINE> mem_size = int(mem_size, 0) <NEW_LINE> available_memories[memory] = [mem_start, mem_size] <NEW_LINE> <DEDENT> return available_memories
Get information of all available rom/ram memories in the form of dictionary {Memory: [start_addr, size]}. Takes in the argument, a list of all available regions within the ram/rom memory
625941bf73bcbd0ca4b2bfae
def test_simple_arith_if_2(self): <NEW_LINE> <INDENT> tra = tgis.TemporalRasterAlgebraParser(run = True, debug = True) <NEW_LINE> expr = 'R = if(A#A == 1, A - A)' <NEW_LINE> ret = tra.setup_common_granularity(expression=expr, lexer = tgis.TemporalRasterAlgebraLexer()) <NEW_LINE> self.assertEqual(ret, True) <NEW_LINE> tra.parse(expression=expr, basename="r", overwrite=True) <NEW_LINE> D = tgis.open_old_stds("R", type="strds") <NEW_LINE> self.assertEqual(D.metadata.get_number_of_maps(), 6) <NEW_LINE> self.assertEqual(D.metadata.get_min_min(), 0) <NEW_LINE> self.assertEqual(D.metadata.get_max_max(), 0) <NEW_LINE> start, end = D.get_absolute_time() <NEW_LINE> self.assertEqual(start, datetime.datetime(2001, 1, 1)) <NEW_LINE> self.assertEqual(end, datetime.datetime(2001, 7, 1))
Simple arithmetic test with if condition
625941bf63f4b57ef0001057
def _attach_zscores(self): <NEW_LINE> <INDENT> aucs = [] <NEW_LINE> for i, submission in enumerate(self.submissions): <NEW_LINE> <INDENT> report = json.loads(submission['substatus']['report']) <NEW_LINE> auc = report['auc'] <NEW_LINE> aucs.append(auc) <NEW_LINE> <DEDENT> ranks = np.argsort(aucs)[::-1] <NEW_LINE> for i, rank in enumerate(ranks): <NEW_LINE> <INDENT> self.submissions[rank]['ranking'] = i <NEW_LINE> report = json.loads(self.submissions[i]['substatus']['report']) <NEW_LINE> self.submissions[i]['auc'] = report['auc'] <NEW_LINE> self.submissions[i]['zscore'] = report['score']
attach mean zscore, 32 individual zscores
625941bfc4546d3d9de7296a
def t_BINARY(t): <NEW_LINE> <INDENT> t.value = t.value[2:] <NEW_LINE> t.value = t.value[:-1] <NEW_LINE> try: <NEW_LINE> <INDENT> t.value = int(t.value,2) <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> print("ERROR CONVERSION NUMERO %d", t.value) <NEW_LINE> t.value = 0 <NEW_LINE> <DEDENT> return t
b\'[01]+\'
625941bf73bcbd0ca4b2bfaf
def is_number(x): <NEW_LINE> <INDENT> return isinstance(x, numbers.Number)
Check whether the object provided is a number. :param x: object to check :return: True if x is a number
625941bfa8ecb033257d3007
def update_vendor(self): <NEW_LINE> <INDENT> vnd_name = self.source.vendor.name if self.source else '' <NEW_LINE> self.vendorLine.setText(vnd_name)
Manually update the vendor name field. It isn't covered by the data mapper.
625941bfe5267d203edcdbd8
def pickle_save(object_, path, **kwargs): <NEW_LINE> <INDENT> logger.info("pickle object %s to %s", object_, path) <NEW_LINE> with gzip.open(path, "wb") as file: <NEW_LINE> <INDENT> cPickle.dump(object_, file, **kwargs)
Pickle serializing with gzip.
625941bf4a966d76dd550f45
def rbridge_id_snmp_server_v3host_udp_port(**kwargs): <NEW_LINE> <INDENT> config = ET.Element("config") <NEW_LINE> rbridge_id = ET.SubElement(config, "rbridge-id", xmlns="urn:brocade.com:mgmt:brocade-rbridge") <NEW_LINE> if kwargs.pop('delete_rbridge_id', False) is True: <NEW_LINE> <INDENT> delete_rbridge_id = config.find('.//*rbridge-id') <NEW_LINE> delete_rbridge_id.set('operation', 'delete') <NEW_LINE> <DEDENT> rbridge_id_key = ET.SubElement(rbridge_id, "rbridge-id") <NEW_LINE> rbridge_id_key.text = kwargs.pop('rbridge_id') <NEW_LINE> if kwargs.pop('delete_rbridge_id', False) is True: <NEW_LINE> <INDENT> delete_rbridge_id = config.find('.//*rbridge-id') <NEW_LINE> delete_rbridge_id.set('operation', 'delete') <NEW_LINE> <DEDENT> snmp_server = ET.SubElement(rbridge_id, "snmp-server", xmlns="urn:brocade.com:mgmt:brocade-snmp") <NEW_LINE> if kwargs.pop('delete_snmp_server', False) is True: <NEW_LINE> <INDENT> delete_snmp_server = config.find('.//*snmp-server') <NEW_LINE> delete_snmp_server.set('operation', 'delete') <NEW_LINE> <DEDENT> v3host = ET.SubElement(snmp_server, "v3host") <NEW_LINE> if kwargs.pop('delete_v3host', False) is True: <NEW_LINE> <INDENT> delete_v3host = config.find('.//*v3host') <NEW_LINE> delete_v3host.set('operation', 'delete') <NEW_LINE> <DEDENT> hostip_key = ET.SubElement(v3host, "hostip") <NEW_LINE> hostip_key.text = kwargs.pop('hostip') <NEW_LINE> if kwargs.pop('delete_hostip', False) is True: <NEW_LINE> <INDENT> delete_hostip = config.find('.//*hostip') <NEW_LINE> delete_hostip.set('operation', 'delete') <NEW_LINE> <DEDENT> username_key = ET.SubElement(v3host, "username") <NEW_LINE> username_key.text = kwargs.pop('username') <NEW_LINE> if kwargs.pop('delete_username', False) is True: <NEW_LINE> <INDENT> delete_username = config.find('.//*username') <NEW_LINE> delete_username.set('operation', 'delete') <NEW_LINE> <DEDENT> udp_port = ET.SubElement(v3host, "udp-port") <NEW_LINE> if kwargs.pop('delete_udp_port', False) is True: <NEW_LINE> 
<INDENT> delete_udp_port = config.find('.//*udp-port') <NEW_LINE> delete_udp_port.set('operation', 'delete') <NEW_LINE> <DEDENT> udp_port.text = kwargs.pop('udp_port') <NEW_LINE> callback = kwargs.pop('callback', _callback) <NEW_LINE> return callback(config, mgr=kwargs.pop('mgr'))
Auto Generated Code
625941bfd268445f265b4da7
def update_site_forward(apps, schema_editor): <NEW_LINE> <INDENT> Site = apps.get_model('sites', 'Site') <NEW_LINE> Site.objects.update_or_create( id=settings.SITE_ID, defaults={ 'domain': 'football.com', 'name': 'Football Manager' } )
Set site domain and name.
625941bf15baa723493c3eac
def report_ols(formula, data, fit_regularized=False, L1_wt=1, refit=False, **kwargs): <NEW_LINE> <INDENT> RegressionResultsWrapper = statsmodels.regression.linear_model.RegressionResultsWrapper <NEW_LINE> y, X = patsy.dmatrices(formula, data=data, return_type='dataframe') <NEW_LINE> if fit_regularized: <NEW_LINE> <INDENT> results = RegressionResultsWrapper(smf.OLS(endog=y, exog=X,).fit_regularized(start_params=None, L1_wt=L1_wt, refit=refit, **kwargs)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> results = smf.OLS(endog=y, exog=X,).fit(**kwargs) <NEW_LINE> <DEDENT> return results
Fit OLS regression, print a report, and return the fit object.
625941bf8da39b475bd64ea9
def test_enableRemote(self): <NEW_LINE> <INDENT> self.assertFalse(self.protocol.enableRemote('\0'))
L{telnet.Telnet.enableRemote} should reject all options, since L{telnet.Telnet} does not know how to implement any options.
625941bf5e10d32532c5ee5f
def _finish_task(self): <NEW_LINE> <INDENT> self.task_interface.Finish() <NEW_LINE> self._check_task_signals()
Finish a task.
625941bf94891a1f4081b9e0
def get_error(deltas, sums, weights): <NEW_LINE> <INDENT> n, nl = sums.shape <NEW_LINE> delt = (deltas.dot(weights))*sigmoid_prime(sums) <NEW_LINE> otvet = delt.mean(axis = 0) <NEW_LINE> otvet1 = otvet.reshape(nl,1) <NEW_LINE> print(otvet1) <NEW_LINE> return otvet1
compute error on the previous layer of network deltas - ndarray of shape (n, n_{l+1}) sums - ndarray of shape (n, n_l) weights - ndarray of shape (n_{l+1}, n_l) Сигнатура: get_error(deltas, sums, weights), где deltas — ndarray формы (n, nl+1), содержащий в i-й строке значения ошибок для i-го примера из входных данных, sums — ndarray формы (n, nl), содержащий в i-й строке значения сумматорных функций нейронов l-го слоя для i-го примера из входных данных, weights — ndarray формы (nl+1, nl), содержащий веса для перехода между l-м и l+1-м слоем сети. Требуется вернуть вектор δl — ndarray формы (nl, 1); мы не проверяем размер (форму) ответа, но это может помочь вам сориентироваться. Все нейроны в сети — сигмоидальные. Функции sigmoid и sigmoid_prime уже определены.
625941bf6fece00bbac2d675
def IOWwriteReport(self, wdata, start=True, stop=True, addrScheme=1, msg=""): <NEW_LINE> <INDENT> start = time.time() <NEW_LINE> fncname = " {:12s}: {:15s} ".format("IOWwriteReport", msg) <NEW_LINE> lenwdata = len(wdata) <NEW_LINE> if lenwdata > 6: <NEW_LINE> <INDENT> cdprint(fncname + "Programming ERROR in IOWwriteReport: Can't write more than 6 bytes in single report!") <NEW_LINE> sys.exit() <NEW_LINE> <DEDENT> flags = 0x00 <NEW_LINE> if start: flags = flags | 0x80 <NEW_LINE> if stop: flags = flags | 0x40 <NEW_LINE> flags = flags | lenwdata <NEW_LINE> data = wdata + [0x00, 0x00, 0x00, 0x00, 0x00, 0x00] <NEW_LINE> report = IOWKIT_SPECIAL_REPORT( 0x02, flags, data[0], data[1], data[2], data[3], data[4], data[5], ) <NEW_LINE> ikw = iowkit.IowKitWrite(self.iow, self.numPipe, ctypes.byref(report), self.reportSize) <NEW_LINE> duration = 1000 * (time.time() - start) <NEW_LINE> return ikw, report
report ID = 2 write a single report
625941bf5166f23b2e1a5091
def parse_test_interactions(test_file): <NEW_LINE> <INDENT> has_action = re.compile(r"^\#\$\s(input|output|verify)\=(.+$)") <NEW_LINE> interactions = {} <NEW_LINE> with open(test_file, 'r') as file: <NEW_LINE> <INDENT> for line_no, line in enumerate(file.readlines(), start=1): <NEW_LINE> <INDENT> check_line = has_action.match(line) <NEW_LINE> if check_line: <NEW_LINE> <INDENT> interactions[(line_no + 1)] = {"action": check_line.group(1), "value": check_line.group(2)} <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if line.startswith("#$"): <NEW_LINE> <INDENT> exc_msg = [ "Improper interaction syntax on", f"line {line_no} in '{test_file}'", ] <NEW_LINE> raise SyntaxWarning(" ".join(exc_msg)) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> return interactions
Method to parse a test file, and return the necessary interaction information. :param: str test_file: Path to the file to parse. Test interactions are annotated as comments in the test file. Interaction markups should be placed one line above the expected interaction, and must consist of only a single line. The current available markups are as follows: - `#$ input=`: Denotes that the next line requires an input to the REPL. - `#$ output=`: Denotes what the next line's output in the REPL should be. - `#$ verify=`: Denotes a function that exists inside RosiePi to use for verification. The function should be prefixed with the module that contains it. For example: ... code: python #$ input=4 result = input() #$ output=4 print(result) with digitalio.DigitalInOut(board.D0) as mypin: #$ verify=pin_tests.assert_pin_high mypin.switch_to_output(value=True) #$ input= input() # only proceed if previous verify passed
625941bfa219f33f346288a5
def getAllMachines(self, ctx): <NEW_LINE> <INDENT> ret = ComputerManager().getComputersList(ctx) <NEW_LINE> return [Machine(ret[m][1]) for m in ret]
return all declared machines
625941bfa4f1c619b28aff77
def auth(func): <NEW_LINE> <INDENT> @wraps(func) <NEW_LINE> def wapper(who: User, what: Target, check: bool = False): <NEW_LINE> <INDENT> user_roles = who.get_roles() <NEW_LINE> work_roles = [] <NEW_LINE> for role in user_roles: <NEW_LINE> <INDENT> parent_roles = what.get_parent_roles() <NEW_LINE> if OriginalTarget in parent_roles: <NEW_LINE> <INDENT> work_roles.append(role) <NEW_LINE> <DEDENT> elif role in parent_roles: <NEW_LINE> <INDENT> work_roles.append(role) <NEW_LINE> <DEDENT> <DEDENT> if not work_roles: <NEW_LINE> <INDENT> if check: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> raise PermissionError <NEW_LINE> <DEDENT> for role in work_roles: <NEW_LINE> <INDENT> if role.can_or_not(func.__name__): <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> if check: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> raise PermissionError <NEW_LINE> <DEDENT> for role in work_roles: <NEW_LINE> <INDENT> if role.deny_or_not(func.__name__): <NEW_LINE> <INDENT> if check: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> raise PermissionError <NEW_LINE> <DEDENT> <DEDENT> if check: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> return func(who, what) <NEW_LINE> <DEDENT> return wapper
验证
625941bff7d966606f6a9f3a
def voltage_sense_configuration(self, auto_range = True, manual_range = 21, NPLCs = 10): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> if 'KEITHLEY' in self.identity and '2400' in self.identity: <NEW_LINE> <INDENT> buffer = ':SENS:FUNC:CONC ON;:SENS:FUNC:ALL;' <NEW_LINE> if auto_range: <NEW_LINE> <INDENT> buffer += ':SENS:VOLT:RANG:AUTO ON;' <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> buffer += ':SENS:VOLT:RANG:AUTO OFF;:SENS:VOLT:RANG {!s};'.format(manual_range) <NEW_LINE> <DEDENT> buffer += ':SENS:VOLT:NPLC {!s};'.format(NPLCs) <NEW_LINE> self.instrument.write(buffer) <NEW_LINE> self._operation_complete_query_() <NEW_LINE> self._error_query_() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise Errors.IncorrectInstrumentError <NEW_LINE> <DEDENT> <DEDENT> except (Errors.ReportInstrumentInternalError, Errors.IncorrectInstrumentError) as error: <NEW_LINE> <INDENT> error.error_handler()
This function is used to configure the voltage measurement. Parameters: - auto_range = boolean parameter that enables (true) or disables auto voltage ranging (i.e. the instrument is able or not to auto-determine the most accurate measurement range). - manual_range = if auto ranging is disabled,the user can specify the desired manual range here. - NPLCs = measurement integration time (in units of pulse line cycles or PLCs)
625941bfec188e330fd5a6dc
def parse_config(self): <NEW_LINE> <INDENT> self.configraw = [] <NEW_LINE> try: <NEW_LINE> <INDENT> with open(self.configfile) as f: <NEW_LINE> <INDENT> for line in f: <NEW_LINE> <INDENT> linetrim = line.strip() <NEW_LINE> if linetrim and (not linetrim.startswith('#')): <NEW_LINE> <INDENT> self.configraw.append(linetrim) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> except EnvironmentError as exread: <NEW_LINE> <INDENT> self.log.error('Unable to read ban config:' '{}\n{}'.format(self.configfile, exread)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.build_patterns()
Parse the config file contents, if available. Logs errors, sets: self.banpatterns (via self.build_patterns())
625941bfb7558d58953c4e51
def visualize_gui_smplpose_basic(self, smpl, poseParamList, shapeParamList =None, colorList = None, isRotMat = False, scalingFactor=300, waittime =1): <NEW_LINE> <INDENT> zero_betas = torch.from_numpy(np.zeros( (1,10), dtype=np.float32)) <NEW_LINE> default_color = glViewer.g_colorSet['eft'] <NEW_LINE> meshList =[] <NEW_LINE> for i, poseParam in enumerate(poseParamList): <NEW_LINE> <INDENT> if shapeParamList is not None: <NEW_LINE> <INDENT> shapeParam = torch.from_numpy(shapeParamList[i][np.newaxis,:]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> shapeParam = zero_betas <NEW_LINE> <DEDENT> if colorList is not None: <NEW_LINE> <INDENT> color = colorList[i] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> color = default_color <NEW_LINE> <DEDENT> poseParam_tensor = torch.from_numpy( poseParam[np.newaxis,:]).float() <NEW_LINE> if isRotMat: <NEW_LINE> <INDENT> pred_output = smpl(betas=shapeParam, body_pose=poseParam_tensor[:,1:], global_orient=poseParam_tensor[:,[0]], pose2rot=False) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> pred_output = smpl(betas=shapeParam, body_pose=poseParam_tensor[:,3:], global_orient=poseParam_tensor[:,:3], pose2rot=True) <NEW_LINE> <DEDENT> nn_vertices = pred_output.vertices[0].numpy() * scalingFactor <NEW_LINE> tempMesh = {'ver': nn_vertices, 'f': smpl.faces, 'color':color} <NEW_LINE> meshList.append(tempMesh) <NEW_LINE> <DEDENT> glViewer.setMeshData(meshList, bComputeNormal= True) <NEW_LINE> glViewer.show(waittime)
Visualize SMPL vertices from SMPL pose parameters This can be used as a quick visualize function if you have a pose parameters args: poseParamList: list of pose parameters (numpy array) in angle axis (72,) by default or rot matrix (24,3,3) with isRotMat==True shapeParamList: (optional) list of shape parameters (numpy array) (10,). If not provided, use a zero vector colorList: (optional) list of color RGB values e.g., (255,0,0) for red
625941bf82261d6c526ab3d4
def checksum1(data, stringlength): <NEW_LINE> <INDENT> value_buffer = 0 <NEW_LINE> for count in range(0, stringlength): <NEW_LINE> <INDENT> value_buffer = value_buffer ^ data[count] <NEW_LINE> <DEDENT> return value_buffer&0xFE
Calculate Checksum 1 Calculate the ckecksum 1 required for the herkulex data packet Args: data (list): the data of which checksum is to be calculated stringlength (int): the length of the data Returns: int: The calculated checksum 1
625941bfadb09d7d5db6c6ca
def parse(document): <NEW_LINE> <INDENT> if isinstance(document, str): <NEW_LINE> <INDENT> document = Path(document) <NEW_LINE> <DEDENT> with document.open('r') as fin: <NEW_LINE> <INDENT> lines = [__.strip() for __ in fin.readlines() if __.strip()][1:] <NEW_LINE> content = ''.join(lines) <NEW_LINE> return bs(content, 'lxml')
Return a BeautifulSoup XML object or None if no document specified Parameter --------- document : str, Path Path to XML document for parsing
625941bfa934411ee37515cb
def __setitem__(self, key: Union[int, np.ndarray], value: Any) -> None: <NEW_LINE> <INDENT> raise NotImplementedError(f"{type(self)} does not implement __setitem__.")
Set one or more values inplace. This method is not required to satisfy the pandas extension array interface. Parameters ---------- key : int, ndarray, or slice When called from, e.g. ``Series.__setitem__``, ``key`` will be one of * scalar int * ndarray of integers. * boolean ndarray * slice object value : ExtensionDtype.type, Sequence[ExtensionDtype.type], or object value or values to be set of ``key``. Returns ------- None
625941bffbf16365ca6f60f7
@app.route('/end_session') <NEW_LINE> def end_session(): <NEW_LINE> <INDENT> session.clear() <NEW_LINE> return render_template("home.html")
End session.
625941bf009cb60464c632ec
def p_table_wild(p): <NEW_LINE> <INDENT> p[0] = ("*", p[1])
table_wild : identifier '.' '*'
625941bf7c178a314d6ef394
def set_input(self, deepmag, deepmag_sigma=None, *args, **kwargs): <NEW_LINE> <INDENT> filter_list = DEEPFilterList() <NEW_LINE> maggie, maggie_ivar = deepmag_to_maggie(deepmag, deepmag_sigma) <NEW_LINE> super(DEEPPhotoZ, self).set_input(filter_list, maggie, maggie_ivar, *args, **kwargs)
Set input data The input DEEP BRI photometry will be converted into AB maggies via DEEP.deepmag_to_maggie; see the documentation of the function for detail. INPUT redshift -- List of redshifts deepmag -- List of DEEP BRI magnitudes deepmag_sigma -- List of 1-sigma uncertainties in DEEP BRI magnitudes; defaults to an empty list
625941bf94891a1f4081b9e1
def get_extant_genome_by_name(self, name): <NEW_LINE> <INDENT> for taxon in self.taxonomy.leaves: <NEW_LINE> <INDENT> if taxon.name == name: <NEW_LINE> <INDENT> if "genome" in taxon.features: <NEW_LINE> <INDENT> return taxon.genome <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> raise KeyError('No extant genomes match the query name: {}'.format(name))
Get the :obj:`pyham.genome.ExtantGenome` that match the query name. Args: name (:obj:`str`): Name of the :obj:`pyham.genome.ExtantGenome`. Returns: :obj:`pyham.genome.ExtantGenome` or raise KeyError
625941bf2ae34c7f2600d06a
def lstm_syndir_attention_decoder(inputs, hparams, train, name, initial_state, encoder_outputs, att_scores): <NEW_LINE> <INDENT> def dropout_lstm_cell(): <NEW_LINE> <INDENT> return tf.contrib.rnn.DropoutWrapper( tf.nn.rnn_cell.BasicLSTMCell(hparams.hidden_size), input_keep_prob=1.0 - hparams.dropout * tf.to_float(train)) <NEW_LINE> <DEDENT> layers = [dropout_lstm_cell() for _ in range(hparams.num_hidden_layers)] <NEW_LINE> attention_mechanism = SyntaxDirectedAttention(hparams.hidden_size, encoder_outputs, att_scores) <NEW_LINE> cell = AttentionWrapper( tf.nn.rnn_cell.MultiRNNCell(layers), [attention_mechanism] * hparams.num_heads, attention_layer_size=[hparams.attention_layer_size] * hparams.num_heads, output_attention=(hparams.output_attention == 1)) <NEW_LINE> batch_size = common_layers.shape_list(inputs)[0] <NEW_LINE> initial_state = cell.zero_state(batch_size, tf.float32).clone( cell_state=initial_state) <NEW_LINE> with tf.variable_scope(name): <NEW_LINE> <INDENT> output, state = tf.nn.dynamic_rnn( cell, inputs, initial_state=initial_state, dtype=tf.float32, time_major=False) <NEW_LINE> if hparams.output_attention == 1 and hparams.num_heads > 1: <NEW_LINE> <INDENT> output = tf.layers.dense(output, hparams.hidden_size) <NEW_LINE> <DEDENT> return output, state
Run LSTM cell with attention on inputs of shape [batch x time x size].
625941bfff9c53063f47c12d
def getThread(self): <NEW_LINE> <INDENT> return [message.getDetail() for message in self.message_set.order_by('time_sent')]
Returns list of most recent messages with corresponding info
625941bfb545ff76a8913d4f
@follows(filtering, peakcalling_tasks, IDR, buildBigWig) <NEW_LINE> def full(): <NEW_LINE> <INDENT> pass
runs entire pipeline
625941bf01c39578d7e74d74
def get_year(self): <NEW_LINE> <INDENT> return self.date_to.year
# Helper method to get the year (normalized between Odoo Versions) :return: int year of payslip
625941bf0a50d4780f666dc9
def applyAngularConversion(angularCalibrationFile, detectorPosition, channeldata): <NEW_LINE> <INDENT> if angularCalibrationFile is None: <NEW_LINE> <INDENT> raise Exception("Angular calibration file is not available for converting channel numbers to angles.") <NEW_LINE> <DEDENT> if detectorPosition is None: <NEW_LINE> <INDENT> raise Exception("PSD detector position in degree must be provided for angular conversion to proceed.") <NEW_LINE> <DEDENT> angular_calibration_data = read_angular_calibration_data(angularCalibrationFile) <NEW_LINE> results=[] <NEW_LINE> for data in channeldata: <NEW_LINE> <INDENT> moduleindex=data[0]/1280 <NEW_LINE> channelmodule=data[0]%1280 <NEW_LINE> moduleparameters=angular_calibration_data[moduleindex] <NEW_LINE> centre=moduleparameters[0] <NEW_LINE> conversion=moduleparameters[1] <NEW_LINE> offset=moduleparameters[2] <NEW_LINE> angle=2.404350+offset+math.degrees(math.atan((channelmodule-centre)*conversion))+detectorPosition+GLOBAL_OFFSET+BEAMLINE_OFFSET <NEW_LINE> results.append((angle, data[1], data[2], data[0])) <NEW_LINE> <DEDENT> return results
convert channel number to actual angle in degree based on angular calibration file provided, return a list of tuples (angle, count, error, channel) required input parameters: 1. angular calibration file, if None raise exception 2. detector position at which the raw data is taken, if None raise exception 3. flat field corrected data list of tuples (channel-number, count, error)
625941bfbe7bc26dc91cd53d
def predict(self, X): <NEW_LINE> <INDENT> check_is_fitted(self, 'threshold_') <NEW_LINE> X = check_array(X) <NEW_LINE> is_inlier = -np.ones(X.shape[0], dtype=int) <NEW_LINE> if self.contamination is not None: <NEW_LINE> <INDENT> values = self.decision_function(X, raw_values=True) <NEW_LINE> is_inlier[values <= self.threshold_] = 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise NotImplementedError("You must provide a contamination rate.") <NEW_LINE> <DEDENT> return is_inlier
Outlyingness of observations in X according to the fitted model. Parameters ---------- X : array-like, shape = (n_samples, n_features) Returns ------- is_outliers : array, shape = (n_samples, ), dtype = bool For each observation, tells whether or not it should be considered as an outlier according to the fitted model. threshold : float, The values of the less outlying point's decision function.
625941bf67a9b606de4a7df4
def swappableDependency(value): <NEW_LINE> <INDENT> return SwappableTuple((value.split(".", 1)[0], "__first__"), value)
Turns a setting value into a dependency.
625941bf004d5f362079a26e
def get_file_path(self): <NEW_LINE> <INDENT> file_list = os.listdir(self.apk_path) <NEW_LINE> for file in file_list: <NEW_LINE> <INDENT> if self.style in file: <NEW_LINE> <INDENT> file_path = os.path.join(self.apk_path, file) <NEW_LINE> os.rename(file_path, file_path.replace(' ', '')) <NEW_LINE> time.sleep(1) <NEW_LINE> return file_path
获取安装包路径
625941bf85dfad0860c3ad92
def is_kind_of_class(obj, a_class): <NEW_LINE> <INDENT> return isinstance(obj, a_class)
is_kind_of_class method checks inheritance of obj
625941bf442bda511e8be355
def decode(bytes): <NEW_LINE> <INDENT> return bytes.decode('utf-8')
Decodes a given list of bytes as UTF-8. @type bytes: list of bytes @rtype: string @return: A decoded version of the input bytes.
625941bf4f6381625f114975
def set_qsmiles(self,qsmilesmat,qcountsmat,querylengths,querymags): <NEW_LINE> <INDENT> tempqlmat = self._padded_array(qsmilesmat.T) <NEW_LINE> if tempqlmat.shape[1] > 65536 or tempqlmat.shape[0] > 32768: <NEW_LINE> <INDENT> raise ValueError("Error: query matrix is not allowed to have more than 65536 rows (molecules) or 32768 columns (LINGOs) (both padded to multiple of 16). Dimensions = (%d,%d)"%tempqlmat.shape) <NEW_LINE> <DEDENT> self.gpu.qsmiles = cl.Buffer(self.gpu.context,cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=tempqlmat) <NEW_LINE> tempqcmat = self._padded_array(qcountsmat.T) <NEW_LINE> self.gpu.qcounts = cl.Buffer(self.gpu.context,cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=tempqcmat) <NEW_LINE> self.qPitchTInInt = numpy.int32(tempqlmat.shape[1]) <NEW_LINE> del tempqlmat <NEW_LINE> del tempqcmat <NEW_LINE> self.qshape = qsmilesmat.shape <NEW_LINE> self.nquery = qsmilesmat.shape[0] <NEW_LINE> self.gpu.ql_gpu = cl.Buffer(self.gpu.context,cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf = querylengths) <NEW_LINE> self.gpu.qmag_gpu = cl.Buffer(self.gpu.context,cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf = querymags) <NEW_LINE> return
Sets the reference SMILES set to use Lingo matrix *qsmilesmat*, count matrix *qcountsmat*, and length vector *querylengths*. If *querymags* is provided, it will be used as the magnitude vector; else, the magnitude vector will be computed (on the GPU) from the count matrix. Because of hardware limitations, the query matrices (*qsmilesmat* and *qcountsmat*) must have no more than 65,536 rows (molecules) and 32,768 columns (Lingos). Larger computations must be performed in tiles.
625941bffff4ab517eb2f373
def __init__(self, fnam, histnam='h_cor'): <NEW_LINE> <INDENT> self.file = TFile(fnam) <NEW_LINE> if self.file.IsZombie(): <NEW_LINE> <INDENT> raise ValueError(fnam+' cannot be opened') <NEW_LINE> <DEDENT> self.hist = self.file.Get(histnam) <NEW_LINE> if self.hist==None: <NEW_LINE> <INDENT> raise ValueError('{h} cannot be found in {f}'.format(h=histnam, f=fnam))
fnam is a root file containing a 1D histogram giving the correction factor as a function of eta.
625941bf50812a4eaa59c25d
def clean(self): <NEW_LINE> <INDENT> super(SignedSSHKey, self).clean() <NEW_LINE> if not self.certificate.startswith('ssh-rsa-cert-v01@openssh.com'): <NEW_LINE> <INDENT> raise BadRequestError("Certificate is not a valid signed RSA key.")
# Checks if certificate is specific ssh-rsa-cert and ensures that self is a valid RSA keypair.
625941bfab23a570cc2500b9
def test(self, message): <NEW_LINE> <INDENT> return super().test(message.channel) and self.pattern.search(message.content)
Test whether or not the given message should trigger the Event's execution.
625941bf1f037a2d8b946138
def load_asdf(self, filename, mode="a"):
    """Open *filename* as an ASDFDataSet.

    :param filename: path of the ASDF file to open
    :param mode: file open mode passed to ASDFDataSet (default "a")
    :return: an ASDFDataSet instance
    """
    if self.mpi_mode:
        # In MPI mode compression is disabled (presumably a requirement of
        # parallel HDF5 writes -- TODO confirm) and the debug flag is forwarded.
        return ASDFDataSet(filename, compression=None, debug=self._debug, mode=mode)
    else:
        return ASDFDataSet(filename, mode=mode)
625941bfb5575c28eb68df37
def get_ip():
    """Return the primary outbound IP address of the current machine.

    Opens a UDP socket and "connects" it to an arbitrary non-local address.
    No packet is actually sent -- connect() on a datagram socket only sets
    the peer -- but the OS selects the source address that would be used,
    which is then read back via getsockname().

    :return: dotted-quad IP string, or '127.0.0.1' when no route is available
    """
    # 'with' guarantees the socket is closed, replacing the manual try/finally.
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
        try:
            # Address and port are arbitrary; nothing is transmitted.
            s.connect(('10.255.255.255', 1))
            return s.getsockname()[0]
        except OSError:
            # Only network/OS errors mean "no route"; the original bare
            # 'except BaseException' also swallowed KeyboardInterrupt.
            return '127.0.0.1'
625941bf507cdc57c6306c0e
def _stdout_chunk_read(self, chunk_bytes):
    """Accumulate a chunk of stdout in memory and mirror it to the file.

    Overrides ExternalCmdJob._stdout_chunk_read() so the process's stdout
    can later be read from self._stdout_bytes instead of being re-read
    from the file it is saved to.
    """
    # Rebind rather than use '+=' -- identical semantics for bytes.
    self._stdout_bytes = self._stdout_bytes + chunk_bytes
    self._stdout.write(chunk_bytes)
625941bf435de62698dfdb85
def _set_callbacks(self):
    """Set up the RPC consumer so this service receives calls from agents.

    The consumer topic is suffixed with the environment name (when set) so
    multiple environments can share one message bus. Pre-Juno and Juno+
    OpenStack releases use different RPC connection APIs; PREJUNO selects
    which code path runs.
    """
    self.callbacks = LoadBalancerCallbacks(self.plugin, self.env, self.pool_scheduler)
    topic = lbaasv1constants.TOPIC_PROCESS_ON_HOST
    if self.env:
        # Scope the topic to this environment.
        topic = topic + "_" + self.env
    if PREJUNO:
        # Legacy RPC API: dispatcher-based consumer running in a thread.
        self.conn = rpc.create_connection(new=True)
        self.conn.create_consumer(
            topic,
            self.callbacks.create_rpc_dispatcher(),
            fanout=False)
        self.conn.consume_in_thread()
    else:
        # Juno+ RPC API: endpoint list, including the agent DB callback.
        self.conn = q_rpc.create_connection(new=True)
        self.conn.create_consumer(
            topic,
            [self.callbacks, agents_db.AgentExtRpcCallback(self.plugin)],
            fanout=False)
        self.conn.consume_in_threads()
625941bf379a373c97cfaa7d
def exam_cmp(exam1, exam2):
    """Three-way comparator for sorting exams chronologically by 'date'.

    :param exam1: exam dict with a 'date' string field
    :param exam2: exam dict with a 'date' string field
    :return: -1, 0 or 1 per classic cmp semantics
    """
    first = str2DateTime(exam1["date"])
    second = str2DateTime(exam2["date"])
    if first < second:
        return -1
    return 0 if first == second else 1
625941bfa4f1c619b28aff78
def user_followers_like_follow_helper(data, user_id, users_count=25, likes_count=10, thresholds=(0.7, 1.2), sleep_time=1):
    """Like and follow a sample of a user's followers.

    Fetches the follower list of *user_id*, then picks random followers and
    runs user_like_follow on each. The session is persisted afterwards.

    :param data: data dictionary with 'api' and 'follows' keys
    :param user_id: user whose followers will be liked/followed
    :param users_count: how many followers to process (capped by the list size)
    :param likes_count: how many likes to give each user
    :param thresholds: (min, max) estimation borders for following back
    :param sleep_time: delay between likes
    :return: dict {username: status} where status is True/False
    """
    result = {}
    api = data['api']
    followers = api.getTotalFollowers(user_id)
    # Never ask for more users than exist; fall back to half the follower list.
    users_count = users_count if users_count < len(followers) else (
        len(followers) // 2)
    for _ in range(users_count):
        # random.choice samples WITH replacement, so a user may be picked twice.
        user = random.choice(followers)
        try:
            status = user_like_follow(data, user['pk'], likes_count,
                                      thresholds, sleep_time)
            result[user['username']] = status
        except Exception:
            # Narrowed from a bare 'except:' which also swallowed
            # KeyboardInterrupt/SystemExit and made the loop unstoppable.
            print('ERROR')
    save_data(api.username.lower() + '.api', data)
    return result
625941bf4f88993c3716bfa4
def test_get_members_by_member(self):
    """A non-admin group member requests the member list of their group.

    Creates a second user, adds them to the existing group, and checks that
    GET /group/<id>/users/ with that user's token succeeds and reports both
    members.
    """
    new_user = User(username='another_user', fullname='Another user', passhash='hash')
    server.db.session.add(new_user)
    new_user.groups.append(self.group)
    server.db.session.commit()
    # Token must exist before the authenticated request below.
    new_user.get_token()
    url = '/group/{group_id}/users/'.format(group_id=self.group.id)
    rv = self.get(url, token=new_user.token)
    self.assertJsonOk(rv)
    json = loads(rv.data)
    self.assertTrue('users' in json)
    # Both the fixture user and the newly added user are members.
    self.assertEqual(len(json['users']), 2)
    return
625941bf23849d37ff7b2fc9
def creatTaskCpnDeclaration(self):
    """Build the C comment banner and prototype declaring the component's
    run task (``void task<fileName>(void);``).

    :return: the generated declaration text as a single string
    """
    lines = [
        '/***********************************************************\n',
        '* 组件运行任务\n',
        '***********************************************************/\n',
        '/*运行任务声明\n',
        '***********************************************/\n',
        'void task%s(void);\n' % self.__fileName,
    ]
    return ''.join(lines)
625941bf566aa707497f44a6
def find_blobs(img_data, verbose=False, min_area=None):
    """Find second-level contours ("blobs") in an image.

    The image is binarized at a fixed threshold of 140, contours are
    extracted as a tree, and only contours whose direct parent is a
    top-level contour are kept (optionally filtered by *min_area*).

    :param img_data: 2-D image array; uint16 input is scaled down to uint8
    :param verbose: print progress information
    :param min_area: if given, keep only contours with at least this area
    :return: list of contours sorted by area, largest first
    """
    copy_data = img_data.copy()
    if img_data.dtype == 'uint16':
        if verbose: print("16 bit image found. Scaling down to 8bit.")
        copy_data = (copy_data/2.**8).astype(np.uint8)
    retval, copy_data = cv2.threshold(copy_data, 140, 255, cv2.THRESH_BINARY)
    # NOTE: findContours returns 3 values on OpenCV 3.x; this 2-value
    # unpacking matches OpenCV 2.x / 4.x.
    contours, hierarchy = cv2.findContours(copy_data, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE, )
    toplevel_indices, secondlevel_contours = [],[]
    # BUGFIX: 'hierarchy == None' on a numpy array compares elementwise and
    # raises in boolean context; an identity test is required here.
    if hierarchy is None:
        if verbose: print("Finished finding no second level contours.")
        return []
    h = hierarchy[0]
    for i in range(len(h)):
        # h[i][3] is the parent index; -1 marks a top-level contour.
        if h[i][3] == -1:
            toplevel_indices.append(i)
    for i in range(len(h)):
        if h[i][3] in toplevel_indices:
            if verbose: print("Found a second level contour. Starting at: %s." % contours[i][0])
            if min_area is not None:
                if cv2.contourArea(contours[i]) >= min_area:
                    secondlevel_contours.append(contours[i])
            else:
                secondlevel_contours.append(contours[i])
    if verbose: print("Finished finding second level contours.")
    # Largest blobs first.
    return sorted(secondlevel_contours, key=cv2.contourArea, reverse=True)
625941bf236d856c2ad44710
def __init__(__self__, *,
             application_name: Optional[pulumi.Input[str]] = None,
             application_version_id: Optional[pulumi.Input[int]] = None,
             snapshot_creation_timestamp: Optional[pulumi.Input[str]] = None,
             snapshot_name: Optional[pulumi.Input[str]] = None):
    """Input properties used for looking up and filtering ApplicationSnapshot
    resources.

    :param pulumi.Input[str] application_name: The name of an existing Kinesis
        Analytics v2 Application. Note that the application must be running
        for a snapshot to be created.
    :param pulumi.Input[int] application_version_id: The current application
        version ID when the snapshot was created.
    :param pulumi.Input[str] snapshot_creation_timestamp: The timestamp of the
        application snapshot.
    :param pulumi.Input[str] snapshot_name: The name of the application
        snapshot.
    """
    # Only set the properties that were actually supplied.
    supplied = {
        "application_name": application_name,
        "application_version_id": application_version_id,
        "snapshot_creation_timestamp": snapshot_creation_timestamp,
        "snapshot_name": snapshot_name,
    }
    for prop, value in supplied.items():
        if value is not None:
            pulumi.set(__self__, prop, value)
625941bfac7a0e7691ed400a
def test_get_yearly_with_gains():
    """get_yearly_with_gains should produce a sorted list of Years.

    Feeds per-year totals (keyed by year, in deliberately unsorted order)
    and checks that the result is chronological and that growth factors and
    gains are derived from value changes net of contributions/transfers.
    """
    totals = {
        2014: {"contributions": 1000, "transfers": 0, "value": 5000.0},
        2013: {"contributions": 500, "transfers": 500, "value": 1000.0},
        2015: {"contributions": 0, "transfers": 0, "value": 5000.0},
        2016: {"contributions": 3000.0, "transfers": -1000, "value": 4000},
    }
    # Year(year, contributions, transfers, value, growth_factor, gain)
    # -- presumably; confirm against the portfolio.Year definition.
    expected = [
        portfolio.Year(2013, 500, 500, 1000.0, 1, 0),
        portfolio.Year(2014, 1000, 0, 5000.0, 3, 3000),
        portfolio.Year(2015, 0, 0, 5000.0, 1, 0),
        portfolio.Year(2016, 3000.0, -1000, 4000.0, 0.5, -3000.0),
    ]
    actual = portfolio.get_yearly_with_gains(totals)
    assert actual == expected
625941bf92d797404e3040c2
def main_wrap(args=None):
    """The main function that provides exception handling.

    Call "main_core" to implement the core functionality; main_core reports
    progress through the step callback so a failure can be attributed to the
    step that was running.

    @param args: Command arguments list.
    @return: Exit code (0 on success or Ctrl-C; the last reported step exit
             code on an unhandled exception).
    """
    state = {'title': '', 'exit_code': 0}

    def step_func(title=None, exit_code=None):
        # Record only the fields the caller supplied.
        if title is not None:
            state['title'] = title
        if exit_code is not None:
            state['exit_code'] = exit_code

    try:
        return main_core(args=args, step_func=step_func)
    except KeyboardInterrupt:
        # Ctrl-C is a normal way to stop; not an error.
        return 0
    except Exception:
        trace = format_exc()
        current_title = state.get('title', '')
        if current_title:
            message = '# Error: {}\n---\n{}---\n'.format(current_title, trace)
        else:
            message = '# Error\n---\n{}---\n'.format(trace)
        sys.stderr.write(message)
        return state.get('exit_code', 1)
625941bf1d351010ab855a56
def _run_exit_func(self, additional_args: List) -> bool:
    """Run the worker_exit function (when configured) and store its results.

    :param additional_args: Additional args to pass to the function
        (worker ID, shared objects, worker state)
    :return: True when the worker needs to shut down, False otherwise
    """
    def _exit_func():
        # Time the exit function so it shows up in worker insights.
        with TimeIt(self.worker_insights.worker_exit_time, self.worker_id):
            return self.map_params.worker_exit(*additional_args)

    # _run_safely handles exceptions/interrupts and tells us whether to stop.
    results, should_return = self._run_safely(_exit_func, no_args=True)
    if should_return:
        return True
    else:
        self.worker_comms.add_exit_results(self.worker_id, results)
        return False
625941bf5510c4643540f323
def restore(destination_ip, source_ip):
    """Re-send the legitimate ARP mapping so the target's ARP cache is
    repaired on program exit.

    :param destination_ip: host whose ARP table is being corrected
    :param source_ip: host whose true MAC address must be restored
    """
    target_mac = get_mac(destination_ip)
    real_mac = get_mac(source_ip)
    # op=2 is an ARP reply carrying the genuine source MAC.
    arp_reply = scapy.ARP(
        op=2,
        pdst=destination_ip,
        hwdst=target_mac,
        psrc=source_ip,
        hwsrc=real_mac,
    )
    # Send several copies so at least one refreshes the cache.
    scapy.send(arp_reply, count=4, verbose=False)
625941bf5fdd1c0f98dc016b
def geo_lng_qq(self):
    """Return the longitude component of the QQ (Tencent) coordinate pair."""
    coordinates = self.geo_qq
    return coordinates[1]
625941bf21bff66bcd68488e