code
stringlengths
4
4.48k
docstring
stringlengths
1
6.45k
_id
stringlengths
24
24
def no_op_wraps(func): <NEW_LINE> <INDENT> if func.__module__ is None or 'ocellaris' not in func.__module__: <NEW_LINE> <INDENT> return functools.orig_wraps(func) <NEW_LINE> <DEDENT> def wrapper(decorator): <NEW_LINE> <INDENT> return func <NEW_LINE> <DEDENT> return wrapper
Replaces functools.wraps in order to undo wrapping when generating Sphinx documentation
625941bd711fe17d82542272
def __init__(self, df,M,improved = False,ratio = 1.5): <NEW_LINE> <INDENT> assert isinstance(df,pandas.DataFrame) <NEW_LINE> self.df_as_np_array = np.array(df) <NEW_LINE> self.features = df.columns[1::].values.tolist() <NEW_LINE> Default = np_utls.calc_majority(self.df_as_np_array) <NEW_LINE> self.tree = utls.TDIDT.TDIDT(self.df_as_np_array,self.features,Default,utls.SelectFeatures.IG_MAX().IG_max,M,improved=improved,ratio=ratio)
this class will bulid for us an id3 decision tree and store it in self.tree :param df: the train data :param M: the prune parameter :param improved: whether to know we want the improved knn (part 4) or not (part 1-3) :param ratio: this is used for part 4 (improving the loss) if it bigger than 1 then this class will behave exactly as the id3 that we learned in the lectures (it is not relevant for part 1 and 3)
625941bd9b70327d1c4e0cd5
def gimme_a_genexp(f, start, gen = -float("inf")): <NEW_LINE> <INDENT> return ((gimme_a_genexp(f,start,itrat) for itrat in itertools.count(1)) if gen == -float("inf") else f(gimme_a_genexp(f,start,gen-1)) if gen!=1 else start)
this function creates a generator for recursive use of the given function starting. or a generator for the solutions for an = f(f(f(...n times... (start)))) a1 = start Arguments: f - f as stated in the task, any function. start - x0 as stated in the task, any argument f should get. Return: a generator for an, as defined above. Note: if the function returns something it can not get as an argument, it will fail.
625941bd596a8972360899c5
def __init__(self, screen, bat): <NEW_LINE> <INDENT> super(Ball, self).__init__() <NEW_LINE> self.screen = screen <NEW_LINE> self.screen_rect = screen.get_rect() <NEW_LINE> self.image = conf_ball.IMAGE <NEW_LINE> self.rect = self.image.get_rect() <NEW_LINE> self.rect.centerx = bat.rect.centerx <NEW_LINE> self.rect.bottom = bat.rect.top - 10 <NEW_LINE> self.x = float(self.rect.centerx) <NEW_LINE> self.y = float(self.rect.centery) <NEW_LINE> self.speedx, self.speedy = conf_ball.SPEED, conf_ball.SPEED <NEW_LINE> self.count = conf_ball.BALLS_COUNT <NEW_LINE> self.moving = False <NEW_LINE> self.moving_left = False <NEW_LINE> self.moving_right = False
Создает шарик в текущей позиции ракетки
625941bdbe383301e01b538d
def remove(self, choices): <NEW_LINE> <INDENT> for choice in choices: <NEW_LINE> <INDENT> self.student_list_copy.remove(choice)
Remove selected students from the selection list
625941bdfb3f5b602dac3592
def collapse_n_steps(self, transitions): <NEW_LINE> <INDENT> state, action, cumulated_reward, next_state, done, info = transitions[0] <NEW_LINE> discount = 1 <NEW_LINE> for transition in transitions[1:]: <NEW_LINE> <INDENT> if done: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> _, _, reward, next_state, done, info = transition <NEW_LINE> discount *= self.config['gamma'] <NEW_LINE> cumulated_reward += discount*reward <NEW_LINE> <DEDENT> <DEDENT> return state, action, cumulated_reward, next_state, done, info
Collapse n transitions <s,a,r,s',t> of a trajectory into one transition <s0, a0, Sum(r_i), sp, tp>. We start from the initial state, perform the first action, and then the return estimate is formed by accumulating the discounted rewards along the trajectory until a terminal state or the end of the trajectory is reached. :param transitions: A list of n successive transitions :return: The corresponding n-step transition
625941bd56b00c62f0f14559
def permute_data(X, y): <NEW_LINE> <INDENT> perm = np.random.permutation(X.shape[0]) <NEW_LINE> return X[perm], y[perm]
Permutes data with numpys permute function
625941bd63f4b57ef0001022
def _parse_name(text, default='-'): <NEW_LINE> <INDENT> tag = 'Name=' <NEW_LINE> idx = text.find(tag) <NEW_LINE> if idx == -1: <NEW_LINE> <INDENT> return default <NEW_LINE> <DEDENT> text = text[idx + len(tag):] <NEW_LINE> idx = text.find(';') <NEW_LINE> if idx != -1: <NEW_LINE> <INDENT> text = text[:idx] <NEW_LINE> <DEDENT> return text
Extract name from the 'attributes' field, if present. e.g. 'ID=mrna0001;Name=sonichedgehog' -> 'sonichedgehog'
625941bdbaa26c4b54cb1024
def run(self, review): <NEW_LINE> <INDENT> self.layer_1 *= 0 <NEW_LINE> unique_indices = set() <NEW_LINE> for word in review.lower().split(" "): <NEW_LINE> <INDENT> if word in self.word2index.keys(): <NEW_LINE> <INDENT> unique_indices.add(self.word2index[word]) <NEW_LINE> <DEDENT> <DEDENT> for index in unique_indices: <NEW_LINE> <INDENT> self.layer_1 += self.weights_0_1[index] <NEW_LINE> <DEDENT> layer_2 = self.sigmoid(self.layer_1.dot(self.weights_1_2)) <NEW_LINE> if(layer_2[0] >= 0.5): <NEW_LINE> <INDENT> return "POSITIVE" <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return "NEGATIVE"
Returns a POSITIVE or NEGATIVE prediction for the given review.
625941bd4e4d5625662d42dd
def __init__(self, actor, state, name="TrafficLightStateSetter"): <NEW_LINE> <INDENT> super(TrafficLightStateSetter, self).__init__(name) <NEW_LINE> self._actor = actor if "traffic_light" in actor.type_id else None <NEW_LINE> self._state = state <NEW_LINE> self.logger.debug("%s.__init__()" % (self.__class__.__name__))
Init
625941bd6fb2d068a760ef9c
def __init__(self, icinga_host: RemoteHosts, target_hosts: TypeHosts, *, verbatim_hosts: bool = False) -> None: <NEW_LINE> <INDENT> if not verbatim_hosts: <NEW_LINE> <INDENT> target_hosts = [target_host.split(".")[0] for target_host in target_hosts] <NEW_LINE> <DEDENT> if isinstance(target_hosts, NodeSet): <NEW_LINE> <INDENT> self._target_hosts = target_hosts <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self._target_hosts = NodeSet.fromlist(target_hosts) <NEW_LINE> <DEDENT> if not self._target_hosts: <NEW_LINE> <INDENT> raise IcingaError("Got empty target hosts list.") <NEW_LINE> <DEDENT> self._command_file = CommandFile(icinga_host) <NEW_LINE> self._icinga_host = icinga_host <NEW_LINE> self._verbatim_hosts = verbatim_hosts
Initialize the instance. Arguments: icinga_host (spicerack.remote.RemoteHosts): the RemoteHosts instance for the Icinga server. target_hosts (spicerack.typing.TypeHosts): the target hosts either as a NodeSet instance or a sequence of strings. verbatim_hosts (bool, optional): if :py:data:`True` use the hosts passed verbatim as is, if instead :py:data:`False`, the default, consider the given target hosts as FQDNs and extract their hostnames to be used in Icinga.
625941bd30bbd722463cbcc5
def test_quota_class_show(self): <NEW_LINE> <INDENT> default_quota_class_set = self.client.quota_classes.get('default') <NEW_LINE> default_values = { quota_name: str(getattr(default_quota_class_set, quota_name)) for quota_name in self._included_resources } <NEW_LINE> output = self.nova('quota-class-show %s' % self._get_quota_class_name()) <NEW_LINE> self._verify_qouta_class_show_output(output, default_values)
Tests showing quota class values for a fake non-existing quota class. The API will return the defaults if the quota class does not actually exist. We use a fake class to avoid any interaction with the real default quota class values.
625941bd23849d37ff7b2f92
def check_new_devices(self): <NEW_LINE> <INDENT> for device in self._bt_devices: <NEW_LINE> <INDENT> if device not in self._connected_devices: <NEW_LINE> <INDENT> PushNotification.send_notification_via_pushbullet( 'from Pi', 'to {} : Temp {}C Humidity {}%'.format(device, TEMP, HUMIDITY) ) <NEW_LINE> self._connected_devices.append(device)
checks to see what new devices are available to be connect by bluetooth
625941bd0c0af96317bb80ea
def set_defaults(self, config): <NEW_LINE> <INDENT> for key, value in config.items(): <NEW_LINE> <INDENT> self.setdefault(key, value)
Applies default values from config. :param config: another :py:obj:`dict`
625941bd711fe17d82542273
def code_gen_stmt_list(self, tree): <NEW_LINE> <INDENT> if tree[0] == "<EMPTY>": <NEW_LINE> <INDENT> return 0 <NEW_LINE> <DEDENT> stmt_res = self.code_gen_statement(tree[1]) <NEW_LINE> if type(stmt_res) == int and stmt_res != 0: <NEW_LINE> <INDENT> return 1 <NEW_LINE> <DEDENT> if self.code_gen_stmt_list(tree[3]) != 0: <NEW_LINE> <INDENT> return 1 <NEW_LINE> <DEDENT> if type(stmt_res) == str and not self.labels[stmt_res]: <NEW_LINE> <INDENT> return self.process_error(19, stmt_res) <NEW_LINE> <DEDENT> return 0
Rule #9: <STATEMENTS-LIST> -> <STATEMENT> <STATEMENTS-LIST> | <EMPTY> Semantic definitions: {[2][1]} {}
625941bda8370b77170527a3
def extract_from_computed_attribute(name): <NEW_LINE> <INDENT> decorated_method = getattr(self, name) <NEW_LINE> computed_attribute = decorated_method.__computed_attribute__ <NEW_LINE> computed_attribute.name = computed_attribute.name or name <NEW_LINE> computed_attribute.value = decorated_method(model) <NEW_LINE> return computed_attribute
Get information about an attribute extracted from an "computed_attribute".
625941bdd10714528d5ffbe3
@pytest.mark.django_db <NEW_LINE> def test_get_resource_tp(rf, default, tp0): <NEW_LINE> <INDENT> store_name = "store0.po" <NEW_LINE> subdir_name = "subdir0/" <NEW_LINE> request = rf.get("/") <NEW_LINE> request.user = default <NEW_LINE> func = get_resource(lambda x, y, s, t: (x, y, s, t)) <NEW_LINE> func(request, tp0, "", "") <NEW_LINE> assert isinstance(request.resource_obj, TranslationProject) <NEW_LINE> func(request, tp0, "", store_name) <NEW_LINE> assert isinstance(request.resource_obj, Store) <NEW_LINE> func(request, tp0, subdir_name, "") <NEW_LINE> assert isinstance(request.resource_obj, Directory)
Tests that the correct resources are set for the given TP contexts.
625941bd66673b3332b91f93
def _params(self, t=None): <NEW_LINE> <INDENT> if t is None: <NEW_LINE> <INDENT> elements = vectors_to_elements(self._e0, self._j0) <NEW_LINE> return (self._e0, self._j0, self._r0, self._v0) + elements <NEW_LINE> <DEDENT> if self._t is None: <NEW_LINE> <INDENT> raise KeplerRingError("You must integrate this KeplerRing before " "evaluating it at a specific time step") <NEW_LINE> <DEDENT> t = np.array(t).flatten() <NEW_LINE> e = [self._interpolatedInner[k](t) for k in ('ex', 'ey', 'ez')] <NEW_LINE> j = [self._interpolatedInner[k](t) for k in ('jx', 'jy', 'jz')] <NEW_LINE> e = np.stack(e, axis=-1) <NEW_LINE> j = np.stack(j, axis=-1) <NEW_LINE> x, y, z = [self._interpolatedOuter[k](t) for k in ('x', 'y', 'z')] <NEW_LINE> v = [self._interpolatedOuter[k](t) for k in ('v_R', 'v_z', 'v_phi')] <NEW_LINE> R = (x**2 + y**2)**0.5 <NEW_LINE> phi = np.arctan2(y, x) <NEW_LINE> r = np.stack((R, z, phi), axis=-1) <NEW_LINE> v = np.stack(v, axis=-1) <NEW_LINE> if e.shape == (1, 3): <NEW_LINE> <INDENT> e = e[0] <NEW_LINE> <DEDENT> if j.shape == (1, 3): <NEW_LINE> <INDENT> j = j[0] <NEW_LINE> <DEDENT> if r.shape == (1, 3): <NEW_LINE> <INDENT> r = r[0] <NEW_LINE> <DEDENT> if v.shape == (1, 3): <NEW_LINE> <INDENT> v = v[0] <NEW_LINE> <DEDENT> return (e, j, r, v) + vectors_to_elements(e, j)
Return a tuple of all time-dependent parameters at a specified time. Parameters ---------- t : array_like, optional A time or array of times at which to retrieve the parameters. All times must be contained within the KeplerRing.t() array. Returns ------- e : ndarray e vector at the specified time steps. j : ndarray j vector at the specified time steps. r : ndarray Position vector at the specified time steps. Has the form [R, z, phi] in [pc, pc, rad]. v : ndarray Velocity vector at the specified time steps. Has the form [v_R, v_z, v_phi] in km/s. ecc : float or ndarray Eccentricity at the specified time steps. inc : float or ndarray Inclination at the specified time steps. long_asc : float or ndarray Longitude of the ascending node at the specified time steps. arg_peri : float or ndarray Argument of pericentre at the specified time steps.
625941bd097d151d1a222d5e
def create_access_token_response(self, uri, http_method='GET', body=None, headers=None, credentials=None): <NEW_LINE> <INDENT> resp_headers = {'Content-Type': 'application/x-www-form-urlencoded'} <NEW_LINE> try: <NEW_LINE> <INDENT> request = self._create_request(uri, http_method, body, headers) <NEW_LINE> valid, processed_request = self.validate_access_token_request( request) <NEW_LINE> if valid: <NEW_LINE> <INDENT> token = self.create_access_token(request, credentials or {}) <NEW_LINE> self.request_validator.invalidate_request_token( request.client_key, request.resource_owner_key, request) <NEW_LINE> return resp_headers, token, 200 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return {}, None, 401 <NEW_LINE> <DEDENT> <DEDENT> except errors.OAuth1Error as e: <NEW_LINE> <INDENT> return resp_headers, e.urlencoded, e.status_code
Create an access token response, with a new request token if valid. :param uri: The full URI of the token request. :param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc. :param body: The request body as a string. :param headers: The request headers as a dict. :param credentials: A list of extra credentials to include in the token. :returns: A tuple of 3 elements. 1. A dict of headers to set on the response. 2. The response body as a string. 3. The response status code as an integer. An example of a valid request:: >>> from your_validator import your_validator >>> from oauthlib.oauth1 import AccessTokenEndpoint >>> endpoint = AccessTokenEndpoint(your_validator) >>> h, b, s = endpoint.create_access_token_response( ... 'https://your.provider/access_token?foo=bar', ... headers={ ... 'Authorization': 'OAuth oauth_token=234lsdkf....' ... }, ... credentials={ ... 'my_specific': 'argument', ... }) >>> h {'Content-Type': 'application/x-www-form-urlencoded'} >>> b 'oauth_token=lsdkfol23w54jlksdef&oauth_token_secret=qwe089234lkjsdf&oauth_authorized_realms=movies+pics&my_specific=argument' >>> s 200 An response to invalid request would have a different body and status:: >>> b 'error=invalid_request&description=missing+resource+owner+key' >>> s 400 The same goes for an an unauthorized request: >>> b '' >>> s 401
625941bd91f36d47f21ac3f2
def test_attribute_names_with_invalid_value(self): <NEW_LINE> <INDENT> payload = get_attributes.GetAttributesRequestPayload() <NEW_LINE> args = (payload, 'attribute_names', 0) <NEW_LINE> self.assertRaisesRegexp( TypeError, "attribute_names must be a list of strings", setattr, *args )
Test that a TypeError is raised when an invalid list is used to set the attribute_names attribute of a GetAttributes request payload.
625941bd63b5f9789fde6fe7
def extract_rule(self) -> PartRule: <NEW_LINE> <INDENT> best_record = self.get_best_record() <NEW_LINE> best_rule = self.grammar[best_record.rule_id] <NEW_LINE> for tnode, subtree, boundary_edges in zip(best_record.tnodes_list, best_record.subtree_list, best_record.boundary_edges_list): <NEW_LINE> <INDENT> subtree = set(subtree) & set(self.g.nodes()) <NEW_LINE> compress_graph(g=self.g, boundary_edges=None, subtree=subtree, permanent=True) <NEW_LINE> self.update_tree(tnode=tnode) <NEW_LINE> <DEDENT> if best_rule.lhs == 0: <NEW_LINE> <INDENT> assert self.g.order() == 1, 'Improper extraction, since the graph has > 1 nodes' <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.rule_id_to_record = {} <NEW_LINE> self.tnode_to_rule = {} <NEW_LINE> self.grammar.reset() <NEW_LINE> self.update_subtree_scores(start_tnode=self.root) <NEW_LINE> self.update_all_record_scores() <NEW_LINE> <DEDENT> return best_rule
step 1: get best record step 2: for each tnode in the record step 2.1: update the tree rooted at the tree node step 2.1.1: the decendant rules get disabled only if they are not used elsewhere in the tree step 2.1.2: update the ancestors and their records and rules regardless step 3: update ALL the record scores after extraction since the graph changes. :return:
625941bda8ecb033257d2fd1
def test_render_display(self): <NEW_LINE> <INDENT> dataset = VisibleDatasetFactory() <NEW_LINE> url = url_for('datasets.show', dataset=dataset) <NEW_LINE> response = self.get(url) <NEW_LINE> self.assert200(response)
It should render the dataset page
625941bd26238365f5f0ed6d
def call_pure_intrin(dtype, func_name, *args): <NEW_LINE> <INDENT> args = convert(args) <NEW_LINE> return _make.Call( dtype, func_name, convert(args), _Call.PureIntrinsic, None, 0)
Build expression by calling a pure intrinsic function. Intrinsics can be overloaded with multiple data types via the intrinsic translation rule. Parameters ---------- dtype : str The data type of the result. func_name: str The intrinsic function name. args : list Positional arguments. Returns ------- call : Expr The call expression.
625941bd82261d6c526ab39e
def local_path(self, volume): <NEW_LINE> <INDENT> nfs_share = volume['provider_location'] <NEW_LINE> return os.path.join(self._get_mount_point_for_share(nfs_share), volume['name'], 'volume')
Get volume path (mounted locally fs path) for given volume. :param volume: volume reference
625941bd7d43ff24873a2ba0
def get_cancel_all_order(self, **kwargs): <NEW_LINE> <INDENT> return self.private_request(get_cancel_all_order_url(), 'PUT', kwargs)
[Private] Cancel All Order :response { "success": true, "message": null, "dataVersion": "xxxx", "data": null }
625941bdc4546d3d9de72934
def RegCreateKeyW_handler(exec_ctx): <NEW_LINE> <INDENT> args = tuple(exec_ctx.get_stack_args("dudddddpd")) <NEW_LINE> logging.info("advapi32.dll.RegCreateKeyW(0x%x, %s, %d, %d, %d, %d, %d, " "%d, %d)" % args) <NEW_LINE> return
Callback for RegCreateKeyEx
625941bdd6c5a10208143f4a
def __init__( self, topic_name, key_schema, value_schema=None, num_partitions=1, num_replicas=1, ): <NEW_LINE> <INDENT> self.topic_name = topic_name <NEW_LINE> self.key_schema = key_schema <NEW_LINE> self.value_schema = value_schema <NEW_LINE> self.num_partitions = num_partitions <NEW_LINE> self.num_replicas = num_replicas <NEW_LINE> self.broker_properties = { "bootstrap.servers": KAFKA_BROKER_URL } <NEW_LINE> if self.topic_name not in Producer.existing_topics: <NEW_LINE> <INDENT> self.create_topic() <NEW_LINE> Producer.existing_topics.add(self.topic_name) <NEW_LINE> <DEDENT> self.producer = AvroProducer( { **self.broker_properties, "schema.registry.url": SCHEMA_REGISTRY_URL }, default_key_schema = self.key_schema, default_value_schema = self.value_schema )
Initializes a Producer object with basic settings
625941bd9f2886367277a792
def get_applicant_row(*args, **kwargs): <NEW_LINE> <INDENT> return db_instance.get_row(Applicant, *args, **kwargs)
获取信息 :param args: :param kwargs: :return: None/object
625941bd94891a1f4081b9aa
def test_autocorrect(self): <NEW_LINE> <INDENT> word1 = u"مُُضاعَفة" <NEW_LINE> word2 = u"مُضاعَفة" <NEW_LINE> self.assertEqual(ar.autocorrect(word1), word2) <NEW_LINE> text1 = u"حَرَكَة مُُضاعَفة َسابقة قبل شَّدة سابقاً" <NEW_LINE> text2 = u"حَرَكَة مُضاعَفة سابقة قبل شّدة سابقًا" <NEW_LINE> self.assertEqual(ar.autocorrect(text1), text2)
Test auto correct
625941bd2ae34c7f2600d034
def compress_for_output_listing(paths): <NEW_LINE> <INDENT> will_remove = list(paths) <NEW_LINE> will_skip = set() <NEW_LINE> folders = set() <NEW_LINE> files = set() <NEW_LINE> for path in will_remove: <NEW_LINE> <INDENT> if path.endswith(".pyc"): <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> if path.endswith("player.py") or ".dist-info" in path: <NEW_LINE> <INDENT> folders.add(os.path.dirname(path)) <NEW_LINE> <DEDENT> files.add(path) <NEW_LINE> <DEDENT> _normcased_files = set(map(os.path.normcase, files)) <NEW_LINE> folders = compact(folders) <NEW_LINE> for folder in folders: <NEW_LINE> <INDENT> for dirpath, _, dirfiles in os.walk(folder): <NEW_LINE> <INDENT> for fname in dirfiles: <NEW_LINE> <INDENT> if fname.endswith(".pyc"): <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> file_ = os.path.join(dirpath, fname) <NEW_LINE> if (os.path.isfile(file_) and os.path.normcase(file_) not in _normcased_files): <NEW_LINE> <INDENT> will_skip.add(file_) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> will_remove = files | { os.path.join(folder, "*") for folder in folders } <NEW_LINE> return will_remove, will_skip
Returns a tuple of 2 sets of which paths to display to user The first set contains paths that would be deleted. Files of a package are not added and the top-level directory of the package has a '*' added at the end - to signify that all it's contents are removed. The second set contains files that would have been skipped in the above folders.
625941bd85dfad0860c3ad5c
def test_cache_units(): <NEW_LINE> <INDENT> msr_conf = ik.thorlabs.PM100USB.MeasurementConfiguration.current <NEW_LINE> with expected_protocol( ik.thorlabs.PM100USB, ["CONF?"], [f"{msr_conf.value}"], ) as inst: <NEW_LINE> <INDENT> inst.cache_units = True <NEW_LINE> assert inst._cache_units == inst._READ_UNITS[msr_conf] <NEW_LINE> inst.cache_units = False <NEW_LINE> assert not inst.cache_units
Get, set cache units bool.
625941bd925a0f43d2549d77
def consecutive_counters(letters: str)-> list: <NEW_LINE> <INDENT> if (len(letters) == 0): <NEW_LINE> <INDENT> return [] <NEW_LINE> <DEDENT> prev = letters[0] <NEW_LINE> counter = 1 <NEW_LINE> result = [] <NEW_LINE> for i in range(1, len(letters)): <NEW_LINE> <INDENT> if letters[i] == prev: <NEW_LINE> <INDENT> counter += 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> elem = (prev, counter) <NEW_LINE> result.append(elem) <NEW_LINE> counter = 1 <NEW_LINE> prev = letters[i] <NEW_LINE> <DEDENT> <DEDENT> elem = (prev, counter) <NEW_LINE> result.append(elem) <NEW_LINE> return result
Returns a list of counters of consecutive letters Args: letters (str): a consecutive letters like aaaabbbcca Returns: list: list of tuple of a letter and counter of consecutive
625941bdd486a94d0b98e047
def test_space_setter(dumbalgo): <NEW_LINE> <INDENT> nested_algo = {'DumbAlgo': dict( value=9, )} <NEW_LINE> nested_algo2 = {'DumbAlgo': dict( judgement=10, )} <NEW_LINE> algo = dumbalgo(8, value=1, naedw=nested_algo, naekei=nested_algo2) <NEW_LINE> algo.space = 'etsh' <NEW_LINE> assert algo.space == 'etsh' <NEW_LINE> assert algo.naedw.space == 'etsh' <NEW_LINE> assert algo.naedw.value == 9 <NEW_LINE> assert algo.naekei.space == 'etsh' <NEW_LINE> assert algo.naekei.judgement == 10
Check whether space setter works for nested algos.
625941bd63b5f9789fde6fe8
def scalar(name, tensor, family=None): <NEW_LINE> <INDENT> def record(): <NEW_LINE> <INDENT> with summary_op_util.summary_scope( name, family, values=[tensor]) as (tag, scope): <NEW_LINE> <INDENT> gen_summary_ops.write_scalar_summary( context.context().summary_writer_resource, training_util.get_global_step(), tag, tensor, name=scope) <NEW_LINE> <DEDENT> <DEDENT> return control_flow_ops.cond(should_record_summaries(), record, _nothing)
Writes a scalar summary if possible.
625941bd4f6381625f11493f
def delete_attachment(self, name): <NEW_LINE> <INDENT> db = self.get_db() <NEW_LINE> result = db.delete_attachment(self._doc, name) <NEW_LINE> try: <NEW_LINE> <INDENT> self._doc['_attachments'].pop(name) <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> return result
delete document attachment @param name: name of attachment @return: dict, with member ok set to True if delete was ok.
625941bd7cff6e4e81117888
def doConfig(self): <NEW_LINE> <INDENT> self.context.logger.log("Configuring.", "addStep") <NEW_LINE> try: <NEW_LINE> <INDENT> self.prepareConfigCluster() <NEW_LINE> self.initNodeInstance() <NEW_LINE> self.configInstance() <NEW_LINE> DefaultValue.enableWhiteList( self.context.sshTool, self.context.mpprcFile, self.context.clusterInfo.getClusterNodeNames(), self.context.logger) <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> self.context.logger.logExit(str(e)) <NEW_LINE> <DEDENT> self.context.logger.log("Configuration is completed.", "constant")
function: Do config action input : NA output: NA
625941bdcc40096d61595854
def searchInsert(self, nums, target): <NEW_LINE> <INDENT> if target in nums: <NEW_LINE> <INDENT> return [index for index,value in enumerate(nums) if value == target][0] <NEW_LINE> <DEDENT> elif target < min(nums): <NEW_LINE> <INDENT> return 0 <NEW_LINE> <DEDENT> elif target > max(nums): <NEW_LINE> <INDENT> return len(nums) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> res = [x-target for x in nums] <NEW_LINE> return [index for index,value in enumerate(res) if value >0][0]
:type nums: List[int] :type target: int :rtype: int
625941bd956e5f7376d70d71
def test_8bit_3sample_1frame(self): <NEW_LINE> <INDENT> ds = dcmread(EXPL_8_3_1F) <NEW_LINE> for uid in SUPPORTED_SYNTAXES: <NEW_LINE> <INDENT> ds.file_meta.TransferSyntaxUID = uid <NEW_LINE> arr = ds.pixel_array <NEW_LINE> assert arr.flags.writeable <NEW_LINE> assert (255, 0, 0) == tuple(arr[5, 50, :]) <NEW_LINE> assert (255, 128, 128) == tuple(arr[15, 50, :]) <NEW_LINE> assert (0, 255, 0) == tuple(arr[25, 50, :]) <NEW_LINE> assert (128, 255, 128) == tuple(arr[35, 50, :]) <NEW_LINE> assert (0, 0, 255) == tuple(arr[45, 50, :]) <NEW_LINE> assert (128, 128, 255) == tuple(arr[55, 50, :]) <NEW_LINE> assert (0, 0, 0) == tuple(arr[65, 50, :]) <NEW_LINE> assert (64, 64, 64) == tuple(arr[75, 50, :]) <NEW_LINE> assert (192, 192, 192) == tuple(arr[85, 50, :]) <NEW_LINE> assert (255, 255, 255) == tuple(arr[95, 50, :])
Test pixel_array for 8-bit, 3 sample/pixel, 1 frame.
625941bd23849d37ff7b2f93
def __init__(self, parent=None, border=None, lockAspect=False, enableMouse=True, invertY=False, enableMenu=True, name=None, invertX=False): <NEW_LINE> <INDENT> super(MosaicSliceBox, self).__init__(parent=None, border=border, lockAspect=True, enableMouse=True, invertY=False, enableMenu=True, name=None, invertX=False) <NEW_LINE> self.zoomCenter = [0, 0] <NEW_LINE> self.state['wheelScaleFactor'] = -0.025 <NEW_LINE> self.useMyMenu() <NEW_LINE> """ linkedAxis saves to which axis of the linked view the axis is linked to """ <NEW_LINE> self.state['linkedAxis']= [None, None]
============== ============================================================= **Arguments:** *parent* (QGraphicsWidget) Optional parent widget *border* (QPen) Do draw a border around the view, give any single argument accepted by :func:`mkPen <pyqtgraph.mkPen>` *lockAspect* (False or float) The aspect ratio to lock the view coorinates to. (or False to allow the ratio to change) *enableMouse* (bool) Whether mouse can be used to scale/pan the view *invertY* (bool) See :func:`invertY <pyqtgraph.ViewBox.invertY>` *invertX* (bool) See :func:`invertX <pyqtgraph.ViewBox.invertX>` *enableMenu* (bool) Whether to display a context menu when right-clicking on the ViewBox background. *name* (str) Used to register this ViewBox so that it appears in the "Link axis" dropdown inside other ViewBox context menus. This allows the user to manually link the axes of any other view to this one. ============== =============================================================
625941bdfbf16365ca6f60c0
def hashed_embedding_lookup_sparse(params, sparse_values, dimension, combiner="mean", default_value=None, name=None): <NEW_LINE> <INDENT> if not isinstance(params, list): <NEW_LINE> <INDENT> params = [params] <NEW_LINE> <DEDENT> if not isinstance(sparse_values, ops.SparseTensor): <NEW_LINE> <INDENT> raise TypeError("sparse_values must be SparseTensor") <NEW_LINE> <DEDENT> with ops.op_scope(params + [sparse_values], name, "hashed_sparse_embedding_lookup") as scope: <NEW_LINE> <INDENT> if default_value is None: <NEW_LINE> <INDENT> if sparse_values.dtype == dtypes.string: <NEW_LINE> <INDENT> default_value = "6ZxWzWOHxZ" <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> default_value = 1288896567 <NEW_LINE> <DEDENT> <DEDENT> sparse_values, _ = sparse_ops.sparse_fill_empty_rows( sparse_values, default_value) <NEW_LINE> segment_ids = sparse_values.indices[:, 0] <NEW_LINE> if segment_ids.dtype != dtypes.int32: <NEW_LINE> <INDENT> segment_ids = math_ops.cast(segment_ids, dtypes.int32) <NEW_LINE> <DEDENT> values = sparse_values.values <NEW_LINE> values, idx = array_ops.unique(values) <NEW_LINE> embeddings = hashed_embedding_lookup(params, values, dimension) <NEW_LINE> if combiner == "sum": <NEW_LINE> <INDENT> embeddings = math_ops.sparse_segment_sum(embeddings, idx, segment_ids, name=scope) <NEW_LINE> <DEDENT> elif combiner == "mean": <NEW_LINE> <INDENT> embeddings = math_ops.sparse_segment_mean(embeddings, idx, segment_ids, name=scope) <NEW_LINE> <DEDENT> elif combiner == "sqrtn": <NEW_LINE> <INDENT> embeddings = math_ops.sparse_segment_sqrt_n(embeddings, idx, segment_ids, name=scope) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError("Combiner must be one of 'mean', 'sqrtn' or 'sum'.") <NEW_LINE> <DEDENT> return embeddings
Looks up embeddings of a sparse feature using parameter hashing. See `tf.contrib.layers.hashed_embedding_lookup` for embedding with hashing. Args: params: A `Tensor` or `list` of `Tensors`. Each tensor must be of rank 1 with fully-defined shape. sparse_values: A 2-D `SparseTensor` containing the values to be embedded. Some rows may be empty. dimension: Embedding dimension combiner: A string specifying how to combine embedding results for each entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean" the default. default_value: The value to use for an entry with no features. name: An optional name for this op. Returns: Dense tensor with shape [N, dimension] with N the number of rows in sparse_values. Raises: TypeError: If sparse_values is not a SparseTensor. ValueError: If combiner is not one of {"mean", "sqrtn", "sum"}.
625941bdbf627c535bc130d1
def detailed(self, todo: Todo) -> str: <NEW_LINE> <INDENT> extra_lines = [] <NEW_LINE> if todo.description: <NEW_LINE> <INDENT> extra_lines.append(self._format_multiline("Description", todo.description)) <NEW_LINE> <DEDENT> if todo.location: <NEW_LINE> <INDENT> extra_lines.append(self._format_multiline("Location", todo.location)) <NEW_LINE> <DEDENT> return f"{self.compact(todo)}{''.join(extra_lines)}"
Returns a detailed representation of a task. :param todo: The todo component.
625941bdcb5e8a47e48b79b0
def __init__(self, path, size_guidance=None, tensor_size_guidance=None, purge_orphaned_data=True): <NEW_LINE> <INDENT> size_guidance = dict(size_guidance or DEFAULT_SIZE_GUIDANCE) <NEW_LINE> sizes = {} <NEW_LINE> for key in DEFAULT_SIZE_GUIDANCE: <NEW_LINE> <INDENT> if key in size_guidance: <NEW_LINE> <INDENT> sizes[key] = size_guidance[key] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> sizes[key] = DEFAULT_SIZE_GUIDANCE[key] <NEW_LINE> <DEDENT> <DEDENT> self._size_guidance = size_guidance <NEW_LINE> self._tensor_size_guidance = dict(tensor_size_guidance or {}) <NEW_LINE> self._first_event_timestamp = None <NEW_LINE> self._graph = None <NEW_LINE> self._graph_from_metagraph = False <NEW_LINE> self._meta_graph = None <NEW_LINE> self._tagged_metadata = {} <NEW_LINE> self.summary_metadata = {} <NEW_LINE> self.tensors_by_tag = {} <NEW_LINE> self._tensors_by_tag_lock = threading.Lock() <NEW_LINE> self._plugin_to_tag_to_content = collections.defaultdict(dict) <NEW_LINE> self._generator_mutex = threading.Lock() <NEW_LINE> self.path = path <NEW_LINE> self._generator = _GeneratorFromPath(path) <NEW_LINE> self.purge_orphaned_data = purge_orphaned_data <NEW_LINE> self.most_recent_step = -1 <NEW_LINE> self.most_recent_wall_time = -1 <NEW_LINE> self.file_version = None <NEW_LINE> self.accumulated_attrs = () <NEW_LINE> self._tensor_summaries = {}
Construct the `EventAccumulator`. Args: path: A file path to a directory containing tf events files, or a single tf events file. The accumulator will load events from this path. size_guidance: Information on how much data the EventAccumulator should store in memory. The DEFAULT_SIZE_GUIDANCE tries not to store too much so as to avoid OOMing the client. The size_guidance should be a map from a `tagType` string to an integer representing the number of items to keep per tag for items of that `tagType`. If the size is 0, all events are stored. tensor_size_guidance: Like `size_guidance`, but allowing finer granularity for tensor summaries. Should be a map from the `plugin_name` field on the `PluginData` proto to an integer representing the number of items to keep per tag. Plugins for which there is no entry in this map will default to the value of `size_guidance[event_accumulator.TENSORS]`. Defaults to `{}`. purge_orphaned_data: Whether to discard any events that were "orphaned" by a TensorFlow restart.
625941bd4f88993c3716bf6e
def do_Mstep_many_sequences(self, stats): <NEW_LINE> <INDENT> if 's' in self.learn_params: <NEW_LINE> <INDENT> num = stats['pi'] + self.startprob_prior_conc - 1 <NEW_LINE> self.startprob = num / num.sum() <NEW_LINE> <DEDENT> for k in range(self.nstates): <NEW_LINE> <INDENT> if 't' in self.learn_params: <NEW_LINE> <INDENT> num = stats['A'][k,:] + self.transmat_prior_conc[k] - 1 <NEW_LINE> self.transmat[k,:] = num / num.sum() <NEW_LINE> <DEDENT> if 'm' in self.learn_params: <NEW_LINE> <INDENT> self.means[k] = (stats['mu'][k] + self.prior_mean)/(stats['gammad'][k] + 1) <NEW_LINE> <DEDENT> if 'c' in self.learn_params: <NEW_LINE> <INDENT> mu_mu = np.outer(self.means_old[k],self.means_old[k]) <NEW_LINE> prior = mu_mu + self.covar_prior <NEW_LINE> self.covs[k] = (stats['cov'][k] + prior)/(stats['gammad'][k] + (2*self.D + 4))
M-step for N i.i.d sequences Parameters ---------- stats : dict, contains updates for all parameters fields: - pi startprob - A transmat - mu means - cov covariances
625941bd10dbd63aa1bd2aa9
def __init__(self, data, metadata, data_retrieval_callback=None): <NEW_LINE> <INDENT> from cis.exceptions import InvalidDataTypeError <NEW_LINE> from iris.cube import CubeMetadata <NEW_LINE> import numpy as np <NEW_LINE> self._data_flattened = None <NEW_LINE> self.attributes = {} <NEW_LINE> self.metadata = Metadata.from_CubeMetadata(metadata) if isinstance(metadata, CubeMetadata) else metadata <NEW_LINE> if isinstance(data, np.ndarray): <NEW_LINE> <INDENT> self._data = data <NEW_LINE> self._data_manager = None <NEW_LINE> self._post_process() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self._data = None <NEW_LINE> self._data_manager = listify(data) <NEW_LINE> if data_retrieval_callback is not None: <NEW_LINE> <INDENT> self.retrieve_raw_data = data_retrieval_callback <NEW_LINE> <DEDENT> elif type(self._data_manager[0]).__name__ in static_mappings and all([type(d).__name__ == type(self._data_manager[0]).__name__ for d in self._data_manager]): <NEW_LINE> <INDENT> self.retrieve_raw_data = static_mappings[type(self._data_manager[0]).__name__] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise InvalidDataTypeError
:param data: The data handler (e.g. SDS instance) for the specific data type, or a numpy array of data This can be a list of data handlers, or a single data handler :param metadata: Any associated metadata :param data_retrieval_callback: An, optional, method for retrieving data when needed
625941bdd164cc6175782c50
def projection_shortcut(inputs, output_dim): <NEW_LINE> <INDENT> inputs = mtf.layers.conv2d_with_blocks( inputs, output_dim, filter_size=[1, 1], strides=strides, padding="SAME", h_blocks_dim=None, w_blocks_dim=col_blocks_dim, name="shortcut0") <NEW_LINE> return batch_norm_relu( inputs, is_training, relu=False)
Project identity branch.
625941bd9c8ee82313fbb677
def build_engine(): <NEW_LINE> <INDENT> with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, trt.OnnxParser(network, TRT_LOGGER) as parser: <NEW_LINE> <INDENT> builder.max_workspace_size = 1 << 28 <NEW_LINE> builder.max_batch_size = 1 <NEW_LINE> if not os.path.exists(onnx_file_path): <NEW_LINE> <INDENT> print('ONNX file {} not found, please run yolov3_to_onnx.py first to generate it.'.format(onnx_file_path)) <NEW_LINE> exit(0) <NEW_LINE> <DEDENT> print('Loading ONNX file from path {}...'.format(onnx_file_path)) <NEW_LINE> with open(onnx_file_path, 'rb') as model: <NEW_LINE> <INDENT> print('Beginning ONNX file parsing') <NEW_LINE> parser.parse(model.read()) <NEW_LINE> <DEDENT> print('Completed parsing of ONNX file') <NEW_LINE> print('Building an engine from file {}; this may take a while...'.format(onnx_file_path)) <NEW_LINE> engine = builder.build_cuda_engine(network) <NEW_LINE> print("Completed creating Engine") <NEW_LINE> with open(engine_file_path, "wb") as f: <NEW_LINE> <INDENT> f.write(engine.serialize()) <NEW_LINE> <DEDENT> return engine
Takes an ONNX file and creates a TensorRT engine to run inference with
625941bdb57a9660fec33783
def parse_cfg(cfg_path): <NEW_LINE> <INDENT> blocks = [] <NEW_LINE> with open(cfg_path, "r") as f: <NEW_LINE> <INDENT> lines = (line.strip() for line in f if not line.startswith("#")) <NEW_LINE> lines = (re.sub(r"\s+", "", line) for line in lines if len(line) > 0) <NEW_LINE> for line in lines: <NEW_LINE> <INDENT> if line.startswith("["): <NEW_LINE> <INDENT> blocks.append({}) <NEW_LINE> blocks[-1]["type"] = line[1:-1] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> key, value = line.split("=") <NEW_LINE> blocks[-1][key] = value <NEW_LINE> <DEDENT> <DEDENT> return blocks
Parses the yolov3 layer configuration file Arguments: cfg_path {str} -- cfg file path Returns a list of blocks. Each blocks describes a block in the neural network to be built. Block is represented as a dictionary in the list
625941bdcdde0d52a9e52f32
def wfc3(self): <NEW_LINE> <INDENT> with open(os.path.join(os.path.dirname(__file__), "reference", "wfc3_input.json")) as data_file: <NEW_LINE> <INDENT> pandeia_data = json.load(data_file) <NEW_LINE> pandeia_data["configuration"]["instrument"]["disperser"] = self.config <NEW_LINE> <DEDENT> return pandeia_data
Handles WFC3 template
625941bd21bff66bcd684857
def fisher_z_test(x=None, y=None, p=0.05, two_tailed=True): <NEW_LINE> <INDENT> def get_n(vec): <NEW_LINE> <INDENT> vec = np.ma.masked_invalid(vec) <NEW_LINE> return np.sum(~vec.mask) <NEW_LINE> <DEDENT> rval = r(y, x) <NEW_LINE> rval_null = r(np.full_like(y, np.mean(y).item()), x) <NEW_LINE> z = np.arctanh(rval) <NEW_LINE> znull = np.arctanh(rval_null) <NEW_LINE> n = min(get_n(x), get_n(y)) <NEW_LINE> zobs = (z - znull) / np.sqrt(2*(1.0/(n-3))) <NEW_LINE> if two_tailed: <NEW_LINE> <INDENT> p /= 2 <NEW_LINE> <DEDENT> zcrit = st.norm.ppf(1.0 - p) <NEW_LINE> return np.abs(zobs) > zcrit
Test if a regression is significant using Fisher z-transformation of the Pearson correlation Parameters ---------- x : array-like The x-values (independent data) y : array-like The y-values (dependent data) p : float The p-value, i.e. the critical probability that the regression is not significant. two_tailed : bool Whether to treat the p-value as two-tailed, i.e. the regression may be positive or negative (either side of the null hypothesis) or can only be above or below. Returns ------- bool `True` if the regression is significant, `False` otherwise.
625941bd379a373c97cfaa46
def _validateParameters(self): <NEW_LINE> <INDENT> params = self._getClassAttributesByBase(ScriptParameter) <NEW_LINE> fails = [] <NEW_LINE> for attr_name, attr_cls in params.items(): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> attr_cls.validate(getattr(self, attr_name)) <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> fails.append("ERROR: Script parameter %s %s" % (attr_name, e.message)) <NEW_LINE> <DEDENT> <DEDENT> return fails
Validate script parameters :return: List of failure reasons, if any :rtype: list[str]
625941bd7d847024c06be1bc
def JSONList(*args, **kwargs): <NEW_LINE> <INDENT> type_ = JSON <NEW_LINE> try: <NEW_LINE> <INDENT> if kwargs.pop('unique_sorted'): <NEW_LINE> <INDENT> type_ = JSONUniqueListType <NEW_LINE> <DEDENT> <DEDENT> except KeyError: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> return MutationList.as_mutable(type_(*args, **kwargs))
Stores a list as JSON on database, with mutability support. If kwargs has a param `unique_sorted` (which evaluated to True), list values are made unique and sorted.
625941bd1b99ca400220a9b3
def psnr(a1,a2): <NEW_LINE> <INDENT> mse = np.mean( (a1 - a2) ** 2 ) <NEW_LINE> if mse == 0: <NEW_LINE> <INDENT> return 100 <NEW_LINE> <DEDENT> if type(a1[0])==np.int16: <NEW_LINE> <INDENT> max_intensity = float(2**15) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> max_intensity = 1.0 <NEW_LINE> <DEDENT> return 20 * math.log10(max_intensity / math.sqrt(mse))
si a1 n'est pas un int16, alors a1 doit être compris entre -1 et 1
625941bde8904600ed9f1e2c
def ohodnot(znak): <NEW_LINE> <INDENT> for i in range(pocet_riadkov): <NEW_LINE> <INDENT> for a in range(pocet_stlpcov): <NEW_LINE> <INDENT> hodnoty[i][a] = [0, 0] <NEW_LINE> if values[i][a] == " ": <NEW_LINE> <INDENT> list.clear(dlzkyL) <NEW_LINE> list.clear(dlzkyP) <NEW_LINE> if a == 0: <NEW_LINE> <INDENT> mxd = max_hlbka <NEW_LINE> mxh = 0 <NEW_LINE> hodnoty[i][a][1] = ohodnotP(i, a, znak, -1, -1, 0, 0, mxd) <NEW_LINE> ohodnotL(i, a, znak, -1, -1, 0, 0, mxh) <NEW_LINE> <DEDENT> elif a == 5: <NEW_LINE> <INDENT> mxh = max_hlbka <NEW_LINE> mxd = 0 <NEW_LINE> hodnoty[i][a][1] = ohodnotL(i, a, znak, -1, -1, 0, 0, mxh) <NEW_LINE> ohodnotP(i, a, znak, -1, -1, 0, 0, mxd) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> for c in range(1, max_hlbka + 1): <NEW_LINE> <INDENT> mxd = c <NEW_LINE> mxh = max_hlbka - c <NEW_LINE> hodnoty[i][a][1] += ((ohodnotL(i, a, znak, -1, -1, 0, 0, mxh)) * (ohodnotP(i, a, znak, -1, -1, 0, 0, mxd))) <NEW_LINE> <DEDENT> <DEDENT> if dlzkyL and dlzkyP: <NEW_LINE> <INDENT> hodnoty[i][a][0] = min(dlzkyL) + min(dlzkyP) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> hodnoty[i][a][0] = 500 <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> return
funkcia, ktorá pre každé políčko vyhodnotí ako dobrá/zlá táto pozícia je(pre hráča W) - vyhodnocuje pomocou dvoch pomocných funkcií - ohodnotP a ohodnotL
625941bdf548e778e58cd47f
def __getQgsSymbol(self, symbol): <NEW_LINE> <INDENT> new_symbol_layer = False <NEW_LINE> for msstyle in self.msclass["styles"]: <NEW_LINE> <INDENT> symbol_layer = None <NEW_LINE> if self.geom_type == QGis.Point: <NEW_LINE> <INDENT> symbol_layer = self.__getQgsMarkerSymbolLayer(msstyle) <NEW_LINE> <DEDENT> elif self.geom_type == QGis.Line: <NEW_LINE> <INDENT> symbol_layer = self.__getQgsLineSymbolLayer(msstyle) <NEW_LINE> <DEDENT> elif self.geom_type == QGis.Polygon: <NEW_LINE> <INDENT> symbol_layer = self.__getQgsPolygonSymbolLayer(msstyle) <NEW_LINE> <DEDENT> if symbol_layer: <NEW_LINE> <INDENT> new_symbol_layer = True <NEW_LINE> symbol.appendSymbolLayer(symbol_layer) <NEW_LINE> <DEDENT> <DEDENT> if new_symbol_layer: <NEW_LINE> <INDENT> return symbol <NEW_LINE> <DEDENT> return False
docstring for __getMarkerSymbol
625941bd090684286d50ebe5
def PrintTimeInformation(elapsedtimes,start,done,matchcount,bar): <NEW_LINE> <INDENT> os.system('cls' if os.name == 'nt' else 'clear') <NEW_LINE> elapsedtimes.append(time.time() - start) <NEW_LINE> avgtime = mean(elapsedtimes) <NEW_LINE> timetogo = str(datetime.timedelta(seconds=(matchcount-done)*int(avgtime))) <NEW_LINE> pace = str(int(60/avgtime*10)) + '/10 min' <NEW_LINE> text = colored('\nTime used for the most recent: {}','red') + colored('\n\Current pace: {}', 'green') + colored('\nWith this pace you have {} left\n','blue') <NEW_LINE> bar.next() <NEW_LINE> print(text.format(elapsedtimes[-1],pace,timetogo)) <NEW_LINE> return elapsedtimes
Print information about the manual annotations etc
625941bdbd1bec0571d9053a
def enrollment_record(self, datestamp, enrolled_at_end, change_since_last_day): <NEW_LINE> <INDENT> return (datestamp, self.course_id, self.user_id, enrolled_at_end, change_since_last_day)
A complete enrollment record.
625941bd63d6d428bbe443f2
def threeSum(self, nums): <NEW_LINE> <INDENT> nums.sort() <NEW_LINE> res = [] <NEW_LINE> for i in range(len(nums)-2): <NEW_LINE> <INDENT> if i >0 and nums[i]==nums[i-1]: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> j = i+1 <NEW_LINE> k = len(nums)-1 <NEW_LINE> target = 0 - nums[i] <NEW_LINE> while j<k: <NEW_LINE> <INDENT> if (nums[j] + nums[k]) > target: <NEW_LINE> <INDENT> k-=1 <NEW_LINE> <DEDENT> elif (nums[j] + nums[k]) < target: <NEW_LINE> <INDENT> j+=1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> tmp = [nums[i],nums[j],nums[k]] <NEW_LINE> res.append(tmp) <NEW_LINE> j+=1 <NEW_LINE> k-=1 <NEW_LINE> while j<k and nums[j] == nums[j-1]: <NEW_LINE> <INDENT> j+=1 <NEW_LINE> <DEDENT> while j<k and nums[k] == nums[k+1]: <NEW_LINE> <INDENT> k-=1 <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> return res
:type nums: List[int] :rtype: List[List[int]]
625941bde64d504609d74743
def __str__(self): <NEW_LINE> <INDENT> tank_string = str('--- Tank properties (selected tank) ---'+ '\n Minimum elevtaion: ' + str(self.min_elevation) + '\n Maximum elevation: ' + str(self.max_elevation) + '\n Content of tank: ' + self.content + '\n Defined density: ' + str(self.density) + '\n Defined acceleration: ' + 'st = ' + str(self.acc_static) + ' , azl = ' + str(self.acc_dyn_loaded) + ' , azb = ' + str(self.acc_dyn_ballast) + '\n Added pressure at tank top: ' + str(self.added_pressure) ) <NEW_LINE> return tank_string
Prints a string for the tank. :return:
625941bd1f5feb6acb0c4a57
def _density_linear(self, j, c_coef, x_array, y_array): <NEW_LINE> <INDENT> k_lim_ = int(pow(2, -j) * self.len_signal) <NEW_LINE> z_array = np.empty(shape=(len(x_array), len(y_array))) <NEW_LINE> cpt = 0 <NEW_LINE> perc = 0 <NEW_LINE> for i in range(0, len(x_array)): <NEW_LINE> <INDENT> for ii in range(0, len(y_array)): <NEW_LINE> <INDENT> sum_ = 0 <NEW_LINE> for k in range(-k_lim_, k_lim_): <NEW_LINE> <INDENT> for k2 in range(-k_lim_, k_lim_): <NEW_LINE> <INDENT> sum_ = sum_ + c_coef[k + k_lim_, k2 + k_lim_] * self._derived_father_wavelet(x_array[i], j, k) * self._derived_father_wavelet(y_array[ii], j, k2) <NEW_LINE> <DEDENT> <DEDENT> z_array[i, ii] = sum_ <NEW_LINE> if 100 * cpt / (len(x_array) * len(y_array)) > perc + 1: <NEW_LINE> <INDENT> perc = int(100 * cpt / (len(x_array) * len(y_array))) <NEW_LINE> print("{}%".format(perc)) <NEW_LINE> <DEDENT> cpt = cpt + 1 <NEW_LINE> <DEDENT> <DEDENT> z_array[z_array < 0] = 0 <NEW_LINE> return z_array
Compute the signal's density (Linear estimator) :param j: scaling parameter [int] :param c_coef: scaling coefficients C [1D array] :param x_array: density index [1D array] :return: density values [1D array]
625941bd498bea3a759b99b3
@register_topology_factory('FOURTREE') <NEW_LINE> def topology_four_child_tree(network_cache=0.05, n_contents=100000, seed=None): <NEW_LINE> <INDENT> h = 5 <NEW_LINE> topology = fnss.k_ary_tree_topology(4, h) <NEW_LINE> topology.add_node(1365, depth=-1) <NEW_LINE> topology.add_path([0, 1365]) <NEW_LINE> receivers = [v for v in topology.nodes_iter() if topology.node[v]['depth'] == h] <NEW_LINE> sources = [v for v in topology.nodes_iter() if topology.node[v]['depth'] == -1] <NEW_LINE> caches = [v for v in topology.nodes_iter() if topology.node[v]['depth'] >= 0 and topology.node[v]['depth'] < h] <NEW_LINE> content_placement = uniform_content_placement(topology, range(1, n_contents+1), sources, seed=seed) <NEW_LINE> for v in sources: <NEW_LINE> <INDENT> fnss.add_stack(topology, v, 'source', {'contents': content_placement[v]}) <NEW_LINE> <DEDENT> for v in receivers: <NEW_LINE> <INDENT> fnss.add_stack(topology, v, 'receiver', {}) <NEW_LINE> <DEDENT> cache_placement = uniform_cache_placement(topology, network_cache*n_contents, caches) <NEW_LINE> for node, size in cache_placement.iteritems(): <NEW_LINE> <INDENT> fnss.add_stack(topology, node, 'cache', {'size': size}) <NEW_LINE> <DEDENT> fnss.set_weights_constant(topology, 1.0) <NEW_LINE> fnss.set_delays_constant(topology, INTERNAL_LINK_DELAY, 'ms') <NEW_LINE> for u, v in topology.edges_iter(): <NEW_LINE> <INDENT> if u in sources or v in sources: <NEW_LINE> <INDENT> topology.edge[u][v]['type'] = 'external' <NEW_LINE> fnss.set_delays_constant(topology, EXTERNAL_LINK_DELAY, 'ms', [(u, v)]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> topology.edge[u][v]['type'] = 'internal' <NEW_LINE> <DEDENT> <DEDENT> return topology
Returns a tree topology Parameters ---------- network_cache : float Size of network cache (sum of all caches) normalized by size of content population n_contents : int Size of content population seed : int, optional The seed used for random number generation Returns ------- topology : fnss.Topology The topology object
625941bdd53ae8145f87a177
def _check_node(self, obj): <NEW_LINE> <INDENT> if not isinstance(obj, self.definition['node_class']): <NEW_LINE> <INDENT> raise ValueError("Expected node of class " + self.definition['node_class'].__name__) <NEW_LINE> <DEDENT> if not hasattr(obj, '_id'): <NEW_LINE> <INDENT> raise ValueError("Can't perform operation on unsaved node " + repr(obj))
check for valid node i.e correct class and is saved
625941bd44b2445a33931fa2
def get_source_units(source='Voltage'): <NEW_LINE> <INDENT> if source == 'Voltage': <NEW_LINE> <INDENT> source_unit = 'V' <NEW_LINE> measure_unit = 'A' <NEW_LINE> <DEDENT> elif source == 'Current': <NEW_LINE> <INDENT> source_unit = 'A' <NEW_LINE> measure_unit = 'V' <NEW_LINE> <DEDENT> return source_unit, measure_unit
units for source/measure elements
625941bdab23a570cc250083
def update_prediction(locus: str, preds: Dict[str, str], target: str, target_list: List[int], lists: List[List[int]], mappings: List[Dict[str, str]]) -> None: <NEW_LINE> <INDENT> assert len(lists) == len(mappings) <NEW_LINE> for idx, target_element in enumerate(target_list): <NEW_LINE> <INDENT> key = "nrpspksdomains_{}_{}.{}".format(locus, target, idx + 1) <NEW_LINE> for sublist, mapping in zip(lists, mappings): <NEW_LINE> <INDENT> for position in sublist: <NEW_LINE> <INDENT> if not target_element < position: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> if idx + 1 <= len(target_list) - 1 and not position < target_list[idx + 1]: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> current = preds[key] <NEW_LINE> preds[key] = mapping.get(current, current)
Updates predictions of a gene's domains. Modifies in place. Arguments: locus: the name of the gene preds: a dict mapping domain label (e.g. nrpspksdomains_SCO123_AT1) to a prediction for that domain target: "PKS_KS" or "PKS_AT" for checking AT vs trans-AT target_list: a list of positions in the gene's domains where target is found lists: a list of lists of positions for KR, DH and ER domains mappings: a list of dictionaries mapping a prediction to an altered prediction Returns: None
625941bd2eb69b55b151c7af
def draw(self): <NEW_LINE> <INDENT> color = self.color <NEW_LINE> pos = self.pos <NEW_LINE> radius = self.radius <NEW_LINE> pygame.draw.circle(SCREEN, color, pos, radius)
Draws a ball :return: None
625941bd7c178a314d6ef35d
def add(self, name, value): <NEW_LINE> <INDENT> if not (value == "" or value is None): <NEW_LINE> <INDENT> self.context[name] = value
Add a name value pair to the Payload object
625941bd91af0d3eaac9b919
def get_object_or_404( queryset: 'QuerySet[_ModelT]', *filter_args: Any, **filter_kwargs: Any) -> _ModelT: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> return _get_object_or_404(queryset, *filter_args, **filter_kwargs) <NEW_LINE> <DEDENT> except (TypeError, ValueError, ValidationError): <NEW_LINE> <INDENT> raise Http404 from None
Same as Django's standard shortcut, but make sure to also raise 404 if the filter_kwargs don't match the required types. This function was copied from rest_framework.generics because of issue #36.
625941bdfff4ab517eb2f33d
def test_roi_align_fp16(self): <NEW_LINE> <INDENT> N, C, H, W = 1, 3, 16, 20 <NEW_LINE> features = torch.randn(N, C, H, W) <NEW_LINE> rois = rand_rois(N, H, W, 250) <NEW_LINE> utils.compare_tracing_methods( SimpleRoiAlignModel("NCHW"), features, rois, fusible_ops={"_caffe2::RoIAlign"}, fp16=True, atol=1e-1, rtol=1e-1, )
Test of the _caffe2::RoiAlign Node on Glow.
625941bd07f4c71912b11388
def parse_paths(args): <NEW_LINE> <INDENT> world_list = [] <NEW_LINE> region_list = [] <NEW_LINE> warning = False <NEW_LINE> for arg in args: <NEW_LINE> <INDENT> if arg[-4:] == ".mca": <NEW_LINE> <INDENT> region_list.append(arg) <NEW_LINE> <DEDENT> elif arg[-4:] == ".mcr": <NEW_LINE> <INDENT> if not warning: <NEW_LINE> <INDENT> print("Warning: Region-Fixer only works with anvil format region files. Ignoring *.mcr files") <NEW_LINE> warning = True <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> world_list.append(arg) <NEW_LINE> <DEDENT> <DEDENT> region_list_tmp = [] <NEW_LINE> for f in region_list: <NEW_LINE> <INDENT> if exists(f): <NEW_LINE> <INDENT> if isfile(f): <NEW_LINE> <INDENT> region_list_tmp.append(f) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> print("Warning: \"{0}\" is not a file. Skipping it and scanning the rest.".format(f)) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> print("Warning: The region file {0} doesn't exists. Skipping it and scanning the rest.".format(f)) <NEW_LINE> <DEDENT> <DEDENT> region_list = region_list_tmp <NEW_LINE> world_list = parse_world_list(world_list) <NEW_LINE> return world_list, world.RegionSet(region_list = region_list)
Parse the list of args passed to region-fixer.py and returns a RegionSet object with the list of regions and a list of World objects.
625941bda05bb46b383ec727
def summarize(data: pd.DataFrame): <NEW_LINE> <INDENT> type_counts = _get_dtypes(data).value_counts() <NEW_LINE> click.echo( f"{len(data):,} observations of {len(data.columns):,} variables\n" f"\t{type_counts.get('binary', 0):,} Binary Variables\n" f"\t{type_counts.get('categorical', 0):,} Categorical Variables\n" f"\t{type_counts.get('continuous', 0):,} Continuous Variables\n" f"\t{type_counts.get('unknown', 0):,} Unknown-Type Variables\n" )
Print the number of each type of variable and the number of observations Parameters ---------- data: pd.DataFrame The DataFrame to be described Returns ------- result: None Examples -------- >>> import clarite >>> clarite.describe.get_types(df).head() RIDAGEYR continuous female binary black binary mexican binary other_hispanic binary dtype: object
625941bd099cdd3c635f0b5f
def _openTelnet(self, host, port): <NEW_LINE> <INDENT> self.logger.info("Opening Telnet connection") <NEW_LINE> tn = telnetlib.Telnet() <NEW_LINE> tn.open(host, port) <NEW_LINE> tn.read_until(self.cursor+self.sentinel) <NEW_LINE> self._keepConnectionAlive(tn.sock) <NEW_LINE> return tn
Open Telnet connection with the host Parameters ---------- host : str ip address of the host to connect to port : int port number to connect to Returns ------- tn : telnet object
625941bd4a966d76dd550f10
def main(): <NEW_LINE> <INDENT> files = list(Path(".").rglob("*.py")) <NEW_LINE> style = pycodestyle.StyleGuide(quiet=False, config_file='setup.cfg') <NEW_LINE> result = style.check_files(files) <NEW_LINE> print("Total errors:", result.total_errors) <NEW_LINE> if result.total_errors > 0: <NEW_LINE> <INDENT> exit(1)
Entry point to Python code style checker tool.
625941bd379a373c97cfaa47
def _prepare_db_url(self): <NEW_LINE> <INDENT> dbhost = self.dbhost <NEW_LINE> dbport = self.dbport <NEW_LINE> dbuser = self.dbuser <NEW_LINE> dbpass = self.dbpass <NEW_LINE> dbname = self.dbname <NEW_LINE> driver = self.driver <NEW_LINE> connector = self.connector <NEW_LINE> if driver not in self.supported_drivers: <NEW_LINE> <INDENT> raise ValueError('Database driver {0} is not supported yet.'.format(driver)) <NEW_LINE> <DEDENT> if driver != 'sqlite' and (not dbuser or not dbhost): <NEW_LINE> <INDENT> raise ValueError('driver {0} require dbuser and dbhost details, {1},{2}'.format(driver,dbuser,dbhost)) <NEW_LINE> <DEDENT> dburl='{0}'.format(driver) <NEW_LINE> if connector: <NEW_LINE> <INDENT> dburl='{0}+{1}'.format(dburl, connector) <NEW_LINE> <DEDENT> dburl='{0}://'.format(dburl) <NEW_LINE> if dbuser and dbpass and dbhost: <NEW_LINE> <INDENT> dburl='{0}{1}:{2}@{3}'.format(dburl, dbuser, dbpass, dbhost) <NEW_LINE> <DEDENT> if dbport: <NEW_LINE> <INDENT> dburl='{0}:{1}'.format(dburl, dbport) <NEW_LINE> <DEDENT> dburl='{0}/{1}'.format(dburl, dbname) <NEW_LINE> return dburl
An internal method for preparing url for database connection
625941bd8da39b475bd64e74
def __repr__(self): <NEW_LINE> <INDENT> if self._raw: <NEW_LINE> <INDENT> return "OrderInfo.from_array({self._raw})".format(self=self) <NEW_LINE> <DEDENT> return "OrderInfo(name={self.name!r}, phone_number={self.phone_number!r}, email={self.email!r}, shipping_address={self.shipping_address!r})".format(self=self)
Implements `repr(orderinfo_instance)`
625941bd187af65679ca5021
def test_get_vendor_tags(self): <NEW_LINE> <INDENT> pass
Test case for get_vendor_tags Get the tags for a vendor. # noqa: E501
625941bd07d97122c4178789
def p_parameter_list(self, p): <NEW_LINE> <INDENT> if in_function_parsing_phase(): <NEW_LINE> <INDENT> p[0] = "" <NEW_LINE> return <NEW_LINE> <DEDENT> if len(p) == 4: <NEW_LINE> <INDENT> p[0] = p[1] + [(p[3].text, p[3].type, p[3].pre_type)] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> p[0] = [(p[1].text, p[1].type, p[1].pre_type)]
parameter_list : parameter_list COMMA expression parameter_list : expression
625941bd3617ad0b5ed67dfc
def item_transform_test(self, item: dict): <NEW_LINE> <INDENT> image = item["image"] <NEW_LINE> image_gt = image.copy() <NEW_LINE> bboxes = item['bboxes'] <NEW_LINE> cids = item["cids"] <NEW_LINE> h, w = image.shape[:2] <NEW_LINE> transformed = self.transform_fn(image=image) <NEW_LINE> image = np.array(transformed["image"], dtype=np.float32) <NEW_LINE> h, w = image.shape[:2] <NEW_LINE> shape_pp = np.array(image.shape[:2], dtype=np.float32) <NEW_LINE> right = self.w - w <NEW_LINE> bottom = self.h - h <NEW_LINE> image = cv2.copyMakeBorder(image, 0, bottom, 0, right, cv2.BORDER_CONSTANT, value=0) <NEW_LINE> image = image.transpose((2, 0, 1)) <NEW_LINE> item.update({ "image": image, "bboxes": bboxes, "cids": cids, "image_gt": image_gt, "shape_pp": shape_pp, }) <NEW_LINE> return item
:param item: dictionary - image: h x w x 3 - bboxes: n x 4; [[x, y, width, height], ...] - cids: n, :return: dictionary - image: 3 x 512 x 512 ground truth: - bboxes: n x 4; [[x, y, width, height], ...] - cids: n,
625941bdec188e330fd5a6a8
def main(): <NEW_LINE> <INDENT> parser = argparse.ArgumentParser( description="Generate non-flat interpolation qualifier tests with fp64 types") <NEW_LINE> parser.add_argument( '--names-only', dest='names_only', action='store_true', default=False, help="Don't output files, just generate a list of filenames to stdout") <NEW_LINE> args = parser.parse_args() <NEW_LINE> for test in itertools.chain(RegularTestTuple.all_tests(args.names_only), ColumnsTestTuple.all_tests(args.names_only)): <NEW_LINE> <INDENT> test.generate()
Main function.
625941bd30c21e258bdfa39f
def ingoing(self): <NEW_LINE> <INDENT> self.logger_info("RECEIVED: {0}".format(self.message)) <NEW_LINE> if self.message.get("type") == "LivechatSessionStart": <NEW_LINE> <INDENT> if settings.DEBUG: <NEW_LINE> <INDENT> print("LivechatSessionStart") <NEW_LINE> <DEDENT> <DEDENT> if self.message.get("type") == "LivechatSession": <NEW_LINE> <INDENT> if settings.DEBUG: <NEW_LINE> <INDENT> print("LivechatSession") <NEW_LINE> <DEDENT> <DEDENT> if self.message.get("type") == "LivechatSessionTaken": <NEW_LINE> <INDENT> self.handle_livechat_session_taken() <NEW_LINE> <DEDENT> if self.message.get("type") == "LivechatSessionForwarded": <NEW_LINE> <INDENT> if settings.DEBUG: <NEW_LINE> <INDENT> print("LivechatSessionForwarded") <NEW_LINE> <DEDENT> <DEDENT> if self.message.get("type") == "LivechatSessionQueued": <NEW_LINE> <INDENT> if settings.DEBUG: <NEW_LINE> <INDENT> print("LivechatSessionQueued") <NEW_LINE> <DEDENT> <DEDENT> if self.message.get("type") == "Message": <NEW_LINE> <INDENT> message, created = self.register_message() <NEW_LINE> if not message.delivered: <NEW_LINE> <INDENT> for message in self.message.get("messages", []): <NEW_LINE> <INDENT> agent_name = self.get_agent_name(message) <NEW_LINE> if message.get("closingMessage"): <NEW_LINE> <INDENT> if self.connector.config.get( "force_close_message", ): <NEW_LINE> <INDENT> message["msg"] = self.connector.config[ "force_close_message" ] <NEW_LINE> <DEDENT> if message.get("msg"): <NEW_LINE> <INDENT> if self.connector.config.get( "add_agent_name_at_close_message" ): <NEW_LINE> <INDENT> self.outgo_text_message(message, agent_name=agent_name) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.outgo_text_message(message) <NEW_LINE> <DEDENT> self.close_room() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.message_object.delivered = True <NEW_LINE> self.message_object.save() <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> if message.get("attachments", {}): <NEW_LINE> <INDENT> self.outgo_file_message(message, 
agent_name=agent_name) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.outgo_text_message(message, agent_name=agent_name) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> self.logger_info("MESSAGE ALREADY SEND. IGNORING.")
this method will process the outcoming messages comming from Rocketchat, and deliver to the connector
625941bd92d797404e30408d
def get_md_resource(file_path): <NEW_LINE> <INDENT> namespaces = Namespaces().get_namespaces(keys=('gmd', 'gmi')) <NEW_LINE> with io.open(file_path, mode='r', encoding='utf-8') as f: <NEW_LINE> <INDENT> data = f.read().encode('utf-8') <NEW_LINE> data = etree.fromstring(data) <NEW_LINE> mdelem = data.find('.//' + util.nspath_eval( 'gmd:MD_Metadata', namespaces)) or data.find( './/' + util.nspath_eval('gmi:MI_Metadata', namespaces)) <NEW_LINE> if mdelem is None and data.tag == '{http://www.isotc211.org/2005/gmd}MD_Metadata': <NEW_LINE> <INDENT> mdelem = data <NEW_LINE> <DEDENT> <DEDENT> return mdelem
Read the file and parse into an XML tree. Parameters ---------- file_path : str Path of the file to read. Returns ------- etree.ElementTree XML tree of the resource on disk.
625941bd82261d6c526ab39f
def test_lstm(self): <NEW_LINE> <INDENT> token_vocab = SimpleVocab(u'a b c d'.split()) <NEW_LINE> sequences = [ ['a', 'b', 'c', 'd'], ['c', 'd'], ['a', 'b', 'c', 'd'], ] <NEW_LINE> sequences_alt = [ ['a', 'b', 'c', 'd', 'a', 'b', 'd', 'c'], ['b', 'a', 'd'], ['c', 'd'], ] <NEW_LINE> with clean_session(): <NEW_LINE> <INDENT> token_embeds = tf.constant([ [1, 2, 0], [3, 4, 1], [5, 6, 0], [7, 8, 1], ], dtype=tf.float32) <NEW_LINE> model = LSTMSequenceEmbedder(token_embeds, seq_length=4, hidden_size=7) <NEW_LINE> test_embeds, test_hidden_states = model.compute( [model.embeds, model.hidden_states.values], sequences, token_vocab) <NEW_LINE> assert test_embeds.shape == (3, 7) <NEW_LINE> assert test_hidden_states.shape == (3, 4, 7) <NEW_LINE> assert_array_almost_equal(test_hidden_states[1,1,:], test_hidden_states[1,2,:], decimal=5) <NEW_LINE> assert_array_almost_equal(test_hidden_states[1,1,:], test_hidden_states[1,3,:], decimal=5) <NEW_LINE> big_model = LSTMSequenceEmbedder(token_embeds, seq_length=8, hidden_size=7) <NEW_LINE> big_model.weights = model.weights <NEW_LINE> test_embeds_alt, test_hidden_states_alt = big_model.compute( [big_model.embeds, big_model.hidden_states.values], sequences_alt, token_vocab) <NEW_LINE> assert test_embeds_alt.shape == (3, 7) <NEW_LINE> assert test_hidden_states_alt.shape == (3, 8, 7) <NEW_LINE> <DEDENT> assert_array_almost_equal(test_embeds[1,:], test_embeds_alt[2,:], decimal=5) <NEW_LINE> assert_array_almost_equal(test_hidden_states[1,:2,:], test_hidden_states_alt[2,:2,:], decimal=5)
Test whether the mask works properly for LSTM embedder.
625941bd30dc7b766590186d
def test_create_new_imbuser(self, imb_user): <NEW_LINE> <INDENT> assert imb_user.username == 'jbloggs' <NEW_LINE> assert imb_user.name == 'Joe Bloggs' <NEW_LINE> assert repr(imb_user) == '<IMB User jbloggs>'
GIVEN an IMBUser user model WHEN a new IMBUser model is created THEN check the username and name fields are defined correctly
625941bdcad5886f8bd26ee2
def encoder_lstm(enc_inputs, orig_dim, latent_dim, encode_dim): <NEW_LINE> <INDENT> encode_layers = [enc_inputs] <NEW_LINE> for i, units in enumerate(encode_dim): <NEW_LINE> <INDENT> encode_layers.append(LSTM(units, return_sequences=True, name='enc_{}'.format(i))(encode_layers[-1])) <NEW_LINE> <DEDENT> return encode_layers
Encoder with LSTM layers
625941bdad47b63b2c509e84
def http_download_file(url, fd, show_progress, logger):
    """Download the file at `url` and write its bytes to file descriptor `fd`.

    Uses pycurl with a 5-second connect timeout and follows redirects. When
    `show_progress` is true, download progress is logged via `logger.debug`.
    """
    class Progress(object):
        # Tracks the last logged 10 MiB milestone so progress is logged at
        # most once per 10 MiB chunk (plus once at completion).
        def __init__(self):
            self.last_mb = -1
        def progress(self, down_total, down_current, up_total, up_current):
            # pycurl progress callback; upload figures are unused here.
            if down_total == 0:
                return
            # 10485760 == 10 MiB; despite the name, current_mb counts 10 MiB units.
            current_mb = int(down_current) / 10485760
            if current_mb > self.last_mb or down_current == down_total:
                self.last_mb = current_mb
                logger.debug("%dkB of %dkB" % (down_current/1024, down_total/1024))
    def _data(buf):
        # Write callback: forward each received buffer straight to fd.
        # NOTE(review): write_bytes_to_fd is defined elsewhere; presumably it
        # handles partial writes -- confirm.
        write_bytes_to_fd(fd, buf)
    progress = Progress()
    c = pycurl.Curl()
    c.setopt(c.URL, url)
    c.setopt(c.CONNECTTIMEOUT, 5)
    c.setopt(c.WRITEFUNCTION, _data)
    c.setopt(c.FOLLOWLOCATION, 1)
    if show_progress:
        # NOPROGRESS must be disabled for the progress callback to fire.
        c.setopt(c.NOPROGRESS, 0)
        c.setopt(c.PROGRESSFUNCTION, progress.progress)
    c.perform()
    c.close()
Function to download a file from url to file descriptor fd.
625941bd99cbb53fe6792aeb
def jenkins_queued():
    """Return the number of jobs Jenkins currently has in its queue.

    Queries the local Jenkins instance's /queue/api/python endpoint.
    Returns -1 if Jenkins is unreachable, responds with a non-200 status,
    or returns a payload that cannot be parsed.
    """
    import ast
    try:
        result = urllib2.urlopen('http://127.0.0.1:8090/queue/api/python')
    except Exception:
        # Narrowed from a bare except: still best-effort, but no longer
        # swallows KeyboardInterrupt/SystemExit.
        return -1
    if result.code != 200:
        return -1
    try:
        # The /api/python endpoint returns a Python literal. Parse it with
        # ast.literal_eval instead of eval(): the response is untrusted
        # network data and must never be executed as code.
        payload = ast.literal_eval(result.read())
        return len(payload.get('items'))
    except (ValueError, SyntaxError, AttributeError, TypeError):
        # Malformed payload, non-dict result, or missing 'items' -> unknown.
        return -1
Returns number of jobs Jenkins currently has in its queue. -1 if unknown.
625941bd3539df3088e2e24f
def load_child_keys(self):
    """Return the list of lot ids for the children of this auction."""
    return self.get_auction_house().get_lot_ids(self.get_key())
Return list of lot ids for children of this auction.
625941bd9b70327d1c4e0cd7
def test_list_of_bytes(self):
    """Create a matrix from a list of byte rows and check its layout."""
    matrix = ByteMatrix(2, 3, [b'123', b'456'])
    assert (matrix.height, matrix.width) == (2, 3)
    assert matrix.to_bytes() == b'123456'
Create matrix from list of bytes.
625941bd76e4537e8c351574
def url_factory(prefix):
    """Build a `url` helper bound to *prefix* (e.g. differs for prod vs dev).

    The returned function leaves paths already starting with *prefix*
    unchanged; otherwise it prepends the prefix, adding a '/' separator
    only when the path does not already start with one.
    """
    def url(path):
        if path.startswith(prefix):
            return path
        if path.startswith("/"):
            return prefix + path
        return prefix + "/" + path
    return url
Allow different `url` function for prod and dev
625941bd3317a56b86939b64
def __init__(self, *args, pad=0, **kwargs):
    """Create the composite RGB axes plus its R, G and B sub-axes.

    Parameters
    ----------
    pad : float, default: 0
        Fraction of the axes height to put as padding between sub-axes.
    axes_class : matplotlib.axes.Axes, optional
        Axes class used for the RGB axes and the three channel axes;
        defaults to ``self._defaultAxesClass``.
    *args, **kwargs
        Forwarded to ``axes_class()`` for the RGB axes (and, via
        ``make_rgb_axes``, to the R, G, B axes).
    """
    # Pop axes_class so it isn't forwarded twice in **kwargs below.
    axes_class = kwargs.pop("axes_class", self._defaultAxesClass)
    self.RGB = ax = axes_class(*args, **kwargs)
    ax.get_figure().add_axes(ax)
    self.R, self.G, self.B = make_rgb_axes(
        ax, pad=pad, axes_class=axes_class, **kwargs)
    # White spines and tick marks on all four axes.
    for ax1 in [self.RGB, self.R, self.G, self.B]:
        ax1.axis[:].line.set_color("w")
        ax1.axis[:].major_ticks.set_markeredgecolor("w")
Parameters ---------- pad : float, default: 0 fraction of the axes height to put as padding. axes_class : matplotlib.axes.Axes *args Unpacked into axes_class() init for RGB **kwargs Unpacked into axes_class() init for RGB, R, G, B axes
625941bddd821e528d63b0ae
def c2f(temperature):
    """Convert a temperature from degrees Celsius to Fahrenheit."""
    return 32 + temperature * 9.0 / 5
Convert Celsius to Fahrenheit
625941bde1aae11d1e749bb8
def headerData(self, id, orientation, role):
    """Return header data (Qt model API); original docstring: 見出しを返す ("return the heading").

    Horizontal headers: display text is the last '|'-separated segment of the
    header label; background is the joint color for that column.
    Vertical headers: display the row label; background highlights rows (or
    whole mesh sections) that are under-/over-weighted or exceed the
    influence-count limit.
    """
    # NOTE: `id` shadows the builtin, but renaming would change the visible
    # signature, so it is kept as-is.
    if orientation == Qt.Horizontal:
        if id >= len(self.header_list):
            return None
        if role == Qt.DisplayRole:
            # Labels look like 'a|b|c'; show only the last segment.
            return self.header_list[id].split('|')[-1]
        elif role == Qt.BackgroundRole:
            color_id = self.header_color_list[id]
            return QColor(*self.joint_color_list[color_id])
    if orientation == Qt.Vertical:
        if role == Qt.DisplayRole:
            return self.v_header_list[id]
        elif role == Qt.BackgroundRole:
            if id in self.mesh_rows:
                # A mesh header row: inspect the rows of that mesh section
                # (up to the next mesh header) and surface any problem color.
                end_of_mesh_rows = self.mesh_rows[self.mesh_rows.index(id)+1]
                mesh_rows = set(range(id+1,end_of_mesh_rows))
                if mesh_rows & self.under_weight_rows:
                    return QColor(*self.under_weight_color)
                if mesh_rows & self.over_weight_rows:
                    return QColor(*self.over_weight_color)
                for mrow in mesh_rows:
                    # Chained compare: limit must be positive AND exceeded.
                    if self.over_influence_limit_dict[mrow] > MAXIMUM_INFLUENCE_COUNT > 0:
                        return QColor(*self.over_influence_color)
            if id in self.under_weight_rows:
                return QColor(*self.under_weight_color)
            if id in self.over_weight_rows:
                return QColor(*self.over_weight_color)
            try:
                if self.over_influence_limit_dict[id] > MAXIMUM_INFLUENCE_COUNT > 0:
                    return QColor(*self.over_influence_color)
            except:
                # NOTE(review): bare except presumably guards rows missing
                # from over_influence_limit_dict (KeyError) -- confirm; it
                # also hides any other error here.
                pass
            return QColor(*self.v_header_bg)
        elif role == Qt.TextAlignmentRole:
            return Qt.AlignRight
    return None
見出しを返す
625941bdde87d2750b85fc93
def parse_word(msg_type, word):
    """Decode a little-endian protocol word.

    Returns a tuple whose first item is always the word type (looked up
    from the word's leading marker byte via `word_type_table[msg_type]`);
    the remaining items are the unpacked payload fields for that type.
    """
    marker_byte = word[0:1]
    word_type = word_type_table[msg_type].inv[marker_byte]
    unpacked = word_struct_table[word_type].unpack(word)
    # unpacked[0] repeats the marker; keep only the payload fields.
    return (word_type,) + tuple(unpacked[1:])
Returns a tuple of data contained in word (little endian), first item is always the word type
625941bdbe383301e01b538f
def get(self, build_id, subjob_id, atom_id):
    """Serve console output for an atom, redirecting to the slave if needed.

    Tries to read the console output from the master's results directory.
    If the output is not there yet (ItemNotFoundError) but the subjob has
    been assigned to a slave, redirects the client to the slave's own
    console endpoint with the same pagination query parameters.

    :type build_id: int
    :type subjob_id: int
    :type atom_id: int
    """
    # Pagination: how many lines, and from which line (None = tail).
    max_lines = int(self.get_query_argument('max_lines', 50))
    offset_line = self.get_query_argument('offset_line', None)
    if offset_line is not None:
        offset_line = int(offset_line)
    try:
        response = self._cluster_master.get_console_output(
            build_id, subjob_id, atom_id, Configuration['results_directory'], max_lines, offset_line
        )
        self.write(response)
        return
    except ItemNotFoundError as e:
        # Output not on the master yet -- maybe the slave still has it.
        build = self._cluster_master.get_build(int(build_id))
        subjob = build.subjob(int(subjob_id))
        slave = subjob.slave
        if slave is None:
            # Not assigned anywhere; nothing to redirect to.
            raise e
        api_url_builder = UrlBuilder(slave.url)
        slave_console_url = api_url_builder.url('build', build_id, 'subjob', subjob_id, 'atom', atom_id, 'console')
        # Preserve the caller's pagination arguments on the redirect.
        query = {'max_lines': max_lines}
        if offset_line is not None:
            query['offset_line'] = offset_line
        query_string = urllib.parse.urlencode(query)
        self.redirect('{}?{}'.format(slave_console_url, query_string))
:type build_id: int :type subjob_id: int :type atom_id: int
625941bd851cf427c661a416
def render(self, name, value, attrs=None):
    """Render the widget, tagging it with the ``python-code`` CSS class.

    TODO: have a syntax highlight feature, where instead of a TextArea,
    you get a div that can be double-clicked in to make it editable,
    and after leaving it re-highlights.
    """
    if value is None:
        value = ""
    if attrs is None:
        attrs = {}
    # `in` replaces dict.has_key(), which was removed in Python 3.
    if 'class' in attrs:
        attrs['class'] += " python-code"
    else:
        attrs['class'] = "python-code"
    return super(PythonCodeWidget, self).render(name, value, attrs=attrs)
TODO: have a syntax highlight feature, where instead of a TextArea, you get a div that can be double-clicked in to make it editable, and after leaving it re-highlights.
625941bd6fb2d068a760ef9e
def testDebianPackagesStatusParserBadInput(self):
    """A broken status file should produce Anomaly results, not crash."""
    parser = linux_software_parser.DebianPackagesStatusParser(deb822)
    # numbers.txt is not a valid dpkg status file.
    path = os.path.join(self.base_path, "numbers.txt")
    with open(path, "rb") as data:
        results = list(parser.Parse(None, data, None))
    for parsed in results:
        self.assertIsInstance(parsed, rdf_anomaly.Anomaly)
If the status file is broken, fail nicely.
625941bd63f4b57ef0001024
def _parse_path(path): <NEW_LINE> <INDENT> if '/' in path: <NEW_LINE> <INDENT> stage, step = path.strip().split('/') <NEW_LINE> path = { 'stage': stage.strip(), 'step': step.strip() } <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> stage = path.strip() <NEW_LINE> path = { 'stage': stage.strip() } <NEW_LINE> <DEDENT> return path
Splits the directory like node 'path' into the different components Returns a dict with all of the found components populated
625941bdbe8e80087fb20b4b
def __init__(self, repo_map, cloud_provider='', default_channels=None):
    """Initialize the object from the given RepositoriesMapping message.

    Expects that the message contains just the data relevant for the
    current IPU (at least mapping and repos for the used upgrade path
    and architecture).

    :param repo_map: A valid RepositoryMapping message.
    :type repo_map: RepositoryMapping
    :param cloud_provider: Cloud provider identifier; variants such as
        'aws-sap' are normalized to their base provider name.
    :type cloud_provider: str
    :param default_channels: Default channels to use when no exactly
        matching target repository is found; defaults to ['ga'].
    :type default_channels: List[str]
    """
    self.repositories = repo_map.repositories
    self.mapping = repo_map.mapping
    self.default_channels = default_channels if default_channels else ['ga']
    # Channel to prefer when looking for target equivalents.
    self.prio_channel = get_target_product_channel(default=None)
    # Normalize provider variants (e.g. 'aws-sap' -> 'aws') to the base name.
    for known_provider in ('aws', 'azure'):
        if cloud_provider.startswith(known_provider):
            self.cloud_provider = known_provider
            break
    else:
        self.cloud_provider = cloud_provider
Initialize the object based on the given RepositoriesMapping msg. Expects that msg contains just stuff related for the current IPU (at least mapping and repos for the used upgrade path and architecture). :param repo_map: A valid RepositoryMapping message. :type repo_map: RepositoryMapping :param default_channels: A list of default channels to use when a target repository equivalent exactly matching a source repository was not found. :type default_channels: List[str] :param prio_channel: Prefer repositories with this channel when looking for target equivalents. :type prio_channel: str
625941bd8e7ae83300e4aed0
@user_passes_test(is_colleur, login_url='accueil')
def colleNoteEleve(request,id_colle):
    """Fetch the colle with id `id_colle` then redirect to the grading page
    for the student on that colle.

    (Translated from the original French docstring.) Only colles belonging
    to the logged-in colleur, in one of his/her subjects, are accessible
    (404 otherwise). The subject pk is stored in the session, and a Note
    pre-filled with the colle's week/day/hour is passed along.
    """
    colle=get_object_or_404(Colle,pk=id_colle,colleur=request.user.colleur,matiere__in=request.user.colleur.matieres.all())
    request.session['matiere']=colle.matiere.pk
    note = Note(semaine = colle.semaine, jour = colle.creneau.jour, heure = colle.creneau.heure)
    # "0" means no specific student is attached to the colle.
    return noteEleves(request, colle.creneau.classe.pk, "0" if not colle.eleve else str(colle.eleve.pk), note)
Récupère la colle dont l'id est id_colle puis redirige vers la page de notation de l'élève sur la colle concernée
625941bd5fc7496912cc3882
def get_temp_upload_path():
    """Return the path of the temporary upload directory, creating it if needed."""
    path = os.path.join(TEMP_DIR, 'uploads')
    # exist_ok=True avoids the check-then-create race of the previous
    # os.path.exists() + os.makedirs() sequence (two concurrent callers
    # could both pass the exists() check and one would then crash).
    os.makedirs(path, exist_ok=True)
    return path
Get the path for the upload directory and create it if needed.
625941bdac7a0e7691ed3fdc
def _I_Spawn_Processes(message_to_spawner, message_to_PTM, cmds_queue):
    """Low-RAM spawner loop: start worker processes on request and report back.

    Args:
        message_to_spawner: queue of (nParallel, nTasks, qListOfLists)
            work requests from the PTM; a None message means exit.
        message_to_PTM: queue used to signal "Done" after each batch.
        cmds_queue: queue of tasks the spawned workers consume.

    Intended to be started as a separate process as early as possible
    (while RAM usage is still low), so the fork cost stays small. It then
    waits for messages: each one triggers a batch of parallel workers;
    the special None message makes it return.
    """
    while True:
        try:
            message = message_to_spawner.get(timeout=.1)
            # None is the sentinel telling the spawner to shut down.
            if message == None:
                return
            nParallel, nTasks, qListOfLists = message
            # qListOfLists selects which worker target consumes cmds_queue:
            # ordered command lists vs. independent commands.
            if qListOfLists:
                runningProcesses = [mp.Process(target=Worker_RunOrderedCommandList, args = (cmds_queue, nParallel, nTasks)) for i_ in range(nParallel)]
            else:
                runningProcesses = [mp.Process(target=Worker_RunCommand, args = (cmds_queue, nParallel, nTasks)) for i_ in range(nParallel)]
            for proc in runningProcesses:
                proc.start()
            for proc in runningProcesses:
                # Retry join until the process has actually exited.
                # NOTE(review): the RuntimeError guard presumably works around
                # a platform-specific join issue -- confirm.
                while proc.is_alive():
                    try:
                        proc.join()
                    except RuntimeError:
                        pass
            # Tell the PTM this batch is complete, then idle briefly.
            message_to_PTM.put("Done")
            time.sleep(1)
        except queue.Empty:
            # No work request yet; poll again after a short sleep.
            time.sleep(1)
    # Unreachable: the loop only exits via the `return` above.
    pass
Args: message_queue - for passing messages that a new queue of tasks should be started (PTM -> I_Space_Processes) or that the tasks are complete cmds_queue - queue containing tasks that should be done Use: A process should be started as early as possible (while RAM usage is low) with this method as its target. This is now a separate process with low RAM usage. Each time some parallel work is required then the queue for that is placed in the message_queue by the PTM. _I_Spawn_Processes - will spawn parallel processes when instructed by the message_queue in the message_queue and get them working on the queue. When the queue is empty it will wait for the next one. It can receive a special signal to exit - the None object
625941bdcc0a2c11143dcd94
def _read_user_execute(self):
    """Return the command line for reading a crontab.

    Builds the platform-specific invocation of CRONCMD to list the crontab
    of self.user (or of the current user when self.user is unset). SunOS,
    AIX and HP-UX each need their own argument order; elsewhere `-u user`
    is added only when the target user differs from the login user.
    """
    user = ''
    if self.user:
        if platform.system() == 'SunOS':
            # SunOS crontab cannot take a user argument; run it as that user.
            return "su %s -c '%s -l'" % (pipes.quote(self.user), pipes.quote(CRONCMD))
        elif platform.system() == 'AIX':
            # AIX: user is a positional argument after -l.
            return "%s -l %s" % (pipes.quote(CRONCMD), pipes.quote(self.user))
        elif platform.system() == 'HP-UX':
            return "%s %s %s" % (CRONCMD , '-l', pipes.quote(self.user))
        elif os.getlogin() != self.user:
            # NOTE(review): os.getlogin() can raise OSError when there is no
            # controlling terminal (e.g. under cron/daemons) -- confirm this
            # code path is only reached interactively.
            user = '-u %s' % pipes.quote(self.user)
    return "%s %s %s" % (CRONCMD , user, '-l')
Returns the command line for reading a crontab
625941bd046cf37aa974cc4e