code
stringlengths
4
4.48k
docstring
stringlengths
1
6.45k
_id
stringlengths
24
24
def seatings(people, prefs):
    """Yield every possible circular seating as a (happiness, seating) pair."""
    for arrangement in circular_permutations(people):
        yield total_happiness(arrangement, prefs), arrangement
Generate all the possible seatings as pairs (happiness, seating).
625941bda8ecb033257d2fd2
def pop(self):
    """Remove and return the element at the front of the queue.

    Uses list.pop(0), which removes by position; the original called
    list.remove(self.l[0]), a redundant O(n) equality scan that finds
    the same element it already had in hand.

    :rtype: int
    """
    return self.l.pop(0)
Removes the element from in front of queue and returns that element. :rtype: int
625941bdbde94217f3682cf8
def ToGeoJson(geometry, as_dict=False):
    """Return a GeoJSON geometry from a generic geometry.

    A generic geometry is anything implementing __geo_interface__, as
    supported by all major geometry libraries (shapely, etc...).

    Args:
        geometry: A generic geometry, for example a shapely geometry.
        as_dict: If True return the GeoJSON as a dict, otherwise a string.
    """
    geojson_str = json.dumps(InsureGeoJsonWinding(geometry.__geo_interface__))
    if as_dict:
        return json.loads(geojson_str)
    return geojson_str
Returns a GeoJSON geometry from a generic geometry. A generic geometry implements the __geo_interface__ as supported by all major geometry libraries, such as shapely, etc... Args: geometry: A generic geometry, for example a shapely geometry. as_dict: If True returns the GeoJSON as a dict, otherwise as a string.
625941bdbe383301e01b5390
def __init__(self, *args, **kwds):
    # Constructor for the markerActionResult ROS message.
    # Fields: header, status, result.  Any field implicitly/explicitly
    # left as None is replaced with a default-constructed message.
    # Keyword arguments are the recommended (future-proof) way to set
    # specific fields; you cannot mix in-order args and keyword args.
    if args or kwds:
        # Let the generated base class fill the fields, then back-fill
        # any that are still None with defaults.
        super(markerActionResult, self).__init__(*args, **kwds)
        if self.header is None:
            self.header = std_msgs.msg.Header()
        if self.status is None:
            self.status = actionlib_msgs.msg.GoalStatus()
        if self.result is None:
            self.result = actionmsg.msg.markerResult()
    else:
        # No arguments at all: default-construct every field.
        self.header = std_msgs.msg.Header()
        self.status = actionlib_msgs.msg.GoalStatus()
        self.result = actionmsg.msg.markerResult()
Constructor. Any message fields that are implicitly/explicitly set to None will be assigned a default value. The recommend use is keyword arguments as this is more robust to future message changes. You cannot mix in-order arguments and keyword arguments. The available fields are: header,status,result :param args: complete set of field values, in .msg order :param kwds: use keyword arguments corresponding to message field names to set specific fields.
625941bd0a366e3fb873e71b
def get_job_client(self):
    """For DML and DQL statements, return the JobClient associated with
    the submitted Flink job; for other statements (e.g. DDL, DCL)
    return None.

    :return: The job client, optional.
    :rtype: pyflink.common.JobClient

    .. versionadded:: 1.11.0
    """
    j_client = self._j_table_result.getJobClient()
    if not j_client.isPresent():
        return None
    return JobClient(j_client.get())
For DML and DQL statement, return the JobClient which associates the submitted Flink job. For other statements (e.g. DDL, DCL) return empty. :return: The job client, optional. :rtype: pyflink.common.JobClient .. versionadded:: 1.11.0
625941bd26238365f5f0ed6e
def __init__(self, state_size: int, action_size: int, seed: int, fc1_units: int = 64, fc2_units: int = 64):
    """Initialize parameters and build the actor model.

    Params
    ======
    state_size: dimensions of each state
    action_size: dimensions of each action
    seed: random seed
    fc1_units: number of nodes in the first hidden layer
    fc2_units: number of nodes in the 2nd hidden layer
    """
    super(Actor, self).__init__()
    self.seed = torch.manual_seed(seed)
    # Three fully connected layers: state -> fc1 -> fc2 -> action.
    dims = [(state_size, fc1_units), (fc1_units, fc2_units), (fc2_units, action_size)]
    self.fc1, self.fc2, self.fc3 = (nn.Linear(n_in, n_out) for n_in, n_out in dims)
    self.reset_parameters()
Initilize parameters and build model. Params ===== state_size: dimensions of each state action_size: dimensions of each action seed: random seed fc1_units: number of nodes in the first hidden layer fc2_units: number of nodes in the 2nd hidden layer
625941bdf9cc0f698b140502
def PredictTrials(X, y, fitter, data):
    """Perform 10 fit/predict trials with different train/test splits.

    Prints the predicted price of data[1] for each trial, then the
    range (max - min) of the predictions.
    """
    prices = []
    for trial in range(10):
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=0.2, random_state=trial)
        model = fitter(X_train, y_train)
        predicted = model.predict([data[1]])[0]
        prices.append(predicted)
        print("Trial {}: ${:,.2f}".format(trial + 1, predicted))
    print("\nRange in prices: ${:,.2f}".format(max(prices) - min(prices)))
Performs trials of fitting and predicting data.
625941bdd268445f265b4d72
def calculate_log_posterior(cluster_graph: ClusterGraph, data_graph: nx.DiGraph, params: Parameters) -> float:
    """Log posterior = graph log-likelihood + graph log-prior.

    :param cluster_graph: current clustering of the data graph
    :param data_graph: observed directed graph
    :param params: model parameters
    :return: log posterior of the cluster graph (presumably
        unnormalised — confirm against graph_prior)
    """
    log_posterior, cluster_graph = graph_likelihood(data_graph, cluster_graph, params)
    log_posterior += graph_prior(cluster_graph, params)
    return log_posterior
:param cluster_graph: :param data_graph: :param params: :return:
625941bd99fddb7c1c9de296
def train(self, X, y, learning_rate=1e-3, reg=1e-5, num_iters=100,
          batch_size=200, verbose=False):
    """Train this classifier using stochastic gradient descent.

    Inputs:
    - X: N x D array of training data.
    - y: 1-dimensional array of length N with labels 0-1.
    - learning_rate: (float) learning rate for optimization.
    - reg: (float) regularization strength.
    - num_iters: (integer) number of optimization steps.
    - batch_size: (integer) training examples per step.
    - verbose: (boolean) print progress every 100 iterations.

    Returns self; per-iteration losses are stored in self.loss_history.
    """
    X = LogisticRegression.append_biases(X)
    num_train, dim = X.shape
    if self.w is None:
        # Lazily initialize weights with small random values.
        self.w = np.random.randn(dim) * 0.01
    self.loss_history = []
    for step in range(num_iters):
        # Sample a minibatch (with replacement) for this SGD step.
        sample = np.random.choice(num_train, batch_size)
        loss, grad = self.loss(X[sample], y[sample], reg)
        self.loss_history.append(loss)
        self.w -= learning_rate * grad
        if verbose and step % 100 == 0:
            print("iteration %d / %d: loss %f" % (step, num_iters, loss))
    return self
Train this classifier using stochastic gradient descent. Inputs: - X: N x D array of training data. Each training point is a D-dimensional column. - y: 1-dimensional array of length N with labels 0-1, for 2 classes. - learning_rate: (float) learning rate for optimization. - reg: (float) regularization strength. - num_iters: (integer) number of steps to take when optimizing - batch_size: (integer) number of training examples to use at each step - verbose: (boolean) If true, print progress during optimization. Outputs: A list containing the value of the loss function at each training iteration.
625941bd8e05c05ec3eea276
def set_provenance(self, provenance):
    """Set the molecule provenance.

    >>> H2OH2O.set_provenance('water_dimer')
    """
    self.PYprovenance = provenance
Set molecule provenance >>> H2OH2O.set_provenance('water_dimer')
625941bd31939e2706e4cd72
def multipleOutput(vocabulary_size, num_income_groups):
    """Multi-output model predicting age, income group and gender from posts.

    Fix: Embedding must take input_dim=vocabulary_size (number of tokens)
    and output_dim=embedding width; the original passed input_dim=256 and
    output_dim=vocabulary_size, i.e. the two arguments swapped.

    :param vocabulary_size: size of the input token vocabulary
    :param num_income_groups: number of income classes
    :return: compiled Model with outputs [age, income, gender]
    """
    post_input = layers.Input(shape=(None, ), dtype='int32', name='posts')
    embedded_post = layers.Embedding(input_dim=vocabulary_size, output_dim=256)(post_input)
    x = layers.Conv1D(filters=128, kernel_size=5, activation='relu')(embedded_post)
    x = layers.MaxPooling1D(pool_size=5)(x)
    x = layers.Conv1D(filters=256, kernel_size=5, activation='relu')(x)
    x = layers.Conv1D(filters=256, kernel_size=5, activation='relu')(x)
    x = layers.MaxPooling1D(pool_size=5)(x)
    x = layers.Conv1D(filters=256, kernel_size=5, activation='relu')(x)
    x = layers.Conv1D(filters=256, kernel_size=5, activation='relu')(x)
    x = layers.GlobalMaxPool1D()(x)
    # Three heads sharing the convolutional trunk.
    age_prediction = layers.Dense(units=1, name='age')(x)
    income_prediction = layers.Dense(units=num_income_groups, activation='softmax', name='income')(x)
    gender_prediction = layers.Dense(units=1, activation='sigmoid', name='gender')(x)
    model = Model(inputs=post_input,
                  outputs=[age_prediction, income_prediction, gender_prediction])
    model.compile(optimizer=RMSprop(lr=1e-5),
                  loss={'age': 'mse', 'income': 'categorical_crossentropy',
                        'gender': 'binary_crossentropy'},
                  loss_weights={'age': 0.25, 'income': 1., 'gender': 10.})
    return model
multiple output model :param vocabulary_size: :param num_income_groups: :return:
625941bd8da39b475bd64e75
def shipWithinDays(self, weights, D):
    """Smallest ship capacity that ships all weights, in order, within D days.

    Binary-search the capacity in [max(weights), sum(weights)].
    Fix: the original computed ``mid = (high+low)/2`` — float division on
    Python 3 — so capacities were never integral and the interleaved
    greedy bookkeeping could misbehave; use floor division and a clean
    feasibility check instead.

    :type weights: List[int]
    :type D: int
    :rtype: int
    """
    def days_needed(capacity):
        # Greedily load each day up to `capacity`, starting a new day
        # whenever the next package would overflow the ship.
        days, load = 1, 0
        for w in weights:
            if load + w > capacity:
                days += 1
                load = 0
            load += w
        return days

    low, high = max(weights), sum(weights)
    while low < high:
        mid = (low + high) // 2
        if days_needed(mid) <= D:
            high = mid
        else:
            low = mid + 1
    return low
:type weights: List[int] :type D: int :rtype: int
625941bda17c0f6771cbdf57
def _select(*args, **kwargs):
    """Get a value from the datascope db.

    Datascope 5.4 _dbgetv returns (retcode, value); patch_oldversion
    adapts 5.3 and below to the same shape.  Raises DatabaseError on a
    non-zero retcode, otherwise returns the value.
    """
    row = ds._dbgetv(*args, **kwargs)
    retcode = row[0]
    if retcode:
        raise DatabaseError("Database returned error: {0}".format(row))
    return row[1]
Get values from db 5.4 returns (retcode, value), patch_oldversion fixes for 5.3 and below
625941bda79ad161976cc049
def prediction(df, model):
    """Show the prediction dataframe in the kernel.

    Input:
      - df: test dataframe
      - model: trained model
    Output:
      - displays the transformed dataframe (with prediction column)
    """
    transformed = model.transform(df)
    transformed.show()
Show dataframe of prediction in kernel Input : -Dataframe test(df) -Trained model (model) Output : -Dataframe display with prediction column (transformed)
625941bd9f2886367277a794
def action(self, observation, deterministic):
    """Return an action for `observation`; subclasses must override."""
    raise NotImplementedError()
Returns an action
625941bdf548e778e58cd480
def get_local_gb_used():
    """Return the total used disk space (GB) on the host computer.

    Note that the value reflects the partition holding
    CONF.ovz_ve_private_dir.
    """
    used_bytes = get_local_gb(CONF.ovz_ve_private_dir)['used']
    return used_bytes / (1024 ** 3)
Get the total used disk space on the host computer. :returns: The total amount of HDD(GB) Note that this value show a partition where OVZ_VE_PRIVATE_DIR is.
625941bd796e427e537b04c7
def publish_document(self, cr, uid, ids, context=None):
    # Make a list of attachments public.  A file is public IF its
    # user_id is the anonymous user.
    #
    # :param cr: database cursor
    # :param uid: current user id
    # :param ids: ids of the attachments to publish
    # :param context: OpenERP context dict
    # :return: True; raises osv.except_osv on configuration/permission
    #          problems
    a_brw = self._get_anonymous_id(cr, uid, ids, context=context)
    doc_brw = self.browse(cr, uid, ids, context=context)
    # Filestore roots: `location` is the private attachment store,
    # `plocation` the public one files are copied into.
    location = self.pool.get('ir.config_parameter').get_param(cr, uid, 'ir_attachment.location')
    plocation = self.pool.get('ir.config_parameter').get_param(cr, uid, 'ir_attachment.plocation')
    # Reassign ownership to the anonymous user when one exists.
    dict_write = a_brw and {'user_id': a_brw.id} or {}
    for d in doc_brw:
        if d.parent_id.publish:
            d.write(dict_write, context=context)
            if not d.store_fname:
                # Attachment lives in the DB, not the filesystem —
                # cannot be copied to the public location.
                raise osv.except_osv(_('Security!'),
                    'Store Fname no setted Try to recreate'
                    ' the file or use an export/import script to convert this'
                    'file in a file saved on the file system. '
                    '%s ' % d.store_fname)
            if not plocation:
                raise osv.except_osv(_('Security!'),
                    'Public location not setted go to Setting')
            destiny = self._full_path_plocation(cr, uid, plocation, d.store_fname)
            origin = self._full_path(cr, uid, location, d.store_fname)
            try:
                # Create the destination directory lazily, then copy.
                dirname = os.path.dirname(destiny)
                if not os.path.isdir(dirname):
                    os.makedirs(dirname)
                copyfile(origin, destiny)
            except IOError:
                # Best-effort: log the failure and continue with the
                # next document.
                _logger.error("Making public error writing from %s to %s" % (origin, destiny))
        else:
            # Parent folder is not flagged as publishable.
            raise osv.except_osv(_('Security!'),
                _('You can not make public a File in a folder that is not '
                  'explicitally marked as '
                  '"Allowe Publish" it is happening sharing the file'
                  '%s in the folder %s' % (d.name, d.parent_id.name)))
    return True
Make Public a list of attachments. A file is public IF the user_id is anonymous
625941bd596a8972360899c8
def connect(self, signal_name, receiver, reconnect=False):
    """Connect the named SIGNAL of this item's signaller to `receiver`.

    Duplicate (signal_name, receiver) pairs are skipped unless
    `reconnect` is True.  Raises RuntimeError if enableSignals() has
    not been called yet.
    """
    if not self._signaller:
        raise RuntimeError("ModelItem.connect() called before enableSignals()")
    already_connected = (signal_name, receiver) in self._connections
    if already_connected and not reconnect:
        return
    self._connections.add((signal_name, receiver))
    # Dispatch by attribute lookup instead of an elif chain; unknown
    # signal names are silently recorded but not connected (as before).
    known_signals = ('updated', 'changeCurrentSource', 'selected',
                     'changeGroupingStyle', 'changeGroupingVisibility')
    if signal_name in known_signals:
        getattr(self._signaller, signal_name).connect(receiver)
Connects SIGNAL from object to specified receiver slot. If reconnect is True, allows duplicate connections.
625941bd7cff6e4e8111788a
def _delete_volume(self, volume, is_snapshot=False):
    """Delete a logical volume (or snapshot) from the volume group.

    Regular volumes whose device path still exists are scrubbed with
    clear_volume() first; snapshot names are escaped before deletion.
    """
    device = self.local_path(volume)
    if not is_snapshot and os.path.exists(device):
        self.clear_volume(volume)
    if is_snapshot:
        lv_name = self._escape_snapshot(volume['name'])
    else:
        lv_name = volume['name']
    self.vg.delete(lv_name)
Deletes a logical volume.
625941bd3cc13d1c6d3c7280
def find_roots(func, min_x, max_x, x_steps, args=()):
    """Find the roots of a function.

    Runs sp.optimize.newton from x_steps evenly spaced starting points
    between min_x and max_x.

    Params:
        func (function): must take the form func(x, arg1, arg2, ...)
            where x is the variable we find roots for.
        min_x (float): minimum starting value of x.
        max_x (float): maximum starting value of x.
        x_steps (int): number of starting points; more takes longer.
        args (tuple): extra arguments passed to func.

    Returns:
        roots (list): roots found, at most x_steps long (possibly with
        duplicates); empty if nothing converged.
    """
    found = []
    for start in np.linspace(min_x, max_x, x_steps):
        try:
            found.append(sp.optimize.newton(func, start, args=args))
        except RuntimeError:
            # This starting point failed to converge; try the next one.
            pass
    return found
Find the roots of a function. The roots are determined by using sp.optimize.newton on various starting points between min_x and max_x. Params: func (function): The function that will be finding the roots of. The function must take the form func(x, arg1, arg2, ...) where x is the variable that we want to find roots for. min_x (float): The minimum value that x can take max_x (float): The maximum value that x can take x_steps (int): The number of slices to create between min_x and max_x. We will try to find a root for each slice, so more slices takes longer to evaluate. args (tuple): The arguments that will be passed to the function Returns: roots (list): A list of all the roots found by this function. Will be at most x_steps long. If no roots are found, the list will be empty.
625941bd4f6381625f114942
def hasperm(myrole, scope):
    """Return True if the user role `myrole` falls in the given scope.

    The original hand-rolled any()/== scan is exactly the `in`
    membership test (both compare with ==).
    """
    return myrole in scope
Return True if the user role falls in the given scope
625941bdd99f1b3c44c6749a
def get_credentials(self):
    """Get valid user credentials from storage.

    If nothing has been stored, or the stored credentials are invalid,
    the OAuth2 flow is completed to obtain new credentials.

    Returns:
        Credentials, the obtained credential.
    """
    credential_path = os.path.join(GmailService.tmpdir, 'credentials.json')
    # Touch the file so Storage always has something to open.
    with open(credential_path, 'a'):
        os.utime(credential_path, None)
        store = Storage(credential_path)
        credentials = store.get()
    if credentials and not credentials.invalid:
        return credentials
    # Missing/invalid: run the OAuth2 flow and persist the result.
    flow = client.flow_from_clientsecrets(GmailService.CLIENT_SECRET_FILE,
                                          GmailService.SCOPES)
    flow.user_agent = GmailService.APPLICATION_NAME
    credentials = tools.run_flow(flow, store)
    print('Storing credentials to ' + credential_path)
    return credentials
Gets valid user credentials from storage. If nothing has been stored, or if the stored credentials are invalid, the OAuth2 flow is completed to obtain the new credentials. Returns: Credentials, the obtained credential.
625941bdbf627c535bc130d3
def pi_1(N):
    """Approximate pi with the first N terms of the Leibniz series:
    4 * sum((-1)^k / (2k+1), k=0..N-1).

    The original built the same sum via itertools/map/reduce with a
    quadratic list.index() lookup for each term's sign; this closed
    form is numerically identical and O(N).
    """
    return sum((-1.0) ** k * 4.0 / (2 * k + 1) for k in range(N))
Compute the value of pi (Leibniz series approximation with N terms).
625941bdcb5e8a47e48b79b2
def setup_databases(drop=False):
    """Initialize all tables in the database, then seed the roles.

    If drop is True, drop all tables before recreating them.
    """
    tables = [Role, CalendarEntry]
    if drop is True:
        print('dropping existing tables')
        sandbox.drop_tables(tables, cascade=True, safe=True)
    sandbox.create_tables(tables, safe=True)
    from .roles import all_roles
    for role in all_roles:
        role.save()
Initiliaze all tables in the databse If drop is True, drop all tables before recreating them.
625941bd71ff763f4b54958c
def think(self, result):
    """Format the result into grandpy's displayed response text.

    :param result: dict with "map" (geocoding hit or None) and
        "p_query" keys
    :return: the same dict, with a new "gp" key
    """
    hit = result['map']
    if hit is None:
        response = ("Ça ne me dit rien du tout !"
                    f" Que veut dire {result['p_query']} ?")
    else:
        response = ("Bien sûr que je connais cette adresse !"
                    " Je ne suis quand même pas si vieux : "
                    + str(hit['formatted_address']))
    result['gp'] = response
    return result
This method formats the result in a text that will be displayed (grandpy's responses) :param result: dict, with "map" key :return: result dict with a new key "gp"
625941bd5510c4643540f2f0
def __getData(self, filename, item):
    """__getData(f,i) --> variable type

    Return data item `item` for dll `filename` from the basefile data
    source.
    """
    index = self.__getIndex(filename)
    return self.__data[index][item]
__getData(f,i) --> variable type Returns the data item i for the dll f from the basefile data source.
625941bd99fddb7c1c9de297
def stop_worker(self):
    """Stop the worker child process (placeholder; currently a no-op)."""
    pass
Stop the worker child process.
625941bd796e427e537b04c8
def make_epsilon_greedy_policy(Q, G, epsilon, observation, deadline):
    # Epsilon-greedy edge choice on graph G from the current node.
    #
    # Args:
    #     Q: maps state -> per-action value array.
    #     G: networkx-style graph; edges carry 'wct' and 'tx'
    #        attributes (presumably worst-case and traversal time —
    #        TODO confirm against the environment definition).
    #     epsilon: probability mass spread over feasible edges.
    #     observation: current node.
    #     deadline: maximum acceptable 'wct' for a feasible edge.
    # Returns:
    #     (next_node, time_traversed); (0, 0) when no edge is feasible
    #     or the last node has been reached.
    current_node = observation
    time_traversed = 0
    next_node = 0
    feasible_edges = []
    # Keep only neighbours whose edge fits within the deadline.
    for i in G[int(current_node)]:
        if G[int(current_node)][i]['wct'] <= deadline:
            feasible_edges.append(i)
    if(feasible_edges == []):
        return(0,0)
    if(current_node == G.nodes[G.number_of_nodes() - 1]['index']):
        return(0,0)
    # NOTE(review): this first `probs` is immediately overwritten by
    # the zeros array below — it looks like dead code; confirm before
    # removing.
    probs = np.ones(len(feasible_edges), dtype = float) * epsilon / len(feasible_edges)
    probs = np.zeros(G.number_of_nodes())
    unique_nodes = []
    new_probs = epsilon / len(feasible_edges)
    # NOTE(review): the fancy-indexed assignment fills every feasible
    # entry on each pass, so one iteration would suffice.
    for i in feasible_edges:
        probs[feasible_edges] = new_probs
    # Greedy action: best Q-weighted feasible edge, or the first
    # feasible edge when all Q-weighted values are zero.
    if(all(np.multiply(Q[current_node],probs) == 0.0)):
        best_action = np.argmax(probs)
    else:
        best_action = np.argmax(np.multiply(Q[current_node],probs))
    probs[best_action] += (1.0 - epsilon)
    next_edge = np.random.choice(np.arange(len(probs)), p=probs)
    time_traversed = G[int(current_node)][int(next_edge)]["tx"]
    next_node = next_edge
    return(next_node, time_traversed)
Creates an epsilon-greedy policy based on a given Q-function and epsilon. Args: Q: A dictionary that maps from state -> action-values. Each value is a numpy array of length nA (see below) epsilon: The probability to select a random action . float between 0 and 1. nA: Number of actions in the environment. Returns: A function that takes the observation as an argument and returns the probabilities for each action in the form of a numpy array of length nA.
625941bd21a7993f00bc7bf0
def plot(decisionTree):
    """Print a text rendering of the obtained decision tree."""
    def toString(node, indent=''):
        # Leaf node: show the stored results.
        if node.results != None:
            return str(node.results)
        # Numeric split columns render as >=, categorical as ==.
        if isinstance(node.value, int) or isinstance(node.value, float):
            decision = 'Column %s: x >= %s?' % (node.col, node.value)
        else:
            decision = 'Column %s: x == %s?' % (node.col + 1, node.value)
        yes_branch = indent + 'yes -> ' + toString(node.trueBranch, indent + '\t\t')
        no_branch = indent + 'no -> ' + toString(node.falseBranch, indent + '\t\t')
        return decision + '\n' + yes_branch + '\n' + no_branch
    print(toString(decisionTree))
Plots the obtained decision tree.
625941bd94891a1f4081b9ad
def dir_attribs(location, mode=None, owner=None, group=None, recursive=False, use_sudo=False):
    """Update the mode/owner/group for the given remote directory.

    cuisine dir_attribs doesn't do sudo, so we implement our own.
    """
    flags = ' -R ' if recursive else ''
    execute = sudo if use_sudo else run
    if mode:
        execute('chmod %s %s %s' % (flags, mode, location))
    if owner:
        execute('chown %s %s %s' % (flags, owner, location))
    if group:
        execute('chgrp %s %s %s' % (flags, group, location))
    return True
cuisine dir_attribs doesn't do sudo, so we implement our own Updates the mode/owner/group for the given remote directory.
625941bdadb09d7d5db6c696
def post(self):
    # Login a user.
    #
    # Validates the posted email/password; on success responds with a
    # JSON payload containing a 24h JWT access token and the user's
    # admin flag (HTTP 200), otherwise the validation errors (400).
    post_data = request.get_json()
    email = post_data.get('email')
    password = post_data.get('password')
    # A non-empty response from the validator means validation failed.
    response = val_data.validate_login_data(email, password)
    if response:
        return jsonify(response), 400
    else:
        ret_u = {}
        # NOTE(review): returned_user is fetched but never used —
        # confirm whether the lookup is needed (e.g. password check).
        returned_user = db_connection.get_user_by_email(email)
        is_admin = db_connection.get_admin_field(email)
        access_token = create_access_token(identity=email, expires_delta=datetime.timedelta(hours=24))
        ret_u["token"] = access_token
        return jsonify({"msg":ret_u, "is_admin":is_admin}), 200
Login a user
625941bd3eb6a72ae02ec3da
def setup_locale(application, name, language=None, format="%s.%s"):
    # Load the translation for the current user locale.
    #
    # `name` is the file name without the suffix (eg. ".fr.qm"): use
    # "ufwi_rpcd" to load "ufwi_rpcd.fr.qm".  Returns True on success
    # (or when no language is configured), False when no translation
    # file could be loaded.
    if not language:
        language = get_language()
        if not language:
            # No locale configured: nothing to load, not an error.
            return True
    filename = format % (name, language)
    translator = QTranslator(application)
    if filename.startswith(":"):
        # ":" prefix means a Qt resource path.
        ret = translator.load(filename)
        if ret:
            application.installTranslator(translator)
            debug("Load locale from resources: %s" % filename)
            return True
    else:
        # Search the working directory first, then the system dir.
        for directory in ('.', '/usr/share/ufwi_rpcd/i18n'):
            ret = translator.load(filename, directory)
            if not ret:
                continue
            debug("Load locale file: %s" % path_join(directory, filename))
            application.installTranslator(translator)
            return True
    error("Unable to load translation file: %s" % filename)
    return False
Load the translation for the current user locale. The name argument is the file name without the suffix (eg. ".fr.qm"). So use the name "ufwi_rpcd" to load "ufwi_rpcd.fr.qm".
625941bd24f1403a92600a6e
@command
def build(locale_dir, language=(), out=sys.stdout):
    """Build all po files into mo files.

    :param locale_dir: a locale directory. required.
    :param language: tuple of languages; if empty, all are built.
    :param out: file-like object for displaying information.
    :return: None
    """
    langs = language or get_lang_dirs(locale_dir)
    for lang in langs:
        lang_dir = os.path.join(locale_dir, lang)
        for dirpath, _dirnames, filenames in os.walk(lang_dir):
            for fname in filenames:
                po_path = os.path.join(dirpath, fname)
                stem, ext = os.path.splitext(po_path)
                if ext != ".po":
                    continue
                mo_path = stem + ".mo"
                print_('Build:', mo_path, file=out)
                polib.pofile(po_path).save_as_mofile(fpath=mo_path)
Build all po files into mo file. :param locale_dir: a locale directry. required. :param language: tuple of language. if empty, all languages are specified. :param out: file like object for displaying information. :return: None
625941bd5166f23b2e1a505e
@app.route("/sell", methods=["GET", "POST"])
@login_required
def sell():
    # Sell shares of stock.
    #
    # GET:  render the sell form with the user's sellable holdings.
    # POST: validate symbol/share count, record a 'sold' transaction
    #       (negative share count), credit the proceeds to the user's
    #       cash, and redirect home with a flash message.
    if request.method == "POST":
        cash, name, price, shares, symbol = getFields()
        if not symbol:
            return apology('please enter a valid stock symbol', 422)
        if not shares or shares < 1:
            return apology("please enter the number of shares", 422)
        total = price * shares
        # How many shares of this symbol does the user actually hold?
        row = db.execute("SELECT sum(shares) as total_shares FROM user_transactions WHERE symbol = ? " "and user_id = ?", symbol, session['user_id'])
        total_shares = row[0]['total_shares']
        if shares > total_shares:
            return apology(f"there are only {total_shares} {symbol} shares available to sell", 403)
        # Sold shares are stored with a negative count.
        row_id = db.execute("INSERT INTO user_transactions (user_id, type, company_name, symbol, price, shares," "transaction_date) VALUES (?, 'sold', ?, ?, ?, ?, datetime('now'))", session['user_id'], name, symbol, price, -shares)
        sqlProblem(row_id)
        row_id = db.execute("UPDATE users SET cash = ? WHERE id = ?", cash + total, session['user_id'])
        sqlProblem(row_id)
        # Keep the session's cached cash balance in sync.
        session['cash'] = cash + total
        flash(f"{shares} shares of {name} ({symbol}) sold for {usd(total)}")
        return redirect('/')
    # GET: list holdings with a positive net share balance.
    stocks = db.execute("SELECT *, sum(shares) as total_shares FROM user_transactions WHERE user_id = ? " "GROUP BY symbol HAVING sum(shares) > 0 ORDER BY company_name", session['user_id'])
    return render_template('sell.html', stocks=stocks)
Sell shares of stock
625941bd379a373c97cfaa48
def randomize_weights(self):
    """Re-randomize the weights of every perceptron in the network.

    Iterates the perceptron list directly instead of hard-coding the
    magic count 325, so the method keeps working if the network size
    changes.
    """
    for perceptron in self.perceptron_list:
        perceptron.randomize()
Simple little method to re-randomize weights.
625941bd29b78933be1e55b5
def dagger(self):
    """The dagger of a box.

    :return: A callable box inverse/dagger to this one
    :rtype: :class:`CallableDaggerBox`
    """
    # Swap dom/cod, function/dagger_function and name/dagger_name,
    # and flip the dagger flag.
    swapped = dict(
        name=self._dagger_name,
        dom=self.cod,
        cod=self.dom,
        function=self._dagger_function,
        dagger_function=self.function,
        dagger_name=self.name,
        data=self.data,
        _dagger=not self.is_dagger,
    )
    return type(self)(**swapped)
The dagger of a box :return: A callable box inverse/dagger to this one :rtype: :class:`CallableDaggerBox`
625941bd07d97122c417878a
def _build_state(self): <NEW_LINE> <INDENT> pass
Hook function to build state to be used while building all the filters. Useful to compute common data between all filters such as some data about model so that the computation can be avoided while building individual filters.
625941bd7047854f462a1311
def _get_batch_title_director(self, batch):
    # Scrape a batch of IMDB movie URLs concurrently and return the
    # parsed (titles, directors) lists, index-aligned with `batch`.
    cfg.logger.info(f"Scraping batch")
    # Fire all requests concurrently via grequests.
    rs = (grequests.get(url) for url in batch)
    resp = grequests.map(rs)
    cfg.logger.debug(f"Size of resp: {len(resp)}")
    titles = []
    directors = []
    for i, r in enumerate(resp):
        if r is None:
            # Request failed; _parse_title_director is still called
            # with None below — NOTE(review): confirm it handles None.
            cfg.logger.critical("A webpage content was expected, got None instead.")
        title, director = self._parse_title_director(r)
        if title and director:
            cfg.logger.info("Parsed title and director successfully")
        cfg.logger.debug(f"{i + 1}/{len(batch)} - title: {title}, director: {director}")
        titles.append(title)
        directors.append(director)
    return titles, directors
Given an IMDB movies' urls list, it retrieves the movies' titles and directors
625941bde64d504609d74744
def get_mention_string(self):
    """Create telegram-style user mention string.

    Note: requires 'parse_mode' of 'send_message' set to 'markdown'.

    Returns:
        str. A telegram-style user mention.
    """
    return "[@{}](tg://user?id={})".format(self.first_name, self.id)
Create telegram-style user mention string. Note: When using this you need to set the 'parse_mode' of the 'send_message' function to 'markdown' mode. Returns: str. A telegram-style user mention.
625941bd30bbd722463cbcc8
def time_me__decorator(func):
    """Decorator that prints the wrapped function's name and the number
    of msec it took to run.

    Fix: the original printed an undefined name ``label`` (a NameError
    on every call — presumably a leftover from a parameterised outer
    decorator); the wrapped function's name is printed instead.
    """
    @wraps(func)
    def time_me__function_wrapper(*args, **kw):
        start_ms = int(round(time.time() * 1000))
        result = func(*args, **kw)
        end_ms = int(round(time.time() * 1000))
        print(func.__name__, end_ms - start_ms, 'ms')
        return result
    return time_me__function_wrapper
Prints a label and the number of msec it took to run a function.
625941bd6e29344779a62519
def bin_to_gray(bin_list):
    """Convert from binary coding to gray coding.

    We assume big endian encoding: gray[0] = bin[0] and
    gray[i] = bin[i] XOR bin[i-1].

    Fix: the original XORed each input bit against previously *computed
    gray* bits (b[i-1], starting from b[-1]), which gives wrong output
    for inputs such as '110' (produced '100' instead of '101'); gray
    code XORs adjacent bits of the *binary* input.

    Examples
    ========
    >>> bin_to_gray('111')
    '100'

    See Also
    ========
    gray_to_bin
    """
    gray = [bin_list[0]]
    for i in range(1, len(bin_list)):
        gray.append(str(int(bin_list[i]) ^ int(bin_list[i - 1])))
    return ''.join(gray)
Convert from binary coding to gray coding. We assume big endian encoding. Examples ======== >>> from sympy.combinatorics.graycode import bin_to_gray >>> bin_to_gray('111') '100' See Also ======== gray_to_bin
625941bde8904600ed9f1e2e
def bias_add_grad_eager_fallback(out_backprop, data_format="NHWC", name=None, ctx=None):
    # This is the slowpath function for Eager mode, for the
    # BiasAddGrad op: builds the attr list, executes the op eagerly,
    # records the gradient, and returns the single output tensor.
    _ctx = ctx if ctx else _context.context()
    if data_format is None:
        data_format = "NHWC"
    data_format = _execute.make_str(data_format, "data_format")
    # Infer the dtype attr T from the input tensor.
    _attr_T, (out_backprop,) = _execute.args_to_matching_eager([out_backprop], _ctx)
    _inputs_flat = [out_backprop]
    _attrs = ("T", _attr_T, "data_format", data_format)
    _result = _execute.execute(b"BiasAddGrad", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
    _execute.record_gradient( "BiasAddGrad", _inputs_flat, _attrs, _result, name)
    # Unpack the single-element result list.
    _result, = _result
    return _result
This is the slowpath function for Eager mode. This is for function bias_add_grad
625941bd1f037a2d8b946103
def patternGenerator(self):
    # Yield an iterator over a given pattern sequence from a
    # diffraction file.  Two input layouts are supported:
    #   * a directory of .h5 files (one pattern per file, filtered by
    #     self.pattern_indices unless it is 'all'),
    #   * a single .h5 file with per-pattern groups under /data/<ix>
    #     (patterns from this layout are multiplied by self.mask).
    # When self.poissonize is set, the 'data' dataset is read instead
    # of 'diffr'.
    indices = self.pattern_indices
    path = self.input_path
    if os.path.isdir(path):
        dir_listing = os.listdir(path)
        dir_listing.sort()
        if indices != 'all':
            # Select files by position in the sorted listing.
            dir_listing = [ d for (i, d) in enumerate(dir_listing) if i in indices ]
        h5_files = [ os.path.join(path, f) for f in dir_listing if f.split('.')[-1] == "h5" ]
        for h5_file in h5_files:
            try:
                with h5py.File(h5_file, 'r') as h5:
                    root_path = '/data/'
                    if self.poissonize:
                        path_to_data = root_path + 'data'
                    else:
                        path_to_data = root_path + 'diffr'
                    diffr = h5[path_to_data].value
                    yield diffr
            except:
                # NOTE(review): bare except silently skips unreadable
                # files — consider narrowing to (OSError, KeyError).
                continue
    else:
        with h5py.File(path, 'r') as h5:
            if indices is None or indices == 'all':
                indices = [key for key in h5['data'].keys()]
            else:
                # Group names are zero-padded 7-digit indices.
                indices = ["%0.7d" % ix for ix in indices]
            for ix in indices:
                root_path = '/data/%s/' % (ix)
                if self.poissonize:
                    path_to_data = root_path + 'data'
                else:
                    path_to_data = root_path + 'diffr'
                diffr = h5[path_to_data].value
                yield diffr * self.mask
Yield an iterator over a given pattern sequence from a diffraction file.
625941bdbde94217f3682cf9
def search_re(out, l):
    """Search regular expression `l` in the output `out`.

    Return the start index of the match, or None when there is none.
    """
    match = re.search(l, out)
    return match.start() if match else None
Search the regular expression 'l' in the output 'out' and return the start index when successful.
625941bddc8b845886cb5439
def align_reads(self): <NEW_LINE> <INDENT> self._test_folder_existance( self._paths.required_read_alignment_folders()) <NEW_LINE> assert self._args.paired_end in [True, False] <NEW_LINE> self._ref_seq_files = self._paths.get_ref_seq_files() <NEW_LINE> self._paths.set_ref_seq_paths(self._ref_seq_files) <NEW_LINE> self._test_align_file_existance() <NEW_LINE> if not self._args.paired_end: <NEW_LINE> <INDENT> self._read_files = self._paths.get_read_files() <NEW_LINE> self._lib_names = self._paths.get_lib_names_single_end() <NEW_LINE> self._paths.set_read_files_dep_file_lists_single_end( self._read_files, self._lib_names) <NEW_LINE> if not self._args.realign: <NEW_LINE> <INDENT> self._set_primary_aligner_paths_to_final_paths() <NEW_LINE> <DEDENT> self._prepare_reads_single_end() <NEW_LINE> self._align_single_end_reads() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self._read_file_pairs = self._paths.get_read_file_pairs() <NEW_LINE> self._lib_names = self._paths.get_lib_names_paired_end() <NEW_LINE> self._paths.set_read_files_dep_file_lists_paired_end( self._read_file_pairs, self._lib_names) <NEW_LINE> if not self._args.realign: <NEW_LINE> <INDENT> self._set_primary_aligner_paths_to_final_paths() <NEW_LINE> <DEDENT> self._prepare_reads_paired_end() <NEW_LINE> self._align_paired_end_reads() <NEW_LINE> <DEDENT> self._sam_to_bam( self._paths.primary_read_aligner_sam_paths, self._paths.primary_read_aligner_bam_prefix_paths, self._paths.primary_read_aligner_bam_paths) <NEW_LINE> self._generate_read_alignment_stats( self._lib_names, self._paths.primary_read_aligner_bam_paths, self._paths.unaligned_reads_paths, self._paths.primary_read_aligner_stats_path) <NEW_LINE> final_unaligned_reads_paths = self._paths.unaligned_reads_paths <NEW_LINE> if self._args.realign: <NEW_LINE> <INDENT> self._run_realigner_and_process_alignments() <NEW_LINE> self._merge_bam_files() <NEW_LINE> final_unaligned_reads_paths = ( self._paths.realigned_unaligned_reads_paths) <NEW_LINE> <DEDENT> if 
self._args.crossalign_cleaning_str is not None: <NEW_LINE> <INDENT> self._remove_crossaligned_reads() <NEW_LINE> <DEDENT> self._generate_read_alignment_stats( self._lib_names, self._paths.read_alignment_bam_paths, final_unaligned_reads_paths, self._paths.read_alignments_stats_path) <NEW_LINE> self._write_alignment_stat_table()
Perform the alignment of the reads.
625941bd4e696a04525c9351
def type(self): <NEW_LINE> <INDENT> return self._type
Return the type of the face. EXAMPLES:: sage: from sage.combinat.e_one_star import Face sage: f = Face((0,2,0), 3) sage: f.type() 3 :: sage: f = Face((0,2,0), 3) sage: f.type() 3
625941bd377c676e912720ae
def get_addkey(self, user): <NEW_LINE> <INDENT> sum = 0 <NEW_LINE> L = [] <NEW_LINE> for i in user: <NEW_LINE> <INDENT> if sum < len(user): <NEW_LINE> <INDENT> emails = self.config_read('addressed', i) <NEW_LINE> L.append(emails) <NEW_LINE> sum += 1 <NEW_LINE> <DEDENT> <DEDENT> return L
遍历获得配置文件收件人email
625941bd498bea3a759b99b5
def weixin_oauth(weixin): <NEW_LINE> <INDENT> original_methods = { 'authorize': weixin.authorize, 'authorized_response': weixin.authorized_response, } <NEW_LINE> def authorize(*args, **kwargs): <NEW_LINE> <INDENT> response = original_methods['authorize'](*args, **kwargs) <NEW_LINE> url = url_parse(response.headers['Location']) <NEW_LINE> args = url.decode_query() <NEW_LINE> args['appid'] = args.pop('client_id') <NEW_LINE> url = url.replace(query=url_encode(args), fragment='wechat_redirect') <NEW_LINE> response.headers['Location'] = url.to_url() <NEW_LINE> return response <NEW_LINE> <DEDENT> def authorized_response(*args, **kwargs): <NEW_LINE> <INDENT> original_access_token_params = weixin.access_token_params <NEW_LINE> weixin.access_token_params = { 'appid': weixin.consumer_key, 'secret': weixin.consumer_secret, } <NEW_LINE> response = original_methods['authorized_response'](*args, **kwargs) <NEW_LINE> weixin.access_token_params = original_access_token_params <NEW_LINE> return response <NEW_LINE> <DEDENT> weixin.authorize = authorize <NEW_LINE> weixin.authorized_response = authorized_response <NEW_LINE> return weixin
Fixes the nonstandard OAuth interface of Tencent WeChat.
625941bd26068e7796caebdf
def setup(): <NEW_LINE> <INDENT> txt = sys.argv[1] if len(sys.argv) > 1 else input("Code:\n") <NEW_LINE> if re.search(r'.bf|.txt', txt): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> with open(txt, 'r') as f: <NEW_LINE> <INDENT> txt = f.read() <NEW_LINE> <DEDENT> <DEDENT> except IOError as err: <NEW_LINE> <INDENT> print(err) <NEW_LINE> setup() <NEW_LINE> <DEDENT> <DEDENT> code = list(re.sub(r'[^<>+-.,\[\]]', '', txt)) <NEW_LINE> brainfuck.main(code)
Sanitize code
625941bd38b623060ff0acf3
def find_pattern_4(i1, i2, i3, i4, i5, i6, i7): <NEW_LINE> <INDENT> t1 = i1.split() <NEW_LINE> t2 = i2.split() <NEW_LINE> t3 = i3.split() <NEW_LINE> t4 = i4.split() <NEW_LINE> t5 = i5.split() <NEW_LINE> t6 = i6.split() <NEW_LINE> t7 = i7.split() <NEW_LINE> a, b, c, d = None, None, None, None <NEW_LINE> if t1[0] == 'cpy' and t1[1] == '0': <NEW_LINE> <INDENT> a = t1[-1] <NEW_LINE> if t2[0] == 'cpy' and t2[1] in ['b', 'c', 'd'] and t2[2] in ['b', 'c', 'd'] and t2[2] != t2[1]: <NEW_LINE> <INDENT> b = t2[1] <NEW_LINE> c = t2[2] <NEW_LINE> if t3[0] == 'inc' and t3[1] == a and t4[0] == 'dec' and t4[1] == c and t5[0] == 'jnz' and t5[1] == c and t5[2] == '-2': <NEW_LINE> <INDENT> if t6[0] == 'dec' and t6[1] not in [a, b, c]: <NEW_LINE> <INDENT> d = t6[1] <NEW_LINE> if t7[0] == 'jnz' and t7[2] == '-5' and t7[1] == d: <NEW_LINE> <INDENT> return True, (a, b, c, d) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> <DEDENT> return False, None
Look for patterns: cpy 0 a cpy b c inc a dec c jnz c -2 dec d jnz d -5 which translate to: a *= b c = 0 d = 0
625941bd0a50d4780f666d95
def posix_shell(chan, user_obj, bind_host_obj, cmd_caches, log_recording): <NEW_LINE> <INDENT> import select <NEW_LINE> oldtty = termios.tcgetattr(sys.stdin) <NEW_LINE> try: <NEW_LINE> <INDENT> tty.setraw(sys.stdin.fileno()) <NEW_LINE> tty.setcbreak(sys.stdin.fileno()) <NEW_LINE> chan.settimeout(0.0) <NEW_LINE> cmd = '' <NEW_LINE> tab_key = False <NEW_LINE> while True: <NEW_LINE> <INDENT> r, w, e = select.select([chan, sys.stdin], [], []) <NEW_LINE> if chan in r: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> x = u(chan.recv(1024)) <NEW_LINE> if tab_key: <NEW_LINE> <INDENT> if x not in ('\x07', '\r\n'): <NEW_LINE> <INDENT> cmd += x <NEW_LINE> <DEDENT> tab_key = False <NEW_LINE> <DEDENT> if len(x) == 0: <NEW_LINE> <INDENT> sys.stdout.write('\r\n*** EOF\r\n') <NEW_LINE> break <NEW_LINE> <DEDENT> sys.stdout.write(x) <NEW_LINE> sys.stdout.flush() <NEW_LINE> <DEDENT> except socket.timeout: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> <DEDENT> if sys.stdin in r: <NEW_LINE> <INDENT> x = sys.stdin.read(1) <NEW_LINE> if '\r' != x: <NEW_LINE> <INDENT> cmd += x <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> user_record_cmd = user_obj.username + '_user_record' <NEW_LINE> pool = redis.ConnectionPool(host='localhost', port=6379) <NEW_LINE> user_record = [user_obj.id, bind_host_obj.id, 'cmd', cmd, time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())] <NEW_LINE> r = redis.Redis(connection_pool=pool) <NEW_LINE> r.lpush(user_record_cmd, user_record) <NEW_LINE> cmd = '' <NEW_LINE> <DEDENT> if '\t' == x: <NEW_LINE> <INDENT> tab_key = True <NEW_LINE> <DEDENT> if len(x) == 0: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> chan.send(x) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> finally: <NEW_LINE> <INDENT> termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty)
:param chan: :param user_obj: :param bind_host_obj: :param cmd_caches: :param log_recording: :return:
625941bd91af0d3eaac9b91b
def advance(self): <NEW_LINE> <INDENT> return self.current_playlist.advance()
Convenience proxy for self.get_current_playlist().advance()
625941bda17c0f6771cbdf58
def _default_browse_path(self): <NEW_LINE> <INDENT> current_index = self._presets.currentIndex() <NEW_LINE> path = self._presets.itemData(current_index) <NEW_LINE> if not path: <NEW_LINE> <INDENT> paths = self.get_preset_paths(get_all=True) <NEW_LINE> if paths: <NEW_LINE> <INDENT> path = paths[-1] <NEW_LINE> <DEDENT> <DEDENT> return path
Returns the current browse path for save/load preset If a preset is currently loaded it will use that specific path otherwise it will go to the last registered preset path :return: str, path to use as default browse location
625941bd460517430c394091
def is_in_previous_version(path, new_contents, encoding): <NEW_LINE> <INDENT> if os.path.isfile(path): <NEW_LINE> <INDENT> with codecs.open(path, 'rb', encoding=encoding) as handle: <NEW_LINE> <INDENT> if handle.read() == new_contents: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> elif os.path.isdir(path): <NEW_LINE> <INDENT> shutil.rmtree(path) <NEW_LINE> <DEDENT> elif os.path.exists(path): <NEW_LINE> <INDENT> raise Exception('The object at {} is not recognisable! It is ' 'neither a file, nor a directory!'.format(path)) <NEW_LINE> <DEDENT> return False
Test if a file we try to create already is in the output directory. It tests if the pre-existent file has all the expected content. It also handles the following two cases: 1. The path is a directory - If this happens, it removes the directory from the file tree. 2. The path exists, but it's neither a file, nor a directory. - If this happens, it will raise an exception. Parameters ---------- path: str The path we want to test for existence. new_contents: bytes The contents we want to write to the file in the new version of the website. encoding: str The encoding to open the file in (if the path exists and is a file. Returns ------- bool True - if the file exists and has the same contents. False - if the path doesn't exist/ is not a file. Raises ------ Exception If the path exists, but is neither a file, nor a directory.
625941bd8c3a8732951582bd
def get_batches(data, target, batch_size, mode='test', use_gpu=False): <NEW_LINE> <INDENT> idx = np.arange(0, data.shape[0]) <NEW_LINE> if mode == 'train': <NEW_LINE> <INDENT> np.random.shuffle(idx) <NEW_LINE> <DEDENT> while idx.shape[0] > 0: <NEW_LINE> <INDENT> batch_idx = idx[:batch_size] <NEW_LINE> idx = idx[batch_size:] <NEW_LINE> batch_data = data[batch_idx] <NEW_LINE> batch_target = target[batch_idx] <NEW_LINE> batch_data = torch.from_numpy(batch_data) <NEW_LINE> batch_data = batch_data.float() <NEW_LINE> batch_target = torch.from_numpy(batch_target) <NEW_LINE> batch_target = batch_target.float().view(-1, 1) <NEW_LINE> if use_gpu: <NEW_LINE> <INDENT> batch_data = batch_data.cuda() <NEW_LINE> batch_target = batch_target.cuda() <NEW_LINE> <DEDENT> yield batch_data, batch_target
Generator function to yield minibatches of data and targets
625941bd30dc7b766590186e
def __lt__(self, other): <NEW_LINE> <INDENT> lower = self.side < other.side <NEW_LINE> return bool(lower)
Méthode permettant de vérifier si un premier carré est plus petit qu'un autre.
625941bd10dbd63aa1bd2aac
def symbol_to_path(symbol, base_dir="data"): <NEW_LINE> <INDENT> return os.path.join(base_dir, "{}.csv".format(str(symbol)))
Return CSV file path given ticker symbol
625941bd31939e2706e4cd73
def load_vgg16(): <NEW_LINE> <INDENT> base_model = VGG16(include_top=True, weights='imagenet', input_shape=(224, 224, 3)) <NEW_LINE> model = Model(inputs=base_model.input, outputs=base_model.get_layer(name="fc2").output) <NEW_LINE> return model
Method to load the VGG16 model
625941bda219f33f34628872
def close(self, abandon=False, add_to_catalog=True): <NEW_LINE> <INDENT> if abandon: <NEW_LINE> <INDENT> pkg_fmri = None <NEW_LINE> pkg_state = "ABANDONED" <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> pkg_fmri = self.pkg_name <NEW_LINE> pkg_state = "PUBLISHED" <NEW_LINE> <DEDENT> return pkg_fmri, pkg_state
Ends an in-flight transaction. Returns a tuple containing a package fmri (if applicable) and the final state of the related package.
625941bdad47b63b2c509e86
def _is_unexpected_warning( actual_warning: warnings.WarningMessage, expected_warning: type[Warning] | bool | None, ) -> bool: <NEW_LINE> <INDENT> if actual_warning and not expected_warning: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> expected_warning = cast(Type[Warning], expected_warning) <NEW_LINE> return bool(not issubclass(actual_warning.category, expected_warning))
Check if the actual warning issued is unexpected.
625941bd15fb5d323cde0a11
def checksum_md5(filename): <NEW_LINE> <INDENT> amd5 = md5() <NEW_LINE> with open(filename, mode='rb') as f: <NEW_LINE> <INDENT> for chunk in iter(lambda: f.read(128 * amd5.block_size), b''): <NEW_LINE> <INDENT> amd5.update(chunk) <NEW_LINE> <DEDENT> <DEDENT> return amd5.hexdigest()
Calculates the MD5 checksum of a file.
625941bd5fc7496912cc3883
def test_headloss_manifold(self): <NEW_LINE> <INDENT> checks = (([0.12 * u.m**3/u.s, 0.4 * u.m, 6 * u.m, 0.8, 0.75 * u.m**2/u.s, 0.0003 * u.m, 5], 38.57715300752375 * u.m), ([2 * u.m**3/u.s, 6 * u.m, 40 * u.m, 5, 1.1 * u.m**2/u.s, 0.04 * u.m, 6], 0.11938889890999548 * u.m)) <NEW_LINE> for i in checks: <NEW_LINE> <INDENT> with self.subTest(i=i): <NEW_LINE> <INDENT> self.assertAlmostEqualQuantity(pc.headloss_manifold(*i[0]), i[1])
headloss_manifold should return known value for known input.
625941bd16aa5153ce36237e
def persist(self): <NEW_LINE> <INDENT> config_dir = self.opts.get("conf_dir", None) <NEW_LINE> if config_dir is None and "conf_file" in self.opts: <NEW_LINE> <INDENT> config_dir = os.path.dirname(self.opts["conf_file"]) <NEW_LINE> <DEDENT> if config_dir is None: <NEW_LINE> <INDENT> config_dir = salt.syspaths.CONFIG_DIR <NEW_LINE> <DEDENT> minion_d_dir = os.path.join( config_dir, os.path.dirname( self.opts.get( "default_include", salt.config.DEFAULT_MINION_OPTS["default_include"], ) ), ) <NEW_LINE> if not os.path.isdir(minion_d_dir): <NEW_LINE> <INDENT> os.makedirs(minion_d_dir) <NEW_LINE> <DEDENT> schedule_conf = os.path.join(minion_d_dir, "_schedule.conf") <NEW_LINE> log.debug("Persisting schedule") <NEW_LINE> schedule_data = self._get_schedule(include_pillar=False, remove_hidden=True) <NEW_LINE> try: <NEW_LINE> <INDENT> with salt.utils.files.fopen(schedule_conf, "wb+") as fp_: <NEW_LINE> <INDENT> fp_.write( salt.utils.stringutils.to_bytes( salt.utils.yaml.safe_dump({"schedule": schedule_data}) ) ) <NEW_LINE> <DEDENT> <DEDENT> except (IOError, OSError): <NEW_LINE> <INDENT> log.error( "Failed to persist the updated schedule", exc_info_on_loglevel=logging.DEBUG, )
Persist the modified schedule into <<configdir>>/<<default_include>>/_schedule.conf
625941bd32920d7e50b280d3
def init_command(self, action, flags=None): <NEW_LINE> <INDENT> cmd = ['singularity', action ] <NEW_LINE> if self.quiet is True: <NEW_LINE> <INDENT> cmd.insert(1, '--quiet') <NEW_LINE> <DEDENT> if self.debug is True: <NEW_LINE> <INDENT> cmd.insert(1, '--debug') <NEW_LINE> <DEDENT> return cmd
return the initial Singularity command with any added flags. Parameters ========== action: the main action to perform (e.g., build) flags: one or more additional flags (e.g, volumes) not implemented yet.
625941bd711fe17d82542276
def __call__(self, g_ij, x_i, x_j): <NEW_LINE> <INDENT> temp = self.a * (1 + numpy.tanh((self.b * x_j - self.midpoint) / self.sigma)) <NEW_LINE> if self.normalise: <NEW_LINE> <INDENT> in_strength = g_ij.sum(axis=2)[:, :, numpy.newaxis, :] <NEW_LINE> in_strength[in_strength==0] = numpy.inf <NEW_LINE> temp *= (g_ij / in_strength) <NEW_LINE> coupled_input = temp.mean(axis=0) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> coupled_input = (g_ij*temp).mean(axis=0) <NEW_LINE> <DEDENT> return coupled_input
Evaluate the Sigmoidal function for the arg ``x``. The equation being evaluated has the following form: .. math:: a * (1 + tanh((x - midpoint)/sigma))
625941bd442bda511e8be322
def __json__(self): <NEW_LINE> <INDENT> json_ = {} <NEW_LINE> json_['id'] = self.id <NEW_LINE> json_['action'] = self.action <NEW_LINE> json_['created'] = self.created <NEW_LINE> json_['updated'] = self.updated <NEW_LINE> json_['ctxt'] = self.ctxt <NEW_LINE> json_['method'] = self.method <NEW_LINE> json_['args'] = self.args <NEW_LINE> json_['status'] = self.status <NEW_LINE> json_['owner'] = self.owner <NEW_LINE> json_['response'] = self.response <NEW_LINE> json_['failure'] = self.failure <NEW_LINE> return json_
JSON representation
625941bdff9c53063f47c0fb
def remove_stop_words(text, stop_words=stop_words_list): <NEW_LINE> <INDENT> stop_words = set(stop_words) <NEW_LINE> split_list = text.split(" ") <NEW_LINE> split_list = [word for word in split_list if word not in stop_words] <NEW_LINE> return " ".join(split_list)
This function removes stop words from text Example: I am very excited for today's football match => very excited today's football match Params text (str) :text on which processing needs to done stop_words (list) : stop words which needs to be removed Returns text(str): text after stop words removal
625941bd56b00c62f0f1455d
def create_observations(df, subject_label, observation_size): <NEW_LINE> <INDENT> observations = [] <NEW_LINE> labels = [] <NEW_LINE> for i in range(len(df)-observation_size): <NEW_LINE> <INDENT> df_observation = df.iloc[i:i + observation_size] <NEW_LINE> has_nan = np.isnan(df_observation["BtO2"]).any() or np.isnan(df_observation["HR"]).any() or np.isnan(df_observation["SpO2"]).any() or np.isnan(df_observation["artMAP"]).any() <NEW_LINE> if not has_nan: <NEW_LINE> <INDENT> observations.append(df_observation) <NEW_LINE> labels.append(subject_label) <NEW_LINE> <DEDENT> <DEDENT> return observations, labels
Creates an array of observations and the corresponding labels. The observations are a rolling window of the data frame, that is, a subset of the data frame is copied for every observation. :param df: DataFrame to split up into observations :param subject_label: 1 or 0, to be repeated as each observation's label :param observation_size: The size of each observation :return: Array (observations), Array (labels)
625941bd66656f66f7cbc0b0
def max_value(self, state, currDepth, maxDepthToReach, numberOfAgents): <NEW_LINE> <INDENT> if (currDepth ) == maxDepthToReach: <NEW_LINE> <INDENT> return self.evaluationFunction(state) <NEW_LINE> <DEDENT> listOfActions = state.getLegalActions(0) <NEW_LINE> max_score = -99999999999 <NEW_LINE> best_action_so_far = Directions.STOP <NEW_LINE> best_action_found = False <NEW_LINE> for action in listOfActions: <NEW_LINE> <INDENT> mini_score = self.value(state.generateSuccessor(0, action), currDepth, self.depth, 1, numberOfAgents) <NEW_LINE> if mini_score > max_score: <NEW_LINE> <INDENT> max_score = mini_score <NEW_LINE> best_action_so_far = action <NEW_LINE> best_action_found = True <NEW_LINE> <DEDENT> <DEDENT> if not best_action_found: <NEW_LINE> <INDENT> return state.getScore() <NEW_LINE> <DEDENT> self.nextActionToTake = best_action_so_far <NEW_LINE> return max_score
:param state: current state of the game :param currDepth: current depth of the ply we explored :param numberOfAgents: Number of agents in the game :param maxDepthToReach: Maximum depth we should explore. Any node at this depth should be directly evaluated using the self.evaluation function :return: max_score that the MAX player can achieve. ------------------------------------------------------- This method tries to find the max score a MAX player can make. It is based on the following logic: 1) If the currDepth is the maximum depth according to the self.depth, then directly calculate the score for the state using the self.evaluationFunction and the return the score. 2) Calculate the max score based on the scores of the MIN players for every action taken by the MAX player. 3) If we cannot find any optimal max score (this may be probably because of state either being a win or a lose position), we directly send the score of that state using the state.getScore() method 4) In case, we found the max score, than we update the self.nextActionToTake property, which will be used by our getAction method.
625941bd30bbd722463cbcc9
def naked_twins(values): <NEW_LINE> <INDENT> for unit in unitlist: <NEW_LINE> <INDENT> values_seen = dict() <NEW_LINE> naked_values = [] <NEW_LINE> naked_boxes = [] <NEW_LINE> for box in unit: <NEW_LINE> <INDENT> if values[box] in values_seen and len(values[box]) == 2: <NEW_LINE> <INDENT> naked_values += values[box] <NEW_LINE> naked_boxes.append(box) <NEW_LINE> naked_boxes.append(values_seen[values[box]]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> values_seen[values[box]] = box <NEW_LINE> <DEDENT> <DEDENT> for box in unit: <NEW_LINE> <INDENT> if box in naked_boxes: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> for value in naked_values: <NEW_LINE> <INDENT> for digit in list(value): <NEW_LINE> <INDENT> assign_value(values, box, values[box].replace(digit, '')) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> return values
Eliminate values using the naked twins strategy. Args: values(dict): a dictionary of the form {'box_name': '123456789', ...} Returns: the values dictionary with the naked twins eliminated from peers.
625941bd5f7d997b8717499a
def __init__(self, routepath, **kargs): <NEW_LINE> <INDENT> self.routepath = routepath <NEW_LINE> self.sub_domains = False <NEW_LINE> self.prior = None <NEW_LINE> self.name = None <NEW_LINE> self.minimization = kargs.pop('_minimize', True) <NEW_LINE> self.encoding = kargs.pop('_encoding', 'utf-8') <NEW_LINE> self.reqs = kargs.get('requirements', {}) <NEW_LINE> self.decode_errors = 'replace' <NEW_LINE> self.static = kargs.pop('_static', False) <NEW_LINE> self.filter = kargs.pop('_filter', None) <NEW_LINE> self.absolute = kargs.pop('_absolute', False) <NEW_LINE> self.member_name = kargs.pop('_member_name', None) <NEW_LINE> self.collection_name = kargs.pop('_collection_name', None) <NEW_LINE> self.parent_resource = kargs.pop('_parent_resource', None) <NEW_LINE> self.conditions = kargs.pop('conditions', None) <NEW_LINE> self.explicit = kargs.pop('_explicit', False) <NEW_LINE> reserved_keys = ['requirements'] <NEW_LINE> self.done_chars = ('/', ',', ';', '.', '#') <NEW_LINE> if self.static: <NEW_LINE> <INDENT> self.external = '://' in self.routepath <NEW_LINE> self.minimization = False <NEW_LINE> <DEDENT> if routepath.startswith('/') and self.minimization: <NEW_LINE> <INDENT> routepath = routepath[1:] <NEW_LINE> <DEDENT> self.routelist = routelist = self._pathkeys(routepath) <NEW_LINE> routekeys = frozenset([key['name'] for key in routelist if isinstance(key, dict)]) <NEW_LINE> if not self.minimization: <NEW_LINE> <INDENT> self.make_full_route() <NEW_LINE> <DEDENT> self.req_regs = {} <NEW_LINE> for key, val in self.reqs.iteritems(): <NEW_LINE> <INDENT> self.req_regs[key] = re.compile('^' + val + '$') <NEW_LINE> <DEDENT> (self.defaults, defaultkeys) = self._defaults(routekeys, reserved_keys, kargs) <NEW_LINE> self.maxkeys = defaultkeys | routekeys <NEW_LINE> (self.minkeys, self.routebackwards) = self._minkeys(routelist[:]) <NEW_LINE> self.hardcoded = frozenset([key for key in self.maxkeys if key not in routekeys and self.defaults[key] is not None])
Initialize a route, with a given routepath for matching/generation The set of keyword args will be used as defaults. Usage:: >>> from routes.base import Route >>> newroute = Route(':controller/:action/:id') >>> sorted(newroute.defaults.items()) [('action', 'index'), ('id', None)] >>> newroute = Route('date/:year/:month/:day', ... controller="blog", action="view") >>> newroute = Route('archives/:page', controller="blog", ... action="by_page", requirements = { 'page':'\d{1,2}' }) >>> newroute.reqs {'page': '\\d{1,2}'} .. Note:: Route is generally not called directly, a Mapper instance connect method should be used to add routes.
625941bd3c8af77a43ae36a4
def __init__(self, values): <NEW_LINE> <INDENT> self.values = values <NEW_LINE> self.i = -1
Set the values :param values : [Identifier|Number|SymbolicExpression]
625941bd0c0af96317bb80ee
def _create_predefined_nodes_seq2seq_csv(self, wf_state_id): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> self._set_nn_wf_node_info( wf_state_id, self.netconf_data, 'data_frame') <NEW_LINE> self._set_nn_wf_node_info( wf_state_id, self.netconf_feed, 'pre_feed_fr2seq') <NEW_LINE> self._set_nn_wf_node_info( wf_state_id, self.netconf_node, 'seq_to_seq') <NEW_LINE> self._set_nn_wf_node_info( wf_state_id, self.eval_data, 'data_frame') <NEW_LINE> self._set_nn_wf_node_info( wf_state_id, self.eval_feed, 'pre_feed_fr2seq') <NEW_LINE> self._set_nn_wf_node_info( wf_state_id, self.eval_node, 'eval_extra') <NEW_LINE> self._set_nn_wf_node_relation(wf_state_id, self.netconf_data, self.netconf_feed) <NEW_LINE> self._set_nn_wf_node_relation(wf_state_id, self.netconf_feed, self.netconf_node) <NEW_LINE> self._set_nn_wf_node_relation(wf_state_id, self.netconf_node, self.eval_node) <NEW_LINE> self._set_nn_wf_node_relation(wf_state_id, self.eval_data, self.eval_feed) <NEW_LINE> self._set_nn_wf_node_relation(wf_state_id, self.eval_feed, self.eval_node) <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> raise Exception(e) <NEW_LINE> <DEDENT> finally: <NEW_LINE> <INDENT> return True
:return:
625941bd4428ac0f6e5ba6f7
@pos.route('/products', methods=['GET', 'POST']) <NEW_LINE> @login_required <NEW_LINE> def list_products(): <NEW_LINE> <INDENT> products = Product.query.order_by(Product.priority.desc()).filter_by(activity_id=current_user.id).all() <NEW_LINE> return render_template('pos/product_list.html', products=products)
View all products.
625941bd3cc13d1c6d3c7281
def widget(self, r, method="cms", widget_id=None, **attr): <NEW_LINE> <INDENT> if not current.deployment_settings.has_module("cms"): <NEW_LINE> <INDENT> return "" <NEW_LINE> <DEDENT> request = current.request <NEW_LINE> return self.resource_content(request.controller, request.function, widget_id)
Render a Rich Text widget suitable for use in a page such as S3Summary @param method: the widget method @param r: the S3Request @param attr: controller attributes @ToDo: Support comments
625941bddd821e528d63b0b1
def serialize(self, buff): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> _x = self <NEW_LINE> buff.write(_struct_3I.pack(_x.encoder_l, _x.encoder_r, _x.pwm_value)) <NEW_LINE> <DEDENT> except struct.error as se: self._check_types(se) <NEW_LINE> except TypeError as te: self._check_types(te)
serialize message into buffer :param buff: buffer, ``StringIO``
625941bda8ecb033257d2fd5
def load_model(arch, code_length): <NEW_LINE> <INDENT> if arch == 'alexnet': <NEW_LINE> <INDENT> model = models.alexnet(pretrained=True) <NEW_LINE> model.classifier = model.classifier[:-2] <NEW_LINE> model = ModelWrapper(model, 4096, code_length) <NEW_LINE> <DEDENT> elif arch == 'vgg16': <NEW_LINE> <INDENT> model = models.vgg16(pretrained=True) <NEW_LINE> model.classifier = model.classifier[:-3] <NEW_LINE> model = ModelWrapper(model, 4096, code_length) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError("Invalid model name!") <NEW_LINE> <DEDENT> return model
Load CNN model. Args arch(str): Model name. code_length(int): Hash code length. Returns model(torch.nn.Module): CNN model.
625941bd627d3e7fe0d68d54
def _SectionRefs( self ): <NEW_LINE> <INDENT> return ( self._RefNameToSectionName( ref ) for ref in self.repo.listall_references() if self._IsSectionRef( ref ) )
Gets an iterator over the section refs
625941bd7b180e01f3dc470a
@click.group() <NEW_LINE> @click.option('--rpc', metavar="<rpc-address>", default=default_rpc, help='rpc node to use') <NEW_LINE> @click.option('--fishcake-box', metavar="<contract-addr>", default=default_fsck_box_addr, help='fishcake box contract address') <NEW_LINE> @click.option('--fishcake-token', metavar="<contract-addr>", default=default_fsck_token_addr, help='fishcake token contract address') <NEW_LINE> def cli(rpc, fishcake_box, fishcake_token): <NEW_LINE> <INDENT> global pytezos, fishcake_box_addr, fishcake_token_addr <NEW_LINE> pytezos = pytezos.using(shell=rpc) <NEW_LINE> fishcake_box_addr = fishcake_box <NEW_LINE> fishcake_token_addr = fishcake_token
CLI group
625941bda17c0f6771cbdf59
def correct_sign(deg_f, deg_g, s1, rdel, cdel): <NEW_LINE> <INDENT> M = s1[:, :] <NEW_LINE> for i in range(M.rows - deg_f - 1, M.rows - deg_f - rdel - 1, -1): <NEW_LINE> <INDENT> M.row_del(i) <NEW_LINE> <DEDENT> for i in range(M.rows - 1, M.rows - rdel - 1, -1): <NEW_LINE> <INDENT> M.row_del(i) <NEW_LINE> <DEDENT> for i in range(cdel): <NEW_LINE> <INDENT> M.col_del(M.rows - 1) <NEW_LINE> <DEDENT> Md = M[:, 0: M.rows] <NEW_LINE> return Md.det()
Used in various subresultant prs algorithms. Evaluates the determinant, (a.k.a. subresultant) of a properly selected submatrix of s1, Sylvester's matrix of 1840, to get the correct sign and value of the leading coefficient of a given polynomial remainder. deg_f, deg_g are the degrees of the original polynomials p, q for which the matrix s1 = sylvester(p, q, x, 1) was constructed. rdel denotes the expected degree of the remainder; it is the number of rows to be deleted from each group of rows in s1 as described in the reference below. cdel denotes the expected degree minus the actual degree of the remainder; it is the number of columns to be deleted --- starting with the last column forming the square matrix --- from the matrix resulting after the row deletions. References: =========== Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``Sturm Sequences and Modified Subresultant Polynomial Remainder Sequences.'' Serdica Journal of Computing, Vol. 8, No 1, 29–46, 2014.
625941bdb830903b967e9814
def test_hello(test_client): <NEW_LINE> <INDENT> assert True
GIVEN: A flask hello app WHEN: I GET the hello /route THEN: The response should be "hello world"
625941bd5166f23b2e1a505f
def get_templates(self): <NEW_LINE> <INDENT> index_templates = {} <NEW_LINE> for path in glob.iglob(self.data_path + '/template/*.json'): <NEW_LINE> <INDENT> logger.debug('Reading index template setup from {}'.format(path)) <NEW_LINE> index_template = None <NEW_LINE> with open(path) as f: <NEW_LINE> <INDENT> index_template = json.load(f) <NEW_LINE> <DEDENT> template_name = index_template['name'] <NEW_LINE> setup_body = index_template['body'] <NEW_LINE> index_templates[template_name] = setup_body <NEW_LINE> <DEDENT> return index_templates
Builds a templates dict from index templates in the templates folder. The dict has the format: { template_name: { template_body } }
625941bd0c0af96317bb80ef
def SetMaskValue(self, *args): <NEW_LINE> <INDENT> return _itkKappaSigmaThresholdImageFilterPython.itkKappaSigmaThresholdImageFilterIF3IUC3_SetMaskValue(self, *args)
SetMaskValue(self, unsigned char _arg)
625941bd956e5f7376d70d75
def ac_mon(client, device_id): <NEW_LINE> <INDENT> device = client.get_device(device_id) <NEW_LINE> if device.type != wideq.DeviceType.AC: <NEW_LINE> <INDENT> print('This is not an AC device.') <NEW_LINE> return <NEW_LINE> <DEDENT> ac = wideq.ACDevice(client, device) <NEW_LINE> try: <NEW_LINE> <INDENT> ac.monitor_start() <NEW_LINE> while True: <NEW_LINE> <INDENT> time.sleep(1) <NEW_LINE> state = ac.poll() <NEW_LINE> if state: <NEW_LINE> <INDENT> print( '{1}; ' '{0.mode.name}; ' 'cur {0.temp_cur_f} F; ' 'cfg {0.temp_cfg_f} F; ' 'air clean {0.airclean_state.name}' .format( state, 'on' if state.is_on else 'off' ) ) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> except KeyboardInterrupt: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> finally: <NEW_LINE> <INDENT> ac.monitor_stop()
Monitor an AC/HVAC device, showing higher-level information about its status such as its temperature and operation mode.
625941bd656771135c3eb772
def send_monthly_data(): <NEW_LINE> <INDENT> econ_data = pd.read_sql(f'''SELECT * FROM forecast''', ECON_CON) <NEW_LINE> econ_data.datetime = pd.to_datetime(econ_data.datetime) <NEW_LINE> econ_data = econ_data.set_index('datetime', drop=True).replace('', np.nan) <NEW_LINE> data = {} <NEW_LINE> for ccy in econ_data.ccy.unique(): <NEW_LINE> <INDENT> temp = econ_data[ (econ_data.ccy == ccy) & (econ_data.long_term.notna()) ] <NEW_LINE> roll_window = len(temp.ccy_event.unique()) <NEW_LINE> final_data = temp.long_term.rolling(roll_window).sum() / roll_window <NEW_LINE> final_data.index = final_data.index.date <NEW_LINE> data[ccy] = final_data.groupby([final_data.index]).sum() <NEW_LINE> <DEDENT> timeframe = 'D1' <NEW_LINE> symbols = ['EURUSD', 'GBPUSD', 'EURGBP', 'USDCHF', 'NZDUSD', 'USDCAD', 'USDJPY'] <NEW_LINE> for symbol in symbols: <NEW_LINE> <INDENT> df = mt5_ohlc_request(symbol, timeframe, num_candles=270) <NEW_LINE> df.index = df.index.date <NEW_LINE> df['long_term1'] = data[symbol[:3]] <NEW_LINE> df['long_term2'] = data[symbol[3:]] <NEW_LINE> df.long_term1 = df.long_term1.fillna(method='ffill') <NEW_LINE> df.long_term2 = df.long_term2.fillna(method='ffill') <NEW_LINE> df.index = pd.to_datetime(df.index) <NEW_LINE> plots = [] <NEW_LINE> plots.append(mpf.make_addplot(df.long_term1, color='r', panel=1, width=2, secondary_y=True)) <NEW_LINE> plots.append(mpf.make_addplot(df.long_term2, color='b', panel=1, width=1, secondary_y=True)) <NEW_LINE> mpf.plot(df, type='candle', tight_layout=True, addplot=plots, show_nontrading=False, volume=True, title=f'{symbol} {timeframe}', savefig=f'{symbol}_longterm.png' )
Send the long term data as a nice chart :s
625941bdcc40096d61595858
def sbox_nibble(bits, i, N): <NEW_LINE> <INDENT> offset = N - (i+1)*4 <NEW_LINE> nibble = (bits >> offset) & 0xF <NEW_LINE> return bits & ~(0xF << offset) | (SBOX[nibble] << offset)
Replaces the i-th nibble (0-base) in N bits with SBOX[nibble].
625941bd3d592f4c4ed1cf7c
def fibonacci_recursive(n): <NEW_LINE> <INDENT> if n < 0: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> if (n == 0) or (n == 1): <NEW_LINE> <INDENT> return n <NEW_LINE> <DEDENT> return fibonacci_recursive(n - 2) + fibonacci_recursive(n - 1)
Compute and return the nth value in the fibonacci sequence via recursion. Args: n (int): nth position in the fibonacci sequence. Returns: int: nth value in the fibonacci sequence.
625941bdbf627c535bc130d5
def parallel_categories( data_frame=None, dimensions=None, color=None, labels=None, color_continuous_scale=None, range_color=None, color_continuous_midpoint=None, title=None, template=None, width=None, height=None, dimensions_max_cardinality=50, ): <NEW_LINE> <INDENT> return make_figure(args=locals(), constructor=go.Parcats)
In a parallel categories (or parallel sets) plot, each row of `data_frame` is grouped with other rows that share the same values of `dimensions` and then plotted as a polyline mark through a set of parallel axes, one for each of the `dimensions`.
625941bdcb5e8a47e48b79b4
def get(self, question_id): <NEW_LINE> <INDENT> get_question = MY_QUESTION.get_single_question(question_id) <NEW_LINE> response = jsonify(get_question) <NEW_LINE> response.status_code = 200 <NEW_LINE> return response
endpoint to get question by id
625941bd73bcbd0ca4b2bf7d
def parse(self): <NEW_LINE> <INDENT> self.kind = self.packet.data['bk'] <NEW_LINE> if self.kind not in raeting.BODY_KIND_NAMES: <NEW_LINE> <INDENT> self.kind = raeting.bodyKinds.unknown <NEW_LINE> self.packet.error = "Unrecognizible packet body." <NEW_LINE> return False <NEW_LINE> <DEDENT> self.data = odict() <NEW_LINE> if self.kind == raeting.bodyKinds.json: <NEW_LINE> <INDENT> if self.packed: <NEW_LINE> <INDENT> kit = json.loads(self.packed, object_pairs_hook=odict) <NEW_LINE> if not isinstance(kit, Mapping): <NEW_LINE> <INDENT> self.packet.error = "Packet body not a mapping." <NEW_LINE> return False <NEW_LINE> <DEDENT> self.data = kit <NEW_LINE> <DEDENT> <DEDENT> elif self.kind == raeting.bodyKinds.nada: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> return True
Parses body. Assumes already unpacked. Results in updated .data
625941bd4c3428357757c230
def rename_key(self, key_to_rename: dict, strict: bool = True) -> dict: <NEW_LINE> <INDENT> if isinstance(self.subject, dict) and isinstance(key_to_rename, dict): <NEW_LINE> <INDENT> for old, new in key_to_rename.items(): <NEW_LINE> <INDENT> if strict: <NEW_LINE> <INDENT> if old in self.subject: <NEW_LINE> <INDENT> self.subject[new] = self.subject.pop(old) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> to_rename = {} <NEW_LINE> for index in self.subject: <NEW_LINE> <INDENT> if old in index: <NEW_LINE> <INDENT> to_rename.update({index: new[:-1] + index.split(old)[-1]}) <NEW_LINE> <DEDENT> <DEDENT> self.subject = DictHelper(self.subject).rename_key(to_rename, True) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return self.subject
Rename the given keys from the given dictionary. :param key_to_rename: The key(s) to rename. Expected format: :code:`{old:new}` :param strict: Tell us if we have to rename the exact index or the index which looks like the given key(s) :return: The well formatted dict.
625941bd091ae35668666e6a
def match(self, path): <NEW_LINE> <INDENT> m = self._regex.search(path) <NEW_LINE> if not m: <NEW_LINE> <INDENT> raise NotFoundException <NEW_LINE> <DEDENT> items = dict((key, unquote_plus(val)) for key, val in m.groupdict().items()) <NEW_LINE> items = unpickle_dict(items) <NEW_LINE> [items.setdefault(key, val) for key, val in self._options.items()] <NEW_LINE> return self._view_func, items
Attempts to match a url to the given path. If successful, a tuple is returned. The first item is the matched function and the second item is a dictionary containing items to be passed to the function parsed from the provided path. Args: path (str): The URL path. Raises: NotFoundException: If the provided path does not match this url rule.
625941bda4f1c619b28aff46
def format_input(card, prev_card): <NEW_LINE> <INDENT> if prev_card[0] < card[0]: <NEW_LINE> <INDENT> rank = 0 <NEW_LINE> <DEDENT> elif prev_card[0] == card[0]: <NEW_LINE> <INDENT> rank = 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> rank = 2 <NEW_LINE> <DEDENT> suite = prev_card[1] == card[1] <NEW_LINE> return card + prev_card + (rank, suite)
Format two cards in a manner that the decision tree can understand them.
625941bd23e79379d52ee46d
def _replace_names_in_json_dict(jdict, renmap): <NEW_LINE> <INDENT> if jdict: <NEW_LINE> <INDENT> for k in list(jdict.keys()): <NEW_LINE> <INDENT> nk = renmap.get(k) <NEW_LINE> if nk: <NEW_LINE> <INDENT> jdict[nk] = jdict[k] <NEW_LINE> del jdict[k]
Replace keys that has been renamed in a JSON result directory Args: jdict: Json result dictionary renmap: Renaming map, key is name to replace, value is name to use instead
625941bd01c39578d7e74d42
def __repr__(self): <NEW_LINE> <INDENT> return "Channel {0} value: {1}".format(self.channel, self.value)
String representation of this sample
625941bdd6c5a10208143f4f
def lru(maxsize=100): <NEW_LINE> <INDENT> def decorator(call): <NEW_LINE> <INDENT> cache = dict() <NEW_LINE> items_ = items <NEW_LINE> repr_ = repr <NEW_LINE> intern_ = intern <NEW_LINE> cache_get = cache.get <NEW_LINE> len_ = len <NEW_LINE> lock = Lock() <NEW_LINE> root = [] <NEW_LINE> nonlocal_root = [root] <NEW_LINE> root[:] = [root, root, None, None] <NEW_LINE> PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 <NEW_LINE> if maxsize is None: <NEW_LINE> <INDENT> def wrapper(*args, **kw): <NEW_LINE> <INDENT> key = repr_(args, items_(kw)) if kw else repr_(args) <NEW_LINE> result = cache_get(key, root) <NEW_LINE> if result is not root: <NEW_LINE> <INDENT> return result <NEW_LINE> <DEDENT> result = call(*args, **kw) <NEW_LINE> cache[intern_(key)] = result <NEW_LINE> return result <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> def wrapper(*args, **kw): <NEW_LINE> <INDENT> key = repr_(args, items_(kw)) if kw else repr_(args) <NEW_LINE> with lock: <NEW_LINE> <INDENT> link = cache_get(key) <NEW_LINE> if link is not None: <NEW_LINE> <INDENT> root, = nonlocal_root <NEW_LINE> link_prev, link_next, key, result = link <NEW_LINE> link_prev[NEXT] = link_next <NEW_LINE> link_next[PREV] = link_prev <NEW_LINE> last = root[PREV] <NEW_LINE> last[NEXT] = root[PREV] = link <NEW_LINE> link[PREV] = last <NEW_LINE> link[NEXT] = root <NEW_LINE> return result <NEW_LINE> <DEDENT> <DEDENT> result = call(*args, **kw) <NEW_LINE> with lock: <NEW_LINE> <INDENT> root = first(nonlocal_root) <NEW_LINE> if len_(cache) < maxsize: <NEW_LINE> <INDENT> last = root[PREV] <NEW_LINE> link = [last, root, key, result] <NEW_LINE> cache[intern_(key)] = last[NEXT] = root[PREV] = link <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> root[KEY] = key <NEW_LINE> root[RESULT] = result <NEW_LINE> cache[intern_(key)] = root <NEW_LINE> root = nonlocal_root[0] = root[NEXT] <NEW_LINE> del cache[root[KEY]] <NEW_LINE> root[KEY] = None <NEW_LINE> root[RESULT] = None <NEW_LINE> <DEDENT> <DEDENT> return result <NEW_LINE> <DEDENT> 
<DEDENT> def clear(): <NEW_LINE> <INDENT> with lock: <NEW_LINE> <INDENT> cache.clear() <NEW_LINE> root = first(nonlocal_root) <NEW_LINE> root[:] = [root, root, None, None] <NEW_LINE> <DEDENT> <DEDENT> wrapper.__wrapped__ = call <NEW_LINE> wrapper.clear = clear <NEW_LINE> try: <NEW_LINE> <INDENT> return update_wrapper(wrapper, call) <NEW_LINE> <DEDENT> except AttributeError: <NEW_LINE> <INDENT> return wrapper <NEW_LINE> <DEDENT> <DEDENT> return decorator
Least-recently-used cache decorator. If *maxsize* is set to None, the LRU features are disabled and the cache can grow without bound. Arguments to the cached function must be hashable. Access the underlying function with f.__wrapped__. See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used By Raymond Hettinger
625941bdcdde0d52a9e52f36
def _gen_sql(table_name, mappings): <NEW_LINE> <INDENT> pk = None <NEW_LINE> sql = ['-- generating SQL for %s:' % table_name, 'create table `%s` (' % table_name] <NEW_LINE> for f in sorted(mappings.values(), lambda x, y: cmp(x._order, y._order)): <NEW_LINE> <INDENT> if not hasattr(f, 'ddl'): <NEW_LINE> <INDENT> raise StandardError('no ddl in field "%s".' % f) <NEW_LINE> <DEDENT> ddl = f.ddl <NEW_LINE> nullable = f.nullable <NEW_LINE> if f.primary_key: <NEW_LINE> <INDENT> pk = f.name <NEW_LINE> <DEDENT> sql.append(' `%s` %s,' % (f.name, ddl) if nullable else ' `%s` %s not null,' % (f.name, ddl)) <NEW_LINE> <DEDENT> sql.append(' primary key(`%s`)' % pk) <NEW_LINE> sql.append(');') <NEW_LINE> return '\n'.join(sql)
类 ==> 表时 生成创建表的sql
625941bd566aa707497f4476
def test_obscure_date(self): <NEW_LINE> <INDENT> product = self.amazon.lookup(ItemId="0933635869") <NEW_LINE> assert_equals(product.publication_date.year, 1992) <NEW_LINE> assert_equals(product.publication_date.month, 5) <NEW_LINE> assert_true(isinstance(product.publication_date, datetime.date))
Test Obscure Date Formats Test a product with an obscure date format
625941bd66656f66f7cbc0b1
def test_deactivate_multiple_equipments(self): <NEW_LINE> <INDENT> data = [test_utils.EQUIPMENT_CODE, test_utils.EQUIPMENT_CODE_2] <NEW_LINE> deactivate_url = '/equipments/deactivate/' <NEW_LINE> response = self.client.patch(deactivate_url, data, format='json') <NEW_LINE> self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) <NEW_LINE> self.assertEqual(Equipment.objects.get(code=test_utils.EQUIPMENT_CODE).status, "inactive") <NEW_LINE> self.assertEqual(Equipment.objects.get(code=test_utils.EQUIPMENT_CODE_2).status, "inactive")
Ensure we can deactivate more than one equipment.
625941bd379a373c97cfaa4a
@pytest.mark.parametrize('with_psf_obs', [False, True]) <NEW_LINE> @pytest.mark.parametrize('guess_from_moms', [False, True]) <NEW_LINE> def test_admom_psf_runner(with_psf_obs, guess_from_moms): <NEW_LINE> <INDENT> rng = np.random.RandomState(8821) <NEW_LINE> if with_psf_obs: <NEW_LINE> <INDENT> data = get_ngauss_obs( rng=rng, ngauss=1, noise=0.0, with_psf=True, psf_model='gauss', ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> data = get_psf_obs(rng=rng, model='gauss') <NEW_LINE> <DEDENT> obs = data['obs'] <NEW_LINE> guesser = GMixPSFGuesser( rng=rng, ngauss=1, guess_from_moms=guess_from_moms, ) <NEW_LINE> fitter = AdmomFitter() <NEW_LINE> runner = PSFRunner( fitter=fitter, guesser=guesser, ntry=2, ) <NEW_LINE> res = runner.go(obs=obs) <NEW_LINE> assert res['flags'] == 0 <NEW_LINE> imfit = res.make_image() <NEW_LINE> if with_psf_obs: <NEW_LINE> <INDENT> comp_image = obs.psf.image <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> comp_image = obs.image <NEW_LINE> <DEDENT> imtol = 0.001 / obs.jacobian.scale**2 <NEW_LINE> assert np.abs(imfit - comp_image).max() < imtol
Test a PSFRunner running the EM fitter with_psf_obs means it is an ordinary obs with a psf obs also. The code knows to fit the psf obs not the main obs
625941bd167d2b6e31218a9d