code
stringlengths
4
4.48k
docstring
stringlengths
1
6.45k
_id
stringlengths
24
24
def handle_one_request(self): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> self.raw_requestline = self.rfile.readline(65537) <NEW_LINE> if len(self.raw_requestline) > 65536: <NEW_LINE> <INDENT> return self.send_error(414) <NEW_LINE> <DEDENT> if not self.raw_requestline: <NEW_LINE> <INDENT> self.close_connection = 1 <NEW_LINE> return <NEW_LINE> <DEDENT> if not self.parse_request(): <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> for m in [getattr(self, m) for m in dir(self) if m.startswith('parse_preauth_header_')]: <NEW_LINE> <INDENT> if not m(): <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> <DEDENT> if not self.check_authentication(): <NEW_LINE> <INDENT> return self.send_error(401, args[TITLE_AUTH_PROMPT]) <NEW_LINE> <DEDENT> for m in [getattr(self, m) for m in dir(self) if m.startswith('parse_header_')]: <NEW_LINE> <INDENT> if not m(): <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> <DEDENT> command = self.get_command() <NEW_LINE> mname = 'do_' + command <NEW_LINE> if not hasattr(self, mname): <NEW_LINE> <INDENT> self.send_error(501, "Unsupported method (%r)" % self.command) <NEW_LINE> return <NEW_LINE> <DEDENT> self.invoke(getattr(self, mname)) <NEW_LINE> self.wfile.flush() <NEW_LINE> <DEDENT> except socket.timeout: <NEW_LINE> <INDENT> self.close_connection = 1 <NEW_LINE> return self.send_error(408, "Data timeout (%s seconds)" % args[TITLE_TIMEOUT])
Handle a single HTTP request. You normally don't need to override this method; see the class __doc__ string for information on how to handle specific HTTP commands such as GET and POST. That being said, I overrode this function in order to have a nice upstream spot to put the whitelist/blacklist feature.
625941bfe8904600ed9f1e6b
def _save(self, hash_value): <NEW_LINE> <INDENT> session = self.storage() <NEW_LINE> filter_obj = self.table(hash_value=hash_value) <NEW_LINE> session.add(filter_obj) <NEW_LINE> session.commit() <NEW_LINE> session.close()
Use MySQL to store the hash data :param hash_value: :return:
625941bf956e5f7376d70daf
def setStyleAttributes(self, mxObject, m21Object, musicXMLNames, m21Names=None): <NEW_LINE> <INDENT> if isinstance(m21Object, style.Style): <NEW_LINE> <INDENT> stObj = m21Object <NEW_LINE> <DEDENT> elif m21Object.hasStyleInformation is False: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> stObj = m21Object.style <NEW_LINE> <DEDENT> if not common.isIterable(musicXMLNames): <NEW_LINE> <INDENT> musicXMLNames = [musicXMLNames] <NEW_LINE> <DEDENT> if m21Names is None: <NEW_LINE> <INDENT> m21Names = [common.hyphenToCamelCase(x) for x in musicXMLNames] <NEW_LINE> <DEDENT> elif not common.isIterable(m21Names): <NEW_LINE> <INDENT> m21Names = [common.hyphenToCamelCase(m21Names)] <NEW_LINE> <DEDENT> for xmlName, m21Name in zip(musicXMLNames, m21Names): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> m21Value = getattr(stObj, m21Name) <NEW_LINE> <DEDENT> except AttributeError: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> if m21Name in xmlObjects.STYLE_ATTRIBUTES_STR_NONE_TO_NONE and m21Value is None: <NEW_LINE> <INDENT> m21Value = 'none' <NEW_LINE> <DEDENT> if m21Name in xmlObjects.STYLE_ATTRIBUTES_YES_NO_TO_BOOL: <NEW_LINE> <INDENT> m21Value = xmlObjects.booleanToYesNo(m21Value) <NEW_LINE> <DEDENT> if m21Value is None: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> m21Value = str(m21Value) <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> mxObject.set(xmlName, m21Value)
Sets any attribute from .style, doing some conversions. m21Object can also be a style.Style object itself.
625941bfd99f1b3c44c674d5
def get_average_scores(directors): <NEW_LINE> <INDENT> result = defaultdict(float) <NEW_LINE> for director, movies in directors.items(): <NEW_LINE> <INDENT> if len(movies) < MIN_MOVIES: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> result[(director, _calc_mean(movies))] = sorted(movies, key=lambda m: m.score, reverse=True) <NEW_LINE> <DEDENT> <DEDENT> return result
Filter directors with < MIN_MOVIES and calculate average score
625941bf01c39578d7e74d7c
def mutator_flush(self, mutator): <NEW_LINE> <INDENT> self.send_mutator_flush(mutator) <NEW_LINE> self.recv_mutator_flush()
Flush mutator buffers Parameters: - mutator
625941bf091ae35668666ea3
def _resolve_conflict(self, mymembers, members): <NEW_LINE> <INDENT> if isinstance(mymembers, Fieldspec): <NEW_LINE> <INDENT> if isinstance(members, Fieldspec): <NEW_LINE> <INDENT> return Fieldspec(mymembers).merge(members) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return mymembers <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> return members
Resolve conflicting members by merging them.
625941bf0fa83653e4656efd
def maxim_powers(sets, remainder): <NEW_LINE> <INDENT> sums = set() <NEW_LINE> prev = set({0}) <NEW_LINE> for line in sets: <NEW_LINE> <INDENT> sums = set() <NEW_LINE> for j in itertools.product(line, prev): <NEW_LINE> <INDENT> sums.add((j[0]+j[1]) % remainder) <NEW_LINE> <DEDENT> prev = sums <NEW_LINE> <DEDENT> return max(sums)
solve the problem: the entries are already normalized (powers, %M)
625941bfe76e3b2f99f3a751
def _find_master(self): <NEW_LINE> <INDENT> my_node_id = list(self.client.nodes.info('_local')['nodes'])[0] <NEW_LINE> master_node_id = self.client.cluster.state(metric='master_node')['master_node'] <NEW_LINE> self.is_master = my_node_id == master_node_id
Find out if we are connected to the elected master node
625941bfeab8aa0e5d26da98
def add_teds_ai_voltage_chan_with_excit( self, physical_channel, name_to_assign_to_channel="", terminal_config=TerminalConfiguration.DEFAULT, min_val=-10.0, max_val=10.0, units=TEDSUnits.FROM_TEDS, voltage_excit_source=ExcitationSource.INTERNAL, voltage_excit_val=0.0, custom_scale_name=""): <NEW_LINE> <INDENT> cfunc = lib_importer.windll.DAQmxCreateTEDSAIVoltageChanWithExcit <NEW_LINE> if cfunc.argtypes is None: <NEW_LINE> <INDENT> with cfunc.arglock: <NEW_LINE> <INDENT> if cfunc.argtypes is None: <NEW_LINE> <INDENT> cfunc.argtypes = [ lib_importer.task_handle, ctypes_byte_str, ctypes_byte_str, ctypes.c_int, ctypes.c_double, ctypes.c_double, ctypes.c_int, ctypes.c_int, ctypes.c_double, ctypes_byte_str] <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> error_code = cfunc( self._handle, physical_channel, name_to_assign_to_channel, terminal_config.value, min_val, max_val, units.value, voltage_excit_source.value, voltage_excit_val, custom_scale_name) <NEW_LINE> check_for_error(error_code) <NEW_LINE> return self._create_chan(physical_channel, name_to_assign_to_channel)
Creates channel(s) to measure voltage. Use this instance for custom sensors that require excitation. You can use the excitation to scale the measurement. You must configure the physical channel(s) with TEDS information to use this function. Args: physical_channel (str): Specifies the names of the physical channels to use to create virtual channels. The DAQmx physical channel constant lists all physical channels on devices and modules installed in the system. name_to_assign_to_channel (Optional[str]): Specifies a name to assign to the virtual channel this function creates. If you do not specify a value for this input, NI-DAQmx uses the physical channel name as the virtual channel name. terminal_config (Optional[nidaqmx.constants.TerminalConfiguration]): Specifies the input terminal configuration for the channel. min_val (Optional[float]): Specifies in **units** the minimum value you expect to measure. max_val (Optional[float]): Specifies in **units** the maximum value you expect to measure. units (Optional[nidaqmx.constants.TEDSUnits]): Specifies the units to use to return measurements. voltage_excit_source (Optional[nidaqmx.constants.ExcitationSource]): Specifies the source of excitation. voltage_excit_val (Optional[float]): Specifies in volts the amount of excitation supplied to the sensor. Refer to the sensor documentation to determine appropriate excitation values. custom_scale_name (Optional[str]): Specifies the name of a custom scale for the channel. If you want the channel to use a custom scale, specify the name of the custom scale to this input and set **units** to **FROM_CUSTOM_SCALE**. Returns: nidaqmx._task_modules.channels.ai_channel.AIChannel: Indicates the newly created channel object.
625941bf7cff6e4e811178c6
def getData(self): <NEW_LINE> <INDENT> return self.parsedFile
Return the parsed torrent file data.
625941bf6e29344779a62555
def diagonal(a, offset=0, axis1=0, axis2=1): <NEW_LINE> <INDENT> return asarray(a).diagonal(offset, axis1, axis2)
Return specified diagonals. If `a` is 2-D, returns the diagonal of `a` with the given offset, i.e., the collection of elements of the form ``a[i, i+offset]``. If `a` has more than two dimensions, then the axes specified by `axis1` and `axis2` are used to determine the 2-D sub-array whose diagonal is returned. The shape of the resulting array can be determined by removing `axis1` and `axis2` and appending an index to the right equal to the size of the resulting diagonals. Parameters ---------- a : array_like Array from which the diagonals are taken. offset : int, optional Offset of the diagonal from the main diagonal. Can be positive or negative. Defaults to main diagonal (0). axis1 : int, optional Axis to be used as the first axis of the 2-D sub-arrays from which the diagonals should be taken. Defaults to first axis (0). axis2 : int, optional Axis to be used as the second axis of the 2-D sub-arrays from which the diagonals should be taken. Defaults to second axis (1). Returns ------- array_of_diagonals : ndarray If `a` is 2-D, a 1-D array containing the diagonal is returned. If the dimension of `a` is larger, then an array of diagonals is returned, "packed" from left-most dimension to right-most (e.g., if `a` is 3-D, then the diagonals are "packed" along rows). Raises ------ ValueError If the dimension of `a` is less than 2. See Also -------- diag : MATLAB work-a-like for 1-D and 2-D arrays. diagflat : Create diagonal arrays. trace : Sum along diagonals. Examples -------- >>> a = np.arange(4).reshape(2,2) >>> a array([[0, 1], [2, 3]]) >>> a.diagonal() array([0, 3]) >>> a.diagonal(1) array([1]) A 3-D example: >>> a = np.arange(8).reshape(2,2,2); a array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]) >>> a.diagonal(0, # Main diagonals of two arrays created by skipping ... 0, # across the outer(left)-most axis last and ... 1) # the "middle" (row) axis first. 
array([[0, 6], [1, 7]]) The sub-arrays whose main diagonals we just obtained; note that each corresponds to fixing the right-most (column) axis, and that the diagonals are "packed" in rows. >>> a[:,:,0] # main diagonal is [0 6] array([[0, 2], [4, 6]]) >>> a[:,:,1] # main diagonal is [1 7] array([[1, 3], [5, 7]])
625941bf656771135c3eb7ad
def inorder(node, out): <NEW_LINE> <INDENT> if node is not None: <NEW_LINE> <INDENT> inorder(node.left, out) <NEW_LINE> out.append(node.x) <NEW_LINE> inorder(node.right, out)
Useful for traversing BST from min to max
625941bf1f037a2d8b94613f
def __getitem__(self, index): <NEW_LINE> <INDENT> start_index = (index * self.batch_size) + 1 <NEW_LINE> end_index = ((index + 1) * self.batch_size) + 1 <NEW_LINE> inds = self.indices[start_index:end_index] <NEW_LINE> contexts = [] <NEW_LINE> with open(self.context_file, 'r', encoding='utf-8') as cf: <NEW_LINE> <INDENT> for i, line in enumerate(cf, start=1): <NEW_LINE> <INDENT> line = line[:-1] <NEW_LINE> if i in inds: <NEW_LINE> <INDENT> contexts.append(line.split(' ')) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> questions = [] <NEW_LINE> with open(self.question_file, 'r', encoding='utf-8') as qf: <NEW_LINE> <INDENT> for i, line in enumerate(qf, start=1): <NEW_LINE> <INDENT> line = line[:-1] <NEW_LINE> if i in inds: <NEW_LINE> <INDENT> questions.append(line.split(' ')) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> answer_spans = [] <NEW_LINE> with open(self.span_file, 'r', encoding='utf-8') as sf: <NEW_LINE> <INDENT> for i, line in enumerate(sf, start=1): <NEW_LINE> <INDENT> line = line[:-1] <NEW_LINE> if i in inds: <NEW_LINE> <INDENT> answer_spans.append(line.split(' ')) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> if self.squad_version == 2.0: <NEW_LINE> <INDENT> is_impossible = [] <NEW_LINE> with open(self.is_impossible_file, 'r', encoding='utf-8') as isimpf: <NEW_LINE> <INDENT> for i, line in enumerate(isimpf, start=1): <NEW_LINE> <INDENT> line = line[:-1] <NEW_LINE> if i in inds: <NEW_LINE> <INDENT> is_impossible.append(line) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> for i, flag in enumerate(is_impossible): <NEW_LINE> <INDENT> contexts[i].insert(0, "unanswerable") <NEW_LINE> if flag == "1": <NEW_LINE> <INDENT> answer_spans[i] = [0, 0] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> answer_spans[i] = [int(val) + 1 for val in answer_spans[i]] <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> context_batch = self.vectors.query(contexts, pad_to_length=self.max_passage_length) <NEW_LINE> question_batch = self.vectors.query(questions, pad_to_length=self.max_query_length) <NEW_LINE> if 
self.max_passage_length is not None: <NEW_LINE> <INDENT> span_batch = np.expand_dims(np.array(answer_spans, dtype='float32'), axis=1).clip(0, self.max_passage_length - 1) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> span_batch = np.expand_dims(np.array(answer_spans, dtype='float32'), axis=1) <NEW_LINE> <DEDENT> return [context_batch, question_batch], [span_batch]
Generate one batch of data
625941bfa219f33f346288ae
def test_op_test_nonexistent_member(): <NEW_LINE> <INDENT> patches = [ {"op": "test", "path": "/10/20/foo", "value": "bar"} ] <NEW_LINE> jp = JSONPatcher(sample_json, *patches) <NEW_LINE> changed, tested = jp.patch() <NEW_LINE> assert changed is None <NEW_LINE> assert tested is False
Should return False even if path does not exist.
625941bf8c3a8732951582f9
def sample_by_distributions(self, distributions, nsample): <NEW_LINE> <INDENT> import pyemma.util.discrete_trajectories as dt <NEW_LINE> return dt.sample_indexes_by_distribution(self.active_state_indexes, distributions, nsample)
Generates samples according to given probability distributions Parameters ---------- distributions : list or array of ndarray ( (n) ) m distributions over states. Each distribution must be of length n and must sum up to 1.0 nsample : int Number of samples per distribution. If replace = False, the number of returned samples per state could be smaller if less than nsample indexes are available for a state. Returns ------- indexes : length m list of ndarray( (nsample, 2) ) List of the sampled indices by distribution. Each element is an index array with a number of rows equal to nsample, with rows consisting of a tuple (i, t), where i is the index of the trajectory and t is the time index within the trajectory.
625941bfd7e4931a7ee9de5e
def transform(self): <NEW_LINE> <INDENT> os.system("lcs --verbose") <NEW_LINE> predictions = self.extract_performance() <NEW_LINE> os.system("mv * " + self.savedir) <NEW_LINE> return predictions
LCS classifier ===== Function to call upon the LCS classifier to train and test on the partsfiles. Classifier needs to be properly set-up Performs classification in current directory Parameters ----- trainparts : list all train instances as line with a file reference and label testparts : list all test instances as line with a file reference and label
625941bf26238365f5f0edac
@njit <NEW_LINE> def delta_update(delta:float, old_v:float, new_v:float) -> float: <NEW_LINE> <INDENT> delta_arr = np.zeros(2) <NEW_LINE> delta_arr[0] = delta <NEW_LINE> delta_arr[1] = np.abs(old_v - new_v) <NEW_LINE> delta = np.max(delta_arr) <NEW_LINE> return delta
Calculates delta (difference between state value estimate for current and previous iteration). Args: delta (float): Previous delta old_v (float): Previous value estimate new_v (float): Current value estimate Returns: float: New delta
625941bf91f36d47f21ac431
def powerOnSam(self, wait=1, cmd=None): <NEW_LINE> <INDENT> starting = self.status(cmd=cmd) <NEW_LINE> self.dev.clear(self.bits['sam_off']) <NEW_LINE> def isSamOn(status): <NEW_LINE> <INDENT> return not bool(status & self.bits['sam_off']) <NEW_LINE> <DEDENT> ret = self.spinUntil(isSamOn, starting=starting, wait=wait, cmd=cmd) <NEW_LINE> return ret
Deassert SAM power line, turning it on.
625941bf099cdd3c635f0b9d
def get_files(file_dir): <NEW_LINE> <INDENT> fsk = [] <NEW_LINE> label_fsk = [] <NEW_LINE> ask = [] <NEW_LINE> label_ask = [] <NEW_LINE> qpsk = [] <NEW_LINE> label_qpsk = [] <NEW_LINE> for file in os.listdir(file_dir): <NEW_LINE> <INDENT> name = file.split(sep='.') <NEW_LINE> if name[0]=='fsk': <NEW_LINE> <INDENT> fsk.append(file_dir + file) <NEW_LINE> label_fsk.append(2) <NEW_LINE> <DEDENT> elif name[0]=='ask': <NEW_LINE> <INDENT> ask.append(file_dir + file) <NEW_LINE> label_ask.append(1) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> qpsk.append(file_dir + file) <NEW_LINE> label_qpsk.append(3) <NEW_LINE> <DEDENT> <DEDENT> print('There are %d fsk\nThere are %d ask\nThere are %d qpsk' %(len(fsk),len(ask),len(qpsk))) <NEW_LINE> image_list = np.hstack((ask,fsk,qpsk)) <NEW_LINE> label_list = np.hstack((label_ask,label_fsk,label_qpsk)) <NEW_LINE> temp = np.array([image_list, label_list]) <NEW_LINE> temp = temp.transpose() <NEW_LINE> np.random.shuffle(temp) <NEW_LINE> image_list = list(temp[:, 0]) <NEW_LINE> label_list = list(temp[:, 1]) <NEW_LINE> label_list = [int(i) for i in label_list] <NEW_LINE> return image_list, label_list
Args: file_dir: file directory Returns: list of images and labels
625941bf3346ee7daa2b2cab
def f_pipeline(self, pipeline): <NEW_LINE> <INDENT> self._user_pipeline = True <NEW_LINE> return self._execute_runs(pipeline)
You can make *pypet* supervise your whole experiment by defining a pipeline. `pipeline` is a function that defines the entire experiment. From pre-processing including setting up the trajectory over defining the actual simulation runs to post processing. The `pipeline` function needs to return TWO tuples with a maximum of three entries each. For example: :: return (runfunc, args, kwargs), (postproc, postproc_args, postproc_kwargs) Where `runfunc` is the actual simulation function thet gets passed the trajectory container and potentially additional arguments `args` and keyword arguments `kwargs`. This will be run by your environment with all parameter combinations. `postproc` is a post processing function that handles your computed results. The function must accept as arguments the trajectory container, a list of results (list of tuples (run idx, result) ) and potentially additional arguments `postproc_args` and keyword arguments `postproc_kwargs`. As for :func:`~pypet.environment.Environment.f_add_postproc`, this function can potentially extend the trajectory. If you don't want to apply post-processing, your pipeline function can also simply return the run function and the arguments: :: return runfunc, args, kwargs Or :: return runfunc, args Or :: return runfunc ``return runfunc, kwargs`` does NOT work, if you don't want to pass `args` do ``return runfunc, (), kwargs``. Analogously combinations like :: return (runfunc, args), (postproc,) work as well. :param pipeline: The pipleine function, taking only a single argument `traj`. And returning all functions necessary for your experiment. :return: List of the individual results returned by `runfunc`. Returns a LIST OF TUPLES, where first entry is the run idx and second entry is the actual result. In case of multiprocessing these are not necessarily ordered according to their run index, but ordered according to their finishing time. Does not contain results stored in the trajectory! 
In order to access these simply interact with the trajectory object, potentially after calling :func:`~pypet.trajectory.Trajectory.f_update_skeleton` and loading all results at once with :func:`~pypet.trajectory.f_load` or loading manually with :func:`~pypet.trajectory.f_load_items`. Even if you use multiprocessing without a pool the results returned by `runfunc` still need to be pickled. Results computed from `postproc` are not returned. `postproc` should not return any results except dictionaries if the trajectory should be expanded.
625941bf6aa9bd52df036ce4
def test_children(self): <NEW_LINE> <INDENT> button = self.dlg.by(class_name="Button", name="OK").find() <NEW_LINE> self.assertEqual(len(button.children()), 1) <NEW_LINE> self.assertEqual(button.children()[0].class_name(), "TextBlock")
Test getting children of a control
625941bf38b623060ff0ad30
def _testBounds(): <NEW_LINE> <INDENT> pass
>>> from defcon.test.testTools import getTestFontPath >>> from defcon.objects.font import Font >>> font = Font(getTestFontPath()) >>> glyph = font['A'] >>> glyph.bounds (0, 0, 700, 700) >>> glyph = font['B'] >>> glyph.bounds (0, 0, 700, 700) >>> glyph = font['C'] >>> glyph.bounds (0.0, 0.0, 700.0, 700.0)
625941bfcb5e8a47e48b79ee
def headerData(self, section, orientation, role=Qt.DisplayRole): <NEW_LINE> <INDENT> if orientation == Qt.Horizontal: <NEW_LINE> <INDENT> extra_col = self.extraColumnForProxyColumn(section) <NEW_LINE> if extra_col >= 0: <NEW_LINE> <INDENT> if role == Qt.DisplayRole: <NEW_LINE> <INDENT> return self.extra_headers[extra_col] <NEW_LINE> <DEDENT> return None <NEW_LINE> <DEDENT> <DEDENT> return super().headerData(section, orientation, role)
Reimplemented.
625941bfbe7bc26dc91cd546
def encrypt_private(self, message): <NEW_LINE> <INDENT> r = secrets.randbelow(self.n) <NEW_LINE> en1 = pow(r, self.d, self.n) <NEW_LINE> h1 = SHA1.SHA1(hex(r)) <NEW_LINE> h2 = bitarray(0) <NEW_LINE> h2.frombytes(bytes.fromhex(h1[2:])) <NEW_LINE> m = bitarray(0) <NEW_LINE> m.frombytes(message.encode("latin-1")) <NEW_LINE> dif = h2.length() - (m.length()%h2.length()) <NEW_LINE> for i in range(dif): m.insert(0, False) <NEW_LINE> st = "" <NEW_LINE> for i in range(int(m.length()/h2.length())): st += (h2^m[i*h2.length():(i+1)*h2.length()]).tobytes().decode("latin-1") <NEW_LINE> return en1, st
Encrypts a message using the private key. Args: message (string): The message to encrypt. Returns: (int, string) The encrypted message.
625941bf23849d37ff7b2fd1
def query_yes_no(output_stream, question, default="yes"): <NEW_LINE> <INDENT> responses = {"yes": True, "y": True, "no": False, "n": False} <NEW_LINE> if default is None: <NEW_LINE> <INDENT> prompt = " [y/n] " <NEW_LINE> <DEDENT> elif responses[default.lower()]: <NEW_LINE> <INDENT> prompt = " [Y/n] " <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> prompt = " [y/N] " <NEW_LINE> <DEDENT> while True: <NEW_LINE> <INDENT> output_stream.write(question + prompt) <NEW_LINE> choice = input().lower() <NEW_LINE> if default is not None and choice == '': <NEW_LINE> <INDENT> return responses[default] <NEW_LINE> <DEDENT> elif choice in responses: <NEW_LINE> <INDENT> return responses[choice] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> output_stream.write("Please respond with 'yes' or 'no' " "(or 'y' or 'n').\n")
Ask a yes/no question via raw_input() and return their answer. "question" is a string that is presented to the user. "default" is the presumed answer if the user just hits <Enter>. It must be "yes" (the default), "no" or None (meaning an answer is required of the user). The "answer" return value is True for "yes" or False for "no". Source: https://stackoverflow.com/a/3041990
625941bfd18da76e23532414
def run(self): <NEW_LINE> <INDENT> if self.setup['daemon']: <NEW_LINE> <INDENT> piddir = os.path.dirname(self.setup['daemon']) <NEW_LINE> if not os.path.exists(piddir): <NEW_LINE> <INDENT> os.makedirs(piddir) <NEW_LINE> os.chown(piddir, self.setup['daemon_uid'], self.setup['daemon_gid']) <NEW_LINE> os.chmod(piddir, 493) <NEW_LINE> <DEDENT> if not self._daemonize(): <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> os.umask(int(self.setup['umask'], 8)) <NEW_LINE> <DEDENT> if not self._run(): <NEW_LINE> <INDENT> self.shutdown() <NEW_LINE> return False <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> self.load_plugins() <NEW_LINE> self.fam.start() <NEW_LINE> self.fam_thread.start() <NEW_LINE> self.fam.AddMonitor(self.cfile, self) <NEW_LINE> if self.perflog_thread is not None: <NEW_LINE> <INDENT> self.perflog_thread.start() <NEW_LINE> <DEDENT> for plug in self.plugins_by_type(Threaded): <NEW_LINE> <INDENT> plug.start_threads() <NEW_LINE> <DEDENT> <DEDENT> except: <NEW_LINE> <INDENT> self.shutdown() <NEW_LINE> raise <NEW_LINE> <DEDENT> if self.setup['fam_blocking']: <NEW_LINE> <INDENT> time.sleep(1) <NEW_LINE> while self.fam.pending() != 0: <NEW_LINE> <INDENT> time.sleep(1) <NEW_LINE> <DEDENT> <DEDENT> self.set_debug(None, self.debug_flag) <NEW_LINE> self._block()
Run the server core. This calls :func:`_daemonize`, :func:`_run`, starts the :attr:`fam_thread`, and calls :func:`_block`, but note that it is the responsibility of the server core implementation to call :func:`shutdown` under normal operation. This also handles creation of the directory containing the pidfile, if necessary.
625941bf50812a4eaa59c265
def get_container_config(self): <NEW_LINE> <INDENT> for p in self.dic[DOCKER_UNIT.PORTS]: <NEW_LINE> <INDENT> key = '%d/%s' % (p[DOCKER_UNIT.PORTS_PORT], p[DOCKER_UNIT.PORTS_PROTOCOL]) <NEW_LINE> self.dic[DOCKER_UNIT.EXPOSED_PORTS][key] = {} <NEW_LINE> self.dic[DOCKER_UNIT.HOST_CONFIG][DOCKER_UNIT.HOST_CONFIG_PORT_BINDING][key] = [{DOCKER_UNIT.HOST_CONFIG_HOST_IP: '', DOCKER_UNIT.HOST_CONFIG_HOST_PORT: str(p[DOCKER_UNIT.PORTS_HOST_PORT])}] <NEW_LINE> <DEDENT> self.dic.pop(DOCKER_UNIT.NAME, "") <NEW_LINE> self.dic.pop(DOCKER_UNIT.TYPE, "") <NEW_LINE> self.dic.pop(DOCKER_UNIT.PROVIDER, "") <NEW_LINE> self.dic.pop(DOCKER_UNIT.DESCRIPTION, "") <NEW_LINE> self.dic.pop(DOCKER_UNIT.PORTS, None) <NEW_LINE> self.dic.pop(DOCKER_UNIT.REMOTE, None) <NEW_LINE> if not self.dic[DOCKER_UNIT.CMD]: <NEW_LINE> <INDENT> self.dic.pop(DOCKER_UNIT.CMD, []) <NEW_LINE> <DEDENT> if not self.dic[DOCKER_UNIT.ENTRY_POINT]: <NEW_LINE> <INDENT> self.dic.pop(DOCKER_UNIT.ENTRY_POINT, "") <NEW_LINE> <DEDENT> return self.dic
Compose post data for docker remote api create :return:
625941bfa17c0f6771cbdf94
def email_for_subscription(alert, start_date, frequency): <NEW_LINE> <INDENT> start_datetime = datetime.datetime(start_date.year, start_date.month, start_date.day) <NEW_LINE> yesterday = datetime.date.today() - datetime.timedelta(days=1) <NEW_LINE> end_datetime = datetime.datetime.combine(yesterday, datetime.time(23, 59, 59, 9999)) <NEW_LINE> qs = NewsItem.objects.select_related().filter(schema__is_public=True, pub_date__range=(start_datetime, end_datetime)).order_by('-schema__importance', 'schema__id') <NEW_LINE> if alert.include_new_schemas: <NEW_LINE> <INDENT> if alert.schemas: <NEW_LINE> <INDENT> qs = qs.exclude(schema__id__in=alert.schemas.split(',')) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> if alert.schemas: <NEW_LINE> <INDENT> qs = qs.filter(schema__id__in=alert.schemas.split(',')) <NEW_LINE> <DEDENT> <DEDENT> if alert.block: <NEW_LINE> <INDENT> place_name, place_url = alert.block.pretty_name, alert.block.url() <NEW_LINE> place = alert.block <NEW_LINE> search_buffer = make_search_buffer(alert.block.location.centroid, alert.radius) <NEW_LINE> qs = qs.filter(location__bboverlaps=search_buffer) <NEW_LINE> <DEDENT> elif alert.location: <NEW_LINE> <INDENT> place_name, place_url = alert.location.name, alert.location.url() <NEW_LINE> place = alert.location <NEW_LINE> qs = qs.filter(newsitemlocation__location__id=alert.location.id) <NEW_LINE> <DEDENT> ni_list = list(qs) <NEW_LINE> if not ni_list: <NEW_LINE> <INDENT> raise NoNews <NEW_LINE> <DEDENT> schemas_used = set([ni.schema for ni in ni_list]) <NEW_LINE> populate_attributes_if_needed(ni_list, list(schemas_used)) <NEW_LINE> text, html = email_text_for_place(alert, place, place_name, place_url, ni_list, start_date, frequency) <NEW_LINE> return place_name, text, html
Returns a (place_name, text, html) tuple for the given EmailAlert object and date.
625941bf23849d37ff7b2fd2
def test_update(self): <NEW_LINE> <INDENT> data = dict() <NEW_LINE> data['name'] = "Le duc de Lorraine" <NEW_LINE> data['phone'] = "514-555-5555" <NEW_LINE> data['address'] = "9000 Boulevard de Carrie" <NEW_LINE> data['city'] = "Trois-Rivieres" <NEW_LINE> data['zipcode'] = "H1S1S1" <NEW_LINE> data['restaurateur_id'] = 15 <NEW_LINE> data['country_id'] = 10 <NEW_LINE> response = self.put('/restaurants/5', data=data) <NEW_LINE> assert response.status_code == 200 <NEW_LINE> result = self.parse(response.data) <NEW_LINE> assert 'id' in result <NEW_LINE> restaurant = db.session.query(Restaurant).get(result['id']) <NEW_LINE> assert restaurant.name == 'Le duc de Lorraine' <NEW_LINE> assert restaurant.phone == "514-555-5555" <NEW_LINE> assert restaurant.address == '9000 Boulevard de Carrie' <NEW_LINE> assert restaurant.city == 'Trois-Rivieres' <NEW_LINE> assert restaurant.restaurateur.id == 15 <NEW_LINE> assert restaurant.country.id == 10 <NEW_LINE> restaurateur = db.session.query(Restaurateur).get(15) <NEW_LINE> assert restaurateur.restaurant.id == result['id']
PUT /restaurants/id: with valid data
625941bfcb5e8a47e48b79ef
def a_dist(data, domain, quality_function, eps, delta, bulk=False, for_sparse=False): <NEW_LINE> <INDENT> if bulk: <NEW_LINE> <INDENT> qualified_domain = quality_function(data, domain) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> qualified_domain = [quality_function(data, i) for i in domain] <NEW_LINE> <DEDENT> total_value = float(sum(qualified_domain)) <NEW_LINE> h1_score = max(qualified_domain) <NEW_LINE> h1 = domain[qualified_domain.index(h1_score)] <NEW_LINE> qualified_domain.remove(h1_score) <NEW_LINE> domain.remove(h1) <NEW_LINE> h2_score = max(qualified_domain) <NEW_LINE> noisy_gap = h1_score - h2_score + np.random.laplace(0, 1 / eps, 1) <NEW_LINE> if noisy_gap < np.log(1/delta)/eps: <NEW_LINE> <INDENT> return 'bottom' <NEW_LINE> <DEDENT> elif for_sparse: <NEW_LINE> <INDENT> return h1, total_value <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return h1
A_dist algorithm :param data: list or array of values :param domain: list of possible results :param quality_function: sensitivity-1 quality function :param eps: privacy parameter :param delta: privacy parameter :param bulk: in case that we can reduce run-time by evaluating the quality of the whole domain in bulk, the procedure will be given a 'bulk' quality function. meaning that instead of one domain element the quality function get the whole domain as input :param for_sparse: in cases that the domain is a very spared one, namely a big percent of the domain has quality 0, there is a special procedure called sparse_domain. That procedure needs, beside that result from the given mechanism, the total weight of the domain whose quality is more than 0. If that is the case A-dist will return also the total quality weight input domain. :return: an element of domain with maximum value of quality function or 'bottom'
625941bfdd821e528d63b0ec
def __init__(__self__, *, cloud_provider: Optional[str] = None, replica_set_name: Optional[str] = None): <NEW_LINE> <INDENT> if cloud_provider is not None: <NEW_LINE> <INDENT> pulumi.set(__self__, "cloud_provider", cloud_provider) <NEW_LINE> <DEDENT> if replica_set_name is not None: <NEW_LINE> <INDENT> pulumi.set(__self__, "replica_set_name", replica_set_name)
:param str cloud_provider: Cloud provider that stores this snapshot. :param str replica_set_name: Label given to a shard or config server from which Atlas took this snapshot.
625941bf15baa723493c3eb5
def get_label(self, task_id): <NEW_LINE> <INDENT> task = self.get_task(task_id) <NEW_LINE> if task: <NEW_LINE> <INDENT> return task.label <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return None
Obtains built-in data name or real image file name. Parameters ---------- task_id: int task key in the database. Returns ------- :str Realistic Map Fields.
625941bfc4546d3d9de72973
def close(channel): <NEW_LINE> <INDENT> channel.close()
Close the channel
625941bf4c3428357757c26b
def __eq__(self, other): <NEW_LINE> <INDENT> if not isinstance(other, IntegrationApiKeyResponse): <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> return self.to_dict() == other.to_dict()
Returns true if both objects are equal
625941bf3617ad0b5ed67e3a
def allowed_file(file_ext): <NEW_LINE> <INDENT> return file_ext.strip(".") in ALLOWED_EXTENSIONS
Ensures file is an image
625941bf44b2445a33931fd9
def libs_sandbox(self, path, dest=None, recursive=True, exclude_sys_libs=True): <NEW_LINE> <INDENT> if dest is None: <NEW_LINE> <INDENT> dest = "{{DIR_BASE}}/bin" <NEW_LINE> <DEDENT> dest = j.core.tools.text_replace(dest) <NEW_LINE> path = j.core.tools.text_replace(path) <NEW_LINE> self._log_info("lib sandbox:%s" % path) <NEW_LINE> def callback(dep): <NEW_LINE> <INDENT> dep.copyTo(dest) <NEW_LINE> <DEDENT> self.libs_walk(path, dest, callback, recursive, exclude_sys_libs)
kosmos 'j.tools.sandboxer.libs_sandbox(".",".",True)' find binaries on path and look for supporting libs, copy the libs to dest default dest = '%s/bin/'%j.dirs.JSBASEDIR
625941bf925a0f43d2549db6
def register_cache(name, dotted_path): <NEW_LINE> <INDENT> data_caches[name] = dotted_path
Register a new :class:`.Cache` with ``name`` which can be found at the python ``dotted_path``.
625941bf293b9510aa2c31da
def unexpected_exception(self,e): <NEW_LINE> <INDENT> self.fatal_exception("'Unexpected' exception has occured:", e)
Default message for 'unexpected' exceptions
625941bf5fdd1c0f98dc0174
def testDockerPipeChain(self, caching=True): <NEW_LINE> <INDENT> options = Job.Runner.getDefaultOptions(os.path.join(self.tempDir, 'jobstore')) <NEW_LINE> options.logLevel = 'INFO' <NEW_LINE> options.workDir = self.tempDir <NEW_LINE> options.clean = 'always' <NEW_LINE> if not caching: <NEW_LINE> <INDENT> options.disableCaching = True <NEW_LINE> <DEDENT> A = Job.wrapJobFn(_testDockerPipeChainFn) <NEW_LINE> rv = Job.Runner.startToil(A, options) <NEW_LINE> assert rv.strip() == '2'
Test for piping API for dockerCall(). Using this API (activated when list of argument lists is given as parameters), commands a piped together into a chain ex: parameters=[ ['printf', 'x y '], ['wc', '-l'] ] should execute: printf 'x y ' | wc -l
625941bf0383005118ecf526
def exit_event(self, event): <NEW_LINE> <INDENT> self.root.quit()
Quit app when "Exit" pressed
625941bf167d2b6e31218ad8
def _assertNoShadow(self, attr_name): <NEW_LINE> <INDENT> if isinstance(self._objs, reraiser_thread.ReraiserThreadGroup): <NEW_LINE> <INDENT> assert not hasattr(self._objs, '_assertNoShadow') <NEW_LINE> assert not hasattr(self._objs, attr_name) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> assert not any(hasattr(o, '_assertNoShadow') for o in self._objs) <NEW_LINE> assert not any(hasattr(o, attr_name) for o in self._objs)
Ensures that |attr_name| isn't shadowing part of the wrapped obejcts. If the wrapped objects _do_ have an |attr_name| attribute, it will be inaccessible to clients. Args: attr_name: The attribute to check. Raises: AssertionError if the wrapped objects have an attribute named 'attr_name' or '_assertNoShadow'.
625941bf0fa83653e4656efe
def read_file(abi_file): <NEW_LINE> <INDENT> data_dict = {} <NEW_LINE> fh = Dataset(join(PATH_LINUX_ABI, abi_file), mode='r') <NEW_LINE> data_dict['band_id'] = fh.variables['band_id'][0] <NEW_LINE> if (data_dict['band_id'] < 8): <NEW_LINE> <INDENT> print('\n!!! WARNING: Currently plotting non-IR satellite data !!!' ) <NEW_LINE> <DEDENT> data_dict['band_wavelength'] = "%.2f" % fh.variables['band_wavelength'][0] <NEW_LINE> data_dict['semimajor_ax'] = fh.variables['goes_imager_projection'].semi_major_axis <NEW_LINE> data_dict['semiminor_ax'] = fh.variables['goes_imager_projection'].semi_minor_axis <NEW_LINE> data_dict['inverse_flattening'] = fh.variables['goes_imager_projection'].inverse_flattening <NEW_LINE> data_dict['latitude_of_projection_origin'] = fh.variables['goes_imager_projection'].latitude_of_projection_origin <NEW_LINE> data_dict['longitude_of_projection_origin'] = fh.variables['goes_imager_projection'].longitude_of_projection_origin <NEW_LINE> data_dict['data_units'] = fh.variables['CMI'].units <NEW_LINE> add_seconds = fh.variables['t'][0] <NEW_LINE> scan_date = datetime(2000, 1, 1, 12) + timedelta(seconds=float(add_seconds)) <NEW_LINE> sat_height = fh.variables['goes_imager_projection'].perspective_point_height <NEW_LINE> sat_lon = fh.variables['goes_imager_projection'].longitude_of_projection_origin <NEW_LINE> sat_lat = fh.variables['goes_imager_projection'].latitude_of_projection_origin <NEW_LINE> lat_lon_extent = {} <NEW_LINE> lat_lon_extent['n'] = fh.variables['geospatial_lat_lon_extent'].geospatial_northbound_latitude <NEW_LINE> lat_lon_extent['s'] = fh.variables['geospatial_lat_lon_extent'].geospatial_southbound_latitude <NEW_LINE> lat_lon_extent['e'] = fh.variables['geospatial_lat_lon_extent'].geospatial_eastbound_longitude <NEW_LINE> lat_lon_extent['w'] = fh.variables['geospatial_lat_lon_extent'].geospatial_westbound_longitude <NEW_LINE> data_dict['lat_center'] = fh.variables['geospatial_lat_lon_extent'].geospatial_lat_center <NEW_LINE> 
data_dict['lon_center'] = fh.variables['geospatial_lat_lon_extent'].geospatial_lon_center <NEW_LINE> sat_sweep = fh.variables['goes_imager_projection'].sweep_angle_axis <NEW_LINE> data = fh.variables['CMI'][:].data <NEW_LINE> Xs = fh.variables['x'][:] <NEW_LINE> Ys = fh.variables['y'][:] <NEW_LINE> fh.close() <NEW_LINE> fh = None <NEW_LINE> data_dict['scan_date'] = scan_date <NEW_LINE> data_dict['sat_height'] = sat_height <NEW_LINE> data_dict['sat_lon'] = sat_lon <NEW_LINE> data_dict['sat_lat'] = sat_lat <NEW_LINE> data_dict['lat_lon_extent'] = lat_lon_extent <NEW_LINE> data_dict['sat_sweep'] = sat_sweep <NEW_LINE> data_dict['x'] = Xs <NEW_LINE> data_dict['y'] = Ys <NEW_LINE> data_dict['data'] = data <NEW_LINE> return data_dict
Opens & reads a GOES-16 ABI data file, returning a dictionary of data !!! NOTE: Returns implroper sat_lon value; return 75.0 but should be 75.2 for GOES-16 Parameters: ------------ fname : str Name of the GOES-16 ABI date file to be opened & processed Returns: ------------ data_dict : dictionary of str Dictionar of ABI image data & metadata from the netCDF file
625941bf15fb5d323cde0a4e
@admin.route('/tag/add/', methods=["GET", "POST"]) <NEW_LINE> @admin_login_req <NEW_LINE> @admin_auth <NEW_LINE> def tag_add(): <NEW_LINE> <INDENT> form = TagForm() <NEW_LINE> if form.validate_on_submit(): <NEW_LINE> <INDENT> data = form.data <NEW_LINE> tag = Tag.query.filter_by(name=data["name"]).count() <NEW_LINE> if tag == 1: <NEW_LINE> <INDENT> flash("标签已存在", "err") <NEW_LINE> return redirect(url_for("admin.tag_add")) <NEW_LINE> <DEDENT> tag = Tag( name=data["name"] ) <NEW_LINE> db.session.add(tag) <NEW_LINE> db.session.commit() <NEW_LINE> oplog = Oplog( admin_id=session["admin_id"], ip=request.remote_addr, reason="添加标签-%s" % data["name"] ) <NEW_LINE> db.session.add(oplog) <NEW_LINE> db.session.commit() <NEW_LINE> flash("标签添加成功", "ok") <NEW_LINE> redirect(url_for("admin.tag_add")) <NEW_LINE> <DEDENT> return render_template('admin/tag_add.html', form=form)
标签添加
625941bfd8ef3951e324347f
def link(title, url): <NEW_LINE> <INDENT> if not url: <NEW_LINE> <INDENT> return title <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return '<a target="_blank" href="%(url)s" title="%(title)s">%(title)s</a>' % {'url': url, 'title': 'Details'}
Wrap the text in a hyperlink, if the link exists.
625941bf099cdd3c635f0b9e
def splitArray(self, nums): <NEW_LINE> <INDENT> def split(nums): <NEW_LINE> <INDENT> total = sum(nums) <NEW_LINE> A = [nums[i] for i in range(len(nums))] <NEW_LINE> for i in range(1, len(nums)): <NEW_LINE> <INDENT> A[i] += A[i-1] <NEW_LINE> <DEDENT> return {A[i-1] for i in range(1, len(nums)) if A[i-1] == total - A[i]} <NEW_LINE> <DEDENT> return any(split(nums[:j]) & split(nums[j+1:]) for j in range(3, len(nums)-3))
:type nums: List[int] :rtype: bool
625941bf60cbc95b062c6484
def decode_boxes(encoded_boxes, reference_boxes, scale_factors=None): <NEW_LINE> <INDENT> t_xcenter, t_ycenter, t_w, t_h = tf.unstack(encoded_boxes, axis=1) <NEW_LINE> if scale_factors: <NEW_LINE> <INDENT> t_xcenter /= scale_factors[0] <NEW_LINE> t_ycenter /= scale_factors[1] <NEW_LINE> t_w /= scale_factors[2] <NEW_LINE> t_h /= scale_factors[3] <NEW_LINE> <DEDENT> reference_xmin, reference_ymin, reference_xmax, reference_ymax = tf.unstack(reference_boxes, axis=1) <NEW_LINE> reference_xcenter = (reference_xmin + reference_xmax) / 2. <NEW_LINE> reference_ycenter = (reference_ymin + reference_ymax) / 2. <NEW_LINE> reference_w = reference_xmax - reference_xmin <NEW_LINE> reference_h = reference_ymax - reference_ymin <NEW_LINE> predict_xcenter = t_xcenter * reference_w + reference_xcenter <NEW_LINE> predict_ycenter = t_ycenter * reference_h + reference_ycenter <NEW_LINE> predict_w = tf.exp(t_w) * reference_w <NEW_LINE> predict_h = tf.exp(t_h) * reference_h <NEW_LINE> predict_xmin = predict_xcenter - predict_w / 2. <NEW_LINE> predict_xmax = predict_xcenter + predict_w / 2. <NEW_LINE> predict_ymin = predict_ycenter - predict_h / 2. <NEW_LINE> predict_ymax = predict_ycenter + predict_h / 2. <NEW_LINE> return tf.transpose(tf.stack([predict_xmin, predict_ymin, predict_xmax, predict_ymax]))
:param encoded_boxes:[N, 4] :param reference_boxes: [N, 4] . :param scale_factors: use for scale. in the first stage, reference_boxes are anchors in the second stage, reference boxes are proposals(decode) produced by first stage :return:decode boxes [N, 4]
625941bf66656f66f7cbc0ec
def _process_bookeDetail_item(self,item): <NEW_LINE> <INDENT> pattern = re.compile('\d+') <NEW_LINE> item['novelLabel'] = item['novelLabel'].strip().replace('\n', '') <NEW_LINE> match = pattern.search(item['novelAllClick']) <NEW_LINE> item['novelAllClick'] = match.group() if match else item['novelAllClick'] <NEW_LINE> match = pattern.search(item['novelMonthClick']) <NEW_LINE> item['novelMonthClick'] = match.group() if match else item['novelMonthClick'] <NEW_LINE> match = pattern.search(item['novelWeekClick']) <NEW_LINE> item['novelWeekClick'] = match.group() if match else item['novelWeekClick'] <NEW_LINE> match = pattern.search(item['novelAllPopular']) <NEW_LINE> item['novelAllPopular'] = match.group() if match else item['novelAllPopular'] <NEW_LINE> match = pattern.search(item['novelMonthPopular']) <NEW_LINE> item['novelMonthPopular'] = match.group() if match else item['novelMonthPopular'] <NEW_LINE> match = pattern.search(item['novelWeekPopular']) <NEW_LINE> item['novelWeekPopular'] = match.group() if match else item['novelWeekPopular'] <NEW_LINE> match = pattern.search(item['novelAllComm']) <NEW_LINE> item['novelAllComm'] = match.group() if match else item['novelAllComm'] <NEW_LINE> match = pattern.search(item['novelMonthComm']) <NEW_LINE> item['novelMonthComm'] = match.group() if match else item['novelMonthComm'] <NEW_LINE> match = pattern.search(item['novelWeekComm']) <NEW_LINE> item['novelWeekComm'] = match.group() if match else item['novelWeekComm'] <NEW_LINE> self.db.bookhot.insert(dict(item))
处理小说热度 :param item: :return:
625941bfd268445f265b4db0
def write_read_iso_19115_metadata(layer_uri, keywords, keyword=None): <NEW_LINE> <INDENT> write_iso19115_metadata(layer_uri, keywords) <NEW_LINE> iso_19115_keywords = read_iso19115_metadata(layer_uri) <NEW_LINE> temp_keywords = keywords.copy() <NEW_LINE> if 'keyword_version' not in temp_keywords.keys(): <NEW_LINE> <INDENT> temp_keywords['keyword_version'] = inasafe_keyword_version <NEW_LINE> <DEDENT> if (temp_keywords != iso_19115_keywords): <NEW_LINE> <INDENT> missing_keywords = {} <NEW_LINE> missing_keys = set(keywords.keys()) - set(iso_19115_keywords.keys()) <NEW_LINE> for key in missing_keys: <NEW_LINE> <INDENT> missing_keywords[key] = keywords[key] <NEW_LINE> <DEDENT> message = 'Old metadata: %s\n' % str(keywords) <NEW_LINE> message += 'ISO metadata: %s\n' % str(iso_19115_keywords) <NEW_LINE> message += 'Layer location : %s\n' % layer_uri <NEW_LINE> message += 'Missing keywords:\n' <NEW_LINE> for key, value in missing_keywords.iteritems(): <NEW_LINE> <INDENT> message += '%s: %s\n' % (key, value) <NEW_LINE> <DEDENT> message += '--------------------\n' <NEW_LINE> different_values = {} <NEW_LINE> for key in keywords.keys(): <NEW_LINE> <INDENT> if key in iso_19115_keywords.keys(): <NEW_LINE> <INDENT> if keywords[key] != iso_19115_keywords[key]: <NEW_LINE> <INDENT> different_values[key] = ( keywords[key], iso_19115_keywords[key]) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> for key, value in different_values.iteritems(): <NEW_LINE> <INDENT> message += 'old - %s - %s - type: %s\n' % ( key, value[0], type(value[0])) <NEW_LINE> message += 'new - %s - %s - type: %s\n' % ( key, value[1], type(value[1])) <NEW_LINE> <DEDENT> raise MissingMetadata(message) <NEW_LINE> <DEDENT> if os.path.exists(os.path.splitext(layer_uri)[0] + '.keywords'): <NEW_LINE> <INDENT> os.remove(os.path.splitext(layer_uri)[0] + '.keywords') <NEW_LINE> <DEDENT> if keyword: <NEW_LINE> <INDENT> if keyword in iso_19115_keywords.keys(): <NEW_LINE> <INDENT> return iso_19115_keywords[keyword] <NEW_LINE> <DEDENT> 
else: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> return iso_19115_keywords
Write ISO Metadata, and read again. Check if the keywords same. :param layer_uri: :param keywords: :type keywords: dict :return:
625941bf3d592f4c4ed1cfb6
def create_rotmat(rotations): <NEW_LINE> <INDENT> cos_t = [np.cos(x) for x in rotations] <NEW_LINE> sin_t = [np.sin(x) for x in rotations] <NEW_LINE> rot_x = np.array([[1, 0, 0], [0, cos_t[0], -sin_t[0]], [0, sin_t[0], cos_t[0]]]) <NEW_LINE> rot_y = np.array([[cos_t[1], 0, sin_t[1]], [0, 1, 0], [-sin_t[1], 0, cos_t[1]]]) <NEW_LINE> rot_z = np.array([[cos_t[2], -sin_t[2], 0], [sin_t[2], cos_t[2], 0], [0, 0, 1]]) <NEW_LINE> return rot_z.dot(rot_y).dot(rot_x)
create rotation matrix from vector rotations Parameters ---------- rotations : 1D array rotation vectors (in radians) Returns ------- rot_mat : 2D array 4 x 4 rotation matrix concatenated from x, y, z rotations
625941bf507cdc57c6306c17
def index_html(self, REQUEST, RESPONSE): <NEW_LINE> <INDENT> field = self.getPrimaryField() <NEW_LINE> if IATBlobImage.providedBy(self): <NEW_LINE> <INDENT> return field.index_html(self, REQUEST, RESPONSE) <NEW_LINE> <DEDENT> elif field.getContentType(self) in ATFile.inlineMimetypes: <NEW_LINE> <INDENT> return field.index_html(self, REQUEST, RESPONSE) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return field.download(self, REQUEST, RESPONSE)
download the file inline or as an attachment
625941bf090684286d50ec25
def test_se_block(self): <NEW_LINE> <INDENT> images = tf.zeros((10, 128, 128, 3), dtype=tf.float32) <NEW_LINE> global_params = efficientnet_model.GlobalParams( 1.0, 1.0, 0, 'channels_last', num_classes=10, batch_norm=utils.TpuBatchNormalization) <NEW_LINE> blocks_args = [ efficientnet_model.BlockArgs( kernel_size=3, num_repeat=3, input_filters=3, output_filters=6, expand_ratio=6, id_skip=False, strides=[2, 2], se_ratio=0.8, conv_type=0, fused_conv=0, super_pixel=0) ] <NEW_LINE> model = efficientnet_model.Model(blocks_args, global_params) <NEW_LINE> outputs = model(images, training=True) <NEW_LINE> self.assertEqual((10, 10), outputs.shape)
Test for creating a model with SE block arguments.
625941bf4a966d76dd550f4f
def __init__(self): <NEW_LINE> <INDENT> self.data = [-1] * 1000000
Initialize your data structure here.
625941bf21a7993f00bc7c2e
def calc_weights_max_cov_gauss(X, Y): <NEW_LINE> <INDENT> std_x = np.std(X, axis=0) <NEW_LINE> std_y = np.std(Y, axis=0) <NEW_LINE> cov_max = np.zeros(X.shape[1]) <NEW_LINE> Y_centered = Y - np.mean(Y, axis=0) <NEW_LINE> X_centered = X - np.mean(X, axis=0) <NEW_LINE> for ind_x in range(X.shape[1]): <NEW_LINE> <INDENT> cov_temp = np.matmul(X_centered[:, ind_x:ind_x+1].transpose(), Y_centered) / X.shape[0] / std_x[ind_x] / std_y <NEW_LINE> cov_max[ind_x] = np.max(np.abs(cov_temp)) <NEW_LINE> <DEDENT> w_cols = (np.exp(cov_max**2) - 1) <NEW_LINE> w_max_cov = np.append(w_cols/std_x, np.max(w_cols)/std_y) <NEW_LINE> return w_max_cov, w_cols / np.sum(w_cols)
Calculate the distance weights between the input features and targets. The weights are related to the maximum covariance between a column of X and any column of y, normalized by the variances of X and Y, and the variance of X to normalize for input scale.
625941bf73bcbd0ca4b2bfb8
def save_tif(fname: str, ar: np.ndarray, path_root: str) -> str: <NEW_LINE> <INDENT> path_tif_dir = os.path.join(path_root, "tifs") <NEW_LINE> if not os.path.exists(path_tif_dir): <NEW_LINE> <INDENT> os.makedirs(path_tif_dir) <NEW_LINE> logger.info(f"Created: {path_tif_dir}") <NEW_LINE> <DEDENT> path_save = os.path.join(path_tif_dir, fname) <NEW_LINE> tifffile.imsave(path_save, ar, compress=2) <NEW_LINE> logger.info(f"Saved: {path_save}") <NEW_LINE> return os.path.relpath(path_save, path_root)
Saves a tif and returns tif save path relative to root save directory. Image will be stored at: 'path_root/tifs/fname' Parameters ---------- fname Basename of save path. ar Array to be saved as tif. path_root Root directory of save path. Returns ------- str Save path relative to root directory.
625941bf627d3e7fe0d68d90
def delete(self, consumer_key, rid): <NEW_LINE> <INDENT> policy = Policy.query.filter( Policy.consumer_key == consumer_key, Policy.rid == rid ).first_or_404() <NEW_LINE> policy.remove() <NEW_LINE> return '', 204
Delete the policy definition from the specified consumer.
625941bf71ff763f4b5495c9
def reverse(self): <NEW_LINE> <INDENT> added = LinkedList() <NEW_LINE> for i in range(self.length-1,-1,-1): <NEW_LINE> <INDENT> added.append(self[i]) <NEW_LINE> <DEDENT> return added
Return a copy of the list with all elements in reverse order. E.g., for [1,2,3] you shoudl return [3,2,1].
625941bffb3f5b602dac35d3
def __init__(self, locator, configurator, acting_strategy): <NEW_LINE> <INDENT> super().__init__() <NEW_LINE> self._locator = locator <NEW_LINE> self._configurator = configurator <NEW_LINE> self._running = False <NEW_LINE> self._acting_strategy = acting_strategy <NEW_LINE> self._logger = logging.getLogger(__name__)
Parameters ---------- locator : mrc.localization.locator.Locator Main localization class, based on which output robot will act configurator : mrc.configuration.configurator.Configurator Robot configuration container acting_strategy : mrc.control.acting.abstract.AbstractStrategy Strategy in according to which robot will act
625941bf94891a1f4081b9ea
def add_validated_cover(self, cover): <NEW_LINE> <INDENT> self._covers.append(cover) <NEW_LINE> if len(self._covers) == 2: <NEW_LINE> <INDENT> self._horizontal = self._covers[0].row == cover.row
Add an already validated Cover object to this move
625941bf8c3a8732951582fa
def db_exists(name, **connection_args): <NEW_LINE> <INDENT> dbc = _connect(**connection_args) <NEW_LINE> if dbc is None: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> cur = dbc.cursor() <NEW_LINE> args = {"dbname": name.replace('%', r'\%').replace('_', r'\_')} <NEW_LINE> qry = "SHOW DATABASES LIKE %(dbname)s;" <NEW_LINE> try: <NEW_LINE> <INDENT> _execute(cur, qry, args) <NEW_LINE> <DEDENT> except MySQLdb.OperationalError as exc: <NEW_LINE> <INDENT> err = 'MySQL Error {0}: {1}'.format(*exc) <NEW_LINE> __context__['mysql.error'] = err <NEW_LINE> log.error(err) <NEW_LINE> return False <NEW_LINE> <DEDENT> cur.fetchall() <NEW_LINE> return cur.rowcount == 1
Checks if a database exists on the MySQL server. CLI Example: .. code-block:: bash salt '*' mysql.db_exists 'dbname'
625941bfa8ecb033257d3011
def uiSubstractNumbers(digitBase): <NEW_LINE> <INDENT> sign1 = '+' <NEW_LINE> sign2 = '+' <NEW_LINE> number1 = readParameter("<first number>: ") <NEW_LINE> number2 = readParameter("<second number>: ") <NEW_LINE> baseNumber = readParameter("<base>: ") <NEW_LINE> sign_printed = "+" <NEW_LINE> if checkRead(number1) == True and checkRead(number2) == True: <NEW_LINE> <INDENT> if number1[0] == '-': <NEW_LINE> <INDENT> sign1 = '-' <NEW_LINE> number1 = number1[1:] <NEW_LINE> <DEDENT> if number2[0] == '-': <NEW_LINE> <INDENT> sign2 = '-' <NEW_LINE> number2 = number2[1:] <NEW_LINE> <DEDENT> lengthNumber = max(len(number1), len(number2)) <NEW_LINE> if checkAddAndSubArguments(digitBase, number1, number2, baseNumber) == True: <NEW_LINE> <INDENT> number1 = convertToList(number1, digitBase, lengthNumber) <NEW_LINE> number2 = convertToList(number2, digitBase, lengthNumber) <NEW_LINE> baseNumber = int(baseNumber) <NEW_LINE> if sign1 == '-' and sign2 == '-': <NEW_LINE> <INDENT> sign_printed = "-" <NEW_LINE> <DEDENT> elif sign1 == '+' and sign2 == '+': <NEW_LINE> <INDENT> if int(number1[0]) < int(number2[0]): <NEW_LINE> <INDENT> sign_printed = "-" <NEW_LINE> <DEDENT> <DEDENT> elif sign1 == "-": <NEW_LINE> <INDENT> sign_printed = "-" <NEW_LINE> <DEDENT> elif sign2 == "-": <NEW_LINE> <INDENT> sign_printed = "+" <NEW_LINE> <DEDENT> number1, number2 = swapNumbers(number1, number2) <NEW_LINE> if sign1 == '-' and sign2 == '-': <NEW_LINE> <INDENT> number_sub = addNumbers(number1, number2, baseNumber) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if sign1 == '+' and sign2 == '+': <NEW_LINE> <INDENT> number_sub = subNumbers(number1, number2, baseNumber) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if sign1 == '-': <NEW_LINE> <INDENT> number_sub = addNumbers(number1, number2, baseNumber) <NEW_LINE> <DEDENT> if sign2 == '-': <NEW_LINE> <INDENT> number_sub = addNumbers(number1, number2, baseNumber) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> printNumberList(number_sub, sign_printed)
User interface for substraction :param digitBase: all the digits possible :return: None
625941bf377c676e912720eb
def timestamp2time(timestamp): <NEW_LINE> <INDENT> time_array = time.localtime(timestamp) <NEW_LINE> mytime = time.strftime("%Y-%m-%d %H:%M:%S", time_array) <NEW_LINE> return mytime
时间戳转为格式化的时间字符串 :param timestamp: :return: 格式化的时间字符串
625941bfa8370b77170527e3
def biogrid_parser(fileName, baitS): <NEW_LINE> <INDENT> import os.path <NEW_LINE> from tools import prot_id_converter <NEW_LINE> print("processing BIOgrid file... ", end="") <NEW_LINE> baitStr = baitS.upper() <NEW_LINE> inpF = open(os.path.join(os.path.split(os.path.dirname(__file__))[0], "data", fileName),"r") <NEW_LINE> idL = [] <NEW_LINE> headerFlag = True <NEW_LINE> for inpLine in inpF: <NEW_LINE> <INDENT> if headerFlag: <NEW_LINE> <INDENT> headerFlag = False <NEW_LINE> continue <NEW_LINE> <DEDENT> inpList = inpLine.split("\t")[1:3] <NEW_LINE> for inpI in inpList: <NEW_LINE> <INDENT> if inpI not in idL: <NEW_LINE> <INDENT> idL.append(inpI) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> idList = prot_id_converter(idL, orgnID = "9606", inpDB = "geneid", outDB="genesymbol") <NEW_LINE> idNList = [] <NEW_LINE> for idI in idList: <NEW_LINE> <INDENT> if idI == "-" or idI.upper() == baitStr: continue <NEW_LINE> else: idNList.append(idI.upper()) <NEW_LINE> <DEDENT> print("Done.") <NEW_LINE> return idNList
open biogrid 2.0 tab format file and extract interactor protein gene IDs. Convert to refseq protein accessions. Return them a list.
625941bf32920d7e50b28110
def plot_confusion_matrix(cm, classes, title='Confusion matrix', cmap=plt.cm.Blues): <NEW_LINE> <INDENT> cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] <NEW_LINE> plt.imshow(cm, interpolation='nearest', cmap=cmap) <NEW_LINE> plt.title(title, fontsize=30) <NEW_LINE> plt.colorbar() <NEW_LINE> tick_marks = np.arange(len(classes)) <NEW_LINE> plt.xticks(tick_marks, classes, rotation=90, fontsize=22) <NEW_LINE> plt.yticks(tick_marks, classes, fontsize=22) <NEW_LINE> fmt = '.2f' <NEW_LINE> thresh = cm.max() / 2. <NEW_LINE> for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): <NEW_LINE> <INDENT> plt.text(j, i, format(cm[i, j], fmt),horizontalalignment="center",color="white" if cm[i, j] > thresh else "black") <NEW_LINE> <DEDENT> plt.ylabel('True label', fontsize=25) <NEW_LINE> plt.xlabel('Predicted label', fontsize=25)
This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`.
625941bfcc40096d61595894
def __unicode__(self): <NEW_LINE> <INDENT> return "%s" % (self.name)
Returns the custom output string for this object
625941bf29b78933be1e55f3
def verify_credentials(name, password): <NEW_LINE> <INDENT> user = [user for user in users if user['name'] == name and user['password'] == password] <NEW_LINE> if user: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> return False
Function to check if passwords match Returns True if user already exists, else returns False
625941bfd7e4931a7ee9de5f
def num_entry(self): <NEW_LINE> <INDENT> if self._num_entry is None: <NEW_LINE> <INDENT> self._num_entry = self._arm_idx_section['sh_size'] // EHABI_INDEX_ENTRY_SIZE <NEW_LINE> <DEDENT> return self._num_entry
Number of exception handler entry in the section.
625941bf30c21e258bdfa3de
def init(conf): <NEW_LINE> <INDENT> global TRANSPORT, NOTIFIER <NEW_LINE> exmods = get_allowed_exmods() <NEW_LINE> TRANSPORT = messaging.get_transport(conf, allowed_remote_exmods=exmods, aliases=TRANSPORT_ALIASES) <NEW_LINE> serializer = RequestContextSerializer(JsonPayloadSerializer()) <NEW_LINE> NOTIFIER = messaging.Notifier(TRANSPORT, serializer=serializer)
初始化过程,实现三方面内容的初始化: 1.确定xdrs异常类的基类的处理文件xdrs.exception; 2.确定了使用rabbit这个AMQP的driver方式: 3.加载notifier各种驱动实现方式: [oslo.messaging.notify.drivers] log = oslo.messaging.notify._impl_log:LogDriver messagingv2 = oslo.messaging.notify._impl_messaging:MessagingV2Driver noop = oslo.messaging.notify._impl_noop:NoOpDriver routing = oslo.messaging.notify._impl_routing:RoutingDriver test = oslo.messaging.notify._impl_test:TestDriver messaging = oslo.messaging.notify._impl_messaging:MessagingDriver
625941bf45492302aab5e203
def gen_map_file(self, qfile): <NEW_LINE> <INDENT> r1 = qfile.write("Line 1\n") <NEW_LINE> r2 = qfile.write("Line 2\n!") <NEW_LINE> return 2
Generate map file. If this function is absent then the kernel will create the map file. This function returns number of lines in output file. 0 - empty file, -1 - write error
625941bf23e79379d52ee4a9
def __setattr__(self, name, value): <NEW_LINE> <INDENT> if not name in SetupPreferences._Parameters: <NEW_LINE> <INDENT> raise AttributeError('not defined:%s'%name) <NEW_LINE> <DEDENT> par = SetupPreferences._Parameters[name] <NEW_LINE> par.item.setValue(value)
undefined attributes might be parameters
625941bf5fc7496912cc38c0
def purge_security_groups(self, project): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> sec_groups = self.client.list_security_groups(tenant_id=project.id) <NEW_LINE> <DEDENT> except exceptions.ServiceUnavailable: <NEW_LINE> <INDENT> self.log_error('Neutron: Service Unavailable') <NEW_LINE> sec_groups = None <NEW_LINE> <DEDENT> if not sec_groups: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> for sg in sec_groups['security_groups']: <NEW_LINE> <INDENT> self.delete_security_group(sg['id'])
Remove all security groups for a project
625941bf99fddb7c1c9de2d5
def test_infoset_node_precedes(self): <NEW_LINE> <INDENT> assert not self.extensive_game.players[0].infosets[0].precedes(self.extensive_game.root) <NEW_LINE> assert self.extensive_game.players[1].infosets[0].precedes(self.extensive_game.root.children[0])
Test to check if the infoset preceding check works
625941bfdc8b845886cb5476
def get_summary(IDs): <NEW_LINE> <INDENT> result = [] <NEW_LINE> soup = open_url2(IDs) <NEW_LINE> for i in soup.find_all("div", class_= "section"): <NEW_LINE> <INDENT> result.append(i.get_text()) <NEW_LINE> <DEDENT> content = ''.join(result).split("\n") <NEW_LINE> if "Summary" in content: <NEW_LINE> <INDENT> Summary = ''.join(content[content.index("Summary")+1].split('[')[0]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> Summary = "There is no summary" <NEW_LINE> <DEDENT> Organism = ''.join(content[content.index("Organism")+2]) <NEW_LINE> return Summary, Organism
对get_item的输出结果进行查询,输入summary和Organism的内容
625941bf462c4b4f79d1d613
def __init__(self, arr): <NEW_LINE> <INDENT> def count(inv_idx, m, left, right): <NEW_LINE> <INDENT> return bisect.bisect_right(inv_idx[m], right) - bisect.bisect_left(inv_idx[m], left) <NEW_LINE> <DEDENT> self.__arr = arr <NEW_LINE> self.__inv_idx = collections.defaultdict(list) <NEW_LINE> for i, x in enumerate(self.__arr): <NEW_LINE> <INDENT> self.__inv_idx[x].append(i) <NEW_LINE> <DEDENT> self.__segment_tree = SegmentTreeRecu(arr, functools.partial(count, self.__inv_idx))
:type arr: List[int]
625941bf4e696a04525c938f
def RLS(x,y,M): <NEW_LINE> <INDENT> n = x.shape[0] <NEW_LINE> m = M - 1 <NEW_LINE> p = np.zeros(m*m) <NEW_LINE> px = np.zeros(m) <NEW_LINE> u = np.zeros(m) <NEW_LINE> g = np.zeros(m) <NEW_LINE> w = np.zeros(m) <NEW_LINE> d = y <NEW_LINE> r = 1.0 <NEW_LINE> for i in range(m): <NEW_LINE> <INDENT> for j in range(m): <NEW_LINE> <INDENT> p[i*m+j] = 0.0 <NEW_LINE> <DEDENT> <DEDENT> for i in range(m): <NEW_LINE> <INDENT> p[i*m+i] = 1.0e+8 <NEW_LINE> <DEDENT> for k in range(n): <NEW_LINE> <INDENT> px[0] = x[k] <NEW_LINE> for j in range(m): <NEW_LINE> <INDENT> u[j] = 0.0 <NEW_LINE> for i in range(m): <NEW_LINE> <INDENT> u[j] = u[j] + (1/r)*p[j*m+i]*px[i] <NEW_LINE> <DEDENT> <DEDENT> s = 1.0 <NEW_LINE> for i in range(m): <NEW_LINE> <INDENT> s = s + u[i] * px[i] <NEW_LINE> <DEDENT> for i in range(m): <NEW_LINE> <INDENT> g[i] = u[i]/s <NEW_LINE> <DEDENT> x[k] = 0.0 <NEW_LINE> for i in range(m): <NEW_LINE> <INDENT> x[k] = x[k] + w[i] * px[i] <NEW_LINE> <DEDENT> a = d[k] - x[k] <NEW_LINE> for i in range(m): <NEW_LINE> <INDENT> w[i] = w[i] + g[i] * a <NEW_LINE> <DEDENT> for j in range(m): <NEW_LINE> <INDENT> for i in range(m): <NEW_LINE> <INDENT> p[j*m+i] = (1/r) *p[j*m+i] - g[j] * u[i] <NEW_LINE> <DEDENT> <DEDENT> for i in range(m-1,0,-1): <NEW_LINE> <INDENT> px[i] = px[i-1] <NEW_LINE> <DEDENT> <DEDENT> return d , x
递推最小二乘法自适应数字滤波 x为一维数组
625941bfe1aae11d1e749bf8
def add_namespace_to_graph(self, graph: BELGraph) -> Namespace: <NEW_LINE> <INDENT> namespace = self.upload_bel_namespace() <NEW_LINE> graph.namespace_url[namespace.keyword] = namespace.url <NEW_LINE> self._add_annotation_to_graph(graph) <NEW_LINE> return namespace
Add this manager's namespace to the graph.
625941bf7cff6e4e811178c8
def filename(*args: NestPath, pkgname: OptStr = None) -> str: <NEW_LINE> <INDENT> pkgname = pkgname or pkg.get_root_name(stack.get_caller_module_name(-2)) <NEW_LINE> path = expand(*args, pkgname=pkgname) <NEW_LINE> if path.is_dir(): <NEW_LINE> <INDENT> return '' <NEW_LINE> <DEDENT> return str(path.name)
Extract file name from a path like structure. Args: *args: Path like arguments, respectively given by a tree of strings, which can be joined to a path. Returns: String containing normalized directory path of file. Examples: >>> filename(('a', ('b', 'c')), 'base.ext') 'base.ext'
625941bf4f6381625f11497f
def countAndSay(self, n): <NEW_LINE> <INDENT> return count_say_rec(n)
:type n: int :rtype: str
625941bfe76e3b2f99f3a753
def numMagicSquaresInside(self, grid): <NEW_LINE> <INDENT> def isMagic(a,b,c,d,e,f,g,h,i): <NEW_LINE> <INDENT> return sorted([a,b,c,d,e,f,g,h,i])==list(range(1,10)) and a+b+c==d+e+f==g+h+i==a+d+g==b+e+h==c+f+i==a+e+i==c+e+g==15 <NEW_LINE> <DEDENT> ans=0 <NEW_LINE> r,c=len(grid),len(grid[0]) <NEW_LINE> for i in range(r-2): <NEW_LINE> <INDENT> for j in range(c-2): <NEW_LINE> <INDENT> if isMagic(grid[i][j],grid[i][j+1],grid[i][j+2],grid[i+1][j],grid[i+1][j+1],grid[i+1][j+2],grid[i+2][j],grid[i+2][j+1],grid[i+2][j+2]): <NEW_LINE> <INDENT> ans+=1 <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return ans
:type grid: List[List[int]] :rtype: int
625941bfbe7bc26dc91cd547
def clear_menu_cache(**kwargs): <NEW_LINE> <INDENT> menu_pool.clear(all=True)
Empty menu cache when saving categories
625941bf85dfad0860c3ad9c
def main(figureSaveLocation):
    """Create and save the two-panel demonstration figure.

    Renders an image demonstrating the suboptimal nature of list-removal
    MIS approximaters, then writes it to figureSaveLocation.
    """
    figure = plt.figure()
    grid = gridspec.GridSpec(1, 2)
    grid.update(left=0.005, right=0.995, bottom=0.05, top=1, wspace=0.05)
    # Left panel: two black (removed) nodes.
    plt.subplot(grid[0, 0])
    generate_graph(figure, blackNodes=['B\n50 AA\n', 'C\n75 AA\n'])
    # Right panel: a single black node.
    plt.subplot(grid[0, 1])
    generate_graph(figure, blackNodes=['A\n100 AA\n'])
    for axis in figure.get_axes():
        removeTickMarks(axis, xAxis=True, yAxis=True)
    plt.savefig(figureSaveLocation, bbox_inches='tight', transparent=True)
Create an image for demonstrating the suboptimal nature of list removal MIS approximaters.
625941bf8a43f66fc4b53faa
def _gs_iter(self):
    """Perform one Gauss-Seidel iteration over this solver's subsystems."""
    system = self._system
    # NOTE: the local map is only bound when subsystems exist; the loop
    # below is empty in the other case, so the name is never read unbound.
    if system._subsystems_allprocs:
        local_map = system._loc_subsys_map
    for sub_index, subsystem in enumerate(system._subsystems_allprocs):
        system._transfer('nonlinear', 'fwd', sub_index)
        # Only solve subsystems that live on this processor.
        if subsystem.name in local_map:
            subsystem._solve_nonlinear()
        system._check_reconf_update(subsystem)
Perform a Gauss-Seidel iteration over this Solver's subsystems.
625941bf091ae35668666ea6
def calculate_bounding_rect(self):
    """Recompute self.bounding_rect from the centers of the owned nodes.

    The rectangle encloses every node center with a 10-unit margin on
    each side.
    """
    # sorted()[0] / [-1] (rather than min/max) keeps the original
    # IndexError behavior when self.nodes is empty.
    xs = sorted(node.cx for node in self.nodes)
    ys = sorted(node.cy for node in self.nodes)
    top_left = QPointF(xs[0] - 10, ys[0] - 10)
    bottom_right = QPointF(xs[-1] + 10, ys[-1] + 10)
    self.bounding_rect = QRectF(top_left, bottom_right)
Calculate the bounding rectangle of the component based on what nodes it owns.
625941bfab23a570cc2500c3
def get_arr(self, MA_arr, arr=('alpha',)):
    """Return numpy array(s) of orbital parameter(s) for each mean anomaly.

    Arguments:
    MA_arr -- iterable of mean-anomaly values.
    arr -- sequence naming which series to return, from 'alpha', 'EA',
           'TA', 't'. Unknown names are silently skipped (original
           behavior preserved).

    Returns:
    A single numpy array when one name is requested, otherwise a list of
    arrays in the order the names appear in ``arr``.

    FIX: the original used a mutable default argument (arr=['alpha']);
    the default is now an equivalent immutable tuple.
    """
    series = {'alpha': [], 'EA': [], 'TA': [], 't': []}
    for MA in MA_arr:
        # Computation order preserved: EA -> TA -> t -> phase.
        EA = self.get_EA(MA)
        TA = self.get_TA(MA, EA)
        t = self.get_t(MA, TA)
        series['alpha'].append(self.get_phase(TA))
        series['EA'].append(EA)
        series['TA'].append(TA)
        series['t'].append(t)
    result = [np.array(series[name]) for name in arr if name in series]
    return result if len(arr) > 1 else result[0]
Returns a numpy array of the indicated parameter(s), where the elements correspond to the input MA values.
625941bf5fdd1c0f98dc0175
def conv_single_step(a_slice_prev, W, b):
    """Apply one convolution filter (W, b) to a single slice of the input.

    FIX: the original was an unimplemented stub that returned None.

    Arguments:
    a_slice_prev -- slice of input data, shape (f, f, n_C_prev)
    W -- filter weights, shape (f, f, n_C_prev)
    b -- filter bias, shape (1, 1, 1)

    Returns:
    Z -- a scalar: the result of convolving (W, b) with the slice.
    """
    # Element-wise product between the slice and the filter weights.
    s = a_slice_prev * W
    # Sum all entries of the product.
    Z = np.sum(s)
    # Add the bias as a Python scalar (squeeze handles the (1,1,1) shape).
    Z = Z + float(np.squeeze(b))
    return Z
Apply one filter defined by parameters W on a single slice (a_slice_prev) of the output activation of the previous layer. Arguments: a_slice_prev -- slice of input data of shape (f, f, n_C_prev) W -- Weight parameters contained in a window - matrix of shape (f, f, n_C_prev) b -- Bias parameters contained in a window - matrix of shape (1, 1, 1) Returns: Z -- a scalar value, the result of convolving the sliding window (W, b) on a slice x of the input data
625941bfb5575c28eb68df41
def erase(self, *args):
    """erase(self, iterator pos) -> iterator
    erase(self, iterator first, iterator last) -> iterator
    """
    # Delegate to the SWIG-generated C extension.
    return _pyBasePython.vectorsetUL_erase(self, *args)
erase(self, iterator pos) -> iterator erase(self, iterator first, iterator last) -> iterator
625941bf16aa5153ce3623bc
@login_required
@email_verified
def create_demo_course(request):
    """Create a demo course for the requesting moderator.

    Raises PermissionError for non-moderators; otherwise creates the demo
    course (at most once) and renders the moderator dashboard with a
    status message.
    """
    user = request.user
    # FIX: the original did `raise("...")`, which raises
    # "TypeError: exceptions must derive from BaseException" instead of a
    # meaningful error. Also removed the unused `ci = RequestContext(request)`.
    if not is_moderator(user):
        raise PermissionError("You are not allowed to view this page")
    demo_course = Course()
    if demo_course.create_demo(user):
        msg = "Created Demo course successfully"
    else:
        msg = "Demo course already created"
    return prof_manage(request, msg)
creates a demo course for user
625941bf92d797404e3040cc
def test_render(self, mock_render_field, mock_pdf_wrt, mock_pdf_rdr, v3_form_data):
    """Exercise FormRenderer.render with a two-page PDF reader stub.

    Rough test (candidate for refactor): merely asserts that
    render_field() is called once per page.
    """
    class StubReader(object):
        # Stand-in for a PyPDF reader reporting two pages.
        getNumPages = MagicMock(return_value=2)
        getPage = MagicMock()

    with patch('builtins.open', mock_open(read_data=v3_form_data), create=True):
        renderer = FormRenderer("base_form.pdf", "form_data.json", "output_form.pdf")
    mock_pdf_rdr.return_value = StubReader()
    with patch('builtins.open', mock_open(read_data=b'0'), create=True):
        renderer.render()
    assert mock_render_field.call_count == 2
Test render method. Rough. Looks like it will inspire method refactor. Merely assert that render_field() called twice.
625941bf4f88993c3716bfad
def assertIsNotNone(self, obj, msg=None):
    """Fail the test if obj is None. Included for symmetry with assertIsNone."""
    if obj is not None:
        return
    self.fail(self._formatMessage(msg, 'unexpectedly None'))
Included for symmetry with assertIsNone.
625941bfbd1bec0571d90571
def group_chat(cmd, uid, sock, onlinesocket):
    """Dispatch a group-chat command.

    The first character of cmd selects the action; the fixed-width fields
    that follow (group id at cmd[1:6], secondary id at cmd[6:11], free
    text after) are parsed per action. Unknown commands are ignored.
    """
    print('group chat')
    # Each handler does its own field parsing, since the layout (and
    # whether fields are numeric) differs per command.
    handlers = {
        '0': lambda: send_group_msg(int(uid), int(cmd[1:6]), onlinesocket, cmd[6:]),
        '1': lambda: new_group(cmd[1:], int(uid), sock),
        '2': lambda: invite(int(cmd[1:6]), int(cmd[6:11])),
        '3': lambda: pingbi(int(uid), int(cmd[1:6])),
        '4': lambda: cancelpingbi(int(uid), int(cmd[1:6])),
        '5': lambda: del_mem(int(uid), int(cmd[6:11]), int(cmd[1:6])),
        '6': lambda: del_group(int(uid), int(cmd[1:6])),
        '7': lambda: get_info(int(cmd[1:6]), sock),
        '8': lambda: add_ad(int(cmd[6:11]), int(uid), int(cmd[1:6])),
        '9': lambda: exit_group(int(cmd[1:6]), int(uid)),
    }
    handler = handlers.get(cmd[0])
    if handler is not None:
        handler()
处理群聊相关命令
625941bf435de62698dfdb8f
def __call__(self, str_method=''):
    """Dispatch to the attribute named str_method and return its result.

    Returns the 404 page markup when str_method is empty or names no
    attribute on this object.

    FIX: with an empty str_method the original skipped the assignment to
    `func` but still executed `return func()`, raising UnboundLocalError
    (which the `except AttributeError` did not catch). An empty name now
    returns the 404 page like any other miss.
    """
    try:
        if str_method:
            # The call stays inside the try to preserve the original
            # behavior of mapping AttributeError from the handler to 404.
            return getattr(self, str_method)()
        return "<h1>Page Not Found</h1>"
    except AttributeError:
        return "<h1>Page Not Found</h1>"
.
625941bf6fb2d068a760efde
def buildGraphAndJunctionsDictionaryAndEdgesDictionary(mtraci):
    """Parse the SUMO network file and build the routing structures.

    Returns a 3-tuple:
    - graph dict: {junctionId: {successor junction: edge length}}
    - junctions dict: {junctionId: [set(predecessor edges), set(successor edges)]}
    - edges dict: {edgeId: [predecessor junction, successor junction]}
    """
    Logger.info("{}Building graph, junctions dictionary and edges dictionary...".format(constants.PRINT_PREFIX_GRAPH))
    graph_dict, junctions_dict, edges_dict = dict(), dict(), dict()
    # The SAX handler fills the three dictionaries as it walks the file.
    sax_parser = xml.sax.make_parser()
    sax_parser.setContentHandler(NetworkHandler(graph_dict, junctions_dict, edges_dict, mtraci))
    sax_parser.parse(constants.SUMO_NETWORK_FILE)
    Logger.info("{}Done".format(constants.PRINT_PREFIX_GRAPH))
    return graph_dict, junctions_dict, edges_dict
Returns - A graph built as a dictionary as {Key=junctionId, Value=Dict as{Key=junction successor, Value=edge length between the junctions} - A junctions dictionary as {Key=junctionId, Value=[Set(edgesId predecessors of the junction), Set(edgesId successors of the junction)] - An edges dictionary as {Key=edgeId, Value=[junction predecessor, junction successor]
625941bf26238365f5f0edae
def initializeUniformly(self, gameState):
    """Initialize particles consistent with a uniform prior.

    Builds every joint assignment of the ghosts to legal positions,
    shuffles, and distributes the particles evenly across assignments
    (cycling when there are more particles than assignments).
    """
    all_assignments = list(itertools.product(self.legalPositions,
                                             repeat=self.numGhosts))
    random.shuffle(all_assignments)
    total = len(all_assignments)
    self.particles = [all_assignments[i % total]
                      for i in range(self.numParticles)]
    return 0
Initialize particles to be consistent with a uniform prior. Particles should be evenly distributed across positions in order to ensure a uniform prior.
625941bf66673b3332b91fd4
def _parse_error(self, error):
    """Parse a single GLSL compiler error line.

    Tries the known vendor formats in turn and returns
    (line_number, description); raises ValueError when no format matches.

    Parameters
    ----------
    error : str
        An error string as returned by the compilation process.
    """
    # (pattern, group holding the line number, group holding the message)
    known_formats = [
        (r'(\d+)\((\d+)\)\s*:\s(.*)', 2, 3),
        (r'ERROR:\s(\d+):(\d+):\s(.*)', 2, 3),
        (r'(\d+):(\d+)\((\d+)\):\s(.*)', 2, 4),
    ]
    for pattern, line_group, msg_group in known_formats:
        match = re.match(pattern, error)
        if match:
            return int(match.group(line_group)), match.group(msg_group)
    raise ValueError('Unknown GLSL error format:\n{}\n'.format(error))
Parses a single GLSL error and extracts the line number and error description. Parameters ---------- error : str An error string as returned by the compilation process
625941bfac7a0e7691ed4014
def test_01(self):
    """Log in, search the cloud factory "庆科技机械", open a product page
    and read its name, then open the message center and send one message
    to the merchant.
    """
    self.read_element.login("CapUserName", "CapPassWord")
    self.read_element.search_y("庆科技机械")
    self.read_element.get_user_element("wares").click()
    product_name = self.read_element.get_user_element("wares_name").text
    print("进入商品详情页面,商品名称为:", product_name)
    self.read_element.get_user_element("news").click()
    print("准备给商家发送信息。。。。。。")
    self.read_element.get_user_element("input_box").click()
    self.read_element.send_user_info("input_box", self.news)
    time.sleep(5)
    self.read_element.get_user_element("send_out").click()
    time.sleep(3)
    print("成功给商家发送信息:", self.news)
登录之后搜索云工厂“庆科技机械”,点击一个商品进入商品详情,获取商品名称,然后点击客服进入消息中心,发送一条消息
625941bf07d97122c41787c9
def MeanRandGal(self):
    """MeanRandGal(AngularBin self) -> double"""
    # Delegate to the SWIG-generated _stomp extension.
    return _stomp.AngularBin_MeanRandGal(self)
MeanRandGal(AngularBin self) -> double
625941bf55399d3f055885f6
def isIndexValid(index):
    """Return True when index names a playable board cell.

    Valid means 0-8 and the cell still holds a default marker; otherwise
    a warning is printed and False is returned.
    """
    index = int(index)
    # Chained comparison short-circuits before indexing, so out-of-range
    # values never touch boardList (same safety as the original).
    if 0 <= index <= 8 and boardList[index] in defaultList:
        return True
    print("Your entered index is not valid")
    return False
Checks if the entered index is valid
625941bf3346ee7daa2b2cad
def etree_repr(self):
    """Return an ElementTree representation of this prepare step.

    Builds a <prepare> element whose text is the command; optional
    attributes (stdout, stderr, active, work_dir) are emitted only when
    set, in the original order.
    """
    node = ET.Element("prepare")
    node.text = self._do
    # Attribute insertion order matters for serialization; keep it
    # identical to the original: stdout, stderr, active, work_dir.
    for attr_name, value in (("stdout", self._stdout_filename),
                             ("stderr", self._stderr_filename)):
        if value is not None:
            node.attrib[attr_name] = value
    if self._active != "true":
        node.attrib["active"] = self._active
    if self._work_dir is not None:
        node.attrib["work_dir"] = self._work_dir
    return node
Return etree object representation
625941bf99cbb53fe6792b2a
def GetHistAvg(tag_name, start_time, end_time, period, desc_as_label=False, label=None):
    """Retrieve eDNA history for a tag, averaged over "period".

    Thin wrapper around GetHist with mode="avg".

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: averaging window in seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name
    :param label: custom label to use as the DataFrame column name
    :return: pandas DataFrame with timestamp, value, and status
    """
    return GetHist(tag_name, start_time, end_time, mode="avg", period=period,
                   desc_as_label=desc_as_label, label=label)
Retrieves data from eDNA history for a given tag. The data will be averaged over the specified "period". :param tag_name: fully-qualified (site.service.tag) eDNA tag :param start_time: must be in format mm/dd/yy hh:mm:ss :param end_time: must be in format mm/dd/yy hh:mm:ss :param period: in units of seconds (e.g. 10) :param desc_as_label: use the tag description as the column name instead of the full tag :param label: supply a custom label to use as the DataFrame column name :return: a pandas DataFrame with timestamp, value, and status
625941bf097d151d1a222d9e
def canWin(self, s):
    """Return True if the current player can force a win in the flip game.

    A move flips one "++" to "--"; the current player wins when some move
    leaves the opponent with no winning move.

    FIX: replaced Python-2-only `xrange` with `range` (NameError on
    Python 3).

    :type s: str
    :rtype: bool
    """
    for i in range(len(s) - 1):
        if s[i:i + 2] == '++' and not self.canWin(s[:i] + '--' + s[i + 2:]):
            return True
    return False
:type s: str :rtype: bool
625941bf167d2b6e31218ad9
def cut(S, T, graph):
    """Compute the size of the cut-set of the cut (S, T).

    The cut-set is the set of edges with one endpoint in S and the other
    in T; an edge is counted when graph.has_edge holds in both directions
    (always true for an undirected networkx graph).

    Params:
      S.......set of nodes in first subset
      T.......set of nodes in second subset
      graph...networkx graph

    Returns:
      An int representing the cut-set size.

    >>> cut(['A', 'B', 'C'], ['D', 'E', 'F', 'G'], example_graph())
    1

    FIX: removed the unreachable `pass` statement after the return.
    """
    return sum(
        1
        for x in T
        for y in S
        if graph.has_edge(x, y) and graph.has_edge(y, x)
    )
Compute the cut-set of the cut (S,T), which is the set of edges that have one endpoint in S and the other in T. Params: S.......set of nodes in first subset T.......set of nodes in second subset graph...networkx graph Returns: An int representing the cut-set. >>> cut(['A', 'B', 'C'], ['D', 'E', 'F', 'G'], example_graph()) 1
625941bf236d856c2ad4471a