code stringlengths 4 4.48k | docstring stringlengths 1 6.45k | _id stringlengths 24 24 |
|---|---|---|
def recv(self, timeout: Optional[float] = None) -> Optional[Message]: <NEW_LINE> <INDENT> start = time() <NEW_LINE> time_left = timeout <NEW_LINE> while True: <NEW_LINE> <INDENT> msg, already_filtered = self._recv_internal(timeout=time_left) <NEW_LINE> if msg and (already_filtered or self._matches_filters(msg)): <NEW_LINE> <INDENT> LOG.log(self.RECV_LOGGING_LEVEL, "Received: %s", msg) <NEW_LINE> return msg <NEW_LINE> <DEDENT> elif timeout is None: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> time_left = timeout - (time() - start) <NEW_LINE> if time_left > 0: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return None | Block waiting for a message from the Bus.
:param timeout:
seconds to wait for a message or None to wait indefinitely
:return:
None on timeout or a :class:`Message` object.
:raises can.CanError:
if an error occurred while reading | 625941bdcb5e8a47e48b79ad |
def createCopy(self, translationVector): <NEW_LINE> <INDENT> newLine = LaserLine(self.start,self.end,self.power,self.frequency,self.speed).translate(translationVector) <NEW_LINE> return newLine | Creates a copy of the the laserline and if provided translates the copy by the translationvector | 625941bd6e29344779a62514 |
def getWalls (self) -> list: <NEW_LINE> <INDENT> return [self.upperWall, self.rightWall, self.lowerWall, self.leftWall] | Returns a list of bools which represents if each of the four walls is present | 625941bd9c8ee82313fbb674 |
def __get_baseline_features(self, word_sequence, position, pos_tag=None): <NEW_LINE> <INDENT> word = get_word(word_sequence, position) <NEW_LINE> word_left1 = get_word(word_sequence, position - 1) <NEW_LINE> word_left2 = get_word(word_sequence, position - 2) <NEW_LINE> word_right1 = get_word(word_sequence, position + 1) <NEW_LINE> word_right2 = get_word(word_sequence, position + 2) <NEW_LINE> features = self.__spelling_features(word, 0) <NEW_LINE> features["word(-1)={0}".format(word_left1)] = 1 <NEW_LINE> features["word(-2)={0}".format(word_left2)] = 1 <NEW_LINE> features["word(+1)={0}".format(word_right1)] = 1 <NEW_LINE> features["word(+2)={0}".format(word_right2)] = 1 <NEW_LINE> return features | Builds the baseline features by using spelling of the word at the position
and 2 words left and right of the word.
@type word_sequence: list
@param word_sequence: sequence of words
@type position: int
@param position: position of word in the given sequence
@type pos_tag: list
@param pos_tag: sequence of pos_tag
@return: baseline features (dict) | 625941bd3539df3088e2e24a |
def test_parse_reverses_tokens_on_rtl(single_token_ordered_visitor, valid_data_string_with_group_size): <NEW_LINE> <INDENT> valid_data_string, n, expected_len = valid_data_string_with_group_size <NEW_LINE> ltr = parser.parse(valid_data_string, single_token_ordered_visitor, group_size=n) <NEW_LINE> rtl = parser.parse(valid_data_string, single_token_ordered_visitor, group_size=n, rtl=True) <NEW_LINE> assert is_a_reverse_shallow_copy_list(ltr, rtl) | Assert that :meth:`~pai_parser.parser.parse` visits and yields nodes in "reverse" order when using
right-to-left mode. | 625941bd92d797404e304088 |
def count(self, val): <NEW_LINE> <INDENT> return sum(1 for e in self.frontierpq if e[0] == val) | returns the number of elements of the frontier with value=val | 625941bd6aa9bd52df036ca2 |
def _printfNow(self): <NEW_LINE> <INDENT> now = time.localtime() <NEW_LINE> return "{year}-{month:02d}-{day:02d} {hour:02d}:{minute:02d}:{second:02d}".format(year=now.tm_year, month=now.tm_mon, day=now.tm_mday, hour=now.tm_hour, minute=now.tm_min, second=now.tm_sec) | 打印当前时刻
| 625941bd956e5f7376d70d6e |
def integrateTrimmedSum(self): <NEW_LINE> <INDENT> currentThreshold = ((max(self.currentSamples) - min(self.currentSamples))/5.0 + min(self.currentSamples)) <NEW_LINE> lowValueIndices = np.where(self.currentSamples < currentThreshold) <NEW_LINE> baseline = np.median(self.currentSamples[lowValueIndices]) <NEW_LINE> return np.trapz(self.currentSamples - baseline, self.timeSamples) | Integrate points with a baseline level subtracted.
This uses numpy's trapezoidal integrator.
Returns
-------
sum : `float`
Total charge measured.
See Also
--------
lsst.eotask.gen3.eoPtc | 625941bdd8ef3951e324343d |
def __show_execute_commands(self, commands): <NEW_LINE> <INDENT> for line in commands: <NEW_LINE> <INDENT> self._print(line.strip()) | Prints the command list without executing them | 625941bd091ae35668666e63 |
def test_float_error_handler_soft_double(self): <NEW_LINE> <INDENT> class MockConsole(object): <NEW_LINE> <INDENT> def write_line(self, s): <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> <DEDENT> vm = values.Values(None, double_math=True) <NEW_LINE> vm.set_handler(values.FloatErrorHandler(MockConsole())) <NEW_LINE> def ovflerr(x): <NEW_LINE> <INDENT> e = OverflowError() <NEW_LINE> raise e <NEW_LINE> <DEDENT> one = vm.new_double().from_int(1) <NEW_LINE> result = values._call_float_function(ovflerr, one) <NEW_LINE> assert isinstance(result, Double) | Test FloatErrorHandler. | 625941bd30dc7b7665901869 |
def get_bookmark(ui): <NEW_LINE> <INDENT> if ui.radio_bookmark_file.isChecked(): <NEW_LINE> <INDENT> type = 'file' <NEW_LINE> <DEDENT> elif ui.radio_bookmark_directory.isChecked(): <NEW_LINE> <INDENT> type = 'directory' <NEW_LINE> <DEDENT> option_var_name = get_bookmark_option_var_name(type) <NEW_LINE> ls = cmds.optionVar(q=option_var_name) <NEW_LINE> if ls == 0: <NEW_LINE> <INDENT> ls = [] <NEW_LINE> <DEDENT> return ls | 記録されているブックマーク情報を取得する
:param ui: uiのインスタンス
:return: フルパスのリスト | 625941bd851cf427c661a411 |
def test_create_image_with_min_ram(self): <NEW_LINE> <INDENT> fixture = {'name': 'fake public image', 'is_public': True, 'status': 'active', 'min_ram': 256, 'disk_format': 'vhd', 'container_format': 'ovf'} <NEW_LINE> req = webob.Request.blank('/rpc') <NEW_LINE> req.method = "POST" <NEW_LINE> cmd = [{ 'command': 'image_create', 'kwargs': {'values': fixture} }] <NEW_LINE> req.body = jsonutils.dump_as_bytes(cmd) <NEW_LINE> res = req.get_response(self.api) <NEW_LINE> self.assertEqual(200, res.status_int) <NEW_LINE> res_dict = jsonutils.loads(res.body)[0] <NEW_LINE> self.assertEqual(fixture['min_ram'], res_dict['min_ram']) | Tests that the registry API creates the image | 625941bd5fc7496912cc387d |
def dc_title(self): <NEW_LINE> <INDENT> return "Subject {0}".format(self.code_in_study) | Define the subject entity title.
| 625941bd009cb60464c632b3 |
def save_hdf_metadata(filename, metadata, groupname="data", mode="a"): <NEW_LINE> <INDENT> with _h5py.File(filename, mode) as f: <NEW_LINE> <INDENT> for key, val in metadata.items(): <NEW_LINE> <INDENT> f[groupname].attrs[key] = val | "Save a dictionary of metadata to a group's attrs. | 625941bd63f4b57ef000101f |
def get_default(self): <NEW_LINE> <INDENT> if self.has_default(): <NEW_LINE> <INDENT> if callable(self.default): <NEW_LINE> <INDENT> return self.default() <NEW_LINE> <DEDENT> return self.default <NEW_LINE> <DEDENT> return [] | Returns the default value for this field. | 625941bd0fa83653e4656ebc |
def polyCreateDefaultEdges_(poly, boundaryMarker=1, isClosed=True, **kwargs): <NEW_LINE> <INDENT> nEdges = poly.nodeCount()-1 + isClosed <NEW_LINE> bm = None <NEW_LINE> if hasattr(boundaryMarker, '__len__'): <NEW_LINE> <INDENT> if len(boundaryMarker) == nEdges: <NEW_LINE> <INDENT> bm = boundaryMarker <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise Exception("marker length != nEdges", len(boundaryMarker), nEdges) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> bm = [boundaryMarker] * nEdges <NEW_LINE> <DEDENT> for i in range(poly.nodeCount() - 1): <NEW_LINE> <INDENT> poly.createEdge(poly.node(i), poly.node(i+1), bm[i]) <NEW_LINE> <DEDENT> if isClosed: <NEW_LINE> <INDENT> poly.createEdge(poly.node(poly.nodeCount()-1), poly.node(0), bm[-1]) | INTERNAL | 625941bd10dbd63aa1bd2aa6 |
def request_twitter(self, api='', method=HTTP_METHOD, oauth_header=None, params=None): <NEW_LINE> <INDENT> if not api or api.strip() == '': <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> request_headers = None <NEW_LINE> if oauth_header: <NEW_LINE> <INDENT> request_headers = oauth_header <NEW_LINE> <DEDENT> qs = None <NEW_LINE> if params and len(params) > 0 and isinstance(params, dict): <NEW_LINE> <INDENT> qs = urllib.urlencode(params) <NEW_LINE> <DEDENT> if qs: <NEW_LINE> <INDENT> api = api + '?' + qs <NEW_LINE> <DEDENT> result = urlfetch.fetch(api, method=method, headers=request_headers) <NEW_LINE> content = result.content if result.status_code == 200 else None <NEW_LINE> return (result.status_code, content) | Requests twitter with a url and fetches results.
Doc - https://dev.twitter.com/docs/auth/oauth
Arguments:
api - url to be fetched
method - type of request GET/PUT/POST/DELETE
oauth_header - oauth Authorization header after signing properly
params - request params to be passed to twitter as request body
Returns:
response - Request response inlcudes status, content length etc etc
content - Request content e.g. tweets in XML format for tweets
request | 625941bd236d856c2ad446d5 |
def createTextureFromId(self, p_int, QSize, options, QSGEngine_CreateTextureOptions=None, QSGEngine_CreateTextureOption=None, *args, **kwargs): <NEW_LINE> <INDENT> pass | createTextureFromId(self, int, QSize, options: Union[QSGEngine.CreateTextureOptions, QSGEngine.CreateTextureOption] = QSGEngine.CreateTextureOption()) -> QSGTexture | 625941bd15fb5d323cde0a0b |
def get_dst(self, src): <NEW_LINE> <INDENT> return list(self._src_map.get(src, [])) | Returns dst list for given src, or empty list if no expansion | 625941bd57b8e32f52483399 |
def lda(data,labels,reg=0.0): <NEW_LINE> <INDENT> means = {} <NEW_LINE> priors = {} <NEW_LINE> classes = list(set(labels)) <NEW_LINE> classes.sort() <NEW_LINE> C = len(classes) <NEW_LINE> N = data.shape[0] <NEW_LINE> D = data.shape[1] <NEW_LINE> for key in classes: <NEW_LINE> <INDENT> priors[key] = float((labels == key).sum())/labels.shape[0] <NEW_LINE> means[key] = data[labels==key,:].mean(axis=0) <NEW_LINE> <DEDENT> t1 = [mean for key,mean in means.items()] <NEW_LINE> t1 = np.array(t1) <NEW_LINE> t2 = t1.mean(axis=0) <NEW_LINE> t3 = t2 - t1 <NEW_LINE> Sb = np.dot(t3.T,t3)/(C-1) <NEW_LINE> assert Sb.shape == (D,D) <NEW_LINE> data_w = data.copy() <NEW_LINE> for key in classes: <NEW_LINE> <INDENT> c_mean = means[key].reshape(1,D) <NEW_LINE> data_w[labels == key,:] -= c_mean <NEW_LINE> <DEDENT> Sw = np.dot(data_w.T,data_w) / (N-C) <NEW_LINE> assert Sw.shape == (D,D) <NEW_LINE> if reg >= 0: <NEW_LINE> <INDENT> Sw = Sw+reg*np.eye(Sw.shape[0]) <NEW_LINE> <DEDENT> val,vec = la.eigh(Sb,Sw) <NEW_LINE> order = val.argsort()[::-1] <NEW_LINE> val = val[order] <NEW_LINE> vec = vec[:,order] <NEW_LINE> val = val[:C-1] <NEW_LINE> vec = vec[:,:C-1] <NEW_LINE> val = val/val.sum() <NEW_LINE> return val,vec,means,priors | Compute the lda basis vectors. Based on Wikipedia and verified against R
@param data: the data matrix with features in rows.
@type data: np.array
@param labels: a corresponding 1D array of labels, one label per row in data
@type labels: np.array (int or str)
@return: (lda_values,lda_basis,means,priors)
@rtype: (np.array,np.array,dict,dict) | 625941bd15baa723493c3e73 |
def max_pool_forward_naive(x, pool_param): <NEW_LINE> <INDENT> out = None <NEW_LINE> N, C, H, W = x.shape <NEW_LINE> pool_height = pool_param['pool_height'] <NEW_LINE> pool_width = pool_param['pool_width'] <NEW_LINE> stride = pool_param['stride'] <NEW_LINE> H_out = 1 + (H - pool_height) / stride <NEW_LINE> W_out = 1 + (W - pool_width) / stride <NEW_LINE> out = np.zeros((N, C, H_out, W_out)) <NEW_LINE> for n in range(N): <NEW_LINE> <INDENT> for c in range(C): <NEW_LINE> <INDENT> for h in range(H_out): <NEW_LINE> <INDENT> for w in range(W_out): <NEW_LINE> <INDENT> out[n, c, h, w] = np.max(x[n, c, h*stride:h*stride+pool_height, w*stride:w*stride+pool_width]) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> cache = (x, pool_param) <NEW_LINE> return out, cache | A naive implementation of the forward pass for a max pooling layer.
Inputs:
- x: Input data, of shape (N, C, H, W)
- pool_param: dictionary with the following keys:
- 'pool_height': The height of each pooling region
- 'pool_width': The width of each pooling region
- 'stride': The distance between adjacent pooling regions
Returns a tuple of:
- out: Output data
- cache: (x, pool_param) | 625941bd3346ee7daa2b2c69 |
def parse_in(fname, ftype='param', params=None): <NEW_LINE> <INDENT> f = open(fname, 'r') <NEW_LINE> ret = [] <NEW_LINE> for line in f: <NEW_LINE> <INDENT> if ')' == line[0]: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> if ftype == 'param': <NEW_LINE> <INDENT> temp = [x.strip(' ') for x in line.split('=')] <NEW_LINE> <DEDENT> elif ftype == 'body': <NEW_LINE> <INDENT> if '=' not in line: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> t = line.split(' ') <NEW_LINE> d = [] <NEW_LINE> d.append(t[0]) <NEW_LINE> for p in t[1:]: <NEW_LINE> <INDENT> k, v = p.split('=') <NEW_LINE> if typecheck(params): <NEW_LINE> <INDENT> if k.lower() not in params: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> <DEDENT> d.append(v) <NEW_LINE> <DEDENT> ret.append(d) <NEW_LINE> continue <NEW_LINE> <DEDENT> temp[0] = '_'.join(temp[0].split(' ')) <NEW_LINE> if typecheck(params): <NEW_LINE> <INDENT> if temp[0].lower() not in params: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> <DEDENT> ret.append(temp) <NEW_LINE> <DEDENT> return ret | Parse Input Files. | 625941bd32920d7e50b280cd |
def update_items(self, all = False, comment = False): <NEW_LINE> <INDENT> self.refresh_items() <NEW_LINE> self.__create_model(all, comment) | 'all' parameter used to show the hide item,
'comment' parameter used to show the comment of program | 625941bd711fe17d82542270 |
def get_fts_columns_for_model(self, model): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> return model.FTS.columns <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> return {} | Return the column definition for the given model or empty list. | 625941bda05bb46b383ec724 |
def plotTheBot(inputDF,figureName): <NEW_LINE> <INDENT> inputDF = inputDF.groupby(['Search Term']) <NEW_LINE> fig, ax = plt.subplots() <NEW_LINE> ax.set_title(figureName) <NEW_LINE> for name, group in inputDF: <NEW_LINE> <INDENT> group.plot(y='nImpactScore',ax=ax,label = name) <NEW_LINE> <DEDENT> plt.show() | Straightforward plotting of the normalized impact score for
each keyword over time | 625941bde5267d203edcdb9f |
def group_notes_into_chords(note_list, ticks): <NEW_LINE> <INDENT> output = [] <NEW_LINE> group = [] <NEW_LINE> for i, note in enumerate(note_list[:-1]): <NEW_LINE> <INDENT> next_note = note_list[i + 1] <NEW_LINE> if note[1:] == next_note[1:]: <NEW_LINE> <INDENT> group.append(note) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> group.append(note) <NEW_LINE> output.append(convert_group_to_chord(group, ticks)) <NEW_LINE> group = [] <NEW_LINE> <DEDENT> <DEDENT> group.append(note_list[-1]) <NEW_LINE> output.append(convert_group_to_chord(group, ticks)) <NEW_LINE> return output | Takes a list of notes and returns a list of
lists. Each sublist contains notes that have
the same start time and end time (thus belong
in a chord). | 625941bdfb3f5b602dac3590 |
def max_sublist_sum(arr): <NEW_LINE> <INDENT> max_ending_here = 0 <NEW_LINE> max_so_far = 0 <NEW_LINE> for x in arr: <NEW_LINE> <INDENT> max_ending_here = max(0, max_ending_here + x) <NEW_LINE> max_so_far = max(max_so_far, max_ending_here) <NEW_LINE> <DEDENT> return max_so_far | Efficient equivalent to max(sum(arr[i:j]) for 0 <= i <= j <= len(arr))
Algorithm source: WordAligned.org by Thomas Guest
Input:
arr: A list of ints
Output:
The maximum sublist sum | 625941bd63f4b57ef0001020 |
def rotateRight(self, head, k): <NEW_LINE> <INDENT> n, tail = getLengthAndTail(head) <NEW_LINE> if n < 2: <NEW_LINE> <INDENT> return head <NEW_LINE> <DEDENT> k = k % n <NEW_LINE> if k == 0: <NEW_LINE> <INDENT> return head <NEW_LINE> <DEDENT> tail.next = head <NEW_LINE> node = head <NEW_LINE> for _ in xrange(n - k - 1): <NEW_LINE> <INDENT> node = node.next <NEW_LINE> <DEDENT> res = node.next <NEW_LINE> node.next = None <NEW_LINE> return res | :type head: ListNode
:type k: int
:rtype: ListNode | 625941bd851cf427c661a412 |
def save_dict(fileName, dictName): <NEW_LINE> <INDENT> with open(fileName, 'w') as outfile: <NEW_LINE> <INDENT> outfile.write(json.dumps(dictName, indent=4, sort_keys=True, separators=(',', ': '), ensure_ascii=False)) | writes dict: dictName to file: fileName | 625941bd4e4d5625662d42db |
def test_mdf_gs(self): <NEW_LINE> <INDENT> self.assertion_mdo(*run_openlego(6)) | Solve the Sellar problem using the MDF architecture and a Gauss-Seidel convergence scheme. | 625941bdac7a0e7691ed3fd8 |
def test_revoke(mock_login, mock_get_signoffs, mock_revoke_package, localdb): <NEW_LINE> <INDENT> result = runner.invoke(entrypoint, STANDARD_ARGS + ['--revoke', 'linux', '--noconfirm', '--db-path', localdb]) <NEW_LINE> assert result.exit_code == 0 <NEW_LINE> assert result.output == 'Revoked sign-off for linux.\n' | Revoke non-existant package | 625941bd6fece00bbac2d63c |
def get_num_docs(self): <NEW_LINE> <INDENT> return self.num_docs | Return the total number of documents in the IDF corpus. | 625941bd5fc7496912cc387e |
def _get_resident_name(self, botengine): <NEW_LINE> <INDENT> residents = botengine.get_location_user_names(to_residents=True, to_supporters=False, sms_only=False) <NEW_LINE> name = "" <NEW_LINE> if len(residents) == 0: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> elif len(residents) == 1: <NEW_LINE> <INDENT> name = "{} {}".format(residents[0]['firstName'], residents[0]['lastName']).strip() <NEW_LINE> <DEDENT> elif len(residents) == 2: <NEW_LINE> <INDENT> a = _("and") <NEW_LINE> name = "{} {} {}".format(residents[0]['firstName'], a, residents[1]['firstName']) <NEW_LINE> <DEDENT> elif len(residents) > 2: <NEW_LINE> <INDENT> a = _("and") <NEW_LINE> name = "{}, {}, {} {}".format(residents[0]['firstName'], residents[1]['firstName'], a, residents[2]['firstName']) <NEW_LINE> <DEDENT> return name | Get the name of the resident in a way that we can use this in a sentence
:param botengine:
:return: | 625941bd3eb6a72ae02ec3d5 |
def is_max(A): <NEW_LINE> <INDENT> if len(A) == 1: <NEW_LINE> <INDENT> return A[0] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> B = is_max(A[1:]) <NEW_LINE> return B if B>A[0] else A[0] | returns the maximum of integers in list n.
| 625941bd91f36d47f21ac3ef |
def test_category_invalid(self): <NEW_LINE> <INDENT> ques = question(title=u'q1 audio', save=True) <NEW_LINE> ques.tags.add(u'desktop') <NEW_LINE> ans = answer(question=ques, save=True) <NEW_LINE> answervote(answer=ans, helpful=True, save=True) <NEW_LINE> d1 = document(title=u'd1 audio', locale=u'en-US', category=10, is_archived=False, save=True) <NEW_LINE> d1.tags.add(u'desktop') <NEW_LINE> revision(document=d1, is_approved=True, save=True) <NEW_LINE> self.refresh() <NEW_LINE> qs = {'a': 1, 'w': 3, 'format': 'json', 'category': 'invalid'} <NEW_LINE> response = self.client.get(reverse('search.advanced'), qs) <NEW_LINE> eq_(2, json.loads(response.content)['total']) | Tests passing an invalid category | 625941bd26238365f5f0ed6a |
def multiply(a, b): <NEW_LINE> <INDENT> print(a * b) | return a * b
there is limit that there is fixed number of paramters. | 625941bdfff4ab517eb2f33a |
def logfbank(signal,samplerate=22040,winlen=0.025,winstep=0.01, nfilt=26,nfft=552,lowfreq=0,highfreq=None,preemph=0.97): <NEW_LINE> <INDENT> feat,energy = fbank(signal,samplerate,winlen,winstep,nfilt,nfft,lowfreq,highfreq,preemph) <NEW_LINE> return numpy.log(feat) | Compute log Mel-filterbank energy features from an audio signal.
:param signal: the audio signal from which to compute features. Should be an N*1 array
:param samplerate: the samplerate of the signal we are working with.
:param winlen: the length of the analysis window in seconds. Default is 0.025s (25 milliseconds)
:param winstep: the step between successive windows in seconds. Default is 0.01s (10 milliseconds)
:param nfilt: the number of filters in the filterbank, default 26.
:param nfft: the FFT size. Default is 512.
:param lowfreq: lowest band edge of mel filters. In Hz, default is 0.
:param highfreq: highest band edge of mel filters. In Hz, default is samplerate/2
:param preemph: apply preemphasis filter with preemph as coefficient. 0 is no filter. Default is 0.97.
:returns: A numpy array of size (NUMFRAMES by nfilt) containing features. Each row holds 1 feature vector. | 625941bd4527f215b584c35a |
def __eq__(self, other): <NEW_LINE> <INDENT> if not isinstance(other, RegexpLiteral): <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> return self.__dict__ == other.__dict__ | Returns true if both objects are equal | 625941bd7b180e01f3dc4703 |
def testClickHiddenIcon(self): <NEW_LINE> <INDENT> if is_x64_Python() != is_x64_OS(): <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> orig_hid_state = _toggle_notification_area_icons( show_all=False, debug_img="%s_01" % (self.id()) ) <NEW_LINE> self.dlg.minimize() <NEW_LINE> _wait_minimized(self.dlg) <NEW_LINE> app2 = Application() <NEW_LINE> app2.start(os.path.join(mfc_samples_folder, u"TrayMenu.exe")) <NEW_LINE> dlg2 = app2.top_window() <NEW_LINE> dlg2.wait('visible', timeout=self.tm) <NEW_LINE> dlg2.minimize() <NEW_LINE> _wait_minimized(dlg2) <NEW_LINE> taskbar.explorer_app.wait_cpu_usage_lower(threshold=5, timeout=40) <NEW_LINE> taskbar.ClickHiddenSystemTrayIcon('MFCTrayDemo', double=True) <NEW_LINE> self.dlg.wait('visible', timeout=self.tm) <NEW_LINE> _toggle_notification_area_icons(show_all=orig_hid_state, debug_img="%s_02" % (self.id())) <NEW_LINE> dlg2.send_message(win32defines.WM_CLOSE) | Test minimizing a sample app into the hidden area of the tray
and restoring the app back | 625941bdeab8aa0e5d26da5e |
def getChildLogger(self, suffix): <NEW_LINE> <INDENT> if self.root is not self: <NEW_LINE> <INDENT> suffix = '.'.join((self.name, suffix)) <NEW_LINE> <DEDENT> result = self.manager.getLogger(suffix) <NEW_LINE> self._children.add(result) <NEW_LINE> return result | Get a logger which is a descendant to this one.
This is a convenience method, such that
logging.getLogger('abc').getChild('def.ghi')
is the same as
logging.getLogger('abc.def.ghi')
It's useful, for example, when the parent logger is named using
__name__ rather than a literal string. | 625941bd5e10d32532c5ee27 |
def do_datetime(dt, format=None): <NEW_LINE> <INDENT> if dt is None: <NEW_LINE> <INDENT> return '' <NEW_LINE> <DEDENT> if format is None: <NEW_LINE> <INDENT> formatted_date = dt.strftime('%Y-%m-%d - %A') <NEW_LINE> formatted_time = dt.strftime('%I:%M%p').lstrip('0').lower() <NEW_LINE> formatted = '%s at %s' % (formatted_date, formatted_time) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> formatted = dt.strftime(format) <NEW_LINE> <DEDENT> return formatted | Jinja template filter to format a datetime object with date & time. | 625941bd50485f2cf553cc98 |
def _set_proxy_list(self): <NEW_LINE> <INDENT> options = webdriver.ChromeOptions() <NEW_LINE> options.add_argument("start-maximized") <NEW_LINE> options.add_experimental_option("excludeSwitches", ["enable-automation"]) <NEW_LINE> options.add_experimental_option('useAutomationExtension', False) <NEW_LINE> driver = webdriver.Chrome(options=options, executable_path=self._path) <NEW_LINE> driver.get("https://sslproxies.org/") <NEW_LINE> ips = [my_elem.get_attribute("innerHTML") for my_elem in WebDriverWait(driver, 5).until(EC.visibility_of_all_elements_located((By.XPATH, constants.proxy_ips)))] <NEW_LINE> ports = [my_elem.get_attribute("innerHTML") for my_elem in WebDriverWait(driver, 5).until(EC.visibility_of_all_elements_located((By.XPATH, constants.proxy_ports)))] <NEW_LINE> driver.quit() <NEW_LINE> for i in range(0, len(ips)): <NEW_LINE> <INDENT> self._proxy_list.append(ips[i]+':'+ports[i]) | Below proxy setup logic from DebanjanB: https://stackoverflow.com/a/59410739 | 625941bd3d592f4c4ed1cf75 |
def __call__(self, row): <NEW_LINE> <INDENT> row_values = [field(row) for field in self.fields] <NEW_LINE> return dict(zip(self.field_names, row_values)) | Extract row data by applying field class instances to a row
:param row: lxml Element
:return: dict() | 625941bd63b5f9789fde6fe6 |
def getMediaName(prefix, slideNumber, frmt='png'): <NEW_LINE> <INDENT> return prefix + '-' + str(slideNumber) + '.' + frmt | Returns the relative name of the media file. | 625941bd293b9510aa2c3199 |
def predict( self, X_test ): <NEW_LINE> <INDENT> prob = self._session.run( self._y_out_op, feed_dict = { self._X_holder: X_test, self._keep_prob_holder: 1.0 } ) <NEW_LINE> predicts = numpy.argmax( prob, axis = 1 ) <NEW_LINE> return predicts | fitting 処理したモデルで、推定を行い、時系列データの予想値を返す。
[Input]
X_test : numpy.ndarry ( shape = [n_samples, n_features(=n_in_sequence), dim] )
予想したい特徴行列(時系列データの行列)
n_samples : シーケンスに分割した時系列データのサンプル数
n_features(=n_in_sequence) : 1つのシーケンスのサイズ
dim : 各シーケンスの要素の次元数
[Output]
predicts : numpy.ndarry ( shape = [n_samples] )
予想結果(分類モデルの場合は、クラスラベル) | 625941bd23849d37ff7b2f91 |
def __init__(self, nestedList): <NEW_LINE> <INDENT> self.queue = [] <NEW_LINE> self.idx = 0 <NEW_LINE> def flatten(nestedList): <NEW_LINE> <INDENT> for elem in nestedList: <NEW_LINE> <INDENT> if elem.isInteger(): <NEW_LINE> <INDENT> self.queue.append(elem.getInteger()) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> sub_nest = elem.getList() <NEW_LINE> flatten(sub_nest) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> flatten(nestedList) | Initialize your data structure here.
:type nestedList: List[NestedInteger] | 625941bd4f6381625f11493e |
def test_create_no_args(self): <NEW_LINE> <INDENT> with self.assertRaises(exceptions.TokenError): <NEW_LINE> <INDENT> tokens.AccessToken() | Test exception raised if no data or token string argument. | 625941bd656771135c3eb76c |
def esp32_data_callback(self, parameter, data): <NEW_LINE> <INDENT> if parameter == 'Done.' and data is None and not self._running: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> status = self._data_f.add_data_point(parameter, data) <NEW_LINE> if not status: <NEW_LINE> <INDENT> print(f"\033[91mERROR: Will ingore parameter {parameter}.\033[0m") | This method is called everytime there is a new read from
the ESP32, and it receives the parameters read, and
the data associated to it | 625941bdbe7bc26dc91cd505 |
def test_repr(self): <NEW_LINE> <INDENT> assert "<Event TestEvent[L]>" == str(ha.Event("TestEvent")) <NEW_LINE> assert "<Event TestEvent[R]: beer=nice>" == str(ha.Event("TestEvent", {"beer": "nice"}, ha.EventOrigin.remote)) | Test that repr method works. | 625941bd4527f215b584c35b |
def Delayed_Init(self): <NEW_LINE> <INDENT> if self.init_complete: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> self.Load_Patches() <NEW_LINE> self.init_complete = True <NEW_LINE> return | Load old patches and do any other delayed initialization.
Since patches will load xml files to find patched nodes,
this should be called after settings and the file system
are set up, but before objects are loaded. | 625941bd7d847024c06be1b9 |
def build_indices(self): <NEW_LINE> <INDENT> common.log.debug("cache: building indices ...") <NEW_LINE> self._reset_indices() <NEW_LINE> unsorted = list() <NEW_LINE> for id, blog_post in self.cache.iteritems(): <NEW_LINE> <INDENT> date_tuple = common.date_tuple(blog_post) <NEW_LINE> self.days.setdefault(date_tuple, set()).add(id) <NEW_LINE> self.months.setdefault(date_tuple[:2], set()).add(id) <NEW_LINE> self.years.setdefault(date_tuple[:1], set()).add(id) <NEW_LINE> for tag in blog_post.headers["tags"]: <NEW_LINE> <INDENT> self.tags.setdefault(tag, set()).add(id) <NEW_LINE> <DEDENT> unsorted.append( (blog_post.headers["created"], id) ) <NEW_LINE> <DEDENT> self.sorted_ids = [id for created, id in sorted(unsorted, reverse=True)] <NEW_LINE> common.log.debug("cache: ... done") | builds the indices for dates and tags from the cache | 625941bdcb5e8a47e48b79ae |
@app.route('/') <NEW_LINE> def index(): <NEW_LINE> <INDENT> return render_template('index.html', flask_message = 'flask is great') | Video streaming home page. | 625941bdbaa26c4b54cb1023 |
def child_added(self, child): <NEW_LINE> <INDENT> super(ToolkitObject, self).child_added(child) <NEW_LINE> if isinstance(child, ToolkitObject) and self.proxy_is_active: <NEW_LINE> <INDENT> if not child.proxy_is_active: <NEW_LINE> <INDENT> child.activate_proxy() <NEW_LINE> <DEDENT> self.proxy.child_added(child.proxy) | A reimplemented child added event handler.
This handler will invoke the superclass handler and then invoke
the 'child_added()' method on an active proxy. | 625941bd8a349b6b435e8074 |
def cambiarCartas(jug): <NEW_LINE> <INDENT> global cartasSeleccionadas <NEW_LINE> global puedeCambiarCartas <NEW_LINE> if cartasImagenes and puedeCambiarCartas: <NEW_LINE> <INDENT> jugadores[jug].cambiarCartas(cartasSeleccionadas, mazo) <NEW_LINE> cartasSeleccionadas = [] <NEW_LINE> <DEDENT> puedeCambiarCartas = False | Función para cambiar las cartas seleccionadas
y quitar las imágenes de las viejas
Parámetros
----------
arg1 obj Jugador jug: Jugador del turno actual | 625941bd4f88993c3716bf6c |
def get(self): <NEW_LINE> <INDENT> self.counts[six.b('lock:') + self.name] += 1 <NEW_LINE> previous = self.store.get(self.name, _NOT_LOCKED) <NEW_LINE> self.store[self.name] = _LOCKED <NEW_LINE> return previous == _NOT_LOCKED | lock.get() | 625941bd21a7993f00bc7bec |
def test_notes_and_notes_file(self, mocked_client_class): <NEW_LINE> <INDENT> runner = testing.CliRunner() <NEW_LINE> with runner.isolated_filesystem(): <NEW_LINE> <INDENT> with open('notefile.txt', 'w') as f: <NEW_LINE> <INDENT> f.write('This is a --notes-file note!') <NEW_LINE> <DEDENT> result = runner.invoke( cli.edit, ['FEDORA-2017-cc8582d738', '--notes', 'this is a notey note', '--notes-file', 'notefile.txt', '--url', 'http://localhost:6543']) <NEW_LINE> assert result.exit_code == 1 <NEW_LINE> assert result.output == 'ERROR: Cannot specify --notes and --notes-file\n' | Assert providing both --notes-file and --notes parameters to an otherwise successful
updates edit request results in an error. | 625941bddc8b845886cb5434 |
def plot_porosity_bar(iso,core_names,sample_names,save =False): <NEW_LINE> <INDENT> fig,axies = plt.subplots(2,4,figsize=(14,10)) <NEW_LINE> for i,core_name in enumerate(core_names): <NEW_LINE> <INDENT> data_micro = list() <NEW_LINE> data_meso = list() <NEW_LINE> data_total = list() <NEW_LINE> xticklabel = list() <NEW_LINE> for sample_name in sample_names[i]: <NEW_LINE> <INDENT> data_micro.append(iso[core_name][sample_name]['vpore_micro']) <NEW_LINE> data_meso.append(iso[core_name][sample_name]['vpore_meso']) <NEW_LINE> data_total.append(iso[core_name][sample_name]['vpore_total']) <NEW_LINE> xticklabel.append(sample_name) <NEW_LINE> <DEDENT> data_micro = tuple(data_micro) <NEW_LINE> data_meso = tuple(data_meso) <NEW_LINE> ax_0 = i // 4 <NEW_LINE> ax_1 = i % 4 <NEW_LINE> print(i,ax_0,ax_1) <NEW_LINE> width =0.35 <NEW_LINE> ind = np.arange(len(data_micro)) <NEW_LINE> rects1 = axies[ax_0,ax_1].bar(ind,data_micro,width,color='r') <NEW_LINE> rects2 = axies[ax_0,ax_1].bar(ind,data_meso,width,bottom=data_micro,color='y') <NEW_LINE> axies[ax_0,ax_1].set_title(core_name) <NEW_LINE> plt.setp(axies[ax_0,ax_1],xticks=ind+width/2,xticklabels=xticklabel) <NEW_LINE> <DEDENT> plt.tight_layout() <NEW_LINE> plt.show() | inside one figure
plot bar chart for all the sample in each core in terms of mesopore and micropore
:param iso:
:param core_names:
:param sample_names:
:param save:
:return: | 625941bd796e427e537b04c4 |
def get_low(pair, path="https://api.kraken.com/0/public"): <NEW_LINE> <INDENT> return _get_ticker(pair, 'l', path) | Get the last 24h low price of `pair`.
Parameters
----------
pair : str
Code of the requested pair(s). Comma delimited if several pair.
path : str
Path of the exchange to request.
Returns
-------
float or dict
Last 24h lower price(s). | 625941bdadb09d7d5db6c692 |
def get_real_game(apps):
    """Pick a random app id and verify it corresponds to a real, playable game.

    Steam's app list sometimes contains junk entries, so up to 20 random
    apps are probed against the GetSchemaForGame endpoint; the first one
    whose response parses as JSON is returned.

    :param apps: list of app dicts, each carrying an ``appid`` key
    :return: ``(appid, name)`` on success, or ``(0, message)`` when no real
        game was found after 20 attempts
    """
    for _ in range(20):
        appid = str(random.choice(apps)['appid'])
        params = {'key': STEAM_KEY, 'appid': appid}
        try:
            result = requests.get(
                "http://api.steampowered.com/ISteamUserStats/GetSchemaForGame/v2/",
                params=params)
            result.json()  # raises if the response is not a valid schema
            return appid, find_real_name(appid)
        except Exception:
            # Was a bare ``except:``, which also swallowed KeyboardInterrupt
            # and SystemExit; any ordinary failure just means "try another".
            pass
    return 0, "Unable to find a game. Please try again."
def str2float(astr):
    """Convert a string to a float, accepting a decimal comma.

    Unlike the builtin :func:`float`, an empty (or whitespace-only) string
    yields ``0.0`` instead of raising.

    Args:
        astr (str): the string to convert.

    Returns:
        float: the parsed value, or ``0.0`` for an empty string.
    """
    normalized = astr.strip().replace(',', '.')
    return float(normalized) if normalized else 0.
def info(self): <NEW_LINE> <INDENT> method = 'GET' <NEW_LINE> url = '/me?circle-token={token}'.format(token=self.client.api_token) <NEW_LINE> json_data = self.client.request(method, url) <NEW_LINE> return json_data | Return information about the user as a dictionary. | 625941bdde87d2750b85fc90 |
def __init__(self, lower, upper, relative=False, description=None):
    """Create a constant error applying to the whole data range.

    The lower and upper error bars are defined independently and must
    correspond to 1 standard deviation. By default they are absolute
    values; with ``relative=True`` they are treated as fractional errors
    (e.g. ``lower=0.1`` means a 10% relative error).

    :param lower: The lower uncertainty.
    :param upper: The upper uncertainty.
    :param relative: True for relative errors, False for absolute (default).
    :param description: A text description of the error.
    """
    self.lower = lower
    self.upper = upper
    self.relative = relative
    super().__init__(description or 'Constant error.')
def make_heatmap(what='tasks.indate', from_table='mvs100k.tasks'):
    """Plot a heatmap built from a single time column of a database table.

    The column is fetched, enriched with launch counts plus day-of-week and
    week columns, pivoted, and rendered with seaborn.

    :param what: the column to fetch from the table
    :param from_table: the table to fetch the column from
    """
    frame = get_psql_table.get_table(what=what, from_table=from_table)
    for transform in (add_number_of_launchings_column,
                      add_day_of_week_column,
                      transform_week_column):
        frame = transform(frame)
    sns.heatmap(pivot_the_dataframe(frame))
    plt.show()
def write_to_compressed(compression, path, data, dest="test"):
    """Write ``data`` to a compressed file at ``path``.

    Parameters
    ----------
    compression : {'gzip', 'bz2', 'zip', 'xz', 'zstd'}
        The compression type to use.
    path : str
        The file path to write the data to.
    data : str
        The data to write.
    dest : str, default "test"
        The destination archive member name (ZIP only).

    Raises
    ------
    ValueError
        If an unrecognized compression value was passed in.
    """
    # ZIP archives need an explicit member name, so they are written with
    # ``writestr`` instead of ``write``.
    if compression == "zip":
        with zipfile.ZipFile(path, mode="w") as f:
            f.writestr(dest, data)
        return

    simple_openers: dict[str, Callable] = {
        "gzip": gzip.GzipFile,
        "bz2": bz2.BZ2File,
    }
    if compression in simple_openers:
        opener = simple_openers[compression]
    elif compression == "zstd":
        opener = import_optional_dependency("zstandard").open
    elif compression == "xz":
        opener = get_lzma_file()
    else:
        raise ValueError(f"Unrecognized compression type: {compression}")

    with opener(path, mode="wb") as f:
        f.write(data)
@app.route('/login') <NEW_LINE> def login(): <NEW_LINE> <INDENT> service_url = get_service_url(request) <NEW_LINE> cas_login_url = cas_client.get_login_url(service_url=service_url) <NEW_LINE> return redirect(cas_login_url) | Send a user to the CAS endpoint where they can authenticate | 625941bd21bff66bcd684856 |
def background_thread(): <NEW_LINE> <INDENT> count = 0 <NEW_LINE> global service <NEW_LINE> service = build("customsearch", "v1", developerKey="** your developer key **") <NEW_LINE> while True: <NEW_LINE> <INDENT> socketio.sleep(10) <NEW_LINE> count += 1 | Example of how to send server generated events to clients. | 625941bd29b78933be1e55b2 |
def _path_similarity(chunk): <NEW_LINE> <INDENT> sims=[] <NEW_LINE> for synset1, synset2 in chunk: <NEW_LINE> <INDENT> synset1=wn.synset(synset1) <NEW_LINE> synset2=wn.synset(synset2) <NEW_LINE> sims.append(synset1.path_similarity(synset2)) <NEW_LINE> <DEDENT> return sims | Wrapper method for use with parallelizing path similarity | 625941bdd18da76e235323d4 |
def __iter__(self): <NEW_LINE> <INDENT> for shape in self.shapeList: <NEW_LINE> <INDENT> yield shape | Iterates over the shapes in the body | 625941bd1f037a2d8b946100 |
def create_wireless_profile(self, profileDetails=None, headers=None,
                            payload=None, active_validation=True,
                            **request_parameters):
    """Create a Wireless Network Profile on DNAC and associate sites and SSIDs.

    Args:
        profileDetails(object): Wireless's profileDetails.
        headers(dict): Dictionary of HTTP headers to send with the request.
        payload(dict): A JSON serializable Python object to send in the
            body of the request.
        active_validation(bool): Enable/disable payload validation.
            Defaults to True.
        **request_parameters: Additional request parameters (provides
            support for parameters that may be added in the future).

    Returns:
        MyDict: JSON response; access properties via dot or bracket notation.

    Raises:
        TypeError: If the parameter types are incorrect.
        MalformedRequest: If the request body created is invalid.
        ApiError: If the DNA Center cloud returns an error.
    """
    check_type(headers, dict)
    check_type(payload, dict)
    if headers is not None:
        if 'X-Auth-Token' in headers:
            check_type(headers.get('X-Auth-Token'),
                       basestring, may_be_none=False)

    _params = dict_from_items_with_values(dict(request_parameters))
    path_params = {}

    _payload = {'profileDetails': profileDetails}
    _payload.update(payload or {})
    _payload = dict_from_items_with_values(_payload)
    if active_validation:
        self._request_validator(
            'jsd_b95201b6a6905a10b463e036bf591166_v2_2_1').validate(_payload)

    _headers = self._session.headers or {}
    with_custom_headers = bool(headers)
    if with_custom_headers:
        _headers.update(dict_of_str(headers))

    endpoint_full_url = apply_path_params(
        '/dna/intent/api/v1/wireless/profile', path_params)
    if with_custom_headers:
        json_data = self._session.post(endpoint_full_url, params=_params,
                                       json=_payload, headers=_headers)
    else:
        json_data = self._session.post(endpoint_full_url, params=_params,
                                       json=_payload)

    return self._object_factory(
        'bpm_b95201b6a6905a10b463e036bf591166_v2_2_1', json_data)
@pytest.fixture(scope="class") <NEW_LINE> def bing_first_time(bingham_dataset): <NEW_LINE> <INDENT> df = obsplus.events_to_df(bingham_dataset.event_client.get_events()) <NEW_LINE> return to_utc(df.iloc[0]["time"]) | Get a new time based on the first event in bingham_test event + 1 | 625941bd67a9b606de4a7dbd |
def get_collection_metadata_path(ctx, coll):
    """Return the path of a collection's JSON metadata file, if it exists.

    :param ctx: Combined type of a callback and rei struct
    :param coll: Path of collection to check for metadata
    :returns: Path to the metadata file, or None when absent
    """
    path = '{}/{}'.format(coll, constants.IIJSONMETADATA)
    return path if data_object.exists(ctx, path) else None
def main(): <NEW_LINE> <INDENT> number_of_steps = int(input().strip()) <NEW_LINE> print_staircase(number_of_steps) | Program entry point. | 625941bd1f5feb6acb0c4a55 |
def serve(engine: Engine, id: str) -> int: <NEW_LINE> <INDENT> person = Person(id=id) <NEW_LINE> try: <NEW_LINE> <INDENT> engine.load(person) <NEW_LINE> <DEDENT> except MissingObjects: <NEW_LINE> <INDENT> return 404 <NEW_LINE> <DEDENT> entry = QueueEntry(position=person.position) <NEW_LINE> try: <NEW_LINE> <INDENT> engine.load(entry) <NEW_LINE> <DEDENT> except MissingObjects: <NEW_LINE> <INDENT> return 404 <NEW_LINE> <DEDENT> entry.served_at = pendulum.now() <NEW_LINE> try: <NEW_LINE> <INDENT> engine.save(entry, condition=QueueEntry.served_at.is_(None)) <NEW_LINE> <DEDENT> except ConstraintViolation: <NEW_LINE> <INDENT> return 409 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return 200 | 200 success, 404 missing row, 409 already served | 625941bd3617ad0b5ed67df9 |
def testGetSplits(self): <NEW_LINE> <INDENT> self.assertEqual(set(self.split_external_tx), set((self.split1, self.split2))) <NEW_LINE> self.assertEqual(set(self.split_internal_tx), set((self.split3, self.split4))) <NEW_LINE> self.assertEqual(set(self.external_tx), set(self.split5)) <NEW_LINE> self.assertEqual(set(self.simple_tx), set(self.split6)) | Check that the property ``.splits`` works as advertised | 625941bdd7e4931a7ee9de1d |
def main():
    """Parse the genome id from the command line and delete that genome."""
    parser = argparse.ArgumentParser(description='Delete a genome.')
    parser.add_argument('genome_id', metavar='genome_id')
    genome_id = parser.parse_args().genome_id
    json_response = delete_genome(genome_id)
    try:
        sys.stdout.write(json.dumps(json_response, indent=4))
    except KeyError:
        description = json_response.get('description')
        if description:
            sys.stdout.write('Error: {}\n'.format(description))
        else:
            sys.stdout.write('Something went wrong...')
def query_json_repr(query): <NEW_LINE> <INDENT> output = [] <NEW_LINE> for k, l in query.iteritems(): <NEW_LINE> <INDENT> for v in l: <NEW_LINE> <INDENT> output.append({ 'name': k, 'value': v }) <NEW_LINE> <DEDENT> <DEDENT> return output | json_repr for query dicts | 625941bd32920d7e50b280ce |
@pytest.mark.parametrize( "user_group,allowed", [ ("Administrators", True), ("Services", False), ("Developers", False), ("Investigators", False), ("Bioinformatics", False), (None, False), ], ) <NEW_LINE> def test_cancel_release(db, clients, user_group, allowed): <NEW_LINE> <INDENT> client = clients.get(user_group) <NEW_LINE> release = ReleaseFactory(state="running") <NEW_LINE> resp = client.post( "/graphql", data={ "query": CANCEL_RELEASE, "variables": {"release": to_global_id("ReleaseNode", release.pk)}, }, content_type="application/json", ) <NEW_LINE> if allowed: <NEW_LINE> <INDENT> assert resp.json()["data"]["cancelRelease"]["release"] is not None <NEW_LINE> assert ( resp.json()["data"]["cancelRelease"]["release"]["state"] == "canceling" ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> assert resp.json()["errors"][0]["message"] == "Not allowed" | Test the cancel mutation. | 625941bd50812a4eaa59c225 |
def vminfo(self, element=None):
    """Return a dictionary with all available information for the Virtual Machine.

    Abstract stub: concrete backends must override this method.

    :param element: optional key restricting the returned information
        (semantics defined by subclasses)
    :raises NotImplementedError: always; this base implementation is abstract
    """
    # A bare ``raise`` with no active exception produced an unhelpful
    # RuntimeError("No active exception to re-raise"); NotImplementedError
    # (a RuntimeError subclass, so existing handlers still match) states
    # the intent explicitly.
    raise NotImplementedError("vminfo() must be implemented by a subclass")
def _beta_ani(self, r, r_ani):
    """Anisotropy parameter beta(r) = r^2 / (r_ani^2 + r^2) (Osipkov-Merritt form).

    :param r: radius
    :param r_ani: anisotropy radius
    :return: anisotropy parameter beta at radius r
    """
    r_sq = r ** 2
    return r_sq / (r_ani ** 2 + r_sq)
def _get_top_data(topfile): <NEW_LINE> <INDENT> orig_topfile = topfile <NEW_LINE> topfile = os.path.join(_hubble_dir()[1], topfile) <NEW_LINE> log.debug('reading nova topfile=%s (%s)', topfile, orig_topfile) <NEW_LINE> try: <NEW_LINE> <INDENT> with open(topfile) as handle: <NEW_LINE> <INDENT> topdata = yaml.safe_load(handle) <NEW_LINE> <DEDENT> <DEDENT> except Exception as exc: <NEW_LINE> <INDENT> log.error('error loading nova topfile: %s', exc) <NEW_LINE> return list() <NEW_LINE> <DEDENT> if not isinstance(topdata, dict) or 'nova' not in topdata or (not isinstance(topdata['nova'], dict)): <NEW_LINE> <INDENT> raise CommandExecutionError('Nova topfile not formatted correctly') <NEW_LINE> <DEDENT> topdata = topdata['nova'] <NEW_LINE> ret = [] <NEW_LINE> for match, data in topdata.items(): <NEW_LINE> <INDENT> if __mods__['match.compound'](match): <NEW_LINE> <INDENT> ret.extend(data) <NEW_LINE> <DEDENT> <DEDENT> return ret | Helper method to retrieve and parse the nova topfile | 625941bdcc40096d61595853 |
def _prop_match(item, filter_):
    """Check whether the ``item`` matches the prop ``filter_``.

    See rfc4791-9.7.2 and rfc6352-10.5.1.
    """
    # For calendars, match against the last primary component found;
    # otherwise use the raw vobject item.
    if item.collection.get_meta("tag") == "VCALENDAR":
        for component in item.components():
            if component.name in ("VTODO", "VEVENT", "VJOURNAL"):
                vobject_item = component
    else:
        vobject_item = item.item

    if len(filter_) == 0:
        return filter_.get("name").lower() in vobject_item.contents

    if len(filter_) == 1 and filter_[0].tag == _tag("C", "is-not-defined"):
        return filter_.get("name").lower() not in vobject_item.contents

    first = filter_[0]
    if first.tag == _tag("C", "time-range"):
        if not _time_range_match(item, first):
            return False
        filter_.remove(first)
    elif first.tag == _tag("C", "text-match"):
        if not _text_match(item, first):
            return False
        filter_.remove(first)
    return all(_param_filter_match(item, param_filter)
               for param_filter in filter_)
def rmdir(d, recursive=False): <NEW_LINE> <INDENT> if recursive: <NEW_LINE> <INDENT> shutil.rmtree(d) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> os.rmdir(d) | Remove directory `d`. Set `recursive=True` if the directory is not empty. | 625941bd07f4c71912b11387 |
def _calc_uvw(cache, name, antA, antB): <NEW_LINE> <INDENT> antA_group, antB_group = 'Antennas/%s/' % (antA,), 'Antennas/%s/' % (antB,) <NEW_LINE> antennaA, antennaB = cache.get(antA_group + 'antenna')[0], cache.get(antB_group + 'antenna')[0] <NEW_LINE> u, v, w = np.empty(len(cache.timestamps)), np.empty(len(cache.timestamps)), np.empty(len(cache.timestamps)) <NEW_LINE> targets = cache.get('Observation/target') <NEW_LINE> for segm, target in targets.segments(): <NEW_LINE> <INDENT> u[segm], v[segm], w[segm] = target.uvw(antennaA, cache.timestamps[segm], antennaB) <NEW_LINE> <DEDENT> cache[antA_group + 'u_%s' % (antB,)] = u <NEW_LINE> cache[antA_group + 'v_%s' % (antB,)] = v <NEW_LINE> cache[antA_group + 'w_%s' % (antB,)] = w <NEW_LINE> return u if name.startswith(antA_group + 'u') else v if name.startswith(antA_group + 'v') else w | Calculate (u,v,w) coordinates using sensor cache contents. | 625941bdd4950a0f3b08c252 |
def _get_shap_values_in_fold(self, x, feats, n_fold):
    """Build a DataFrame of mean absolute SHAP values per feature for one fold.

    :param x: numpy.array or pandas.DataFrame on which to explain the model's output.
    :param feats: list of feature names
    :param n_fold: zero-based fold index (stored in the output as ``n_fold + 1``)
    :return: pandas DataFrame with feature names and mean |SHAP| values
    """
    explainer = shap.TreeExplainer(self.model.estimator)
    shap_values = explainer.shap_values(x)
    result = pd.DataFrame()
    result["feature"] = feats
    if isinstance(shap_values, list):
        # Multi-output model: one shap/expected column pair per target.
        for target_idx, target_values in enumerate(shap_values):
            mean_abs = np.mean(np.abs(target_values), axis=0)
            result["shap_value_target_{}".format(target_idx)] = mean_abs
            result["expected_value_target_{}".format(target_idx)] = (
                explainer.expected_value[target_idx])
    else:
        result["shap_value"] = np.mean(np.abs(shap_values), axis=0)
        result["expected_value"] = explainer.expected_value
        # NOTE(review): this sort result is discarded (no assignment /
        # inplace) — likely intended to reorder rows; kept as-is to
        # preserve existing behaviour.
        result.sort_values('shap_value', ascending=False)
    result["fold"] = n_fold + 1
    return result
def _aee_f(flow_2d, flow_3d, flow_mean): <NEW_LINE> <INDENT> square = np.square(flow_2d - flow_mean) <NEW_LINE> square[np.where(flow_3d[:, :, 2] == 0)] = 0 <NEW_LINE> x = square[:, :, :, 0] <NEW_LINE> y = square[:, :, :, 1] <NEW_LINE> sqr = np.sqrt(x + y) <NEW_LINE> aee = np.true_divide(sqr.sum(1),(sqr!=0).sum(1)).astype(np.float32) <NEW_LINE> return aee | average end point error | 625941bd6aa9bd52df036ca4 |
def _format_msg(self, format_str, *args):
    """Format a request line for logging (CLF-style prefix).

    ``format_str`` uses old-style %-escapes filled from ``args``; when no
    args are given the string is URL-unquoted instead.
    """
    if not args:
        format_str = six.moves.urllib.parse.unquote(format_str)
    return u"{0} - - [{1}] {2}\n".format(
        self.client_address[0],
        self.log_date_time_string(),
        format_str % args,
    )
def __init__(self, dims, full_support=False):
    """Initialize an empty packing state for a box of size ``dims``.

    :param dims: box extents as a sequence of three numbers
    :param full_support: whether placed items require full support underneath
    """
    origin = [0, 0, 0]
    self.spaces = torch.tensor([origin, dims], device=cuda).unsqueeze(0).double()
    self.items = torch.tensor([origin, origin], device=cuda).unsqueeze(0).double()
    self.box = torch.tensor(dims).double()
    self.full_support = full_support
def generate_random_complex(amplitude: float = 1.0) -> complex:
    """Generate a random complex number of the given magnitude.

    Two uniform random components are drawn and normalized so that the
    returned number's absolute value equals ``amplitude``.

    :param amplitude: The amplitude of the complex number to generate.
    :return: a random complex number of the desired amplitude.
    """
    re_im = numpy.random.rand(2)
    scale = amplitude / numpy.linalg.norm(re_im)
    return (re_im[0] + 1.0j * re_im[1]) * scale
def encrypt(self, keyid, data): <NEW_LINE> <INDENT> logger.info('encrypt; recipient: {}'.format(keyid)) <NEW_LINE> p = self._popen([ '--armor', '--trust-model', 'always', '--encrypt', '--no-encrypt-to', '--recipient', keyid ]) <NEW_LINE> stdout, stderr = p.communicate(data) <NEW_LINE> if p.returncode != 0: <NEW_LINE> <INDENT> raise RuntimeError( 'gpg --encrypt returned {}'.format(p.returncode) ) <NEW_LINE> <DEDENT> return stdout | Encrypt ``data`` to the given ``keyid`` (with armour). | 625941bd30dc7b766590186b |
def __init__(self, filename, mimetype=None, chunksize=256*1024, resumable=False):
    """Constructor.

    Args:
        filename: string, name of the file to upload.
        mimetype: string, mime-type of the file; guessed from the file
            extension when None.
        chunksize: int, upload chunk size in bytes (only used when
            resumable=True).
        resumable: bool, True for a resumable upload, False for a single
            request.
    """
    self._filename = filename
    self._size = os.path.getsize(filename)
    self._fd = None
    if mimetype is None:
        # Only the type matters here; the guessed encoding is discarded.
        mimetype = mimetypes.guess_type(filename)[0]
    self._mimetype = mimetype
    self._chunksize = chunksize
    self._resumable = resumable
def update_metric(self, eval_metric, labels):
    """Accumulate performance according to ``eval_metric`` on all devices.

    Parameters
    ----------
    eval_metric : EvalMetric
        The metric used for evaluation.
    labels : list of NDArray
        Typically comes from `label` of a `DataBatch`.
    """
    # Each executor's outputs are matched with that device's label slice.
    for texec, device_labels in zip(self.execs, labels):
        eval_metric.update(device_labels, texec.outputs)
def run(): <NEW_LINE> <INDENT> cmdline = "%s %s" % (_modules[name], " ".join(args)) <NEW_LINE> logger.debug("Running command line: %s" % cmdline) <NEW_LINE> process = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) <NEW_LINE> out, err = process.communicate() <NEW_LINE> out = out.decode("utf-8") <NEW_LINE> err = err.decode("utf-8") <NEW_LINE> return out + err | Execute external command, capture output. | 625941bd091ae35668666e65 |
def forwards(self, orm):
    """Populate VerificationDeadline from verified course modes.

    Historically the course-mode expiration (an upgrade deadline) and the
    verification deadline were always the same value; this copies the
    expiration datetime of every "verified"/"professional" mode that has
    one into the new verify_student-owned model.
    """
    verified_modes = orm['course_modes.CourseMode'].objects.filter(
        mode_slug__in=["verified", "professional"],
        expiration_datetime__isnull=False,
    )
    for mode in verified_modes:
        orm.VerificationDeadline.objects.create(
            course_key=mode.course_id,
            deadline=mode.expiration_datetime,
        )
def check_gradient(f, x, delta=1e-5, tol=1e-4):
    """Check an analytical gradient against a two-point numerical gradient.

    Arguments:
        f: function that receives x and returns (value, gradient)
        x: np array, initial point where the gradient is checked
        delta: step used for the numerical gradient
        tol: tolerance for comparing numerical and analytical gradients

    Return:
        bool indicating whether the gradients match.
    """
    assert isinstance(x, np.ndarray)
    # ``np.float`` was removed in NumPy 1.24; the builtin ``float``
    # compares equal to the float64 dtype.
    assert x.dtype == float

    orig_x = x.copy()
    fx, analytic_grad = f(x)
    assert np.all(np.isclose(orig_x, x, tol)), "Functions shouldn't modify input variables"
    assert analytic_grad.shape == x.shape
    analytic_grad = analytic_grad.copy()

    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        ix = it.multi_index
        analytic_grad_at_ix = analytic_grad[ix]

        # Central difference: (f(x + delta) - f(x - delta)) / (2 * delta)
        x_plus = x.copy()
        x_plus[ix] += delta
        x_minus = x.copy()
        x_minus[ix] -= delta
        numeric_grad_at_ix = (f(x_plus)[0] - f(x_minus)[0]) / (2 * delta)

        if not np.isclose(numeric_grad_at_ix, analytic_grad_at_ix, tol):
            print("Gradients are different at %s. Analytic: %2.5f, Numeric: %2.5f"
                  % (ix, analytic_grad_at_ix, numeric_grad_at_ix))
            return False
        it.iternext()

    print("Gradient check passed!")
    return True
def makeChange(coins, total): <NEW_LINE> <INDENT> if total == 0: <NEW_LINE> <INDENT> return 0 <NEW_LINE> <DEDENT> if total < 0: <NEW_LINE> <INDENT> return 0 <NEW_LINE> <DEDENT> if len(coins) == 0: <NEW_LINE> <INDENT> return 0 <NEW_LINE> <DEDENT> return min(makeChange(coins, total - coin) + 1 for coin in coins) | determine the fewest number of coins needed to meet a given amount | 625941bd66673b3332b91f92 |
def linear_gradient(x, y, theta):
    """Partial-derivative vector of the squared error at one data point.

    :param x: observed input value
    :param y: observed output value
    :param theta: (slope, intercept) parameter pair
    :return: ``[d/d_slope, d/d_intercept]`` gradient contributions
    """
    slope, intercept = theta
    error = (slope * x + intercept) - y
    return [error * x, error]
def compare(fname, s1, s1_name, outpath): <NEW_LINE> <INDENT> print('PID:',os.getpid(),' is calculating file', fname) <NEW_LINE> ffname = 'log_'+s1_name+'_'+fname.split('/')[-1][4:-3] + '.txt' <NEW_LINE> min_diff = [] <NEW_LINE> min_j = [] <NEW_LINE> print(fname) <NEW_LINE> hx = h5py.File(fname, 'r') <NEW_LINE> sx = hx['data'].value <NEW_LINE> ax = hx['angle'].value <NEW_LINE> num = len(s1) <NEW_LINE> numx = len(sx) <NEW_LINE> f = open(outpath+ffname,'w') <NEW_LINE> tic = time.time() <NEW_LINE> for i in range(num): <NEW_LINE> <INDENT> diff = [] <NEW_LINE> diff = np.linalg.norm(s1[i]-sx[i]) <NEW_LINE> min_diff.append(diff) <NEW_LINE> f.write(str(diff)+','+str(ax[i])+'\n') <NEW_LINE> <DEDENT> mean_diff = np.mean(min_diff) <NEW_LINE> toc = time.time() - tic <NEW_LINE> print(mean_diff) <NEW_LINE> f.close() <NEW_LINE> POS = fname.find('lstc') <NEW_LINE> output_name = s1_name + '_' + fname[POS:-3] + '.txt' <NEW_LINE> f = open(outpath+output_name,'w+') <NEW_LINE> print('write file:', output_name) <NEW_LINE> f.write(str(mean_diff)+'\n') <NEW_LINE> f.write('time usage:'+str(toc)+' s') <NEW_LINE> f.close() <NEW_LINE> hx.close() | compare 2 files | 625941bdf9cc0f698b1404ff |
def test_code_c9(gb): <NEW_LINE> <INDENT> gb.memory.write_8bit(0x8010, 0x50) <NEW_LINE> gb.memory.write_8bit(0x8011, 0x40) <NEW_LINE> gb.cpu.register.SP = 0x8010 <NEW_LINE> gb.cpu.register.PC = 0x0000 <NEW_LINE> cycles = op.code_c9(gb) <NEW_LINE> assert cycles == 16 <NEW_LINE> assert_registers(gb, SP=0x8012, PC=0x4050) <NEW_LINE> assert_memory(gb, {0x8010: 0x50, 0x8011: 0x40}) | RET - Pop two bytes from stack and jump to that address | 625941bd442bda511e8be31e |
def curry_node_type(klass, node_type, extradoc=None):
    """Create a restriction callable pre-bound to a certain node type.

    Uses :obj:`partial` to fix ``node_type`` on the wrapped class and
    extends its docstring.

    :param klass: callable (usually a class) that is wrapped.
    :param node_type: value passed as node_type.
    :param extradoc: addition to the docstring. Defaults to
        "Automatically set to %s type." % node_type
    :return: a wrapped callable.
    """
    if extradoc is None:
        extradoc = "Automatically set to %s type." % (node_type,)
    wrapped = partial(klass, node_type=node_type)
    doc = klass.__doc__
    if doc is None:
        # NOTE(review): extradoc is dropped when the class has no
        # docstring; preserved as-is from the original behaviour.
        doc = ''
    else:
        doc = "\n".join(line.lstrip() for line in doc.split("\n")) + "\n"
        doc += extradoc
    return pretty_docs(wrapped, doc)
def showRunAgain(self): <NEW_LINE> <INDENT> if self._isUp2date is True and self._showAgain: <NEW_LINE> <INDENT> warning = QMessageBox(self) <NEW_LINE> warning.setText("Attention\nLes parametres on changés, il faut relancer les calculs") <NEW_LINE> warning.setIcon(QMessageBox.Warning) <NEW_LINE> warning.setWindowTitle("Attention") <NEW_LINE> checkBox = QCheckBox() <NEW_LINE> checkBox.setText(" Ne plus me demander") <NEW_LINE> warning.setCheckBox(checkBox) <NEW_LINE> warning.exec_() <NEW_LINE> self._showAgain = not checkBox.isChecked() <NEW_LINE> self._isUp2date = False | Show a message indicating to start again computation. | 625941bd8e7ae83300e4aecd |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.