code
stringlengths
4
4.48k
docstring
stringlengths
1
6.45k
_id
stringlengths
24
24
def subsetsWithDup(self, nums):
    """Return all unique subsets of nums, sorted, without duplicate subsets.

    :type nums: List[int]
    :rtype: List[List[int]]
    """
    nums.sort()
    subsets = [[]]
    for value in nums:
        # Double the collection: every existing subset with and without value.
        subsets += [prefix + [value] for prefix in subsets]
    subsets.sort()
    # The list is sorted, so duplicates are adjacent; keep the first of each run.
    unique = [s for pos, s in enumerate(subsets)
              if pos == 0 or s != subsets[pos - 1]]
    return unique
:type nums: List[int] :rtype: List[List[int]]
625941be0fa83653e4656ee3
def skipIfRemote(func):
    """Decorate a test method to skip it when testing against a remote platform.

    Raises if applied to a TestCase class rather than a test method.
    """
    if isinstance(func, type) and issubclass(func, unittest2.TestCase):
        raise Exception("@skipIfRemote can only be used to decorate a test method")

    @wraps(func)
    def wrapper(*args, **kwargs):
        # Fix: the original imported `unittest2.case` here and never used it.
        if lldb.remote_platform:
            # args[0] is the TestCase instance the decorated method is bound to.
            self = args[0]
            self.skipTest("skip on remote platform")
        else:
            func(*args, **kwargs)
    return wrapper
Decorate the item to skip tests if testing remotely.
625941be4f6381625f114964
def __init__(self, inf, sup=True):
    """Create a numeric range.

    inf -- the inferior boundary, or an existing Range to copy (in which
           case sup must be left at its default).
    sup -- the superior boundary. True (the default) means equal to inf;
           None means the range has no upper bound.
    """
    if isinstance(inf, Range):
        # Copy construction: an explicit sup makes no sense here.
        assert sup is True
        inf, sup = inf.inf(), inf.sup()
    assert inf is not None
    if sup is True:
        sup = inf
    self.__inf = inf
    self.__sup = sup
Create a numeric range with the given boundaries inf -- the inferior boundary. sup -- the superior boundary. If unspecified, equals the inferior boundary. If None, there is no upper bound to the range (it includes any number superior or equal to inf). >>> 4 in Range(5) False >>> 5 in Range(5) True >>> 6 in Range(5) False >>> 4 in Range(5, 7) False >>> 5 in Range(5, 7) True >>> 6 in Range(5, 7) True >>> 7 in Range(5, 7) True >>> 8 in Range(5, 7) False >>> 42 in Range(5, None) True
625941be5166f23b2e1a5080
def test_power(self):
    """Check that power(7, 3) raises 7 to the third power."""
    expected = 343
    assert expected == power(7, 3)
This will test the functionality of the power() function. This function will take in the n and e values and raise the n value to the e. This test will pass if the returned value is equivalent to n^e, otherwise it will fail.
625941be0c0af96317bb810f
def _processNestedSlaveItem(self, config, binlogEvent):
    """Apply a binlog event to nested slave documents via an update-by-query.

    Builds a handler context from the configs depending on this config's key,
    renders the update script and nested bool query, then issues the
    update-by-query against the config's ES index/type.
    """
    values = binlogEvent['values']
    configKey = config.key
    relativedConfigs = config.getLocatedConfigList().getDependentItems(
        configKey, withSelf=True)
    context = HandlerContext(relativedConfigs, {configKey: values})
    script = self._getNestedSlaveItemScript(config, relativedConfigs, context)
    nestedQuery = context.exp_data(self._getNestedBoolQuery(config), config)
    self._updateByQuery(config.esIndex, config.esType,
                        {'query': nestedQuery, 'script': script})
DeleteEventProcessor
625941be4f88993c3716bf91
def fib_list(n):
    """Return the n-th Fibonacci number (fib(0) == 0, fib(1) == 1).

    Intermediate results are memoized in a list. The original used a fixed
    10000-slot table, which broke for n >= 10000; the table is now sized to
    the request.
    """
    fib_l = [None] * max(n + 1, 2)
    fib_l[:2] = [0, 1]

    def _fib_list(i):
        # Fill the memo on demand; each index is computed at most once.
        if fib_l[i] is None:
            fib_l[i] = _fib_list(i - 1) + _fib_list(i - 2)
        return fib_l[i]

    return _fib_list(n)
Вычисление n-ного числа Фибоначчи, с сохранением промежуточного результа в список.
625941be1d351010ab855a43
def exclusion(array: np.ndarray, excluded_ids: List[int]) -> np.ndarray:
    """Return the entries of `array` whose positional ids are not excluded.

    :param array: 1-D array of per-id values (e.g. IoU/accuracy values)
    :param excluded_ids: positional indices to drop
    :return: array restricted to the non-excluded positions
    """
    all_ids = np.arange(array.size)
    # np.isin supersedes the deprecated np.in1d.
    return array[~np.isin(all_ids, excluded_ids)]
take in array of IoU/Acc., return non-excluded IoU/acc values
625941be3cc13d1c6d3c72a2
@click.command()
@click.option('-b', '--buffer-size', default=10000,
              help='The size of batch for inserting operation.')
@click.option('-f', '--force-recreate/--no-force-recreate', default=False,
              help='Force recreate the table before loading')
@click.argument('spec_file', nargs=1)
@click.argument('pg_url', nargs=1)
def console(spec_file, pg_url, buffer_size, force_recreate):
    """Bulk-load CSV data into the database according to a spec module.

    :spec: The spec of the csv to load in the db
    :pg_url: The url for the database to load the data
    """
    spec = imp.load_source('', spec_file)
    db_engine = prepare_db(pg_url, spec.meta, force_recreate)
    load_data(db_engine, spec.all_tables, spec.all_files, buffer_size)
Bulk Loading the data according to the spec :spec: The spec of the csv to load in the db :pg_url: The url for the database to load the data
625941beab23a570cc2500a7
def _get_data(self):
    """Stub: will return room information — number of participants,
    available tables, and other in-game state (not implemented yet)."""
    pass
Тут будет информация о зале - количество участников, доступные столы, прочая игровая информация.
625941be56ac1b37e62640fb
def generate_test_file(path, filename, functions, classtree): <NEW_LINE> <INDENT> with open("{}/test_{}".format(path, filename), "w+") as f: <NEW_LINE> <INDENT> f.write("import pytest\n") <NEW_LINE> f.write("import {} as totest\n\n".format(filename)) <NEW_LINE> f.write("# Call this function if your function is under 5 lines and has a 0%\n") <NEW_LINE> f.write("# chance of breaking\n") <NEW_LINE> f.write("def i_am_sure_theres_no_issue():\n") <NEW_LINE> f.write("\tassert 1\n\n") <NEW_LINE> f.write("#--------------------#\n") <NEW_LINE> f.write("# TESTING FUNCTIONS #\n") <NEW_LINE> f.write("#--------------------#\n") <NEW_LINE> for function in functions: <NEW_LINE> <INDENT> f.write("def test_{}():\n".format(function)) <NEW_LINE> f.write("\t#output = totest.{}(*args_here*)\n".format(function)) <NEW_LINE> f.write("\t#expected = *expected_output_here*\n") <NEW_LINE> f.write("\t#assert output == expected\n") <NEW_LINE> f.write("\tassert 0\n\n") <NEW_LINE> <DEDENT> f.write("#------------------#\n") <NEW_LINE> f.write("# TESTING CLASSES #\n") <NEW_LINE> f.write("#------------------#\n") <NEW_LINE> for classname, funcs in classtree.items(): <NEW_LINE> <INDENT> f.write("\"\"\" TESTING {} CLASS \"\"\"\n".format(classname)) <NEW_LINE> f.write("@pytest.fixture(scope=\"module\")\n") <NEW_LINE> f.write("def setup_{}(request):\n".format(classname)) <NEW_LINE> f.write("\t#test_class = totest.{}(*args_here*)\n".format(classname)) <NEW_LINE> f.write("\t#return test_class #allows modules being tested to use one central class\n") <NEW_LINE> f.write("\tdef teardown():\n") <NEW_LINE> f.write("\t\t#place teardown stuff here\n") <NEW_LINE> f.write("\t\tassert 0\n") <NEW_LINE> f.write("\trequest.addfinalizer(teardown)\n") <NEW_LINE> f.write("\tassert 0\n\n") <NEW_LINE> for func in funcs: <NEW_LINE> <INDENT> f.write("def test_{}_{}(setup_{}):\n".format(classname, func, classname)) <NEW_LINE> f.write("\t#output = setup_{}.{}(*args_here*)\n".format(classname, func)) <NEW_LINE> 
f.write("\t#expected = *expected_output_here*\n") <NEW_LINE> f.write("\t#assert output == expected\n") <NEW_LINE> f.write("\tassert 0\n\n")
Generates the test file Procedurally generate the test file based on gathered resources Args: path: the path to the file where the test function will be written filename: the name of the file to generate a test case for functions: a list containing all function names in the module classtree: a dictionary containing all classes and their mapped functions Returns: none Raises: none
625941be31939e2706e4cd94
def setup_sources(self, toc_names):
    """Resolve `toc_names` to source projects in parallel and store them.

    Lookups run on a small thread pool; names whose lookup raises
    RuntimeError are silently dropped before _finish_setup_sources is called.
    """
    def _lookup(toc_name):
        try:
            return self.find_source(toc_name)
        except RuntimeError:
            return None

    with concurrent.futures.ThreadPoolExecutor(
            max_workers=8, thread_name_prefix="loader") as executor:
        futures = [executor.submit(_lookup, name) for name in toc_names]
        results = [f.result() for f in concurrent.futures.as_completed(futures)]
    self._finish_setup_sources([src for src in results if src is not None])
Populate :attr:`sources` with a list of :obj:`CurseProject` that provide the addons specified with `toc_names` by searching the `Curseforge` site.
625941be66656f66f7cbc0d1
def _read_file(self, filepath):
    """Read a TAR archive holding the three MEX files into an AnnData object.

    The archive must contain matrix.mtx, barcodes.tsv and genes.tsv. Sets
    self.adata (X from matrix.mtx, obs from barcodes.tsv, var from genes.tsv)
    and self.originalFile (the path of the uploaded archive), and returns self.
    """
    import tarfile
    tar_filename = filepath.rsplit('/', 1)[1].rsplit('.')[0]
    tmp_dir = 'tmp/' + tar_filename
    with tarfile.open(filepath) as tf:
        for entry in tf:
            # Bug fix: the original reassigned `filepath` here, so
            # self.originalFile ended up holding the last extracted member's
            # path instead of the uploaded archive's path.
            member_path = "{0}/{1}".format(tmp_dir, entry.name)
            tf.extract(entry, path=tmp_dir)
            basename = os.path.basename(member_path)
            if entry.name == 'matrix.mtx' or basename == 'matrix.mtx':
                adata = sc.read(member_path, cache=False).transpose()
            elif entry.name == 'barcodes.tsv' or basename == 'barcodes.tsv':
                obs = pd.read_csv(member_path, sep='\t', index_col=0,
                                  header=None, names=['observations'])
            elif entry.name == 'genes.tsv' or basename == 'genes.tsv':
                var = pd.read_csv(member_path, sep='\t', index_col=0,
                                  header=None, names=['genes', 'gene_symbol'])
    adata.var = var
    adata.obs = obs
    self.adata = adata
    self.originalFile = filepath
    return self
Reads in TAR archive that contains the 3 MEX files: matrix.mtx, barcodes.tsv, and genes.tsv. Output is directed to the FileUploader object. Input ----- filepath - /path/to/your/upload.tar - TAR archive must end with file extension '.tar' - The must contain 3 files by the exact names as below: matrix.mtx barcodes.tsv genes.tsv Output ------ Output is assigned to the DatasetUploader object: DatasetUploader.adata = adata DatasetUploader.originalFile = filepath 'adata' is an AnnData object where data of each MEX file is assigned to the AnnData object: AnnData.X = matrix.mtx AnnData.obs = barcodes.tsv AnnData.var = genes.tsv 'filepath' is the file path of the original file
625941be07d97122c41787ad
def get_is_break(result_dict, search_id):
    """Evaluate all risk rules for a search and report whether any is broken.

    :param result_dict: raw results of the individual credit/risk checks
    :param search_id: apply number identifying the SingleSearch record
    :return: 1 if any rule is violated, else 0 (also 0 when no record exists)
    """
    single_search = SingleSearch.objects(apply_number=search_id).first()
    if not single_search:
        return 0
    func_list = single_search.permission_func
    # Helper calls are kept in the original evaluation order.
    e_business_danger = tel_risk(result_dict.get('credit_telrisklist'))
    info_dangers = info_danger(result_dict.get("credit_newsplatrisk"))
    multiple_loan = many_plat(result_dict.get('credit_manyplatcheck'))
    phone_relate = phone_relative(result_dict.get("credit_phonedevicecheck"))
    loan_over_time_blacklist = loan_over(result_dict.get("credit_netblacklist"))
    no_faith_list = no_faith(result_dict.get("credit_shixin"))
    phone_mark_blaklist = phone_mark_black(result_dict.get("credit_phoneblack"))
    social_dangers = social_danger(result_dict.get("credit_socialblacklist"))
    third_negative_info = mashang_negative(result_dict.get("mashang_negative", {}))
    third_over_time = mashang_overdue(result_dict.get("mashang_overdue", {}))
    third_anti_fraud = mashang_credit(result_dict.get("mashang_credit", {}))
    verify_info = horse_idcard(result_dict.get("mashang_idcard", {}))
    verify_info.update(horse_online(result_dict.get("mashang_online", {})))
    if 'channel_bankby3' in func_list:
        verify_info.update(bank_three(result_dict.get("channel_bankby3")))
    if 'channel_name_card_account' in func_list:
        verify_info.update(
            name_idcard_account(result_dict.get("channel_name_card_account")))
    cell_phone = handle_cellphone(result_dict.get('cellphone_get', {}))
    undesirable_info = handle_undesirable_info(single_search.undesirable_info)
    # Rules in data_list break when isTarget == 1; rules in data_other_list
    # break when isTarget == 2.
    data_list = [
        e_business_danger, info_dangers, multiple_loan, phone_mark_blaklist,
        social_dangers, third_anti_fraud, third_negative_info, third_over_time,
    ]
    data_other_list = [
        loan_over_time_blacklist, phone_relate, no_faith_list, undesirable_info,
    ]
    project_logger.warn("[data_list|%s][data_other_list|%s][_verifyInfo|%s]",
                        data_list, data_other_list, verify_info)
    is_break_rule = 0
    if 2 in [rule.get("isTarget", 0) for rule in data_other_list]:
        is_break_rule = 1
    if 1 in [rule.get('isTarget', 0) for rule in data_list]:
        is_break_rule = 1
    if 3 in verify_info.values():
        is_break_rule = 1
    if cell_phone.get('isPhone') == 3:
        is_break_rule = 1
    return is_break_rule
计算是否触犯规则
625941be596a8972360899ea
def get_feature_from_image(self, img_path):
    """Run the network on a JPEG image and return its feature vector.

    Args:
        img_path: Image file name.
    Returns:
        The squeezed output of the softmax tensor (a 2048-element vector).
    """
    if not tf.gfile.Exists(img_path):
        tf.logging.fatal('File does not exist %s', img_path)
    image_data = tf.gfile.FastGFile(img_path, 'rb').read()
    feature = self.sess.run(self.softmax_tensor,
                            {'DecodeJpeg/contents:0': image_data})
    return np.squeeze(feature)
Runs extract the feature from the image. Args: img_path: Image file name. Returns: predictions: 2048 * 1 feature vector
625941be16aa5153ce3623a0
def distance_matrix_filter(origin, destination, trips):
    """Query the Google Distance Matrix API and keep only nearby trips.

    Returns a dict mapping trip_id -> trip for trips whose drop-off distance
    is at most 72421 meters (~45 miles). On a non-200 response, a message is
    printed and an empty dict is returned.
    """
    trips_by_id = {trip.trip_id: trip for trip in trips}
    base_url = ('https://maps.googleapis.com/maps/api/distancematrix/'
                'json?')
    payload = {
        "origins": convert.location_list(origin),
        "destinations": convert.location_list(destination),
        "units": "imperial",
    }
    resp = requests.get(base_url, params=payload)
    drop_off_distances = {}
    if resp.status_code != 200:
        print('HTTP status code {} received, program terminated.'
              .format(resp.status_code))
    else:
        matrix = json.loads(resp.text)
        elements = matrix['rows'][0]['elements']
        for offset, trip in enumerate(trips):
            cell = elements[offset]
            if cell['status'] == 'OK':
                drop_off_distances[trip.trip_id] = cell['distance']['value']
    # 72421 m is the cut-off used by the original implementation.
    drop_off_distances = {tid: dist for tid, dist
                          in drop_off_distances.items() if dist <= 72421}
    return {tid: trips_by_id[tid] for tid in drop_off_distances}
Return dictionary as trip_id: distance pairs.
625941be3c8af77a43ae36c5
def threads(request, forum_slug):
    """View all the threads in a forum."""
    forum = get_object_or_404(Forum, slug=forum_slug)
    if not forum.allows_viewing_by(request.user):
        raise Http404

    def _int_param(name):
        # Malformed query parameters fall back to 0.
        try:
            return int(request.GET.get(name, 0))
        except ValueError:
            return 0

    sort = _int_param('sort')
    desc = _int_param('desc')
    desc_toggle = 0 if desc else 1
    threads_ = sort_threads(forum.thread_set, sort, desc)
    count = threads_.count()
    threads_ = threads_.select_related('creator', 'last_post',
                                      'last_post__author')
    threads_ = paginate(request, threads_,
                        per_page=constants.THREADS_PER_PAGE, count=count)
    feed_urls = ((reverse('forums.threads.feed', args=[forum_slug]),
                  ThreadsFeed().title(forum)),)
    is_watching_forum = (request.user.is_authenticated() and
                         NewThreadEvent.is_notifying(request.user, forum))
    return render(request, 'forums/threads.html', {
        'forum': forum,
        'threads': threads_,
        'is_watching_forum': is_watching_forum,
        'sort': sort,
        'desc_toggle': desc_toggle,
        'feeds': feed_urls})
View all the threads in a forum.
625941be7d43ff24873a2bc5
def update_attraction(force, index, value):
    """Add `value` to force[index]; drop the entry when its total is zero."""
    new_total = force.get(index, 0) + value
    if new_total == 0:
        # A zero force is equivalent to no entry at all.
        force.pop(index, None)
    else:
        force[index] = new_total
Increase the value, or delete the item, if the value is zero.
625941be9f2886367277a7b7
def __init__(self):
    """Initialize the mock responder with empty message and webhook logs."""
    self.messages = []
    self.webhooks = []
Initialize the mock responder.
625941bed53ae8145f87a19b
def test_user_profile_uploadphoto_two_users(url, registered_user):
    """Multiple users can set their profile photos from the same img_url
    on the flask server."""
    user1 = registered_user
    img_url = "https://i.ytimg.com/vi/CPhihTANyPo/maxresdefault.jpg"

    def _upload(token):
        # Same crop for both users.
        requests.post(f"{url}/user/profile/uploadphoto", json={
            'token': token,
            'img_url' : img_url,
            'x_start' : 300,
            'y_start' : 0,
            'x_end' : 920,
            'y_end' : 600
        })

    _upload(user1['token'])
    resp = requests.post(f"{url}/auth/register", json={
        'email' : 'john.smith2@gmail.com',
        'password' : 'abcd1081$#',
        'name_first' : 'John',
        'name_last' : 'Smith'
    })
    user2_data = resp.json()
    _upload(user2_data['token'])
    resp = requests.get(
        f"{url}/user/profile?token={user1['token']}&u_id={user1['u_id']}")
    profile_data = resp.json()
    assert profile_data['user']['profile_img_url'] == \
        f"{url}img/{profile_data['user']['u_id']}.jpg"
    resp = requests.get(
        f"{url}/user/profile?token={user2_data['token']}&u_id={user1['u_id']}")
    profile2_data = resp.json()
    assert profile2_data['user']['profile_img_url'] == \
        f"{url}img/{profile_data['user']['u_id']}.jpg"
test that multiple users can set their profile photos with given img_url on the flask server
625941beff9c53063f47c11c
def test_ex_ret_stringed_args(self):
    """ex() should render the exception's args into the message string."""
    result = ex(Exception(303))
    self.assertEqual(result, 'error 303')
Test exception returns stringed args
625941be97e22403b379cec0
def remove(self):
    """Detach this gate from the gate lists of both endpoint neurons."""
    out_gates = self.from_neuron.gates[GateDirection.OUT.value]
    in_gates = self.to_neuron.gates[GateDirection.IN.value]
    out_gates.remove(self)
    in_gates.remove(self)
Elimina i gates dalle liste dei neuroni
625941be92d797404e3040b1
def parse_arguments(arguments):
    """Parse command-line arguments and return a Namespace object.

    Exits with a usage error when the requested vocabulary or format is
    not supported.
    """
    global ld_sources
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('vocabulary', nargs=1,
                        help="vocabulary to harvest, legal values are: "
                             + ', '.join(ld_sources.keys()))
    # Typo fix in user-visible help text: "retreived" -> "retrieved".
    parser.add_argument('format', nargs=1, help="format for retrieved data")
    parser.add_argument('id', nargs='*',
                        help="IDs to retrieve, read from stdin if no IDs on command line")
    parser.add_argument('-p', '--pause', type=int, default=3,
                        help="sleep (seconds) between requests")
    parser.add_argument('-o', '--outfile', help="Output file",
                        default=sys.stdout, type=argparse.FileType('w'))
    args = parser.parse_args(arguments)

    vocab = args.vocabulary[0]
    if vocab not in ld_sources:
        # Fix: the original called parser.exit(msg), which passes the message
        # as the *status* argument. parser.error() prints it and exits.
        parser.error("Unsupported vocabulary {vocab}; supported vocabularies "
                     "are: {vocab_list}".format(
                         vocab=vocab,
                         vocab_list=', '.join(ld_sources.keys())))
    fmt = args.format[0]
    if fmt not in ld_sources[vocab].formats:
        parser.error("Format {format} not available for {vocabulary}, "
                     "available formats are: {formats}".format(
                         format=fmt, vocabulary=vocab,
                         formats=', '.join(ld_sources[vocab].formats)))
    return args
parse command-line arguments and return a Namespace object
625941bebaa26c4b54cb104a
def numSquares(self, n):
    """Return the least number of perfect squares summing to n.

    Coin-change style DP over the perfect squares <= n.

    :type n: int
    :rtype: int
    """
    squares = []
    base = 1
    while base * base <= n:
        squares.append(base * base)
        base += 1
    # Fix: sys.maxint does not exist on Python 3; use an infinite sentinel.
    # Every n >= 1 is reachable (n ones), so the sentinel never leaks out.
    dp = [float('inf')] * (n + 1)
    dp[0] = 0
    for total in range(1, n + 1):
        for sq in squares:
            if total >= sq:
                dp[total] = min(dp[total], dp[total - sq] + 1)
    return dp[-1]
:type n: int :rtype: int
625941be91f36d47f21ac417
def __init__(self, style=Style(), tokenOverride=None):
    """Set up the formatter state.

    Parameters
        style: Style() settings for the query language.
        tokenOverride: optional function taking (token, previousKeyword) and
            returning a token that overwrites the given token.

    NOTE(review): the mutable `Style()` default is evaluated once and shared
    across calls — confirm Style instances are never mutated.
    """
    self.style = style
    self.indentation = Indentation(style.indent)
    self.inlineBlock = InlineBlock(style.inlineMaxLength)
    self.subQuery = SubQuery()
    self.tokenizer = Tokenizer(style=style)
    self.tokenOverride = tokenOverride
    # Parser position/history, reset for each formatting run.
    self.previousKeyword = None
    self.previousTopLevelKeyword = None
    self.tokens = []
    self.index = 0
Paramters style: sparksqlformatter.src.style.Style() object Styleurations for the query language. tokenOverride: function Function that takes token, previousKeyword and returns a token to overwrite given token (?).
625941be9c8ee82313fbb69c
def load_word2vec(params):
    """Load the pretrained word2vec weights into an embedding matrix.

    params must provide: 'word2vec_output' (pickled word->vector mapping),
    'vocab_path' (one "word id" pair per line), 'vocab_size', 'embed_size'.

    :return: (vocab_size, embed_size) numpy matrix; words missing from the
        word2vec mapping keep zero rows.
    """
    word2vec_dict = load_pkl(params['word2vec_output'])
    # Fix: the vocab file handle was never closed; use a context manager.
    with open(params['vocab_path'], encoding='utf-8') as fh:
        vocab_lines = fh.readlines()
    embedding_matrix = np.zeros((params['vocab_size'], params['embed_size']))
    for line in vocab_lines[:params['vocab_size']]:
        word, i = line.split()
        embedding_vector = word2vec_dict.get(word)
        if embedding_vector is not None:
            embedding_matrix[int(i)] = embedding_vector
    return embedding_matrix
load pretrained word2vec weight matrix 加载预训练的word2vec 权重矩阵 :param vocab_size: 读取词数 :return:
625941be851cf427c661a439
def __init__(self, page_height=None, page_width=None):
    """PageSize - a model defined in Swagger.

    swagger_types maps attribute name -> attribute type;
    attribute_map maps attribute name -> JSON key in the definition.
    """
    self.swagger_types = {
        'page_height': 'str',
        'page_width': 'str',
    }
    self.attribute_map = {
        'page_height': 'pageHeight',
        'page_width': 'pageWidth',
    }
    self._page_height = page_height
    self._page_width = page_width
PageSize - a model defined in Swagger :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition.
625941befb3f5b602dac35b8
def all_files(self, reporter, policies_manager=DEFAULT_SCAN_MANAGER):
    """Yield every file under the root folder.

    :param reporter: a place to report errors
    :param policies_manager: a policy manager object,
        default is DEFAULT_SCAN_MANAGER
    """
    for entry in self._walk_relative_paths(self.root, '', reporter,
                                           policies_manager):
        yield entry
Yield all files. :param reporter: a place to report errors :param policies_manager: a policy manager object, default is DEFAULT_SCAN_MANAGER
625941be287bf620b61d398d
def create_cluster_around_atom(self, atoms, atom_id, hydrogenate=False):
    """Carve a cluster around atom_id by breadth-first neighbour expansion.

    Expands self.small_cluster_hops shells outward from atom_id.

    Parameters
    ----------
    atoms : ase.Atoms
        Whole structure
    atom_id : int
        Atomic index
    hydrogenate : bool
        If true, hydrogenate the resulting structure

    Returns
    -------
    set
        Indices of the atoms in the new cluster
    """
    cluster = {atom_id}
    frontier = {atom_id}
    for _ in range(self.small_cluster_hops):
        expanded = set()
        for index in frontier:
            expanded.update(self.find_neighbours(atoms, index)[0])
        # Only genuinely new atoms form the next frontier.
        frontier = expanded - cluster
        cluster |= frontier
    if hydrogenate:
        self.hydrogenate_cluster(atoms, cluster)
    return cluster
Carve a cluster around the atom with atom_id This function operates on sets and returns a set Parameters ---------- atoms : ase.Atoms Whole structure atom_id : int Atomic index hydrogenate : bool If true, hydrogenate the resulting structure Returns ------- list atoms in the new cluster
625941be0fa83653e4656ee4
def _get_rtransform(self):
    """Return the RTransform object of the grid."""
    return self._rtransform
The RTransform object of the grid.
625941be7d43ff24873a2bc6
def predict(self, X_test):
    """Return predictions for the given samples.

    Parameters
    ----------
    X_test : scipy.sparse.csc_matrix, (n_samples, n_features)

    Returns
    -------
    T : array, shape (n_samples,)
        Predicted labels.
    """
    X_test = check_array(X_test, accept_sparse="csc", dtype=np.float64,
                         order="F")
    # The model was fitted on CSC input with one weight per feature.
    assert sp.isspmatrix_csc(X_test)
    assert X_test.shape[1] == len(self.w_)
    return ffm.ffm_predict(self.w0_, self.w_, self.V_, X_test)
Return predictions Parameters ---------- X : scipy.sparse.csc_matrix, (n_samples, n_features) Returns ------ T : array, shape (n_samples) The labels are returned for classification.
625941be3346ee7daa2b2c92
def specklefilter(off, area=25, th=0):
    """Speckle filter for the disparity map `off`.

    Pixels belonging to 4-connected components (joined when neighbouring
    values differ by at most `th`) whose size does not exceed `area` are
    replaced by infinity.

    Args:
        off: numpy array with the input disparity map
        area: surface (in pixels) of the smallest allowed connected component
        th: similarity threshold used to decide whether two neighbouring
            pixels have the same value
    Returns:
        numpy array with the filtered disparity map; removed points are
        set to infinity.
    """
    @jit(nopython=True)
    def find(i, idx):
        # Follow parent links of the disjoint-set forest to the root of i.
        if idx.flat[i] == i:
            return i
        return find(idx.flat[i], idx)

    @jit(nopython=True)
    def dsf(D, th=0):
        # Build the disjoint-set forest: union each pixel with its left and
        # upper neighbour when the disparity difference is within th.
        h, w = D.shape[0], D.shape[1]
        idx = np.zeros((h, w), dtype=np.int64)
        for j in range(h):
            for i in range(w):
                idx[j, i] = j * w + i
        for j in range(h):
            for i in range(w):
                if i > 0:
                    if abs(D[j, i] - D[j, i - 1]) <= th:
                        a = find(idx[j, i], idx)
                        b = find(idx[j, i - 1], idx)
                        idx[j, i] = idx[j, i - 1]
                        idx.flat[a] = b
                if j > 0:
                    if abs(D[j, i] - D[j - 1, i]) <= th:
                        a = find(idx[j, i], idx)
                        b = find(idx[j - 1, i], idx)
                        idx[j, i] = idx[j - 1, i]
                        idx.flat[a] = b
        return idx

    @jit(nopython=True)
    def labels(idx):
        # Resolve every pixel to the root label of its component.
        h, w = idx.shape[0], idx.shape[1]
        lab = idx * 0
        for i in range(h * w):
            lab.flat[i] = find(i, idx)
        return lab

    @jit(nopython=True)
    def areas(lab):
        # Per-pixel size of the component the pixel belongs to.
        h, w = lab.shape[0], lab.shape[1]
        component_size = np.zeros((h, w), dtype=np.int64)
        LL = np.zeros((h, w), dtype=np.int64)
        for i in range(w * h):
            component_size.flat[lab.flat[i]] += 1
        for i in range(w * h):
            LL.flat[i] = component_size.flat[lab.flat[i]]
        return LL

    ind = dsf(off, th=th)
    lab = labels(ind)
    are = areas(lab)
    return np.where((are > area), off, np.inf)
speckle filter of disparity map off Args: off: numpy array with the input disparity map area: the surface (in pixels) of the smallest allowed connected component of disparity th: similarity threshold used to determine if two neighboring pixels have the same value Returns: numpy array with the filtered disparity map, removed points are set to infinity
625941bed4950a0f3b08c279
def Parse(self):
    """Parse program output, dispatching to the language-specific flag parser."""
    (start_line, lang) = self.ParseDesc()
    if start_line < 0:
        return
    dispatch = {
        'python': self.ParsePythonFlags,
        'c': self.ParseCFlags,
        'java': self.ParseJavaFlags,
    }
    handler = dispatch.get(lang)
    # Unknown languages are silently ignored, as in the original chain.
    if handler is not None:
        handler(start_line)
Parse program output.
625941be4e696a04525c9374
def decode_json_web_token(token, leeway=settings.JWT_LEEWAY):
    """Decode a JWT and return its claims.

    :param leeway: allowed clock skew, in seconds.
    :raises Unauthorized: when the token is malformed or its signature
        has expired.
    """
    try:
        return jwt.decode(token, settings.SECRET_KEY, leeway=leeway,
                          algorithms=[settings.JWT_ALGORITHM])
    # Both failure modes mapped to identical except bodies; merged into one.
    except (jwt.exceptions.DecodeError, jwt.exceptions.ExpiredSignatureError):
        raise Unauthorized()
Decode a JWT. Leeway time may be provided.
625941bea8370b77170527c8
def restart(self):
    """Restart the daemon."""
    logger.info("Restarting daemon: %s" % self.name)
    self._restart()
Restart the daemon
625941bebf627c535bc130f6
def __init__(self, node_name, *, enable_communication_interface: bool = True,
             **kwargs):
    """Create a lifecycle node.

    See rclpy.lifecycle.LifecycleNodeMixin.__init__() and rclpy.node.Node()
    for the documentation of each parameter.
    """
    # Initialise the base Node first, then layer the lifecycle mixin on top.
    Node.__init__(self, node_name, **kwargs)
    LifecycleNodeMixin.__init__(
        self,
        enable_communication_interface=enable_communication_interface,
    )
Create a lifecycle node. See rclpy.lifecycle.LifecycleNodeMixin.__init__() and rclpy.node.Node() for the documentation of each parameter.
625941be004d5f362079a25d
@njit(nogil=True)
def h_Frsn(x1, y1, z1, x2, y2, z2, wvl, pp):
    """Impulse response of the Fresnel propagation method.

    Returns the (real, imag) components of the kernel; outside a square
    window of half-width wvl*z/(2*pp) around (x1, y1) both are zero.
    """
    z = z2 - z1
    r = ((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2)) / (2 * z)
    half_width = (wvl * z) / (2 * pp)
    inside = (x1 - half_width < x2 < x1 + half_width) and \
             (y1 - half_width < y2 < y1 + half_width)
    if inside:
        phase = k(wvl) * r
        return np.cos(phase), np.sin(phase)
    return 0, 0
impulse response function of Fresnel propagation method
625941be6e29344779a6253c
def print_2d_list(list_2d):
    """Render a 2-D list as tab-separated lines (one row per output line).

    :param list_2d: 2-dimensional list
    :return: the formatted string; every line ends with a newline
    """
    # str.join avoids the quadratic string concatenation of the original loop.
    return ''.join('\t'.join(map(str, row)) + '\n' for row in list_2d)
print 2d python list :param list_2d: 2-dimensional list :return: lines
625941befff4ab517eb2f362
def get_wager_amount():
    """Prompt the player for a wager on a particular roll.

    Re-prompts until a valid integer is entered; the original returned from
    inside its loop on the first prompt, so an empty or non-numeric entry
    crashed with ValueError instead of re-asking. Returns the wager as int.
    """
    while True:
        wager = input('Enter a wager (dollars): ')
        try:
            return int(wager)
        except ValueError:
            # Invalid entry: ask again.
            continue
Prompts the player for a wager on a particular roll. The wager is returned as an int.
625941be796e427e537b04eb
def calculate_axis(point, mid):
    """Compute the relative plot coordinates of a point about a midpoint.

    :param point: target point
    :param mid: midpoint
    :return: (x, y) coordinates centred on (250, 250)
    """
    x_point = str(point[0]) + ',' + str(mid[1])
    y_point = str(mid[0]) + ',' + str(point[1])
    mid_str = str(mid[0]) + ',' + str(mid[1])
    x_distance = calculate_distance(x_point, mid_str) / 2
    y_distance = calculate_distance(y_point, mid_str) / 2
    # Offsets flip sign depending on which side of the midpoint we are on.
    x = 250 - x_distance if point[0] < mid[0] else 250 + x_distance
    y = 250 + y_distance if point[1] < mid[1] else 250 - y_distance
    return x, y
计算两点相对坐标距离 :param point: 目标点 :param mid: 中点 :return: 坐标
625941be3cc13d1c6d3c72a3
def connect(self, db, collection):
    """Bind self.collection to a MongoDB collection.

    Args:
        db: The database to connect to
        collection: The collection to connect to
    """
    client = pymongo.MongoClient()
    self.collection = client[db][collection]
Connect to a MongoDB collection Args: db: The database to connect to collection: The collection to connect to
625941be6aa9bd52df036ccb
def kill_by_id(task_info):
    """Attempt to act on a running game process.

    :param task_info: e.g. {'gameID': '998', 'action': 'terminate'}
    :return: JSON string of task_info with a 'description' status field added
    """
    redis_server = redis_init()
    try:
        pid = redis_server.get(get_pid_key(task_info['gameID']))
        if not pid:
            task_info['description'] = 'Process No Found'
            return json.dumps(task_info)
        task_info['description'] = game_process_control(pid,
                                                       task_info['action'])
        return json.dumps(task_info)
    except KeyError:
        task_info['description'] = 'Unexpected Key'
        return json.dumps(task_info)
    except Exception as e:
        # Best-effort endpoint: report the failure instead of propagating.
        print(e)
        task_info['description'] = 'Unexpected Error'
        return json.dumps(task_info)
尝试对正在进行的游戏进程作操作 :param task_info: {'game_id':'998', 'action':'terminate'} :return: result = {'game_id':'998', 'description':'success'}
625941be4428ac0f6e5ba719
def get_order(self):
    """Return the order of the element (may differ from the degree)."""
    return self.order
Return the order of the element (may be different from the degree).
625941bea8ecb033257d2ff6
def send_command(self, command, match_expected_word=None, match_case=False): <NEW_LINE> <INDENT> response = None <NEW_LINE> expected_word_found = False <NEW_LINE> if self.is_connected(): <NEW_LINE> <INDENT> TCPLogger.info("<{client_id}> sending command: '{command}'".format( client_id=self.get_id(), command=command)) <NEW_LINE> self.send_message(command) <NEW_LINE> response = self.receive_response() <NEW_LINE> if response.endswith('\r\n'): <NEW_LINE> <INDENT> response = response[:-2] <NEW_LINE> <DEDENT> TCPLogger.info("<{client_id}> receive response: '{cmdResponse}'".format( client_id=self.get_id(), cmdResponse=response)) <NEW_LINE> if match_expected_word is not None: <NEW_LINE> <INDENT> if str_to_bool(match_case): <NEW_LINE> <INDENT> if str(match_expected_word) in response: <NEW_LINE> <INDENT> expected_word_found = True <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> if str(match_expected_word).upper() in response.upper(): <NEW_LINE> <INDENT> expected_word_found = True <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> cmd_log = TcpClient.CommandLog(command, response, match_expected_word) <NEW_LINE> self._command_log_list.append(cmd_log) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ExceptionClientDisconnected(self) <NEW_LINE> <DEDENT> return response, expected_word_found
Sends a trace tcp command to the server and it returns the response along with a flag in case the expected word was found on the response
625941be8e71fb1e9831d6d2
def prompt_int(self, question, default=None, validators=None): <NEW_LINE> <INDENT> if validators is None: <NEW_LINE> <INDENT> validators = [] <NEW_LINE> <DEDENT> def _int(answer): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> answer = int(answer) <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> raise ValueError("Cannot convert %s to integer" % answer) <NEW_LINE> <DEDENT> <DEDENT> validators.append(_int) <NEW_LINE> if default is not None: default = str(default) <NEW_LINE> return int(self.prompt_text(question, default, validators))
Prompts for a integer value. An answer is valid if it can be converted to a integer. :arg question: question to ask :arg default: default value when no answer is given :arg validators: :class:`list` of functions :rtype: :class:`int`
625941bea8ecb033257d2ff7
def read_timestamp_from_grass(self): <NEW_LINE> <INDENT> if not self.has_grass_timestamp(): <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> check, dates = self.ciface.read_raster3d_timestamp(self.get_name(), self.get_mapset(),) <NEW_LINE> if check < 1: <NEW_LINE> <INDENT> self.msgr.error(_("Unable to read timestamp file " "for 3D raster map <%s>" % (self.get_map_id()))) <NEW_LINE> return False <NEW_LINE> <DEDENT> if len(dates) == 2: <NEW_LINE> <INDENT> self.set_absolute_time(dates[0], dates[1]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.set_relative_time(dates[0], dates[1], dates[2]) <NEW_LINE> <DEDENT> return True
Read the timestamp of this map from the map metadata in the grass file system based spatial database and set the internal time stamp that should be insert/updated in the temporal database. :return: True if success, False on error
625941be956e5f7376d70d97
def process_latin(latin): <NEW_LINE> <INDENT> if latin == None: <NEW_LINE> <INDENT> return [] <NEW_LINE> <DEDENT> latin = [(x[0].strip(), x[1]) for x in latin] <NEW_LINE> if latin != []: <NEW_LINE> <INDENT> latin[0] = (latin[0][0].split('–')[1], latin[0][1]) <NEW_LINE> <DEDENT> if len(latin) > 1: <NEW_LINE> <INDENT> latin[-1] = (latin[-1][0].split('–')[0], latin[-1][1]) <NEW_LINE> <DEDENT> latin = [x for x in latin if x[0] not in ['', 'f.', '.', 'f']] <NEW_LINE> latin[0] = (latin[0][0].lstrip('f. '), latin[0][1]) <NEW_LINE> latin = ' '.join([x[0] for x in latin]) <NEW_LINE> latin = latin.strip() <NEW_LINE> return latin
Process Latin names of the species.
625941be66673b3332b91fb9
def test_similar_words_have_label_key(self): <NEW_LINE> <INDENT> words = self.interface.prediction({'words': ['cat']})['words'] <NEW_LINE> similar_words = [] <NEW_LINE> for similar_words_list in words.values(): <NEW_LINE> <INDENT> similar_words.extend(similar_words_list) <NEW_LINE> <DEDENT> self.assertTrue(all('label' in word for word in similar_words))
All similar words must have a label key.
625941be2eb69b55b151c7d4
def fit(self, training_data, classes, sample_weight=None): <NEW_LINE> <INDENT> if self.learner() is None: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> return self.learner().fit(training_data, classes)
Train the learner with the given data and known classes :param training_data: A multidimensional numpy array of training data :param classes: A numpy array of known classes :return: nothing
625941beac7a0e7691ed3ff9
def check_flash(self): <NEW_LINE> <INDENT> flash = Indicator( 'flash', 0, _type=int, name='Flash objects', description='Number of embedded Flash objects (SWF files) detected ' 'in OLE streams. Not 100% accurate, there may be false ' 'positives.', risk=RISK.NONE) <NEW_LINE> self.indicators.append(flash) <NEW_LINE> if not self.ole: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> for stream in self.ole.listdir(): <NEW_LINE> <INDENT> data = self.ole.openstream(stream).read() <NEW_LINE> found = detect_flash(data) <NEW_LINE> flash.value += len(found) <NEW_LINE> <DEDENT> if flash.value > 0: <NEW_LINE> <INDENT> flash.risk = RISK.MEDIUM <NEW_LINE> <DEDENT> return flash
Check whether this file contains flash objects :returns: :py:class:`Indicator` for count of flash objects or None if file was not opened
625941be3317a56b86939b88
def decode_ogl_snorm_to_fp(x, *, dtype=np.float32, nbits=None): <NEW_LINE> <INDENT> assert (x.dtype.kind == 'u'), '`dtype` of the argument `x` must be unsigned integer types.' <NEW_LINE> assert (dtype().dtype.kind == 'f'), '`dtype` of the argument `dtype` must be floating point types.' <NEW_LINE> max_nbits = x.itemsize * 8 <NEW_LINE> if nbits is None: <NEW_LINE> <INDENT> nbits = max_nbits <NEW_LINE> <DEDENT> assert (1 < nbits <= max_nbits), '`nbits` value is out of range.' <NEW_LINE> assert _can_express_norm(nbits-1, dtype), 'Can\'t be expressed with the specified number of bits.' <NEW_LINE> mask = np.invert(x.dtype.type(np.iinfo(x.dtype).max) >> x.dtype.type(max_nbits - nbits)) <NEW_LINE> uint_x = x >> x.dtype.type(nbits-1) <NEW_LINE> uint_x *= mask <NEW_LINE> uint_x |= x <NEW_LINE> max_uint = x.dtype.type((1 << (nbits-1)) - 1) <NEW_LINE> temp = dtype(uint_x.view(x.dtype.name[1:])) <NEW_LINE> temp /= dtype(max_uint) <NEW_LINE> out = temp if isinstance(temp, np.ndarray) else None <NEW_LINE> return np.maximum(temp, dtype(-1.), out=out)
Decode signed normalized integers to floating-points. Args: x: The type should be `np.uint`, or an array in `np.uint`. dtype: The type should be `np.float`. nbits: The number of bits to use. Returns: The resulting floating-points.
625941be009cb60464c632dc
def gradients(self, data: numpy.ndarray, predicted: numpy.ndarray, actual: numpy.ndarray, hidden_input: numpy.ndarray) -> Gradients: <NEW_LINE> <INDENT> difference = predicted - actual <NEW_LINE> batch_size = difference.shape[1] <NEW_LINE> l1 = numpy.maximum(numpy.dot(self.hidden_weights.T, difference), 0) <NEW_LINE> input_weights_gradient = numpy.dot(l1, data.T)/batch_size <NEW_LINE> hidden_weights_gradient = numpy.dot(difference, hidden_input.T)/batch_size <NEW_LINE> input_bias_gradient = numpy.sum(l1, axis=Axis.COLUMNS.value, keepdims=True)/batch_size <NEW_LINE> hidden_bias_gradient = numpy.sum(difference, axis=Axis.COLUMNS.value, keepdims=True)/batch_size <NEW_LINE> return Gradients(input_weights=input_weights_gradient, hidden_weights=hidden_weights_gradient, input_bias=input_bias_gradient, hidden_bias=hidden_bias_gradient)
does the gradient calculation for back-propagation This is broken out to be able to troubleshoot/compare it Args: data: the input x value predicted: what our model predicted the labels for the data should be actual: what the actual labels should have been hidden_input: the input to the hidden layer Returns: Gradients for input_weight, hidden_weight, input_bias, hidden_bias
625941bed486a94d0b98e06d
def test_success(self): <NEW_LINE> <INDENT> test_add_writer = { "method": "add_writer", "params": FixtureDict.param_add_writer, "jsonrpc": "2.0", "id": 123, } <NEW_LINE> response = self.client.generic( "POST", "", data=json.dumps(test_add_writer), content_type="application/json", ) <NEW_LINE> self.assertEqual(response.status_code, HTTPStatus.OK) <NEW_LINE> self.assertEqual(Writer.objects.count(), 1)
все параметры корректные
625941be1f5feb6acb0c4a7c
def eventFilter(self, o, e): <NEW_LINE> <INDENT> return bool()
bool Sonnet.Highlighter.eventFilter(QObject o, QEvent e)
625941bed164cc6175782c76
def fetch_svn(self, svn_uri, directory): <NEW_LINE> <INDENT> if not command_successful(['svn', '--version']): <NEW_LINE> <INDENT> raise YolkException('Do you have subversion installed?') <NEW_LINE> <DEDENT> if os.path.exists(directory): <NEW_LINE> <INDENT> raise YolkException( 'Checkout directory exists - {}'.format(directory)) <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> os.mkdir(directory) <NEW_LINE> <DEDENT> except OSError as err_msg: <NEW_LINE> <INDENT> raise YolkException('' + str(err_msg)) <NEW_LINE> <DEDENT> cwd = os.path.realpath(os.curdir) <NEW_LINE> os.chdir(directory) <NEW_LINE> status, _ = run_command(['svn', 'checkout', svn_uri]) <NEW_LINE> os.chdir(cwd)
Fetch subversion repository. @param svn_uri: subversion repository uri to check out @type svn_uri: string @param directory: directory to download to @type directory: string
625941bedc8b845886cb545c
def rel_path_convertor(parent_path: Union[str, Path]) -> Callable: <NEW_LINE> <INDENT> def _convertor(option: Union[str, Path]) -> Any: <NEW_LINE> <INDENT> if not option: <NEW_LINE> <INDENT> return option <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return Path(parent_path) / option <NEW_LINE> <DEDENT> <DEDENT> return _convertor
Convertor factory which makes option path relative to parent_path supplied during the convertor initialization.
625941be090684286d50ec0b
def import_module(self, path): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> module = importlib.import_module(path) <NEW_LINE> return module <NEW_LINE> <DEDENT> except (ImportError, AttributeError, KeyError) as err: <NEW_LINE> <INDENT> pass
Import a module. Because this function is meant for use by the Python interpreter and not for general use it is better to use importlib.import_module() to programmatically import a module syntax: importlib.import_module('abc.XXX.def.YYY')
625941be796e427e537b04ec
def register(session, **kw): <NEW_LINE> <INDENT> if not isinstance(session, ftrack_api.Session): <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> subscription = "topic=ftrack.connect.application.launch" <NEW_LINE> session.event_hub.subscribe(subscription, modify_launch)
Register event listener.
625941be94891a1f4081b9d1
def end(self): <NEW_LINE> <INDENT> pb = self.pb <NEW_LINE> pb.stop() <NEW_LINE> pb["value"] = float(pb["maximum"]) <NEW_LINE> pb.update() <NEW_LINE> return
stop progress bar and display 100% (completed)
625941be2ae34c7f2600d05a
def delete_qs(self, query, using): <NEW_LINE> <INDENT> innerq = query.query <NEW_LINE> innerq.get_initial_alias() <NEW_LINE> self.get_initial_alias() <NEW_LINE> innerq_used_tables = [t for t in innerq.tables if innerq.alias_refcount[t]] <NEW_LINE> if not innerq_used_tables or innerq_used_tables == self.tables: <NEW_LINE> <INDENT> self.where = innerq.where <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> pk = query.model._meta.pk <NEW_LINE> if not connections[using].features.update_can_self_select: <NEW_LINE> <INDENT> values = list(query.values_list('pk', flat=True)) <NEW_LINE> if not values: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> return self.delete_batch(values, using) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> innerq.clear_select_clause() <NEW_LINE> innerq.select = [ pk.get_col(self.get_initial_alias()) ] <NEW_LINE> values = innerq <NEW_LINE> <DEDENT> self.where = self.where_class() <NEW_LINE> self.add_q(Q(pk__in=values)) <NEW_LINE> <DEDENT> cursor = self.get_compiler(using).execute_sql(CURSOR) <NEW_LINE> return cursor.rowcount if cursor else 0
Delete the queryset in one SQL query (if possible). For simple queries this is done by copying the query.query.where to self.query, for complex queries by using subquery.
625941be7c178a314d6ef383
def twoSum2(self, numbers, target): <NEW_LINE> <INDENT> dict = {} <NEW_LINE> for i, num in enumerate(numbers): <NEW_LINE> <INDENT> if target - num in dict: <NEW_LINE> <INDENT> return [dict[target - num] + 1, i + 1] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> dict[num] = i
:type numbers: List[int] :type target: int :rtype: List[int]
625941bea934411ee37515bc
def futures_coin_continous_klines(self, **params): <NEW_LINE> <INDENT> return self._request_futures_coin_api("get", "continuousKlines", data=params)
Kline/candlestick bars for a specific contract type. Klines are uniquely identified by their open time. https://binance-docs.github.io/apidocs/delivery/en/#continuous-contract-kline-candlestick-data
625941bee8904600ed9f1e52
def Add(self, element): <NEW_LINE> <INDENT> pass
Add(self: WebRequestModuleElementCollection, element: WebRequestModuleElement) Adds an element to the collection. element: The System.Net.Configuration.WebRequestModuleElement to add to the collection.
625941be0a50d4780f666db9
def read_all(self): <NEW_LINE> <INDENT> print("\nCRUD: Read (all) test case") <NEW_LINE> docs = self.workbenches.read() <NEW_LINE> self.show_docs(docs, 5)
Run Read(all) test case
625941bed7e4931a7ee9de45
def _call_with_frames_removed(func, *args, **kwds): <NEW_LINE> <INDENT> return func(*args, **kwds)
remove_importlib_frames in import.c will always remove sequences of importlib frames that end with a call to this function Use it instead of a normal call in places where including the importlib frames introduces unwanted noise into the traceback (e.g. when executing module code)
625941be24f1403a92600a91
@pytest.mark.tier(2) <NEW_LINE> def test_bottlenecks_report_time_zone(temp_appliance_extended_db, db_restore, db_tbl, db_events): <NEW_LINE> <INDENT> with temp_appliance_extended_db: <NEW_LINE> <INDENT> view = navigate_to(Bottlenecks, 'All') <NEW_LINE> row = view.report.event_details[0] <NEW_LINE> db_row = db_events.filter(db_tbl.message == row[5].text) <NEW_LINE> assert row[0].text == db_row[0][0].strftime(parsetime.american_with_utc_format) <NEW_LINE> view.report.time_zone.fill('(GMT-04:00) La Paz') <NEW_LINE> row = view.report.event_details[0] <NEW_LINE> assert row[0].text == (db_row[0][0] - timedelta(hours=4)).strftime("%m/%d/%y %H:%M:%S -04")
Checks time zone selectbox in report tab. It should change time zone of events in table
625941be7b25080760e39383
def __init__(self): <NEW_LINE> <INDENT> CClef.__init__(self) <NEW_LINE> self.line = 3 <NEW_LINE> self.lowestLine = (7*3) + 4
>>> from music21 import * >>> a = clef.AltoClef() >>> a.sign 'C'
625941be1b99ca400220a9da
def trans(self): <NEW_LINE> <INDENT> res = self.copy() <NEW_LINE> res.cte = res.cte.trans() <NEW_LINE> for op in res.ops: <NEW_LINE> <INDENT> op.qobj = op.qobj.trans() <NEW_LINE> <DEDENT> return res
Return the matrix transpose.
625941be7d847024c06be1e2
def spmeta2npmeta(filePath, style=None): <NEW_LINE> <INDENT> if style is None: <NEW_LINE> <INDENT> motorI=filePath.rfind('motor') <NEW_LINE> apparatusI=filePath.rfind('aparatus') <NEW_LINE> if motorI==apparatusI: <NEW_LINE> <INDENT> raise ValueError("Don't know what kind of meta file {0} is.".format(filePath)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> style = 'motor' if motorI>apparatusI else 'apparatus' <NEW_LINE> <DEDENT> <DEDENT> lines=open(filePath).readlines() <NEW_LINE> if style=='apparatus': <NEW_LINE> <INDENT> endian='>' <NEW_LINE> <DEDENT> elif style=='motor': <NEW_LINE> <INDENT> endian='<' <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise NotImplementedError("Don't know what style '{}' is".format(style)) <NEW_LINE> <DEDENT> output= ['{}, {}f8\r\n'.format(lines.pop(0).strip(), endian)] <NEW_LINE> output+= ['{}, {}f4\r\n'.format(line.strip(), endian) for line in lines] <NEW_LINE> ind=filePath.rfind('.meta') <NEW_LINE> outPath=filePath[:ind] + '.npmeta' <NEW_LINE> open(outPath, 'w').writelines(output)
Style can be 'apparatus' or 'motor'
625941be442bda511e8be345
def get_user_compensar(self, id, id_type): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> elemento_usos = [] <NEW_LINE> record = records = Usuarios.objects( id_usuario__exact=id, tipo_id_usurio__exact=id_type.upper()) <NEW_LINE> if record.count() == 0: <NEW_LINE> <INDENT> return {'msg': 'User Not Activated', 'code': 0} <NEW_LINE> <DEDENT> records = Usuarios.objects(id_usuario__exact=id, tipo_id_usurio__exact=id_type.upper()).aggregate(*[ { '$lookup': { 'from': Usos._get_collection_name(), 'localField': 'id_trabajador', 'foreignField': 'id_trabajador', 'as': 'Usos' }, }, { '$lookup': { 'from': Cluster._get_collection_name(), 'localField': 'cluster_id', 'foreignField': 'cluster_id', 'as': 'Cluster' } }, ]) <NEW_LINE> for element in records: <NEW_LINE> <INDENT> elemento_usos = element <NEW_LINE> elemento_usos = copy.deepcopy(elemento_usos) <NEW_LINE> if len(element['Usos']) == 0: <NEW_LINE> <INDENT> elemento_usos['Usos'] = {'red': 0, 'aliados': 0} <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> elemento_usos['Usos'] = [] <NEW_LINE> elemento_usos['Usos'] = { "red": element['Usos'][0]['usos_red'], "aliados": element['Usos'][0]['usos_aliados'], } <NEW_LINE> <DEDENT> <DEDENT> return elemento_usos <NEW_LINE> <DEDENT> except Exception as err: <NEW_LINE> <INDENT> print(err) <NEW_LINE> return err
Get Usuarios active or inactive :param id: :return:
625941beb5575c28eb68df27
def test_logout(self): <NEW_LINE> <INDENT> login = LoginPage(self.driver) <NEW_LINE> login.open() <NEW_LINE> inventory = login.login(_DEF_USER, _DEF_PASSWORD) <NEW_LINE> inventory.display_menu() <NEW_LINE> inventory.click_logout()
Test logout
625941bedc8b845886cb545d
def check_string_concatenation(self, label_col, other_columns, constraint={}, sep='.', convert_to_base26 = {}): <NEW_LINE> <INDENT> oc_converted = [SQL('to_base26({0} + {1})').format(Identifier(col), Literal(int(convert_to_base26[col]))) if col in convert_to_base26 else Identifier(col) for col in other_columns] <NEW_LINE> oc = [oc_converted[i//2] if i%2 == 0 else Literal(sep) for i in range(2*len(oc_converted)-1)] <NEW_LINE> return self._run_query(SQL(" != ").join([SQL(" || ").join(oc), Identifier(label_col)]), constraint)
Check that the label_column is the concatenation of the other columns with the given separator Input: - ``label_col`` -- the label_column - ``other_columns`` -- the other columns from which we can deduce the label - ``constraint`` -- a dictionary, as passed to the search method - ``sep`` -- the separator for the join - ``convert_to_base26`` -- a dictionary where the keys are columns that we need to convert to base26, and the values is that the shift that we need to apply
625941bebe7bc26dc91cd52e
def set_reentrant(self, value): <NEW_LINE> <INDENT> self.__REENTRANT = bool(value)
:type value: bool
625941be63b5f9789fde700e
def save_all_boards_from_game_as_npy(pgn_file, output_filename, max_games=100000, print_interval=5000): <NEW_LINE> <INDENT> prev_time = time.time() <NEW_LINE> root_struct = create_node_info_from_python_chess_board(chess.Board()) <NEW_LINE> collected = [] <NEW_LINE> for j, game in enumerate(game_iterator(pgn_file)): <NEW_LINE> <INDENT> if j == max_games: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> if j % print_interval == 0: <NEW_LINE> <INDENT> print("%d games complete with %d boards collected (not unique) with %f time since last print."%(j, len(collected), time.time() - prev_time)) <NEW_LINE> prev_time = time.time() <NEW_LINE> <DEDENT> struct = root_struct.copy() <NEW_LINE> move_iterator = (np.array([[move.from_square, move.to_square, 0 if move.promotion is None else move.promotion]]) for move in game.main_line()) <NEW_LINE> for move_ary in move_iterator: <NEW_LINE> <INDENT> push_moves(struct, move_ary) <NEW_LINE> collected.append(struct.copy()) <NEW_LINE> <DEDENT> <DEDENT> print("Completed board acquisition") <NEW_LINE> unique_structs = np.unique(np.array(collected)) <NEW_LINE> print("%d unique boards produced." % len(unique_structs)) <NEW_LINE> set_up_move_arrays(unique_structs) <NEW_LINE> structs_with_less_than_max_moves = unique_structs[unique_structs['children_left'] <= MAX_MOVES_LOOKED_AT] <NEW_LINE> print("Moves have now been set up.") <NEW_LINE> np.save(output_filename, structs_with_less_than_max_moves)
Goes through the games in a pgn file and saves the unique boards in NumPy file format (with dtype numpy_node_info_dtype). Prior to being saved the legal move arrays are set up. :param pgn_file: The pgn file to gather boards from :param output_filename: The name for the database file to be created :param max_games: The maximum number of games to go through :param print_interval: The number of games between each progress update NOTES: 1) This function uses a large amount of memory (mainly caused by np.unique)
625941bee1aae11d1e749bde
def get_qos_frames_count(self, iface, prio): <NEW_LINE> <INDENT> pytest.skip("Method is not supported by TRex TG")
Get captured QoS frames count. Args: iface(str): Interface name. prio(int): Priority. Returns: int: captured QoS frames count.
625941be4e4d5625662d4304
def get_default_projection(): <NEW_LINE> <INDENT> proj = osr.SpatialReference() <NEW_LINE> proj.ImportFromEPSG(4326) <NEW_LINE> return proj
Create a default projection object (wgs84)
625941be67a9b606de4a7de5
def get_dep_updates_and_hints( update_deps, recipe_dir, attrs, python_nodes, version_key, ): <NEW_LINE> <INDENT> if update_deps in ["hint", "hint-source", "update-source"]: <NEW_LINE> <INDENT> dep_comparison = get_depfinder_comparison( recipe_dir, attrs, python_nodes, ) <NEW_LINE> logger.info("source dep. comp: %s", pprint.pformat(dep_comparison)) <NEW_LINE> kind = "source code inspection" <NEW_LINE> hint = generate_dep_hint(dep_comparison, kind) <NEW_LINE> <DEDENT> elif update_deps in ["hint-grayskull", "update-grayskull"]: <NEW_LINE> <INDENT> dep_comparison, gs_recipe = get_grayskull_comparison( attrs, version_key=version_key, ) <NEW_LINE> logger.info("grayskull dep. comp: %s", pprint.pformat(dep_comparison)) <NEW_LINE> kind = "grayskull" <NEW_LINE> hint = generate_dep_hint(dep_comparison, kind) <NEW_LINE> <DEDENT> elif update_deps in ["hint-all", "update-all"]: <NEW_LINE> <INDENT> df_dep_comparison = get_depfinder_comparison( recipe_dir, attrs, python_nodes, ) <NEW_LINE> logger.info("source dep. comp: %s", pprint.pformat(df_dep_comparison)) <NEW_LINE> dep_comparison, gs_recipe = get_grayskull_comparison( attrs, version_key=version_key, ) <NEW_LINE> logger.info("grayskull dep. comp: %s", pprint.pformat(dep_comparison)) <NEW_LINE> dep_comparison = merge_dep_comparisons( copy.deepcopy(dep_comparison), copy.deepcopy(df_dep_comparison), ) <NEW_LINE> logger.info("combined dep. comp: %s", pprint.pformat(dep_comparison)) <NEW_LINE> kind = "source code inspection+grayskull" <NEW_LINE> hint = generate_dep_hint(dep_comparison, kind) <NEW_LINE> <DEDENT> return dep_comparison, hint
Get updated deps and hints. Parameters ---------- update_deps : str An update kind. See the code below for what is supported. recipe_dir : str The directory with the recipe. attrs : dict-like the bot node attrs for the feedstock. python_nodes : set-like A set of all bot python nodes. version_key : str The version key in the node attrs to use for grayskull. Returns ------- dep_comparison : dict of dicts of sets A dictionary with the dep updates. See the hint generation code below to understand its contents. hint : str The dependency update hint.
625941be187af65679ca5047
def _ask_pipenv(self, linter_name): <NEW_LINE> <INDENT> cwd = self.get_working_dir() <NEW_LINE> if cwd is None: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> pipfile = os.path.join(cwd, 'Pipfile') <NEW_LINE> if not os.path.exists(pipfile): <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> venv = ask_pipenv_for_venv(linter_name, cwd) <NEW_LINE> <DEDENT> except Exception: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> executable = find_script_by_python_env(venv, linter_name) <NEW_LINE> if not executable: <NEW_LINE> <INDENT> self.logger.info( "{} is not installed in the virtual env at '{}'." .format(linter_name, venv) ) <NEW_LINE> return None <NEW_LINE> <DEDENT> return executable
Ask pipenv for a virtual environment and maybe resolve the linter.
625941beec188e330fd5a6cd
def get_arch(image_meta): <NEW_LINE> <INDENT> if image_meta: <NEW_LINE> <INDENT> image_arch = image_meta.get('properties', {}).get('architecture') <NEW_LINE> if image_arch is not None: <NEW_LINE> <INDENT> return arch.canonicalize(image_arch) <NEW_LINE> <DEDENT> <DEDENT> return arch.from_host()
Determine the architecture of the guest (or host). This method determines the CPU architecture that must be supported by the hypervisor. It gets the (guest) arch info from image_meta properties, and it will fallback to the patron-compute (host) arch if no architecture info is provided in image_meta. :param image_meta: the metadata associated with the instance image :returns: guest (or host) architecture
625941be097d151d1a222d85
def top_item(self): <NEW_LINE> <INDENT> return self._stack[self._tos]
Returns the item at the top of the list.
625941be5510c4643540f314
def is_one_list_in_another_list(one='', two=''): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> return any(x in one for x in two) <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> return False
Compares two iterables. Returns True if ANY element of one iterable is in another iterable
625941be4a966d76dd550f36
def lossGradient(self, X, Y, Yhat): <NEW_LINE> <INDENT> return [max(0,i) for i in -dot(Y,X)]
The inputs are in the matrix X, the true values are in the vector Y; the predicted values are in Yhat; compute the gradient of the loss associated with these predictions.
625941be5f7d997b871749be
def withLockedLU(func): <NEW_LINE> <INDENT> @functools.wraps(func) <NEW_LINE> def wrapper(*args, **kwargs): <NEW_LINE> <INDENT> test = args[0] <NEW_LINE> assert isinstance(test, CmdlibTestCase) <NEW_LINE> op = None <NEW_LINE> for attr_name in ["op", "_op", "opcode", "_opcode"]: <NEW_LINE> <INDENT> if hasattr(test, attr_name): <NEW_LINE> <INDENT> op = getattr(test, attr_name) <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> assert op is not None <NEW_LINE> prepare_fn = None <NEW_LINE> if hasattr(test, "PrepareLU"): <NEW_LINE> <INDENT> prepare_fn = getattr(test, "PrepareLU") <NEW_LINE> assert callable(prepare_fn) <NEW_LINE> <DEDENT> def callWithLU(lu): <NEW_LINE> <INDENT> if prepare_fn: <NEW_LINE> <INDENT> prepare_fn(lu) <NEW_LINE> <DEDENT> new_args = list(args) <NEW_LINE> new_args.append(lu) <NEW_LINE> func(*new_args, **kwargs) <NEW_LINE> <DEDENT> return test.RunWithLockedLU(op, callWithLU) <NEW_LINE> <DEDENT> return wrapper
Convenience decorator which runs the decorated method with the LU. This uses L{CmdlibTestCase.RunWithLockedLU} to run the decorated method. For this to work, the opcode to run has to be an instance field named "op", "_op", "opcode" or "_opcode". If the instance has a method called "PrepareLU", this method is invoked with the LU right before the test method is called.
625941bed8ef3951e3243466
def stmt_while(self, result: Result) -> Result: <NEW_LINE> <INDENT> return result
<stmt> -> <whileStmt>
625941be009cb60464c632dd
def get_test_page(graph): <NEW_LINE> <INDENT> templatefile = open(os.path.join( os.path.dirname(os.path.abspath(__file__)), 'templates', 'test_page.html')) <NEW_LINE> template = templatefile.read() <NEW_LINE> template = template.replace("{{ graph.series_json|safe }}", graph.series_json) <NEW_LINE> template = template.replace("{{ graph.options_json|safe }}", graph.options_json) <NEW_LINE> out = open(os.path.join(os.getcwd(), 'testgraph.html'), 'w') <NEW_LINE> out.write(template) <NEW_LINE> out.close()
Renders a test page
625941be16aa5153ce3623a2
def __get_testfile_path(self, path): <NEW_LINE> <INDENT> path = os.path.relpath( path, os.path.join(self.__data_path, os.pardir)) <NEW_LINE> return path
Takes in a path, and returns the same path relative to the appropriate directory for the test file.
625941becad5886f8bd26f03
def bignum_mod_dec(num: str, m: int) -> int: <NEW_LINE> <INDENT> ans = 0 <NEW_LINE> for c in num: <NEW_LINE> <INDENT> ans = (ans * 10 + int(c)) % m <NEW_LINE> <DEDENT> return ans
十进制大整数取模运算
625941be0383005118ecf50d
def parsehtml(self, htmlsource): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> tree = fromstring(htmlsource) <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> logger.warning("HTML tree cannot be parsed") <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> category = tree.xpath('//*[@id="main"]//ol//a//text()')[1] <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> category = "" <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> title = " ".join(tree.xpath("//h1//text()")) <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> title = "" <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> teaser = "".join(tree.xpath('//*[@class="leadIn"]//text()')) <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> teaser = "" <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> text = ( " ".join(tree.xpath('//*[@class="textBlock"]//text()')) .replace("\xa0", "") .replace("\n", "") .replace("\t", "") .replace("\r", "") ) <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> text = "" <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> source = tree.xpath('//*[@class="created"]//text()') <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> source = "" <NEW_LINE> <DEDENT> extractedinfo = { "category": category, "teaser": teaser.strip(), "text": text.strip(), "bylinesource": source, "title": title, } <NEW_LINE> return extractedinfo
Parses the html source to retrieve info that is not in the RSS-keys In particular, it extracts the following keys (which should be available in most online news: section sth. like economy, sports, ... text the plain text of the article byline the author, e.g. "Bob Smith" byline_source sth like ANP
625941be15fb5d323cde0a35
def get_participante_as_row(self, id_participante): <NEW_LINE> <INDENT> if id_participante: <NEW_LINE> <INDENT> participante = self.get_participante(id_participante) <NEW_LINE> if participante: <NEW_LINE> <INDENT> participante_to_row = campos_sie_lower([participante])[0] <NEW_LINE> participante_to_row['id'] = participante_to_row['id_participante'] <NEW_LINE> participante_to_row['descr_mail'] = participante_to_row['descr_mail'].strip() <NEW_LINE> participante_to_row['funcao'] = participante_to_row['funcao_item'] <NEW_LINE> participante_to_row['dt_final'] = datetime.strptime(participante_to_row['dt_final'].strip(), '%Y-%m-%d').date() if participante_to_row[ 'dt_final'] else None <NEW_LINE> participante_to_row['dt_inicial'] = datetime.strptime(participante_to_row['dt_inicial'].strip(), '%Y-%m-%d').date() if participante_to_row[ 'dt_inicial'] else None <NEW_LINE> participante_row = Row(**participante_to_row) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> participante_row = None <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> participante_row = None <NEW_LINE> <DEDENT> return participante_row
Este método retorna um dicionário contendo os dados referentes ao participante convertidos para o formato compatível com o modelo no web2py. :param id_participante: integer, :return: gluon.pydal.objects.Row contendo as informações, None caso não exista participante com a id informada/erro.
625941be55399d3f055885dc
def load(self, filename): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> classifier = pickle.load(open(filename, "rb")) <NEW_LINE> <DEDENT> except pickle.UnpicklingError: <NEW_LINE> <INDENT> raise <NEW_LINE> <DEDENT> except os.error: <NEW_LINE> <INDENT> raise <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self._remember(filename) <NEW_LINE> self.send("Classifier", classifier)
Load the object from filename and send it to output.
625941be26238365f5f0ed94
def if_content_matches(self, content_doc, log):
    """Return True if this rule's ``if-content`` selector matches something
    in *content_doc* (i.e. the rule should be executed), False otherwise.

    A rule without an ``if-content`` selector always matches.  When the
    selector is inverted, the sense of the match is negated.
    """
    if self.if_content is None:
        # No condition configured: the rule always applies.
        return True
    sel_type, els, attributes = self.select_elements(
        self.if_content, content_doc, theme=False)
    # Default: matched when the selector found any elements at all.
    matched = bool(els)
    if sel_type == 'elements':
        pass
    elif sel_type == 'children':
        # 'children': at least one matched element must have text or child
        # elements of its own.
        matched = False
        for el in els:
            if el.text or len(el):
                matched = True
                break
    elif sel_type == 'attributes':
        # 'attributes': at least one matched element must carry one of the
        # requested attributes (or any attribute when none were requested).
        matched = False
        for el in els:
            if attributes:
                for attr in attributes:
                    if attr in el.attrib:
                        matched = True
                        break
                if matched:
                    break
            elif el.attrib:
                matched = True
                break
    else:
        # select_elements() only produces the three types handled above.
        assert 0
    # An inverted selector succeeds exactly when the plain match fails.
    if ((not matched and not self.if_content.inverted)
            or (matched and self.if_content.inverted)):
        log.info(self,
                 'skipping rule because if-content="%s" does not match',
                 self.if_content)
        return False
    return True
Returns true if the if-content selector matches something, i.e., if this rule should be executed.
625941be92d797404e3040b3
def get_user_nodes(self, search_type, excluded_nodes=()):
    """Return all objects of the requested type which use this object.

    Direct members matching *search_type* (and not *excluded_nodes*) come
    first, followed by matches gathered recursively from members that are
    neither ignored nor of an excluded/matching type.  A recursion flag
    guards against cycles.

    Parameters
    ----------
    search_type : type or tuple of types
        The types we are looking for.
    excluded_nodes : tuple of types
        Types that are not returned and not recursed into.

    Returns
    -------
    list
        All objects of the requested type which contain self.
    """
    if self._recursion_in_progress or not self._user_nodes:
        return []
    self._recursion_in_progress = True
    # Direct hits among our own user nodes.
    found = [node for node in self._user_nodes
             if isinstance(node, search_type)
             and not isinstance(node, excluded_nodes)]
    # Recurse into members that are neither ignored nor excluded/matching.
    for node in self._user_nodes:
        if self.ignore(node):
            continue
        if isinstance(node, (search_type, excluded_nodes)):
            continue
        found.extend(node.get_user_nodes(search_type,
                                         excluded_nodes=excluded_nodes))
    self._recursion_in_progress = False
    return found
Returns all objects of the requested type which use the current object Parameters ---------- search_type : ClassType or tuple of ClassTypes The types which we are looking for excluded_nodes : tuple of types Types for which get_user_nodes should not be called Results ------- list : List containing all objects of the requested type which contain self
625941be0383005118ecf50e
def back(self) -> "std::vector< int >::value_type const &":
    r"""back(iVector self) -> std::vector< int >::value_type const &

    Return the last element of the wrapped std::vector<int>.
    """
    # SWIG-generated proxy: delegates to the native wrapper module.
    return _moduleconnectorwrapper.iVector_back(self)
back(iVector self) -> std::vector< int >::value_type const &
625941bedd821e528d63b0d4
@blueprint_web.route('/register', methods=['POST'])
def register():
    """Attempt to register a new account from the posted form data.

    Returns a JSON string with a codified ``status``:
      0 - account created successfully
      1 - password does not meet minimum requirements
      2 - username already exists
      3 - email invalid or already exists
      4 - internal database error while saving
    The response also echoes ``username`` (and ``email`` where relevant).
    """
    if request.method == 'POST':
        # NOTE(review): getlist(...)[0] raises IndexError when a field is
        # missing from the form -- presumably the client always posts all
        # three fields; confirm before hardening.
        username = request.form.getlist('user[login]')[0]
        email = request.form.getlist('user[email]')[0]
        password = request.form.getlist('user[password]')[0]
        account = Account()
        if (password and validate_password(password)):
            if (username and
                    not account.check_username(username)['result']):
                if (email and isValidEmail(email) and
                        not account.check_email(email)['result']):
                    # Store only the hashed password, never the plain text.
                    hashed = hash_pass(str(password))
                    result = Account().save_account(
                        username, email, hashed)
                    if result:
                        return json.dumps({
                            'status': 0,
                            'username': username,
                            'email': email
                        })
                    else:
                        # save_account() reported a database failure.
                        return json.dumps({
                            'status': 4,
                            'username': username,
                        })
                else:
                    return json.dumps({
                        'status': 3,
                        'username': username,
                        'email': email
                    })
            else:
                return json.dumps({
                    'status': 2,
                    'username': username
                })
        else:
            return json.dumps({
                'status': 1,
                'username': username
            })
This router function attempts to register a new username. During its attempt, it returns a json string, with three possible values: - integer, codified indicator of registration attempt: - 0, successful account creation - 1, password doesn't meet minimum requirements - 2, username already exists in the database - 3, email already exists in the database - 4, internal database errors - username, string value of the user - email, is returned if the value already exists in the database, or the registration process was successful
625941be30dc7b7665901892
def run(self, accessor, opts):
    """Ask (fake) Graphite Web for points through the BigGraphite finder.

    See command.CommandBase for the ``run(accessor, opts)`` contract.
    """
    # Graphite modules read Django settings at import time, so configure the
    # environment before importing anything from graphite.
    os.environ["DJANGO_SETTINGS_MODULE"] = "graphite.settings"
    accessor.connect()
    from django.conf import settings as django_settings
    # Disable carbonlink and send all Graphite log streams to stdout ("-").
    django_settings.CARBONLINK_HOSTS = []
    django_settings.LOG_FILE_INFO = "-"
    django_settings.LOG_FILE_EXCEPTION = "-"
    django_settings.LOG_FILE_CACHE = "-"
    django_settings.LOG_FILE_RENDERING = "-"
    from graphite import util as graphite_util
    from biggraphite.plugins import graphite
    settings = bg_settings.settings_from_confattr(opts, prefix="")
    metadata_cache = bg_cache_factory.cache_from_settings(accessor, settings)
    metadata_cache.open()
    if opts.profile == "flamegraph":
        # Optional flamegraph profiling; samples are written to ./perf.log.
        flamegraph.start_profile_thread(fd=open("./perf.log", "w"))
    finder = graphite.Finder(
        directories=[], accessor=accessor, metadata_cache=metadata_cache
    )
    time_start = graphite_util.timestamp(opts.time_start)
    time_end = graphite_util.timestamp(opts.time_end)
    output_csv = opts.output_csv
    results = finder.fetch(opts.patterns, time_start, time_end)
    for i, result in enumerate(results):
        metric = _FakeMetric(result["path"])
        # time_info is (start, end, step); note this rebinds time_start and
        # time_end for the rest of the loop body.
        time_start, time_end, step = result["time_info"]
        points = []
        # NOTE(review): the inner loop reuses the outer loop variable ``i``;
        # harmless here because the outer ``i`` is unused afterwards, but
        # worth renaming.
        for i, v in enumerate(result["values"]):
            v = 0 if v is None else v  # render gaps (None) as zero
            points.append((time_start + step * i, v))
        result = (points, time_start, time_end, step)
        if not opts.no_output:
            self._display_metric(metric, result, output_csv)
Ask fake Graphite Web for points. See command.CommandBase.
625941beb830903b967e9837
def __init__(self, lattice_sizes, l1=0.0, l2=0.0):
    """Initializes an instance of `TorsionRegularizer`.

    Args:
      lattice_sizes: Lattice sizes of `tfl.layers.Lattice` to regularize.
      l1: l1 regularization amount. Either single float or list or tuple of
        floats to specify different regularization amount per dimension. The
        amount of regularization for the interaction term between two
        dimensions is the product of the corresponding per dimension amounts.
      l2: l2 regularization amount. Same format as `l1`.
    """
    # Values are stored as-is; they are consumed when the regularizer runs.
    self.lattice_sizes = lattice_sizes
    self.l1 = l1
    self.l2 = l2
Initializes an instance of `TorsionRegularizer`. Args: lattice_sizes: Lattice sizes of `tfl.layers.Lattice` to regularize. l1: l1 regularization amount. Either single float or list or tuple of floats to specify different regularization amount per dimension. The amount of regularization for the interaction term between two dimensions is the product of the corresponding per dimension amounts. l2: l2 regularization amount. Either single float or list or tuple of floats to specify different regularization amount per dimension. The amount of regularization for the interaction term between two dimensions is the product of the corresponding per dimension amounts.
625941be57b8e32f524833c3
def event_callback(self, event_data):
    """Record Ansible task progress and persist the full event payload.

    Invoked for every Ansible event.  Task/play markers found in the
    event's stdout are appended to ``self.active_tasks``; when the event
    carries a ``uuid``, its partial JSON file is merged with *event_data*
    and rewritten as ``<counter>-<uuid>.json`` under the artifact
    directory's ``job_events`` folder.

    :param event_data: dict describing a single Ansible event; the
        ``stdout`` key may be absent or None.
    """
    # Bug fix: the original did event_data.get('stdout', None) and then
    # called .startswith() on it, crashing with AttributeError when the
    # event carried no stdout.  Treat absent/None stdout as empty output.
    stdout = event_data.get('stdout') or ''
    if stdout.startswith('\r\nTASK'):
        # Extract the task name from "TASK [name] ***...".
        task_description = re.search(r'\[(.*)\]', stdout).group(1)
        logger.debug("Running task '{}'".format(task_description))
        self.active_tasks.append(task_description)
    elif stdout.startswith('\r\nPLAY RECAP'):
        self.active_tasks.append("<ENDED>")
    elif stdout.startswith('\r\nPLAY '):
        self.active_tasks.append("<STARTED>")
    if 'uuid' in event_data:
        filename = '{}-partial.json'.format(event_data['uuid'])
        partial_filename = os.path.join(self.config.artifact_dir,
                                        'job_events', filename)
        full_filename = os.path.join(self.config.artifact_dir,
                                     'job_events',
                                     '{}-{}.json'.format(
                                         event_data['counter'],
                                         event_data['uuid']))
        try:
            # Merge the partial event file written earlier with this
            # event's data, then write the complete record.
            with codecs.open(partial_filename, 'r', encoding='utf-8') as read_file:
                partial_event_data = json.load(read_file)
            event_data.update(partial_event_data)
            with codecs.open(full_filename, 'w', encoding='utf-8') as write_file:
                json.dump(event_data, write_file)
            if self.remove_partials:
                os.remove(partial_filename)
        except IOError as e:
            # Best-effort persistence: report but do not abort the run.
            print("Failed writing event data: {}".format(e))
Invoked for every Ansible event to collect stdout with the event data and store it for later use
625941be60cbc95b062c646c
def post_group(self):
    """Post the task generators from the group indexed by ``self.cur``,
    used by :py:meth:`waflib.Build.BuildContext.get_build_iterator`.

    Which generators are posted depends on the requested targets:
    ``'*'`` posts every generator, explicit targets post earlier groups
    fully and then only the exact matches, and no targets restricts
    posting to generators under the launch directory.
    """
    if self.targets == '*':
        # Post every generator in the current group.
        for tg in self.groups[self.cur]:
            try:
                f = tg.post
            except AttributeError:
                # Not a task generator (plain tasks have no post()).
                pass
            else:
                f()
    elif self.targets:
        if self.cur < self._min_grp:
            # Groups before the first targeted group are posted in full --
            # presumably so their outputs exist for the targeted
            # generators (standard waf behavior; confirm against waflib).
            for tg in self.groups[self.cur]:
                try:
                    f = tg.post
                except AttributeError:
                    pass
                else:
                    f()
        else:
            # At/after the targeted group: post only the exact matches.
            for tg in self._exact_tg:
                tg.post()
    else:
        # No explicit targets: post only generators whose path lies under
        # the directory the build was launched from.
        ln = self.launch_node()
        for tg in self.groups[self.cur]:
            try:
                f = tg.post
            except AttributeError:
                pass
            else:
                if tg.path.is_child_of(ln):
                    f()
Post the task generators from the group indexed by self.cur, used by :py:meth:`waflib.Build.BuildContext.get_build_iterator`
625941be5fdd1c0f98dc015c
def GetHostIds(self):
    """GetHostIds(self: WallSweep) -> IList[ElementId]

    Gets a list of all host walls on which the sweep resides.

    Returns: The list of wall ids.
    """
    # Stub body: presumably an IntelliSense/typing stub for the Revit .NET
    # API -- the real implementation lives in the native assembly.
    pass
GetHostIds(self: WallSweep) -> IList[ElementId] Gets a list of all host walls on which the sweep resides. Returns: The list of wall ids.
625941be293b9510aa2c31c2
def accept(self, id):
    """Accept a pending friendship request from user *id*.

    :return: iterable :class:`.Friendship` instances when the pending
        request can be accepted, otherwise None.
    """
    registros = Friendship.get(self.id, id).all()
    if not registros:
        return None
    pedido = registros[0]
    # Only a still-pending request can be accepted, and never by the user
    # who initiated it.
    if pedido.state != PENDING or pedido.action_user_id == self.id:
        return None
    return Friendship.update(registros, self.id, ACCEPTED)
:return: Iterable :class:`.Friendship` instances if the pending request can be accepted, otherwise None.
625941be9b70327d1c4e0cfe
def reclassifySelectedFeatures(self, destinationLayer, reclassificationDict):
    """Reclassify the currently selected canvas features into *destinationLayer*.

    Uses *reclassificationDict* to map each selected feature from its
    source layer into the destination layer.

    :param destinationLayer: vector layer that receives the new features.
    :param reclassificationDict: dict driving the reclassification of each
        selected feature.
    :return: int, number of features removed from their source layers after
        being successfully added to the destination.
    """
    selectedDict = self.getSelectedFeaturesFromCanvasLayers()
    parameterDict = self.getDestinationParameters(destinationLayer)
    reclassifyCount = 0
    destinationLayer.startEditing()
    # Group all destination edits into a single undo command.
    destinationLayer.beginEditCommand(self.tr('DsgTools reclassification'))
    # Process only layers that actually have selected features.
    for lyr, featureList in [(k, v) for k, v in selectedDict.items() if len(v[0]) > 0]:
        featureList = featureList[0] if isinstance(
            featureList, tuple) else featureList
        # Transformer to reproject features into the destination layer.
        coordinateTransformer = self.getCoordinateTransformer(
            lyr, destinationLayer)
        newFeatList, deleteList = self.featureHandler.reclassifyFeatures(
            featureList, lyr, reclassificationDict, coordinateTransformer, parameterDict)
        featuresAdded = destinationLayer.addFeatures(newFeatList)
        if featuresAdded:
            # Delete originals only after the copies were added successfully.
            lyr.startEditing()
            lyr.deleteFeatures(deleteList)
            reclassifyCount += len(deleteList)
    return reclassifyCount
Gets a destination layer and uses reclassificationDict to reclassify each selected feature
625941be50485f2cf553ccc2