code stringlengths 4 4.48k | docstring stringlengths 1 6.45k | _id stringlengths 24 24 |
|---|---|---|
def draw_string(self, img, x_pos, y_pos, text, color): <NEW_LINE> <INDENT> prev_char = 0 <NEW_LINE> pen = freetype.Vector() <NEW_LINE> pen.x = x_pos << 6 <NEW_LINE> pen.y = y_pos << 6 <NEW_LINE> hscale = 1.0 <NEW_LINE> matrix = freetype.Matrix(int(hscale)*0x10000, int(0.2*0x10000), int(0.0*0x10000), int(1.1*0x10000)) <NEW_LINE> cur_pen = freetype.Vector() <NEW_LINE> pen_translate = freetype.Vector() <NEW_LINE> image = copy.deepcopy(img) <NEW_LINE> for cur_char in text: <NEW_LINE> <INDENT> self._face.set_transform(matrix, pen_translate) <NEW_LINE> self._face.load_char(cur_char) <NEW_LINE> kerning = self._face.get_kerning(prev_char, cur_char) <NEW_LINE> pen.x += kerning.x <NEW_LINE> slot = self._face.glyph <NEW_LINE> bitmap = slot.bitmap <NEW_LINE> cur_pen.x = pen.x <NEW_LINE> cur_pen.y = pen.y - slot.bitmap_top * 64 <NEW_LINE> self.draw_ft_bitmap(image, bitmap, cur_pen, color) <NEW_LINE> pen.x += slot.advance.x <NEW_LINE> prev_char = cur_char <NEW_LINE> <DEDENT> return image | draw string
:param x_pos: text x-postion on img
:param y_pos: text y-postion on img
:param text: text (unicode)
:param color: text color
:return: image | 625941bf507cdc57c6306c04 |
def test_gist_without_filename(self): <NEW_LINE> <INDENT> self.setHtmlFromRst(self.sample_without_filename) <NEW_LINE> output = 'https://gist.github.com/fake_id2.js' <NEW_LINE> self.assertHTMLContains("script", attributes={"src": output}) <NEW_LINE> self.assertHTMLContains("pre", text="raw_gist") | Test the gist directive without filename | 625941bf21bff66bcd684884 |
def test_api_failure_on_docker_memory_limit(self): <NEW_LINE> <INDENT> response = Mock(status_code=500, reason='Internal Server Error') <NEW_LINE> self.mocks.configure_mock( 'docker_client', { 'exec_create.side_effect': DockerAPIError( 'Failure creating container', response, 'Failure creating container'), }) <NEW_LINE> build_env = DockerBuildEnvironment( version=self.version, project=self.project, build={'id': DUMMY_BUILD_ID}, ) <NEW_LINE> with build_env: <NEW_LINE> <INDENT> build_env.run('echo test', cwd='/tmp') <NEW_LINE> <DEDENT> self.assertEqual(build_env.commands[0].exit_code, -1) <NEW_LINE> self.assertEqual(build_env.commands[0].error, None) <NEW_LINE> self.assertTrue(build_env.failed) <NEW_LINE> self.assertFalse(self.mocks.api()(DUMMY_BUILD_ID).put.called) <NEW_LINE> self.mocks.mocks['api_v2.build']().put.assert_called_with({ 'id': DUMMY_BUILD_ID, 'version': self.version.pk, 'success': False, 'project': self.project.pk, 'setup_error': u'', 'exit_code': -1, 'length': mock.ANY, 'error': '', 'setup': u'', 'output': u'', 'state': u'finished', 'builder': mock.ANY, }) | Docker exec_create raised memory issue on `exec` | 625941bf3346ee7daa2b2c99 |
def process_or_validate_classifier_output_features( output_features, class_labels, supports_class_scores=True ): <NEW_LINE> <INDENT> def raise_error(msg): <NEW_LINE> <INDENT> raise ValueError("Classifier error: %s" % msg) <NEW_LINE> <DEDENT> class_labels = list(class_labels) <NEW_LINE> _int_types = _integer_types + (bool, _np.bool_, _np.int32, _np.int64) <NEW_LINE> if all(isinstance(cl, _int_types) for cl in class_labels): <NEW_LINE> <INDENT> output_class_type = datatypes.Int64() <NEW_LINE> <DEDENT> elif all(isinstance(cl, _string_types) for cl in class_labels): <NEW_LINE> <INDENT> output_class_type = datatypes.String() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError("Class labels must be all of type int or all of type string.") <NEW_LINE> <DEDENT> if output_features is None: <NEW_LINE> <INDENT> out = [("classLabel", output_class_type)] <NEW_LINE> if supports_class_scores: <NEW_LINE> <INDENT> out += [("classProbability", datatypes.Dictionary(output_class_type))] <NEW_LINE> <DEDENT> <DEDENT> elif isinstance(output_features, _string_types): <NEW_LINE> <INDENT> out = [(output_features, output_class_type)] <NEW_LINE> if supports_class_scores: <NEW_LINE> <INDENT> out += [("classProbability", datatypes.Dictionary(output_class_type))] <NEW_LINE> <DEDENT> <DEDENT> elif ( isinstance(output_features, (list, tuple)) and all(isinstance(fn, _string_types) for fn in output_features) and len(output_features) == 2 ): <NEW_LINE> <INDENT> if supports_class_scores: <NEW_LINE> <INDENT> out = [ (output_features[0], output_class_type), (output_features[1], datatypes.Dictionary(output_class_type)), ] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError( "Classifier model (as trained) does not support output scores for classes." 
) <NEW_LINE> <DEDENT> <DEDENT> elif is_valid_feature_list(output_features): <NEW_LINE> <INDENT> output_features = [ (k, datatypes._normalize_datatype(dt)) for k, dt in output_features ] <NEW_LINE> if len(output_features) == 1 or not supports_class_scores: <NEW_LINE> <INDENT> if not output_features[0][1] == output_class_type: <NEW_LINE> <INDENT> raise ValueError( "Type of output class feature does not match type of class labels." ) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> if isinstance(output_features[0][1], datatypes.Dictionary) and isinstance( output_features[1][1], output_class_type ): <NEW_LINE> <INDENT> output_features[0], output_features[1] = ( output_features[1], output_features[0], ) <NEW_LINE> <DEDENT> if not isinstance(output_features[1][1], datatypes.Dictionary): <NEW_LINE> <INDENT> raise_error("Output features class scores should be dictionary type.") <NEW_LINE> <DEDENT> if output_features[1][1].key_type != output_class_type: <NEW_LINE> <INDENT> raise_error( "Class scores dictionary key type does not match type of class labels." ) <NEW_LINE> <DEDENT> if output_features[0][1] != output_class_type: <NEW_LINE> <INDENT> raise_error( "Specified type of output class does not match type of class labels." ) <NEW_LINE> <DEDENT> <DEDENT> out = output_features <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise_error("Form of output features not recognized") <NEW_LINE> <DEDENT> return out | Given a list of class labels and a list of output_features, validate the
list and return a valid version of output_features with all the correct
data type information included. | 625941bf2ae34c7f2600d061 |
def update(self, instance, validated_data): <NEW_LINE> <INDENT> instance.role_id = validated_data.get('role_id', instance.role_id) <NEW_LINE> instance.role_name = validated_data.get('role_name', instance.role_name) <NEW_LINE> instance.activity = validated_data.get('activity', instance.activity) <NEW_LINE> instance.save() <NEW_LINE> return instance | Update and return an existing `Role` instance, given the validated data. | 625941bf046cf37aa974cc79 |
def create_or_update_availabilityset(self): <NEW_LINE> <INDENT> self.log("Creating availabilityset {0}".format(self.name)) <NEW_LINE> try: <NEW_LINE> <INDENT> params_sku = self.compute_models.Sku( name=self.sku ) <NEW_LINE> params = self.compute_models.AvailabilitySet( location=self.location, tags=self.tags, platform_update_domain_count=self.platform_update_domain_count, platform_fault_domain_count=self.platform_fault_domain_count, sku=params_sku ) <NEW_LINE> response = self.compute_client.availability_sets.create_or_update(self.resource_group, self.name, params) <NEW_LINE> <DEDENT> except CloudError as e: <NEW_LINE> <INDENT> self.log('Error attempting to create the availability set.') <NEW_LINE> self.fail("Error creating the availability set: {0}".format(str(e))) <NEW_LINE> <DEDENT> return availability_set_to_dict(response) | Method calling the Azure SDK to create or update the AS.
:return: void | 625941bf82261d6c526ab3cb |
def add(self, additionalSel): <NEW_LINE> <INDENT> for cut in additionalSel.cuts: <NEW_LINE> <INDENT> self.cuts.append(cut) <NEW_LINE> <DEDENT> self._filter=None | add all cuts defined in a different filter to this one | 625941bf0383005118ecf513 |
def create_release_branch(old, new): <NEW_LINE> <INDENT> LOGGER.info("Bumping version from %s to %s", old, new) <NEW_LINE> run_cmd("git push --all origin") <NEW_LINE> run_cmd(f"git flow release start {new}") <NEW_LINE> with open(SETUP_PY_PATH, encoding="UTF-8") as f: <NEW_LINE> <INDENT> setup_file = f.readlines() <NEW_LINE> <DEDENT> version_line_num, version_line_content = [ (index, line) for index, line in enumerate(setup_file) if line.strip().lower().startswith("version=") ][0] <NEW_LINE> setup_file[version_line_num] = version_line_content.replace(old, new) <NEW_LINE> with open(SETUP_PY_PATH, "w", encoding="UTF-8") as f: <NEW_LINE> <INDENT> f.writelines(setup_file) <NEW_LINE> <DEDENT> run_cmd(f"git add {SETUP_PY_PATH}") <NEW_LINE> run_cmd(f'git commit -m "VB {new}"') <NEW_LINE> run_cmd(f"git push --set-upstream origin release/{new}") <NEW_LINE> run_cmd(f'git tag -a {new} -m ""') <NEW_LINE> run_cmd(f"git flow release finish -n {new}") <NEW_LINE> run_cmd("git push --all") | Creates (and completes!) a release branch with the new release number
Args:
old (str): the old release number (x.y.z)
new (str): the new release number (x.y.z) | 625941bf7d43ff24873a2bcd |
def validate_request(query: str) -> List[str]: <NEW_LINE> <INDENT> request_categories = query.split(DELIMITER) <NEW_LINE> if len(request_categories) == 0 or any( len(cat.strip()) == 0 for cat in request_categories ): <NEW_LINE> <INDENT> raise FeedIndexerError( f"Invalid archive specification '{query}'. Correct format is one " f"or more archive names delimited by '{DELIMITER}'. Each name can " f"be either of the form 'archive' or 'archive.category'. For " f"example: 'math+cs.CG' (all from math and only computational " f"geometry from computer science)." ) <NEW_LINE> <DEDENT> for category in request_categories: <NEW_LINE> <INDENT> parts = category.split(".") <NEW_LINE> if not parts[0] in taxonomy.ARCHIVES: <NEW_LINE> <INDENT> raise FeedIndexerError( f"Bad archive '{parts[0]}'. Valid archive names are: " f"{', '.join(taxonomy.ARCHIVES.keys())}." ) <NEW_LINE> <DEDENT> if len(parts) == 2 and category not in taxonomy.CATEGORIES: <NEW_LINE> <INDENT> skip = len(parts[0]) + 1 <NEW_LINE> groups = [ key[skip:] for key in taxonomy.CATEGORIES.keys() if key.startswith(parts[0] + ".") ] <NEW_LINE> raise FeedIndexerError( f"Bad subject class '{parts[1]}'. Valid subject classes for " f"the archive '{parts[0]}' are: {', '.join(groups)}." ) <NEW_LINE> <DEDENT> <DEDENT> return request_categories | Validate the provided archive/category specification.
Return a list of its named archives and categories.
Parameters
----------
query : str
A concatenation of archive/category specifiers separated by delimiter
characters.
Raises
------
RssIndexerError
If the provided archive string is malformed or specifies an invalid
archive or category name.
Returns
-------
request_categories : List[str]
If validation was as successful, a list of archive/category names.
Otherwise, and empty list. | 625941bf099cdd3c635f0b8c |
def clickProject3(self): <NEW_LINE> <INDENT> self.elementClick(locator="//img[@alt='" + self._project3 + "']//parent::div", locatorType="xpath") | Click the project 3 box that is still under development
:return: | 625941bf6fb2d068a760efca |
def mean(spec, x1, x2): <NEW_LINE> <INDENT> bin1 = bin(spec, x1) <NEW_LINE> bin2 = bin(spec, x2) <NEW_LINE> if bin1 == bin2 or x2 == (spec.x + spec.step / 2)[-1]: <NEW_LINE> <INDENT> return spec.y[bin1], spec.v[bin1] <NEW_LINE> <DEDENT> binbetween = range(bin1[0][0] + 1, bin2[0][0]) <NEW_LINE> flux1 = spec.y[bin1] * ((spec.x + spec.step / 2)[bin1] - x1) <NEW_LINE> flux2 = spec.y[bin2] * (x2 + (- spec.x + spec.step / 2)[bin2]) <NEW_LINE> fluxbetween = sum((spec.y * spec.step)[binbetween]) <NEW_LINE> retflux = (flux1 + flux2 + fluxbetween) / (x2 - x1) <NEW_LINE> var1 = spec.v[bin1] * ((spec.x + spec.step / 2)[bin1] - x1)**2 <NEW_LINE> var2 = spec.v[bin2] * (x2 + (- spec.x + spec.step / 2)[bin2])**2 <NEW_LINE> varbetween = sum((spec.v * spec.step**2)[binbetween]) <NEW_LINE> retvar = (var1 + var2 + varbetween) / (x2 - x1)**2 <NEW_LINE> if len(retflux) == 0: <NEW_LINE> <INDENT> raise ValueError("Bound error %f %f"%(x1, x2)) <NEW_LINE> <DEDENT> return retflux, retvar | Compute a mean value bewteen x1 and x1.
Compute the integral of the flux over the wavelength range defined as [x1,x2]
divided by the wavelength range in order to get a flux / wavelength.
the variance of this quantity is returned as a 2nd parameter
Raises ValueError if the spec range soesn't cover the intended bin width | 625941bf9f2886367277a7bf |
def get_access_key(): <NEW_LINE> <INDENT> return get_config_handler().get_access_key() | Return the access key for the account user. | 625941bfde87d2750b85fcbf |
@executions.command(name='resume', short_help='Resume a stopped execution') <NEW_LINE> @aria.argument('execution-id') <NEW_LINE> @aria.options.dry_execution <NEW_LINE> @aria.options.retry_failed_tasks <NEW_LINE> @aria.options.mark_pattern() <NEW_LINE> @aria.options.verbose() <NEW_LINE> @aria.pass_model_storage <NEW_LINE> @aria.pass_resource_storage <NEW_LINE> @aria.pass_plugin_manager <NEW_LINE> @aria.pass_logger <NEW_LINE> def resume(execution_id, retry_failed_tasks, dry, mark_pattern, model_storage, resource_storage, plugin_manager, logger): <NEW_LINE> <INDENT> executor = DryExecutor() if dry else ProcessExecutor(plugin_manager=plugin_manager) <NEW_LINE> execution_to_resume = model_storage.execution.get(execution_id) <NEW_LINE> if execution_to_resume.status != execution_to_resume.CANCELLED: <NEW_LINE> <INDENT> logger.info("Can't resume execution {execution.id} - " "execution is in status {execution.status}. " "Can only resume executions in status {execution.CANCELLED}" .format(execution=execution_to_resume)) <NEW_LINE> return <NEW_LINE> <DEDENT> workflow_ctx = execution_preparer.ExecutionPreparer( model_storage, resource_storage, plugin_manager, execution_to_resume.service, execution_to_resume.workflow_name ).prepare(execution_id=execution_to_resume.id) <NEW_LINE> engine = Engine(executor) <NEW_LINE> logger.info('Resuming {0}execution. Press Ctrl+C cancel'.format('dry ' if dry else '')) <NEW_LINE> _run_execution(engine, workflow_ctx, logger, model_storage, dry, mark_pattern, engine_kwargs=dict(resuming=True, retry_failed=retry_failed_tasks)) | Resume a stopped execution
EXECUTION_ID is the unique ID of the execution. | 625941bf8e05c05ec3eea2a2 |
def union(self, rdds): <NEW_LINE> <INDENT> first_jrdd_deserializer = rdds[0]._jrdd_deserializer <NEW_LINE> if any(x._jrdd_deserializer != first_jrdd_deserializer for x in rdds): <NEW_LINE> <INDENT> rdds = [x._reserialize() for x in rdds] <NEW_LINE> <DEDENT> first = rdds[0]._jrdd <NEW_LINE> rest = [x._jrdd for x in rdds[1:]] <NEW_LINE> rest = ListConverter().convert(rest, self._gateway._gateway_client) <NEW_LINE> return RDD(self._jsc.union(first, rest), self, rdds[0]._jrdd_deserializer) | Build the union of a list of RDDs.
This supports unions() of RDDs with different serialized formats,
although this forces them to be reserialized using the default
serializer:
>>> path = os.path.join(tempdir, "union-text.txt")
>>> with open(path, "w") as testFile:
... testFile.write("Hello")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
[u'Hello']
>>> parallelized = sc.parallelize(["World!"])
>>> sorted(sc.union([textFile, parallelized]).collect())
[u'Hello', 'World!'] | 625941bf287bf620b61d3995 |
def parse_stack(st): <NEW_LINE> <INDENT> return u'%s(%d).%s' % (os.path.basename(st[1]), st[2], st[3]) | desc:
Generates a nice looking stacktrace for a single item.
arguments:
st: A stacktrace item.
returns:
A string for the stacktrace item. | 625941bf30dc7b7665901898 |
def test_st1(self): <NEW_LINE> <INDENT> self.driver.get(os.path.join(self.base_url, 'register')) <NEW_LINE> time.sleep(WAIT_TIME) <NEW_LINE> try: <NEW_LINE> <INDENT> search_box = self.driver.find_element_by_name('username') <NEW_LINE> search_box.send_keys('heng') <NEW_LINE> <DEDENT> except NoSuchElementException: <NEW_LINE> <INDENT> raise Exception('Cannot find Element name') <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> search_box = self.driver.find_element_by_name('email') <NEW_LINE> search_box.send_keys('sok@lim.ca') <NEW_LINE> <DEDENT> except NoSuchElementException: <NEW_LINE> <INDENT> raise Exception('Cannot find Element name') <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> search_box = self.driver.find_element_by_name('password') <NEW_LINE> search_box.send_keys('heng') <NEW_LINE> <DEDENT> except NoSuchElementException: <NEW_LINE> <INDENT> raise Exception('Cannot find Element name') <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> search_box = self.driver.find_element_by_name('confirm_pwd') <NEW_LINE> search_box.send_keys('heng') <NEW_LINE> <DEDENT> except NoSuchElementException: <NEW_LINE> <INDENT> raise Exception('Cannot find Element name') <NEW_LINE> <DEDENT> assert "No results found." not in self.driver.page_source <NEW_LINE> search_box.submit() <NEW_LINE> time.sleep(WAIT_TIME) <NEW_LINE> self.take_screen_shot('test_st1') | Test for registration. Note, It is the left over from the first sprint, so we do not implement all the test cases
:return: | 625941bf6fece00bbac2d66c |
def test01a(self): <NEW_LINE> <INDENT> a = np.arange(1e2) <NEW_LINE> b = bcolz.carray(a, chunklen=10, rootdir=self.rootdir) <NEW_LINE> sl = slice(1) <NEW_LINE> assert_array_equal(a[sl], b[sl], "Arrays are not equal") | Testing `__getitem()__` method with only a start | 625941bf26068e7796caec0a |
def calc_pvalue(p_value: float) -> str: <NEW_LINE> <INDENT> if p_value <= 0.0005: <NEW_LINE> <INDENT> p = '***' <NEW_LINE> <DEDENT> elif p_value <= 0.005: <NEW_LINE> <INDENT> p = '**' <NEW_LINE> <DEDENT> elif p_value <= 0.05: <NEW_LINE> <INDENT> p = '*' <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> p = 'ns' <NEW_LINE> <DEDENT> return p | returns a string with the pvalue ready to plot | 625941bfd6c5a10208143f78 |
def __init__(self, queue): <NEW_LINE> <INDENT> self.id = queue.id <NEW_LINE> self.is_canceled = queue.is_canceled <NEW_LINE> self.configuration = queue.get_execution_configuration() <NEW_LINE> self.interface = queue.get_job_interface() <NEW_LINE> self.priority = queue.priority <NEW_LINE> self.required_resources = queue.get_resources() <NEW_LINE> self.scheduled_agent_id = None <NEW_LINE> self._queue = queue <NEW_LINE> self._scheduled_node_id = None <NEW_LINE> self._scheduled_resources = None | Constructor
:param queue: The queue model
:type queue: :class:`queue.models.Queue` | 625941bf50485f2cf553ccc8 |
def get_metrics(perfdata, nag): <NEW_LINE> <INDENT> results = [] <NEW_LINE> for metric in perfdata.split(): <NEW_LINE> <INDENT> label = perfdata.split('=')[0] <NEW_LINE> path = "%s.%s.%s" % (nag.GRAPHITEPREFIX, nag.HOSTNAME, label) <NEW_LINE> value = nag.VALUE <NEW_LINE> results.append((path, value)) <NEW_LINE> <DEDENT> return results | returns a [(<path>, <value>)] where each is the metric to send to carbon | 625941bf50812a4eaa59c254 |
def osu_run1(data_set="osu_run1", sample_every=4): <NEW_LINE> <INDENT> path = os.path.join(DATAPATH, data_set) <NEW_LINE> if not data_available(data_set): <NEW_LINE> <INDENT> import zipfile <NEW_LINE> download_data(data_set) <NEW_LINE> zip = zipfile.ZipFile(os.path.join(DATAPATH, data_set, "run1TXT.ZIP"), "r") <NEW_LINE> for name in zip.namelist(): <NEW_LINE> <INDENT> zip.extract(name, path) <NEW_LINE> <DEDENT> <DEDENT> from . import mocap <NEW_LINE> Y, connect = mocap.load_text_data("Aug210106", path) <NEW_LINE> Y = Y[0:-1:sample_every, :] <NEW_LINE> return data_details_return({"Y": Y, "connect": connect}, data_set) | Ohio State University's Run1 motion capture data set. | 625941bf60cbc95b062c6472 |
def _multiply_loss_ggn_factor_transpose(self, loss_vecs): <NEW_LINE> <INDENT> mult_func = lambda loss, vec: loss.multiply_ggn_factor_transpose(vec) <NEW_LINE> return self._multiply_across_losses(mult_func, loss_vecs, coeff_mode="sqrt") | Multiply loss_vecs by transpose factor of GGN of total loss. | 625941bf0c0af96317bb8118 |
def play_game(board): <NEW_LINE> <INDENT> print("Ready to play ...\n") <NEW_LINE> discovered=["*"]*len(board) <NEW_LINE> guesses = 0 <NEW_LINE> while discovered != board: <NEW_LINE> <INDENT> print_board(discovered) <NEW_LINE> print("\n") <NEW_LINE> p1 = 0 <NEW_LINE> p2 = 0 <NEW_LINE> while p1 == p2 or p1 not in range(1, len(board) + 1) or p2 not in range(1, len(board) + 1) or discovered[p1-1]!='*' or discovered[p2-1]!='*': <NEW_LINE> <INDENT> print("") <NEW_LINE> print("Enter two distinct positions on the board that you want revealed.\ni.e two integers in the range [1, " + str(len(board)) + "]") <NEW_LINE> p1 = int(input("Enter position 1: ")) <NEW_LINE> p2 = int(input("Enter position 2: ")) <NEW_LINE> flag=True <NEW_LINE> if p1 not in range(1, len(board) + 1) or p2 not in range(1, len(board) + 1): <NEW_LINE> <INDENT> print("One of both of your chosen positions is out of range.") <NEW_LINE> flag=False <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if discovered[p1-1]!='*' or discovered[p2-1]!='*': <NEW_LINE> <INDENT> print("One or both of your chosen positions has already been paired.") <NEW_LINE> flag=False <NEW_LINE> <DEDENT> if p1 == p2: <NEW_LINE> <INDENT> print("You chose the same positions.") <NEW_LINE> flag=False <NEW_LINE> <DEDENT> <DEDENT> if not flag: <NEW_LINE> <INDENT> print("Please try again. This guess did not count. You current number of guesses is ", str(guesses)+".") <NEW_LINE> <DEDENT> <DEDENT> print_revealed(discovered, p1, p2, board) <NEW_LINE> wait_for_player() <NEW_LINE> print("\n"*50) <NEW_LINE> if discovered[p1 - 1] != discovered[p2 - 1]: <NEW_LINE> <INDENT> discovered[p1 - 1] = "*" <NEW_LINE> discovered[p2 - 1] = "*" <NEW_LINE> <DEDENT> guesses += 1 <NEW_LINE> <DEDENT> print("Congratulations! You completed the game with " + str(guesses) + " guesses. That is " + str(guesses - len(board)//2) + " more than the best possible.") | (list of str)->None
Plays a concentration game using the given board
Precondition: board a list representing a playable deck | 625941bf9b70327d1c4e0d04 |
def test_v_date_invalid(self): <NEW_LINE> <INDENT> date_str = "2013_44_01" <NEW_LINE> with self.assertRaises(ValueError): <NEW_LINE> <INDENT> v_date(date_str) <NEW_LINE> <DEDENT> date_str = "2013-44-01" <NEW_LINE> with self.assertRaises(ValueError): <NEW_LINE> <INDENT> v_date(date_str) | Test v_date validator with invalid dates | 625941bfd4950a0f3b08c281 |
def getMaxTimeOfImmobility(self, time = 20, startTime = 0, minSpeed = 10, skip = 12, smooth = 2, forGraph = False): <NEW_LINE> <INDENT> time = time * 60000 <NEW_LINE> start = self.findStart(startTime) <NEW_LINE> t0 = startTime <NEW_LINE> x0, y0 = self.data[start][7:9] <NEW_LINE> speeds = deque() <NEW_LINE> prev = t0 <NEW_LINE> maxIm = 0 <NEW_LINE> immobility = [] <NEW_LINE> for content in self.data[start+skip::skip]: <NEW_LINE> <INDENT> if content[1] > time: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> x1, y1 = content[7:9] <NEW_LINE> t1 = content[1] <NEW_LINE> speeds.append((sqrt(((x1 - x0)**2 + (y1 - y0)**2)) / self.trackerResolution) / ((t1 - t0) / 1000)) <NEW_LINE> if len(speeds) == smooth: <NEW_LINE> <INDENT> if sum(speeds) / len(speeds) > minSpeed: <NEW_LINE> <INDENT> maxIm = max(maxIm, t0 - prev) <NEW_LINE> if forGraph: <NEW_LINE> <INDENT> immobility.append((prev, t0)) <NEW_LINE> <DEDENT> prev = t1 <NEW_LINE> <DEDENT> speeds.popleft() <NEW_LINE> <DEDENT> x0, y0, t0 = x1, y1, t1 <NEW_LINE> <DEDENT> maxIm = max(maxIm, t0 - prev) <NEW_LINE> immobility.append((prev, t0)) <NEW_LINE> if forGraph: <NEW_LINE> <INDENT> return immobility <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return round(maxIm / 60000, 2) | returns maximum continuous time that the rat was immobile
minSpeed argument is in cm/s, smooth and skip are represented in data points | 625941bf004d5f362079a265 |
def test_handle_bad_request(): <NEW_LINE> <INDENT> router = MockRouter() <NEW_LINE> backend = RapidHttpBacked(name='test', router=router) <NEW_LINE> response = backend.handle_request(HttpRequest()) <NEW_LINE> assert_true(isinstance(response, HttpResponseBadRequest)) | handle_request must return a HttpResponse | 625941bfd10714528d5ffc10 |
def set(self, table, item, value): <NEW_LINE> <INDENT> command = 'UPDATE ' + str(table) + " SET item='" + str(value) + "' WHERE item='" + str(item) + "';" <NEW_LINE> self.cur.execute(command) <NEW_LINE> self.conn.commit() <NEW_LINE> print('\n' + command) | SQL set call
Args:
table (str): 'FROM' statement
item (str): 'WHERE' statement
value (str): 'SET' statement | 625941bf8da39b475bd64ea0 |
def AddValue(self, value, timestamp=None, offset=False): <NEW_LINE> <INDENT> timestamp = timestamp or int(time.time()) <NEW_LINE> Values.AddValue(self, timestamp, value, offset=offset) | Add a value to this TimeSeries.
Finds or creates the appropriate Values child object and adds the new
value to it.
Args:
value: integer
timestamp: UNIX timestamp; defaults to now
offset: if True, values are offsets from previous value | 625941bf63f4b57ef000104f |
def createWidgets(self): <NEW_LINE> <INDENT> titleLabel = Label(self,text='Jambalaya Text',fg='midnight blue',font='Verdana 30 bold') <NEW_LINE> titleLabel.grid(row=1,columnspan=3, sticky=N+E+W+S) <NEW_LINE> pic = PhotoImage(file='JumbledWords.gif') <NEW_LINE> imageLabel = Label(self, image=pic,borderwidth=0) <NEW_LINE> imageLabel.pic = pic <NEW_LINE> imageLabel.grid(row=3,column=1, sticky = W) <NEW_LINE> directionsLabel = Label(self, text = 'Directions: Guess all possible words\nusing all the provided letters \nin 60 seconds.Good luck!', font = 'Verdana 14') <NEW_LINE> directionsLabel.grid(row = 3, column = 2, sticky = E) <NEW_LINE> textLabel3 = Label(self, text='Jumbled Word:') <NEW_LINE> textLabel3.grid(row= 4,column=1,sticky=N+E+W+S) <NEW_LINE> self.string = StringVar() <NEW_LINE> stringLabel = Label(self, fg='blue', font='Verdana 20', textvariable = self.string) <NEW_LINE> stringLabel.grid(row=4,column=2,sticky=N+E+W+S) <NEW_LINE> self.randomWord() <NEW_LINE> self.string2 = StringVar() <NEW_LINE> self.textLabel2 = Label(self, textvariable= self.string2) <NEW_LINE> self.string2.set(str(self.lengthOfValues) +' words remaining') <NEW_LINE> self.textLabel2.grid(row=5,column = 1, sticky=N+E+W+S) <NEW_LINE> textLabel = Label(self, text='Word guess:') <NEW_LINE> textLabel.grid(row= 6,column=1,sticky=N+E+W+S) <NEW_LINE> self.textEntry = Entry(self) <NEW_LINE> self.textEntry.grid(row=6,column=2,sticky=N+E+W+S) <NEW_LINE> self.timer = 60 <NEW_LINE> self.timerLabel = Label(self,text= str(self.timer), fg = 'dark violet', font='Verdana 14 bold') <NEW_LINE> self.timerLabel.grid(row = 8, column = 1, sticky = N+E+W+S) <NEW_LINE> textLabelTimer = Label(self, text='Seconds Left: ', fg = 'dark violet', font='Verdana 14 bold') <NEW_LINE> textLabelTimer.grid(row= 7,column=1,sticky=N+E+W+S) <NEW_LINE> self.SubmitButton = Button(self, fg='red', bg='yellow', text='Guess', command=self.Compare) <NEW_LINE> self.SubmitButton.grid(row=7, column = 2, sticky=N+E+W+S) <NEW_LINE> textLabel2 
= Label(self, text='Past words that you have correctly guessed:') <NEW_LINE> textLabel2.grid(row=11,column=1, sticky=N+E+W+S) <NEW_LINE> self.AddingText = StringVar() <NEW_LINE> guessWords = Label(self, textvariable = self.AddingText) <NEW_LINE> guessWords.grid(row=11,column=2) <NEW_LINE> QuitButton2 = Button(self, fg='brown', bg='green', text='Exit Game', command=self.onQuitButtonClick2) <NEW_LINE> QuitButton2.grid(row=14, column = 2, sticky=E) <NEW_LINE> self.wrongLabel = Label(self,text='', fg = 'red',font='Verdana 20 bold' ) <NEW_LINE> self.wrongLabel.grid(row=13,column=1) | Makes the general gameboard:Creates the titles and labels for the
program; Creates Exit, Play Again, Guess buttons; Creates the textbox | 625941bff9cc0f698b14052d |
def transpose_time_pattern(self, reward_time: Series, time_up: float, time_down: float) -> Tuple[Any, ndarray]: <NEW_LINE> <INDENT> reward_time_round = np.around(reward_time.diff(), decimals=4) <NEW_LINE> reward_time_round[(reward_time_round != time_up) & (reward_time_round != time_down)] = 'C' <NEW_LINE> if time_up == time_down: <NEW_LINE> <INDENT> reward_time_round[reward_time_round == time_up] = 'A' <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> reward_time_round[reward_time_round == time_up] = 'A' <NEW_LINE> reward_time_round[reward_time_round == time_down] = 'B' <NEW_LINE> <DEDENT> f = np.array(reward_time_round) <NEW_LINE> f = f.sum() <NEW_LINE> return f, reward_time_round | Cette méthode convertie les intervalles entre les events en séquance alphabétique simple
:param reward_time: contient les temps en seconde
:param time_up: temps haut "A"
:param time_down: temps bas "B" | 625941bf5f7d997b871749c5 |
def parseFor(tokens, j): <NEW_LINE> <INDENT> (asgnCl, condCl, incrCl, codeCl, rc) = sepForCls(tokens, j); <NEW_LINE> pAsgns = parseForAssignments(asgnCl); <NEW_LINE> pCond = parseExp(condCl); <NEW_LINE> pIncrs = parseForAssignments(incrCl); <NEW_LINE> pCode = yacc(codeCl) + pIncrs; <NEW_LINE> map(tree.append, pAsgns); <NEW_LINE> tree.append(['while', pCond, pCode]); <NEW_LINE> return rc + 1; | Helps yacc(..) in parsing for stmt (as while). | 625941bfe1aae11d1e749be5 |
def get_center(square_num): <NEW_LINE> <INDENT> half_square = SQUARE_SIZE // 2 <NEW_LINE> (x_ll, y_ll) = get_LL_corner(square_num) <NEW_LINE> return (x_ll + half_square, y_ll + half_square) | Returns the center coordinate of the square_num | 625941bf73bcbd0ca4b2bfa6 |
def __inner_predict(self, data_idx): <NEW_LINE> <INDENT> if data_idx >= self.__num_dataset: <NEW_LINE> <INDENT> raise ValueError("Data_idx should be smaller than number of dataset") <NEW_LINE> <DEDENT> if self.__inner_predict_buffer[data_idx] is None: <NEW_LINE> <INDENT> if data_idx == 0: <NEW_LINE> <INDENT> n_preds = self.train_set.num_data() * self.__num_class <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> n_preds = self.valid_sets[data_idx - 1].num_data() * self.__num_class <NEW_LINE> <DEDENT> self.__inner_predict_buffer[data_idx] = np.zeros(n_preds, dtype=np.float64) <NEW_LINE> <DEDENT> if not self.__is_predicted_cur_iter[data_idx]: <NEW_LINE> <INDENT> tmp_out_len = ctypes.c_int64(0) <NEW_LINE> data_ptr = self.__inner_predict_buffer[data_idx].ctypes.data_as(ctypes.POINTER(ctypes.c_double)) <NEW_LINE> _safe_call(_LIB.LGBM_BoosterGetPredict( self.handle, ctypes.c_int(data_idx), ctypes.byref(tmp_out_len), data_ptr)) <NEW_LINE> if tmp_out_len.value != len(self.__inner_predict_buffer[data_idx]): <NEW_LINE> <INDENT> raise ValueError("Wrong length of predict results for data %d" % (data_idx)) <NEW_LINE> <DEDENT> self.__is_predicted_cur_iter[data_idx] = True <NEW_LINE> <DEDENT> return self.__inner_predict_buffer[data_idx] | Predict for training and validation dataset | 625941bf07d97122c41787b6 |
@with_debug_logging <NEW_LINE> def normalize_journal_titles(obj, eng): <NEW_LINE> <INDENT> publications = obj.data.get('publication_info', []) <NEW_LINE> for publication in publications: <NEW_LINE> <INDENT> normalize_journal_title_entry(obj, publication, add_inspire_categories=True) <NEW_LINE> <DEDENT> references = obj.data.get("references", []) <NEW_LINE> for reference in references: <NEW_LINE> <INDENT> publication_info = get_value(reference, 'reference.publication_info') <NEW_LINE> if not publication_info: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> normalize_journal_title_entry(obj, publication_info) <NEW_LINE> <DEDENT> if obj.extra_data.get('journal_inspire_categories'): <NEW_LINE> <INDENT> obj.extra_data['journal_inspire_categories'] = dedupe_list(obj.extra_data['journal_inspire_categories']) | Normalize the journal titles
Normalize the journal titles stored in the `journal_title` field of each object
contained in `publication_info` and for each `publication_info.journal_title` in references.
Note:
The DB is queried in order to get the `$ref` of each journal and add it in
`journal_record` as well as inspire categories.
TODO:
Refactor: it must be checked that `normalize_journal_title` is appropriate.
Args:
obj: a workflow object.
eng: a workflow engine.
Returns:
None | 625941bf442bda511e8be34c |
def log(exception: Exception) -> Path: <NEW_LINE> <INDENT> log_file = make_log_file("error") <NEW_LINE> with open(log_file, "w") as f: <NEW_LINE> <INDENT> exc = traceback.format_exception( type(exception), exception, tb=exception.__traceback__ ) <NEW_LINE> for l in exc: <NEW_LINE> <INDENT> f.write(l) <NEW_LINE> <DEDENT> f.write("\n\nStack dump:\n\n") <NEW_LINE> for frame in get_stack_frames(): <NEW_LINE> <INDENT> stack_locals = json.dumps( dict(frame.f_locals), indent=4, default=str ) <NEW_LINE> f.write(f"{stack_locals}\n") <NEW_LINE> <DEDENT> <DEDENT> return log_file | Logs exception. Writes traceback and contents of the
interpreter's stack frames to a new log file. | 625941bf4428ac0f6e5ba721 |
def get_fast_rcnn_blob_names(is_training=True): <NEW_LINE> <INDENT> blob_names = ['rois'] <NEW_LINE> if is_training: <NEW_LINE> <INDENT> blob_names += ['labels_int32'] <NEW_LINE> <DEDENT> if is_training: <NEW_LINE> <INDENT> blob_names += ['bbox_targets'] <NEW_LINE> blob_names += ['bbox_inside_weights'] <NEW_LINE> blob_names += ['bbox_outside_weights'] <NEW_LINE> blob_names += ['rois_to_gt_ind_map'] <NEW_LINE> blob_names += ['rois_max_overlaps'] <NEW_LINE> <DEDENT> if is_training and cfg.MODEL.MASK_ON: <NEW_LINE> <INDENT> blob_names += ['mask_rois'] <NEW_LINE> blob_names += ['roi_has_mask_int32'] <NEW_LINE> blob_names += ['masks_int32'] <NEW_LINE> <DEDENT> if is_training and cfg.MODEL.KEYPOINTS_ON: <NEW_LINE> <INDENT> blob_names += ['keypoint_rois'] <NEW_LINE> blob_names += ['keypoint_locations_int32'] <NEW_LINE> blob_names += ['keypoint_weights'] <NEW_LINE> blob_names += ['keypoint_loss_normalizer'] <NEW_LINE> <DEDENT> if cfg.FPN.FPN_ON and cfg.FPN.MULTILEVEL_ROIS: <NEW_LINE> <INDENT> k_max = cfg.FPN.ROI_MAX_LEVEL <NEW_LINE> k_min = cfg.FPN.ROI_MIN_LEVEL <NEW_LINE> for lvl in range(k_min, k_max + 1): <NEW_LINE> <INDENT> blob_names += ['rois_fpn' + str(lvl)] <NEW_LINE> <DEDENT> blob_names += ['rois_idx_restore_int32'] <NEW_LINE> if is_training: <NEW_LINE> <INDENT> if cfg.MODEL.MASK_ON: <NEW_LINE> <INDENT> for lvl in range(k_min, k_max + 1): <NEW_LINE> <INDENT> blob_names += ['mask_rois_fpn' + str(lvl)] <NEW_LINE> <DEDENT> blob_names += ['mask_rois_idx_restore_int32'] <NEW_LINE> <DEDENT> if cfg.MODEL.KEYPOINTS_ON: <NEW_LINE> <INDENT> for lvl in range(k_min, k_max + 1): <NEW_LINE> <INDENT> blob_names += ['keypoint_rois_fpn' + str(lvl)] <NEW_LINE> <DEDENT> blob_names += ['keypoint_rois_idx_restore_int32'] <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return blob_names | Fast R-CNN blob names. | 625941bf8c0ade5d55d3e8e8 |
def eul57(n): <NEW_LINE> <INDENT> count = 0 <NEW_LINE> for i in range(1, n+1): <NEW_LINE> <INDENT> num = 1 <NEW_LINE> den = 2 <NEW_LINE> for j in range(i-1, 0, -1): <NEW_LINE> <INDENT> num += 2 * den <NEW_LINE> tmp = den <NEW_LINE> den = num <NEW_LINE> num = tmp <NEW_LINE> <DEDENT> num += den <NEW_LINE> if num_len(num) > num_len(den): <NEW_LINE> <INDENT> count += 1 <NEW_LINE> <DEDENT> <DEDENT> return count | In the first n = one-thousand expansions,
how many fractions contain a numerator with more digits than denominator? | 625941bfd10714528d5ffc11 |
def tag_id_meta(train, val): <NEW_LINE> <INDENT> tag_to_id = {} <NEW_LINE> id_to_tag = {} <NEW_LINE> data = [train, val] <NEW_LINE> tag_id = 0 <NEW_LINE> for df in data: <NEW_LINE> <INDENT> for idx in df.index: <NEW_LINE> <INDENT> for tag in df["tags"][idx]: <NEW_LINE> <INDENT> if tag not in tag_to_id: <NEW_LINE> <INDENT> tag_to_id[tag] = tag_id <NEW_LINE> id_to_tag[tag_id] = tag <NEW_LINE> tag_id += 1 <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> return tag_to_id, id_to_tag | train, val : list of pandas.DataFrame
@returns : (dictionary, dictionary) | 625941bf711fe17d825422a1 |
def _LJ_epsilonsigma_to_ab(coeffs): <NEW_LINE> <INDENT> A = 4.0 * coeffs['epsilon'] * coeffs['sigma']**12.0 <NEW_LINE> B = 4.0 * coeffs['epsilon'] * coeffs['sigma']**6.0 <NEW_LINE> return {"A": A, "B": B} | Convert epsilon/sigma representation to AB representation of the LJ
potential | 625941bf2c8b7c6e89b356f2 |
def test_publish_token_authorization_error(self): <NEW_LINE> <INDENT> token = 'asdfasdfasdfasdf' <NEW_LINE> load = {'user': 'foo', 'fun': 'test.arg', 'tgt': 'test_minion', 'arg': 'bar', 'kwargs': {'token': token}} <NEW_LINE> mock_token = {'token': token, 'eauth': 'foo', 'name': 'test'} <NEW_LINE> mock_ret = {'error': {'name': 'AuthorizationError', 'message': 'Authorization error occurred.'}} <NEW_LINE> with patch('salt.acl.PublisherACL.user_is_blacklisted', MagicMock(return_value=False)), patch('salt.acl.PublisherACL.cmd_is_blacklisted', MagicMock(return_value=False)), patch('salt.auth.LoadAuth.authenticate_token', MagicMock(return_value=mock_token)), patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])): <NEW_LINE> <INDENT> self.assertEqual(mock_ret, self.local_funcs.publish(load)) | Asserts that an AuthorizationError is returned when the token authenticates, but is not
authorized. | 625941bfec188e330fd5a6d4 |
def test_lookup(self): <NEW_LINE> <INDENT> dist_a = Distribution.objects.create(name='Foo', version_name='Bar', version_number='1.2') <NEW_LINE> dist_b = Distribution.objects.create(name='Foo', version_name='Lorem', version_number='1.1') <NEW_LINE> Lookup.objects.create(distribution=dist_a, content='eegahw2S\n') <NEW_LINE> pack = Package.objects.create(name='eegahw2S', latest_version='1.0.0', link='http://www.foo.bar', description='lorem ipsum', last_update=datetime.now()) <NEW_LINE> DisPack.objects.create(name='eegahw2S', version='1.0.0', distribution=dist_a, link='http://www.foo.bar', package=pack, package_version='1.0.0') <NEW_LINE> DisPack.objects.create(name='eegahw2S', version='1.0.0', distribution=dist_b, link='http://www.foo.bar', package=pack, package_version='1.0.0') <NEW_LINE> result = views.lookup(pack) <NEW_LINE> self.assertTrue(type(result) is dict) <NEW_LINE> self.assertEqual(result['result'], 1) <NEW_LINE> self.assertEqual(result['found'], 2) | Do a lookup with existing datas | 625941bf99fddb7c1c9de2c2 |
def _get_actor_id(actor: ObjectOrIDType) -> str: <NEW_LINE> <INDENT> if isinstance(actor, dict): <NEW_LINE> <INDENT> return actor["id"] <NEW_LINE> <DEDENT> return actor | Helper for retrieving an actor `id`. | 625941bfe64d504609d74770 |
@cythonized("u") <NEW_LINE> def dmp_ground_LC(f, u, K): <NEW_LINE> <INDENT> if not u: <NEW_LINE> <INDENT> return dup_LC(f, K) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return dmp_ground_LC(dmp_LC(f, K), u-1, K) | Returns ground leading coefficient. | 625941bf7047854f462a133c |
def initial_segmentation(image, PATCH_RATIO): <NEW_LINE> <INDENT> from skimage.filters import threshold_li, threshold_otsu, threshold_minimum <NEW_LINE> from skimage.morphology import closing, square <NEW_LINE> thresh_li = threshold_li(image) <NEW_LINE> thresh_otsu = threshold_otsu(image) <NEW_LINE> try: <NEW_LINE> <INDENT> thresh_min = threshold_minimum(image) <NEW_LINE> <DEDENT> except RuntimeError: <NEW_LINE> <INDENT> thresh_min = thresh_otsu + 100 <NEW_LINE> <DEDENT> if thresh_min < thresh_otsu: <NEW_LINE> <INDENT> threshold_steps = range(int(thresh_li ), int(thresh_otsu), abs(int((thresh_otsu-thresh_li)/5))) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> threshold_steps = [thresh_otsu] <NEW_LINE> <DEDENT> binary_image = closing(image > threshold_steps[0], square(3)) <NEW_LINE> label_image = label(binary_image) <NEW_LINE> regions = regionprops(label_image, image) <NEW_LINE> regions_above_noise = [] <NEW_LINE> areas = [] <NEW_LINE> for region in regions: <NEW_LINE> <INDENT> if region.area >= 9: <NEW_LINE> <INDENT> areas.append(region.area) <NEW_LINE> regions_above_noise.append(region) <NEW_LINE> <DEDENT> <DEDENT> median=np.median(areas) <NEW_LINE> patch = int((PATCH_RATIO*median)**(0.5)) <NEW_LINE> return (threshold_steps,patch, regions_above_noise, label_image) | Initialization of the image segmentation.
:param image: A numpy matrix. Original image.
:param PATCH_RATIO: Constant. Ratio between patch area against cell mean cell area. | 625941bf4c3428357757c25a |
def bandpass(X, cutoff=(0.125, 0.375), order=2, axis=0, fs=1.0, **kws): <NEW_LINE> <INDENT> b,a = _bandpass_ba(cutoff=cutoff, order=order, fs=fs) <NEW_LINE> Y = filtfilt(b, a, X, axis=axis, **kws) <NEW_LINE> return Y | Butterworth bandpass filter.
*Parameters*:
X: ndarray.
cutoff: (float, float), low/high-frequency cut, default=(0.125, 0.375) (unit is sample freqency).
order: int, default=2.
axis: int, default=0.
fs: number, sample freqency, default=1.0
*Return*:
Y: ndarray, highpassed X. | 625941bf94891a1f4081b9d8 |
def add_insect(self, insect): <NEW_LINE> <INDENT> if insect.is_ant: <NEW_LINE> <INDENT> if self.ant is None: <NEW_LINE> <INDENT> self.ant = insect <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if self.ant.can_contain(insect): <NEW_LINE> <INDENT> self.ant.contain_ant(insect) <NEW_LINE> <DEDENT> elif insect.can_contain(self.ant): <NEW_LINE> <INDENT> insect.contain_ant(self.ant) <NEW_LINE> self.ant = insect <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> assert self.ant is None, 'Two ants in {0}'.format(self) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> self.bees.append(insect) <NEW_LINE> <DEDENT> insect.place = self | Add an Insect to this Place.
There can be at most one Ant in a Place, unless exactly one of them is
a container ant, in which case there can be two. If add_insect
tries to add more Ants than is allowed, an assertion error is raised.
There can be any number of Bees in a Place. | 625941bf5166f23b2e1a5089 |
def _get_inputs_by_name(self, name): <NEW_LINE> <INDENT> inputs = self._collection.list() <NEW_LINE> logger.info('%s: %d inputs in total. filtering by name (%s)...' % (self.input_name, len(inputs), name)) <NEW_LINE> inputs_dict = merge_util.match_inputs(self.input_name, inputs, name) <NEW_LINE> logger.info('%s: %d inputs after filtering.' % (self.input_name, len(inputs_dict))) <NEW_LINE> return (inputs_dict, inputs) | Get the inputs and entities based on input name.
Args:
name: The name of input
Returns:
(dict, list): The dict of inputs and the original input list (entities) | 625941bfde87d2750b85fcc0 |
def update_current_location(client, access_token): <NEW_LINE> <INDENT> data = dict({ 'lat': 51.5112139, 'lng': -0.1198244}) <NEW_LINE> rv = client.put( '/user/location', headers=dict({ "Authorization": "Bearer %s" % access_token }), data=data) <NEW_LINE> return rv | Update user current location | 625941bfdc8b845886cb5464 |
def __init__ (self, point_coordinates, force_unique=True): <NEW_LINE> <INDENT> points = np.asarray(point_coordinates) <NEW_LINE> if points.ndim == 1: <NEW_LINE> <INDENT> points = points.reshape((len(points),1)) <NEW_LINE> <DEDENT> if force_unique: <NEW_LINE> <INDENT> unique = list({tuple(pt) for pt in points}) <NEW_LINE> points = np.array(unique) <NEW_LINE> <DEDENT> points = np.sort(points) <NEW_LINE> Lattice.__init__(self,points.shape[-1],origin=None, scale=None,rotation=None) <NEW_LINE> self.points = points <NEW_LINE> object.__delattr__(self,'origin') <NEW_LINE> object.__delattr__(self,'scale') <NEW_LINE> object.__delattr__(self,'rotation') | Parameters
---------
point_coordinates : ndarray, size=(npoints , ndims)
force_unique : boolean
Force the point coordinates to be unique | 625941bf6fb2d068a760efcb |
def __ge__(self, other): <NEW_LINE> <INDENT> return self.compare(self, other) >= 0 | Method implements the behavior of the '>=' operator.
Returns:
:bool: whether or not *self* >= *other*. | 625941bf26068e7796caec0b |
def test_load_works(self, monkeypatch): <NEW_LINE> <INDENT> monkeypatch.setattr( "numpy.load", lambda *args, **kwargs: np.zeros((528, 320, 456), dtype=np.float32), ) <NEW_LINE> monkeypatch.setattr( "atlalign.data.img_as_float32", lambda *args, **kwargs: np.zeros((320, 456), dtype=np.float32), ) <NEW_LINE> x_atlas = nissl_volume() <NEW_LINE> assert x_atlas.shape == (528, 320, 456, 1) <NEW_LINE> assert np.all(np.isfinite(x_atlas)) <NEW_LINE> assert x_atlas.min() >= 0 <NEW_LINE> assert x_atlas.max() <= 1 <NEW_LINE> assert x_atlas.dtype == np.float32 | Test that loading works. | 625941bf9c8ee82313fbb6a5 |
def _get_word_ngrams(n, words): <NEW_LINE> <INDENT> assert len(words) > 0 <NEW_LINE> assert n > 0 <NEW_LINE> return _get_ngrams(n, words) | Calculates word n-grams for multiple sentences.
| 625941bf0a50d4780f666dc0 |
def _next_break(primitive_boundaries, pos, expects): <NEW_LINE> <INDENT> for i in xrange(pos, len(primitive_boundaries)): <NEW_LINE> <INDENT> sb = primitive_boundaries[i][1] <NEW_LINE> if sb in expects: <NEW_LINE> <INDENT> return sb <NEW_LINE> <DEDENT> <DEDENT> return None | (internal)
| 625941bf498bea3a759b99e0 |
def new_category(name): <NEW_LINE> <INDENT> category = Category() <NEW_LINE> category.name = name <NEW_LINE> return category | new_category creates a new Category object given the name | 625941bf8c3a8732951582e8 |
def test_init(self): <NEW_LINE> <INDENT> self.assertLessEqual(self.pmo.max_pos_, 1.0) <NEW_LINE> self.assertGreaterEqual(self.pmo.max_pos_, 0.0) <NEW_LINE> self.assertLess(self.pmo.min_pos_, 1.0) <NEW_LINE> self.assertGreaterEqual(self.pmo.min_pos_, 0.0) <NEW_LINE> self.assertLessEqual(self.pmo.portfolio_size_, len(self.pmo.asset_basket_)) | Test initialization of PortfolopOptimizer class
| 625941bf796e427e537b04f4 |
def main(**kwargs): <NEW_LINE> <INDENT> pass | main function | 625941bf23e79379d52ee497 |
def parse_tracelogging_event(bv: binaryninja.binaryview.BinaryView, stream: Stream) -> Event: <NEW_LINE> <INDENT> channel = stream.read_u8() <NEW_LINE> if channel != 11: <NEW_LINE> <INDENT> raise ETWBreakerUnexpectedToken(11, channel) <NEW_LINE> <DEDENT> level = stream.read_u8() <NEW_LINE> opcode = stream.read_u8() <NEW_LINE> keyword = stream.read_u64() <NEW_LINE> size = stream.read_u16() <NEW_LINE> stream.read(size - 2) <NEW_LINE> return Event(bv, 0, 0, channel, level, opcode, 0, keyword) | A tracelogging event is identified by its channel number_of_channel
that are always 11. Actually we can't handle tracelogging event
because the lonk between event and provider is made during code execution
:ivar stream: current stream use to parse the event
:ret: An event object for tracelogging | 625941bf7047854f462a133d |
@jit <NEW_LINE> def greyscale_filter_numba(filename): <NEW_LINE> <INDENT> image = cv2.imread(filename) <NEW_LINE> imageAsRGB = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) <NEW_LINE> for i in range(len(imageAsRGB)): <NEW_LINE> <INDENT> for j in range(len(imageAsRGB[i])): <NEW_LINE> <INDENT> sum = (imageAsRGB[i, j, 0] * .29 + imageAsRGB[i, j, 1] * .72 + imageAsRGB[i, j, 2] * 0.07) <NEW_LINE> imageAsRGB[i, j, 0] = sum <NEW_LINE> imageAsRGB[i, j, 1] = sum <NEW_LINE> imageAsRGB[i, j, 2] = sum <NEW_LINE> <DEDENT> <DEDENT> cv2.imwrite("rain_grayscale.jpeg", imageAsRGB) <NEW_LINE> return imageAsRGB | Function to read image and make it greyscale using numba.
:param filename: image name in filepath
:return: new greyscaled 3d array that represents image | 625941bf07f4c71912b113b1 |
def newWorld(self, world): <NEW_LINE> <INDENT> if world.all_write == False: <NEW_LINE> <INDENT> self.in_publicworld = False <NEW_LINE> self.var_blockchcount = 0 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.in_publicworld = True | Hook to reset griefer blockcount abilities in new worlds if not op. | 625941bf7c178a314d6ef38c |
def var_mul ( v1 , v2 , name = '' , title = '' ) : <NEW_LINE> <INDENT> f1 = isinstance ( v1 , num_types ) <NEW_LINE> f2 = isinstance ( v2 , num_types ) <NEW_LINE> if f1 and f2 : <NEW_LINE> <INDENT> r = float ( v1 ) * float ( v2 ) <NEW_LINE> return ROOT.RooRealConstant.value ( r ) <NEW_LINE> <DEDENT> elif f1 : v1 = ROOT.RooRealConstant.value ( float ( v1 ) ) <NEW_LINE> elif f2 : v2 = ROOT.RooRealConstant.value ( float ( v2 ) ) <NEW_LINE> return Ostap.MoreRooFit.Product ( v1 , v2 , name , title ) | Product of two RooAbsReal objects
>>> v1 = ...
>>> v2 = ...
>>> v = var_mul ( v1 , v2 ) | 625941bf091ae35668666e93 |
def SetCookie(self, cookie): <NEW_LINE> <INDENT> return 0 | Asocia la cookie.
Parametros:
cookie -- la cookie a asociar | 625941bf5e10d32532c5ee58 |
def test_builddependency4(self): <NEW_LINE> <INDENT> bdep = BuildDependency.fromdata('deb', 'amd64', 'foo', '1.4', '4') <NEW_LINE> self.assertEqual(bdep.get('binarytype'), 'deb') <NEW_LINE> self.assertEqual(bdep.get('arch'), 'amd64') <NEW_LINE> self.assertEqual(bdep.get('name'), 'foo') <NEW_LINE> self.assertEqual(bdep.get('version'), '1.4') <NEW_LINE> self.assertEqual(bdep.get('release'), '4') <NEW_LINE> self.assertEqual(bdep.get('filename'), 'foo_1.4-4_amd64.deb') <NEW_LINE> self.assertIsNone(bdep.get('project')) <NEW_LINE> self.assertIsNone(bdep.get('repository')) <NEW_LINE> bdep = BuildDependency.fromdata('deb', 'all', 'baz', '4.2') <NEW_LINE> self.assertEqual(bdep.get('binarytype'), 'deb') <NEW_LINE> self.assertEqual(bdep.get('arch'), 'all') <NEW_LINE> self.assertEqual(bdep.get('name'), 'baz') <NEW_LINE> self.assertEqual(bdep.get('version'), '4.2') <NEW_LINE> self.assertIsNone(bdep.get('release')) <NEW_LINE> self.assertEqual(bdep.get('filename'), 'baz_4.2_all.deb') <NEW_LINE> self.assertIsNone(bdep.get('project')) <NEW_LINE> self.assertIsNone(bdep.get('repository')) <NEW_LINE> bdep = BuildDependency.fromdata('deb', 'amd64', 'bar', '1.0.0', '0', 'Debian:Etch', 'standard') <NEW_LINE> self.assertEqual(bdep.get('binarytype'), 'deb') <NEW_LINE> self.assertEqual(bdep.get('arch'), 'amd64') <NEW_LINE> self.assertEqual(bdep.get('name'), 'bar') <NEW_LINE> self.assertEqual(bdep.get('version'), '1.0.0') <NEW_LINE> self.assertEqual(bdep.get('release'), '0') <NEW_LINE> self.assertEqual(bdep.get('filename'), 'bar_1.0.0-0_amd64.deb') <NEW_LINE> self.assertEqual(bdep.get('project'), 'Debian:Etch') <NEW_LINE> self.assertEqual(bdep.get('repository'), 'standard') | test BuildDependency (fromdata binarytype deb) | 625941bff548e778e58cd4ad |
def get_build_architecture(): <NEW_LINE> <INDENT> prefix = " bit (" <NEW_LINE> i = sys.version.find(prefix) <NEW_LINE> if i == -1: <NEW_LINE> <INDENT> return "Intel" <NEW_LINE> <DEDENT> j = sys.version.find(")", i) <NEW_LINE> return sys.version[i+len(prefix):j] | Return the processor architecture.
Possible results are "Intel" or "AMD64". | 625941bf0a50d4780f666dc1 |
def remove_duplicates(list1): <NEW_LINE> <INDENT> if ( len(list1) == 0 ) or ( len(list1) == 1 ): <NEW_LINE> <INDENT> return list1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if list1[0] == list1[1]: <NEW_LINE> <INDENT> return remove_duplicates(list1[1:]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return list1[0:1] + remove_duplicates(list1[1:]) | Eliminate duplicates in a sorted list.
Returns a new sorted list with the same elements in list1, but
with no duplicates.
This function can be iterative. | 625941bfbe7bc26dc91cd535 |
def add(request): <NEW_LINE> <INDENT> form = request.form() <NEW_LINE> b = Blog.new(form) <NEW_LINE> return redirect('/blog/index') | 保存新的博文 | 625941bf236d856c2ad44707 |
def place(self, target: Any, value: Any, **kwargs: dict) -> None: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> target[self.name] = value <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> raise NullNameError(str(self)) <NEW_LINE> <DEDENT> except IndexError: <NEW_LINE> <INDENT> if isinstance(target, List) and isinstance(self.name, int): <NEW_LINE> <INDENT> for i in range(self.name + 1 - len(target)): <NEW_LINE> <INDENT> target.append(None) <NEW_LINE> <DEDENT> target[self.name] = value <NEW_LINE> return <NEW_LINE> <DEDENT> raise NullNameError(str(self)) | Sets ``value`` at Index/Key of ``target``
:param target: object to set ``value`` on.
:param value: value to set.
:return: None
:raises NullNameError: When Index/Key cannot be set
:raises TypeError: When ``target`` does not support ``__setitem__``
Changing an existing dict key:
>>> from gemma import Item
>>>
>>> data_dict = {"a": "a value", "b": "b value"}
>>> existing_item = Item("b")
>>>
>>> existing_item.place(data_dict, "changed")
>>> data_dict
{'a': 'a value', 'b': 'changed'}
Setting a new dict key:
>>> new_item = Item("c")
>>> new_item.place(data_dict, "new")
>>> data_dict
{'a': 'a value', 'b': 'changed', 'c': 'new'}
Changing an existing list index:
>>> data_list = ["zero", "one", "two"]
>>> item = Item(0)
>>>
>>> item.place(data_list, "changed")
>>> data_list
['changed', 'one', 'two']
Changing an index out of range does not result in :class:`NullNameError`,
as it does with :func:`Item.fetch`.
>>> out_of_index = Item(5)
>>> out_of_index.place(data_list, "new value")
>>> data_list
['changed', 'one', 'two', None, None, 'new value']
``None`` is inserted in any missing indexes between the last existing index and
the new index.
Attempting to place a value on a ``target`` that does not support
``__setitem__`` raises a ``TypeError``:
>>> data_tuple = ("zero", "one", "two")
>>> cannot_set = Item(0)
>>>
>>> cannot_set.place(data_tuple, "changed")
Traceback (most recent call last):
...
TypeError: 'tuple' object does not support item assignment
If the object would normally raise a ``KeyError`` or ``IndexError``, it is cast
to a :class:`NullNameError`.
Let us create a dict class that raises a ``KeyError`` when attempting to set any
key that is not present upon initialization:
>>> class StrictDict(dict):
... def __setitem__(self, item, value):
... if not item in self:
... raise KeyError
... super().__setitem__(item, value)
...
>>> strict = StrictDict({"a": "a value", "b": "b value"})
>>> strict["c"] = "changed"
Traceback (most recent call last):
...
KeyError
:class:`Item` 's place method will cast the ``KeyError`` to a
:class:`NullNameError`.
>>> raises_key = Item("c")
>>> raises_key.place(strict, "changed")
Traceback (most recent call last):
...
gemma._exceptions.NullNameError: [c] | 625941bf004d5f362079a266 |
def set_callbacks(self, shutdown, restart): <NEW_LINE> <INDENT> self.shutdowncb = shutdown <NEW_LINE> self.restartcb = restart | Sets callbacks for the global buttons
@param shutdown: shutdown callback
@param restart: restart callback | 625941bf67a9b606de4a7dec |
def tileswrap(ihtORsize, numtilings, floats, wrawidths, ints=None, readonly=False): <NEW_LINE> <INDENT> if ints is None: <NEW_LINE> <INDENT> ints = [] <NEW_LINE> <DEDENT> qfloats = [floor(f * numtilings) for f in floats] <NEW_LINE> Tiles = [] <NEW_LINE> for tiling in range(numtilings): <NEW_LINE> <INDENT> tilingX2 = tiling * 2 <NEW_LINE> coords = [tiling] <NEW_LINE> b = tiling <NEW_LINE> for q, width in zip_longest(qfloats, wrapwidths): <NEW_LINE> <INDENT> c = (q + b % numtilings) // numtilings <NEW_LINE> coords.append(c % width if width else c) <NEW_LINE> b += tilingX2 <NEW_LINE> <DEDENT> coords.extend(ints) <NEW_LINE> Tiles.append(hashcoords(coords, ihtORsize, readonly)) <NEW_LINE> <DEDENT> return Tiles | returns num-tilings tile indices corresponding to the floats and ints, wrapping some floats | 625941bf4f6381625f11496e |
def print_last_recorded_balance(table_names): <NEW_LINE> <INDENT> for table_name in table_names: <NEW_LINE> <INDENT> repository = TransactionsRepository(Table.get_from_config(table_name)) <NEW_LINE> latest_balance = repository.get_latest_balance() <NEW_LINE> print('Latest balance for {}: {}'.format(table_name, latest_balance)) <NEW_LINE> <DEDENT> print() | Print the latest recorded balance of the given tables.
:param list(str) table_names:
:return None: | 625941bfd18da76e23532404 |
def load_modules(self): <NEW_LINE> <INDENT> self.logger.info('Loading Modules') <NEW_LINE> module_paths = self._tree_read(self.config, ['module-directories'], []) <NEW_LINE> if self._tree_read(self.config, ['include-default-modules'], True): <NEW_LINE> <INDENT> module_paths.append(self.get_default_modules_path()) <NEW_LINE> <DEDENT> self.modules.extend([p() for p in load_modules(*module_paths)]) | Loading modules | 625941bf1f037a2d8b946130 |
def generateReferenceLink(self,anaphora,antecedent,confidence): <NEW_LINE> <INDENT> link = self.atomspace.add_link(types.ReferenceLink, [anaphora, antecedent], TruthValue(.98, TruthValue().confidence_to_count(confidence))) <NEW_LINE> log.fine("Generated a Reference :\n") <NEW_LINE> log.fine("{0}\n".format(link)) <NEW_LINE> log.fine("===========================================================") | Generates a reference Link for a pair of anaphora and antecedent with confidence "confidence". | 625941bfab23a570cc2500b1 |
def get_row(A, i): <NEW_LINE> <INDENT> return A[i] | return row i from A. | 625941bf16aa5153ce3623a9 |
@pytest.mark.provider( [VMwareProvider, RHEVMProvider, OpenStackProvider, AzureProvider], scope="module", selector=ONE_PER_TYPE ) <NEW_LINE> @pytest.mark.meta(blockers=[BZ(1702018, forced_streams=["5.11"])], automates=[1702018]) <NEW_LINE> def test_action_prevent_vm_retire(request, vm, vm_on, policy_for_testing): <NEW_LINE> <INDENT> policy_for_testing.assign_actions_to_event("VM Retire Request", ["Prevent current event from proceeding"]) <NEW_LINE> @request.addfinalizer <NEW_LINE> def _cleanup(): <NEW_LINE> <INDENT> policy_for_testing.unassign_events("VM Retire Request") <NEW_LINE> <DEDENT> vm.retire() <NEW_LINE> def _fail_func(): <NEW_LINE> <INDENT> view = navigate_to(vm, "Details") <NEW_LINE> vm.refresh_relationships(from_details=True) <NEW_LINE> view.toolbar.reload.click() <NEW_LINE> return <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> wait_for(lambda: vm.is_retired, num_sec=300, delay=15, message="Waiting for vm retiring", fail_func=_fail_func) <NEW_LINE> <DEDENT> except TimedOutError: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> pytest.fail(f"CFME did not prevent retire of the VM {vm.name}") | This test sets the policy that prevents VM retiring.
Metadata:
test_flag: actions, provision
Bugzilla:
1702018
Polarion:
assignee: dgaikwad
initialEstimate: 1/6h
casecomponent: Control | 625941bf45492302aab5e1f2 |
def _handle_rate_limit(self, r): <NEW_LINE> <INDENT> retry_time = int(r.headers['Retry-After']) <NEW_LINE> assert(retry_time > 0) <NEW_LINE> if self.debug: <NEW_LINE> <INDENT> print("-> Sleeping for {0} seconds".format(retry_time)) <NEW_LINE> <DEDENT> time.sleep(retry_time) | Sleep for length of retry time
:param r: request object | 625941bf56ac1b37e6264105 |
def tree_str(self, lemma=True, arrow=True): <NEW_LINE> <INDENT> s = '' <NEW_LINE> for lfnode in self.leafNodes: <NEW_LINE> <INDENT> if not arrow: <NEW_LINE> <INDENT> if lemma: s += '{} '.format(lfnode.word) <NEW_LINE> else: s += '{} '.format(lfnode.word_raw) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if lfnode.cat.monotonicity is not None: <NEW_LINE> <INDENT> if lemma: s += '{}{} '.format(lfnode.word, lfnode.cat.monotonicity) <NEW_LINE> else: s += '{}{} '.format(lfnode.word_raw, lfnode.cat.monotonicity) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if lemma: s += '{}{} '.format(lfnode.word, '=') <NEW_LINE> else: s += '{}{} '.format(lfnode.word_raw, '=') <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> if arrow: <NEW_LINE> <INDENT> s = s.replace('DOWN', '\u2193').replace('UP', '\u2191').replace('UNK', '=') <NEW_LINE> <DEDENT> return s | return the sentence as string | 625941bf8a43f66fc4b53f99 |
def do_unfollow_user(self, arg): <NEW_LINE> <INDENT> if self.login_status == False: <NEW_LINE> <INDENT> print_error_not_login() <NEW_LINE> return <NEW_LINE> <DEDENT> parameters = arg.split() <NEW_LINE> if len(parameters) != 1: <NEW_LINE> <INDENT> print_error_param_num() <NEW_LINE> return <NEW_LINE> <DEDENT> condition = "UserID=\"{}\"".format( parameters[0] ) <NEW_LINE> result = self.check_record_exist("Users", condition) <NEW_LINE> if len(result) == 0: <NEW_LINE> <INDENT> print_error_record_not_found("User") <NEW_LINE> <DEDENT> elif len(result) == 1: <NEW_LINE> <INDENT> user_name = result[0][1] <NEW_LINE> condition = "UserID=\"{}\" and FollowUserID=\"{}\"".format( self.user_id, parameters[0] ) <NEW_LINE> result = self.check_record_exist("UserFollowsUser", condition) <NEW_LINE> if len(result) == 0: <NEW_LINE> <INDENT> print_error_not_following(parameters[0]) <NEW_LINE> <DEDENT> elif len(result) == 1: <NEW_LINE> <INDENT> condition = "UserID=\"{}\" and FollowUserID=\"{}\"".format( self.user_id, parameters[0] ) <NEW_LINE> self.remove_record("UserFollowsUser", condition) <NEW_LINE> print_unfollow_success("user", user_name) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> print_error_duplicate_record_found() <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> print_error_duplicate_record_found() | Command: unfollow_user {UserID}
Description: Unfollow a user | 625941bff8510a7c17cf962c |
def savefile(cdata,writer): <NEW_LINE> <INDENT> writer.writerows(cdata) | 写入数据 | 625941bf5f7d997b871749c6 |
def crawl_daili66(self, page_count=8): <NEW_LINE> <INDENT> print('开始获取 66ip 的代理') <NEW_LINE> start_url = 'http://www.66ip.cn/{}.html' <NEW_LINE> urls = [start_url.format(page) for page in range(1, page_count + 1)] <NEW_LINE> for url in urls: <NEW_LINE> <INDENT> print('Crawling', url) <NEW_LINE> html = PageGetter().get_page(url) <NEW_LINE> if html: <NEW_LINE> <INDENT> doc = etree.HTML(html) <NEW_LINE> trs = doc.xpath('//div[contains(@class, "containerbox")]//table//tr[position()>1]') <NEW_LINE> for tr in trs: <NEW_LINE> <INDENT> tds = tr.getchildren() <NEW_LINE> ip = tds[0].text <NEW_LINE> port = tds[1].text <NEW_LINE> yield ':'.join([ip, port]) | 获取代理,来源为66ip
:param page_count: 页码
:return: 代理 | 625941bf4a966d76dd550f3e |
def spop(self, name): <NEW_LINE> <INDENT> return self.execute_command('SPOP', name) | Remove and return a random member of set ``name`` | 625941bfa219f33f3462889e |
def helium_tiny_ansatz(ab): <NEW_LINE> <INDENT> return Program( X(0), X(1), RX(np.pi/2, 0), H(1), CNOT(0, 1), RZ( ab[0] )(1), CNOT(0, 1), RX(-np.pi/2)(0), H(1), H(0), RX(np.pi/2)(1), CNOT(0, 1), RZ( ab[1] )(1), CNOT(0, 1), H(0), RX(-np.pi/2, 1) ) | in this trial, we also explicitly supply the UCC ansatz | 625941bf009cb60464c632e5 |
def thresh_vlim(data, thresh=0.01, Nbin=200): <NEW_LINE> <INDENT> thresh = thresh*np.ones(2) if np.size(thresh) < 2 else thresh <NEW_LINE> H, bin_edges = np.histogram(data[~np.isnan(data)], Nbin) <NEW_LINE> vmin, vmax = bin_edges[0], bin_edges[-1] <NEW_LINE> csH = H.cumsum() / np.nansum(H) <NEW_LINE> ilo = np.where(csH < thresh[0])[0] <NEW_LINE> ihi = np.where(csH > 1.0 - thresh[1])[0] <NEW_LINE> if len(ilo) > 0: vmin = bin_edges[np.nanmax(ilo) + 1] <NEW_LINE> if len(ihi) > 0: vmax = bin_edges[np.nanmin(ihi) + 1] <NEW_LINE> return vmin, vmax | Calculate colormap vlim based on a histogram threshold.
Excludes fraction on top and bottom given by scalar or 2-tuple thresh. | 625941bf21bff66bcd684886 |
def p_valor(p): <NEW_LINE> <INDENT> pass | valor : llamada
| identificador
| constante | 625941bf596a8972360899f4 |
def __init__(self, image_dims=[64, 64, 3]): <NEW_LINE> <INDENT> self.name = "Conv_model_2" <NEW_LINE> self.image_dims = image_dims | Sets hyper-parameters
Input:
image_dims: image dimensions (default [64, 64, 3])
bottleneck_dim: dimension of bottleneck layer (default 40) | 625941bf44b2445a33931fc8 |
def p_repeat_instr(self, p): <NEW_LINE> <INDENT> p[0] = AST.RepeatUntil(p[2], p[4]) | repeat_instr : REPEAT instructions UNTIL condition ';' | 625941bf1d351010ab855a4e |
def serve_version(self, **kwargs): <NEW_LINE> <INDENT> kwargs['_return_http_data_only'] = True <NEW_LINE> if kwargs.get('async_req'): <NEW_LINE> <INDENT> return self.__serve_version_with_http_info(**kwargs) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> (data) = self.__serve_version_with_http_info(**kwargs) <NEW_LINE> return data | serve_version # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.serve_version(async_req=True)
>>> result = thread.get()
:param async_req: bool
:return: str
If the method is called asynchronously,
returns the request thread. | 625941bf0383005118ecf515 |
def close(self): <NEW_LINE> <INDENT> if hasattr(self.array, 'close'): <NEW_LINE> <INDENT> self.array.close() | Close array. | 625941bfa05bb46b383ec755 |
def init():
    """Return top level command handler."""
    # Shared mutable context so the nested callbacks can see the CLI
    # options resolved by click.
    ctx = {}

    @click.command()
    @click.option('--cell', required=True,
                  envvar='TREADMILL_CELL',
                  callback=cli.handle_context_opt,
                  expose_value=False)
    @click.option('--wsapi', required=False, help='Websocket API.',
                  metavar='URL',
                  envvar='TREADMILL_WSAPI')
    @click.option('--check-state', is_flag=True, default=False)
    @click.option('--watch', is_flag=True, default=False)
    @click.option('--separator', default=' ')
    @click.argument('app')
    @click.argument('endpoint', required=False, default='*:*')
    def discovery(wsapi, check_state, watch, separator, app, endpoint):
        """Discover container endpoints for an app via the websocket API."""
        ctx['wsapi'] = wsapi
        # Endpoint spec is "<proto>:<name>"; a bare name means any proto.
        if ':' not in endpoint:
            endpoint = '*:' + endpoint
        proto, endpoint_name = endpoint.split(':')

        def on_message(result):
            """Print one endpoint record; return True to keep listening."""
            instance = ':'.join([
                result['name'], result['proto'], result['endpoint']
            ])
            host = result['host']
            port = result['port']
            hostport = '%s:%s' % (host, port)
            if host is not None:
                # NOTE(review): `.decode()` on a str here looks like a
                # Python 2 leftover — under Python 3 this would raise;
                # confirm intended runtime.
                record = [instance, hostport.decode()]
                if check_state:
                    # Optionally probe the endpoint with a 1s TCP connect
                    # to report it as up or down.
                    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                    sock.settimeout(1)
                    try:
                        sock.connect((host, int(port)))
                        sock.close()
                        state = 'up'
                    except socket.error:
                        state = 'down'
                    record.append(state)
                output = separator.join(record)
            else:
                # Host is None when the endpoint has disappeared.
                output = instance
            print(output)
            return True

        def on_error(result):
            """Report a websocket API error to stderr."""
            click.echo('Error: %s' % result['_error'], err=True)

        try:
            # snapshot mode (not watch) stops after the initial state dump.
            return ws_client.ws_loop(
                ctx['wsapi'],
                {'topic': '/endpoints',
                 'filter': app,
                 'proto': proto,
                 'endpoint': endpoint_name},
                not watch,
                on_message,
                on_error
            )
        except ws_client.ConnectionError:
            click.echo('Could not connect to any Websocket APIs', err=True)
            sys.exit(-1)

    return discovery
@task
def write_code_workspace_file(c, cw_path=None):
    """Generate code-workspace file definition.

    Some other tasks will call this one when needed, and since you cannot
    specify the file name there, if you want a specific one, you should
    call this task before.

    Most times you just can forget about this task and let it be run
    automatically whenever needed.

    If you don't define a workspace name, this task will reuse the 1st
    `doodba.*.code-workspace` file found inside the current directory.
    If none is found, it will default to
    `doodba.$(basename $PWD).code-workspace`.

    If you define it manually, remember to use the same prefix and suffix
    if you want it git-ignored by default.
    Example: `--cw-path doodba.my-custom-name.code-workspace`
    """
    if not cw_path:
        # Reuse the first matching workspace file, or fall back to a
        # project-named default.
        try:
            cw_path = next(iglob(str(PROJECT_ROOT / "doodba.*.code-workspace")))
        except StopIteration:
            cw_path = f"doodba.{PROJECT_ROOT.name}.code-workspace"
    if not Path(cw_path).is_absolute():
        cw_path = PROJECT_ROOT / cw_path
    # Start from the existing config when present and parseable.
    cw_config = {}
    try:
        with open(cw_path) as workspace_fd:
            cw_config = json.load(workspace_fd)
    except (FileNotFoundError, json.decoder.JSONDecodeError):
        pass
    # Rebuild the folders list: private addons plus every git subrepo.
    repo_paths = glob(str(SRC_PATH / "private"))
    repo_paths += glob(str(SRC_PATH / "*" / ".git" / ".."))
    cw_config["folders"] = [
        {"path": str(Path(repo).relative_to(PROJECT_ROOT))}
        for repo in sorted(repo_paths)
    ]
    # The project root itself always comes last.
    cw_config["folders"].append({"path": "."})
    with open(cw_path, "w") as workspace_fd:
        json.dump(cw_config, workspace_fd, indent=2)
        workspace_fd.write("\n")
def rescale(image, scale):
    """Rescale apply to image.

    new pixel = image * scale

    Args:
        image: a Image instance.
        scale: if int or float, value multiplied with each pixel.
               if tuple or list, a factor is randomly picked in the
               interval `[scale[0], scale[1])` and multiplied with
               each pixel.

    Returns:
        a Image instance.

    Raises:
        ValueError: on unsupported scale type.
    """
    if isinstance(scale, (tuple, list)):
        # Random uniform factor drawn from the given interval.
        factor = np.random.uniform(scale[0], scale[1])
    elif isinstance(scale, (int, float)):
        factor = scale
    else:
        raise ValueError('scale type should be one of int, float, tuple, list.')
    return image.point(lambda px: px * factor)
def test_input_symmetry_XOR(): <NEW_LINE> <INDENT> n = XOR() <NEW_LINE> k_s, true_k_s = n.input_symmetry(bound='upper', norm=False), 8 / 4 <NEW_LINE> assert (k_s == true_k_s), ('Input Symmetry (node,upper bound) for XOR node does not match. %s != %s' % (k_s, true_k_s)) <NEW_LINE> k_s, true_k_s = n.input_symmetry(bound='lower', norm=False), 8 / 4 <NEW_LINE> assert (k_s == true_k_s), ('Input Symmetry (node,lower bound) for XOR node does not match. %s != %s' % (k_s, true_k_s)) <NEW_LINE> k_s, true_k_s = n.input_symmetry(bound='upper', norm=True), (8 / 4) / 2 <NEW_LINE> assert (k_s == true_k_s), ('Input Symmetry (node,upper bound,normed) for XOR node does not match. %s != %s' % (k_s, true_k_s)) <NEW_LINE> k_s, true_k_s = n.input_symmetry(bound='lower', norm=True), (8 / 4) / 2 <NEW_LINE> assert (k_s == true_k_s), ('Input Symmetry (node,lower bound,normed) for XOR node does not match. %s != %s' % (k_s, true_k_s)) | Test Input Symmetry - XOR | 625941bfb830903b967e983f |
def path_info_split(path_info):
    """Splits off the first segment of the path.

    Returns (first_part, rest_of_path). first_part can be None (if
    PATH_INFO is empty), '' (if PATH_INFO is '/'), or a name without any
    /'s. rest_of_path can be '' or a string starting with /.
    """
    if not path_info:
        return None, ''
    assert path_info.startswith('/'), (
        "PATH_INFO should start with /: %r" % path_info)
    stripped = path_info.lstrip('/')
    first, slash, rest = stripped.partition('/')
    if slash:
        return first, '/' + rest
    # No further separator: the whole remainder is the first segment.
    return stripped, ''
def assignment(self):
    """Returns the Project object that this Project is a response to,
    or None if this Project is not a response to any other.
    """
    col = self.collaboration()
    if not col:
        return None
    parent = col.get_parent()
    # A collaboration without a parent means this is not a response.
    return parent.content_object if parent else None
def remove_conference(self, conf_id):
    """Remove a conference that is not active.

    Keyword argument:
        conf_id -- ID of conference to remove
    """
    # NOTE: 'usr_conferenc_id' spelling is the API's expected key.
    self.options.update({
        'usr_conferenc_id': conf_id,
        'actions': 'conference.remove',
    })
    return self.call(self.options)
def __init__(self, flow_graph):
    """DrawingArea constructor.

    Connect event handlers.

    Args:
        flow_graph: the flow graph rendered in this drawing area
    """
    # Modifier-key state, updated elsewhere from key events.
    self.ctrl_mask = False
    self.mod1_mask = False
    self._flow_graph = flow_graph
    gtk.DrawingArea.__init__(self)
    self.set_size_request(MIN_WINDOW_WIDTH, MIN_WINDOW_HEIGHT)
    # Wire window and mouse events to their handlers.
    self.connect('realize', self._handle_window_realize)
    self.connect('configure-event', self._handle_window_configure)
    self.connect('expose-event', self._handle_window_expose)
    self.connect('motion-notify-event', self._handle_mouse_motion)
    self.connect('button-press-event', self._handle_mouse_button_press)
    self.connect('button-release-event', self._handle_mouse_button_release)
    self.connect('scroll-event', self._handle_mouse_scroll)
    # Opt in to the event types the handlers above need.
    self.add_events(
        gtk.gdk.BUTTON_PRESS_MASK |
        gtk.gdk.POINTER_MOTION_MASK |
        gtk.gdk.BUTTON_RELEASE_MASK |
        gtk.gdk.LEAVE_NOTIFY_MASK |
        gtk.gdk.ENTER_NOTIFY_MASK |
        gtk.gdk.FOCUS_CHANGE_MASK
    )
    # Accept drag-and-drop (copy) of blocks onto the canvas.
    self.drag_dest_set(gtk.DEST_DEFAULT_ALL, DND_TARGETS, gtk.gdk.ACTION_COPY)
    self.connect('drag-data-received', self._handle_drag_data_received)
    # Track whether the pointer is inside this widget.
    self._focus_flag = False
    self.get_focus_flag = lambda: self._focus_flag
    def _handle_notify_event(widget, event, focus_flag): self._focus_flag = focus_flag
    self.connect('leave-notify-event', _handle_notify_event, False)
    self.connect('enter-notify-event', _handle_notify_event, True)
    self.set_flags(gtk.CAN_FOCUS)
    self.connect('focus-out-event', self._handle_focus_lost_event)
def model(flags):
    """MatchboxNet model.

    It is based on paper
    MatchboxNet: 1D Time-Channel Separable Convolutional Neural Network
    Architecture for Speech Commands Recognition
    https://arxiv.org/pdf/2004.08531.pdf

    Args:
        flags: data/model parameters

    Returns:
        Keras model for training

    Raises:
        ValueError: if any of input list has different length from any other;
          or if padding is not supported
    """
    # Per-block hyperparameters arrive as encoded strings; parse each
    # into a list with one entry per resnet block.
    ds_filters = utils.parse(flags.ds_filters)
    ds_repeat = utils.parse(flags.ds_repeat)
    ds_kernel_size = utils.parse(flags.ds_kernel_size)
    ds_stride = utils.parse(flags.ds_stride)
    ds_dilation = utils.parse(flags.ds_dilation)
    ds_residual = utils.parse(flags.ds_residual)
    ds_pool = utils.parse(flags.ds_pool)
    ds_padding = utils.parse(flags.ds_padding)
    ds_filter_separable = utils.parse(flags.ds_filter_separable)
    # All per-block lists must describe the same number of blocks.
    for l in (ds_repeat, ds_kernel_size, ds_stride, ds_dilation, ds_residual,
              ds_pool, ds_padding, ds_filter_separable):
        if len(ds_filters) != len(l):
            raise ValueError('all input lists have to be the same length')
    input_audio = tf.keras.layers.Input(
        shape=modes.get_input_data_shape(flags, modes.Modes.TRAINING),
        batch_size=flags.batch_size)
    net = input_audio
    if flags.preprocess == 'raw':
        # Raw audio: compute speech features (e.g. mel spectrogram) in-graph.
        net = speech_features.SpeechFeatures(
            speech_features.SpeechFeatures.get_params(flags))(
                net)
    # Insert a singleton spatial dim so the 2D conv/pool layers below can
    # be used for what is effectively 1D processing.
    net = tf.keras.backend.expand_dims(net, axis=2)
    # Stack of residual 1D time-channel separable conv blocks.
    for filters, repeat, ksize, stride, sep, dilation, res, pool, pad in zip(
        ds_filters, ds_repeat, ds_kernel_size, ds_stride, ds_filter_separable,
        ds_dilation, ds_residual, ds_pool, ds_padding):
        net = resnet_block(net, repeat, ksize, filters, dilation, stride, sep,
                           res, pad, flags.dropout, flags.activation,
                           flags.ds_scale, flags.data_stride <= 1)
        if pool > 1:
            # Downsample in time only (pool, 1).
            if flags.ds_max_pool:
                net = tf.keras.layers.MaxPooling2D(
                    pool_size=(pool, 1), strides=(pool, 1))(net)
            else:
                net = tf.keras.layers.AveragePooling2D(
                    pool_size=(pool, 1), strides=(pool, 1))(net)
    # Streaming-aware global pooling followed by the classifier head.
    net = stream.Stream(cell=tf.keras.layers.GlobalAveragePooling2D())(net)
    net = tf.keras.layers.Flatten()(net)
    net = tf.keras.layers.Dense(units=flags.label_count)(net)
    if flags.return_softmax:
        net = tf.keras.layers.Activation('softmax')(net)
    return tf.keras.Model(input_audio, net)
def test_delete_failure_not_accepted_right(self): <NEW_LINE> <INDENT> self.file_repository_right.accepted = False <NEW_LINE> self.file_repository_right.save() <NEW_LINE> url = reverse('file_repository') <NEW_LINE> data = { 'file_repository_id': self.file_repository.id, } <NEW_LINE> self.client.force_authenticate(user=self.test_user_obj) <NEW_LINE> response = self.client.delete(url, data) <NEW_LINE> self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) | Tests to delete a file repository without accepting the right | 625941bf30dc7b766590189a |
def set_start_state(self, msg):
    """Specify a start state for the group.

    Parameters
    ----------
    msg : moveit_msgs/RobotState

    Examples
    --------
    >>> from moveit_msgs.msg import RobotState
    >>> from sensor_msgs.msg import JointState
    >>> joint_state = JointState()
    >>> joint_state.header = Header()
    >>> joint_state.header.stamp = rospy.Time.now()
    >>> joint_state.name = ['joint_a', 'joint_b']
    >>> joint_state.position = [0.17, 0.34]
    >>> moveit_robot_state = RobotState()
    >>> moveit_robot_state.joint_state = joint_state
    >>> group.set_start_state(moveit_robot_state)
    """
    # The underlying C++ binding expects a serialized message string.
    serialized = conversions.msg_to_string(msg)
    self._g.set_start_state(serialized)
def test_table_from_bool_fields2(self):
    """Regression test for https://trac.assembla.com/pyfits/ticket/215

    Tests the case where a multi-field ndarray (not a recarray) containing
    a bool field is used to initialize a `BinTableHDU`.
    """
    records = np.array([(False,), (True,), (False,)], dtype=[('a', '?')])
    hdu = fits.BinTableHDU(data=records)
    # The boolean column must round-trip through the HDU unchanged.
    assert (hdu.data['a'] == records['a']).all()
def dropAllTables(self): <NEW_LINE> <INDENT> cursor = self.conn.cursor() <NEW_LINE> cursor.execute('''DROP TABLE IF EXISTS artist''') <NEW_LINE> cursor.execute('''DROP TABLE IF EXISTS album''') <NEW_LINE> cursor.execute('''DROP TABLE IF EXISTS track''') <NEW_LINE> self.conn.commit() <NEW_LINE> self.conn.close() | Drops all tables in the database | 625941bf4d74a7450ccd40f5 |
def negotiate_tls(tcp_conn, context):
    """Given an established TCP connection and a HTTP/2-appropriate TLS
    context, this function:

    1. wraps TLS around the TCP connection.
    2. confirms that HTTP/2 was negotiated and, if it was not, throws an
       error.
    """
    tls_conn = context.wrap_socket(tcp_conn, server_side=True)
    negotiated = tls_conn.selected_alpn_protocol()
    # Fall back to the legacy NPN extension only when ALPN negotiated
    # nothing at all.
    if negotiated is None:
        negotiated = tls_conn.selected_npn_protocol()
    if negotiated != "h2":
        raise RuntimeError("Didn't negotiate HTTP/2!")
    return tls_conn
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.