code
stringlengths
59
4.4k
docstring
stringlengths
5
7.69k
def _datetime_to_utc_int(date):
    """Return *date* as seconds since the Unix epoch, or None for None.

    The epoch is made timezone-aware (UTC) so the subtraction works with
    aware datetimes.
    """
    if date is None:
        return None
    utc_epoch = dsub_util.replace_timezone(datetime.utcfromtimestamp(0), pytz.utc)
    delta = date - utc_epoch
    return delta.total_seconds()
Convert the datetime into an integer number of seconds since the UTC epoch, or None if the input is None.
def exp_schedule(k=20, lam=0.005, limit=100):
    """One possible schedule function for simulated annealing.

    Returns a callable mapping time ``t`` to temperature ``k * exp(-lam*t)``,
    cut off to 0 once ``t`` reaches ``limit``.  The non-stdlib ``if_`` helper
    (which evaluated both branches eagerly) is replaced by the native
    conditional expression; the returned values are identical.
    """
    return lambda t: k * math.exp(-lam * t) if t < limit else 0
One possible schedule function for simulated annealing
def write_directory(self, directory: str) -> bool: current_md5_hash = self.get_namespace_hash() md5_hash_path = os.path.join(directory, f'{self.module_name}.belns.md5') if not os.path.exists(md5_hash_path): old_md5_hash = None else: with open(md5_hash_path) as fil...
Write a BEL namespace for identifiers, names, name hash, and mappings to the given directory.
def show_response_messages(response_json):
    """Echo every entry under the ``messages`` key of the given dict.

    Messages are color-coded by type; unknown types get default styling.
    """
    style_by_type = {
        'warning': {'fg': 'yellow'},
        'error': {'fg': 'red'},
    }
    for msg in response_json.get('messages', []):
        kwargs = style_by_type.get(msg['type'], {})
        click.secho(msg['text'], **kwargs)
Show all messages in the `messages` key of the given dict.
def _tag_matches_pattern(tag, pattern): for char1, char2 in zip(tag, pattern): if char2 not in ('%', char1): return False return True
Return true if MARC 'tag' matches a 'pattern'. 'pattern' is plain text, with % as wildcard Both parameters must be 3 characters long strings. .. doctest:: >>> _tag_matches_pattern("909", "909") True >>> _tag_matches_pattern("909", "9%9") True >>> _tag_matches_patt...
def create(self, name, region, size, image, ssh_keys=None, backups=None, ipv6=None, private_networking=None, wait=True): if ssh_keys and not isinstance(ssh_keys, (list, tuple)): raise TypeError("ssh_keys must be a list") resp = self.post(name=name, region=region, size=size, im...
Create a new droplet Parameters ---------- name: str Name of new droplet region: str slug for region (e.g., sfo1, nyc1) size: str slug for droplet size (e.g., 512mb, 1024mb) image: int or str image id (e.g., 12352) or slug ...
def _load_cpp4(self, filename):
    """Initialize this Grid from a CCP4 file."""
    reader = CCP4.CCP4()
    reader.read(filename)
    grid, edges = reader.histogramdd()
    # Re-run __init__ so the instance is rebuilt around the loaded data,
    # preserving the existing metadata.
    self.__init__(grid=grid, edges=edges, metadata=self.metadata)
Initializes Grid from a CCP4 file.
def contains_all(set1, set2, warn):
    """Check that every element of ``set2`` is present in ``set1``.

    :param set1: a set of values
    :param set2: a set of values
    :param warn: the error message raised when an element is missing
    :raises ValueError: with message ``warn`` on the first missing element
    :return: True when all values of set2 are in set1
    """
    if any(item not in set1 for item in set2):
        raise ValueError(warn)
    return True
Checks if all elements from set2 are in set1. :param set1: a set of values :param set2: a set of values :param warn: the error message that should be thrown when the sets are not contained :return: returns true if all values of set2 are in set1
def status(self):
    """Return the status of all blocks (empty list when no provider is set)."""
    if not self.provider:
        return []
    return self.provider.status(self.blocks.values())
Return status of all blocks.
def to_html(self, max_rows=None): n = len(self.data) div_id = _get_divid(self) out = r .format(div_id, self.namespace, n) out += r .format(div_id) out += r .format(self.annotation_metadata._repr_html_()) out += r .format(self.sandbox._repr_html_()) out += r .forma...
Render this annotation list in HTML Returns ------- rendered : str An HTML table containing this annotation's data.
def map_2d_array_to_masked_1d_array_from_array_2d_and_mask(mask, array_2d): total_image_pixels = mask_util.total_regular_pixels_from_mask(mask) array_1d = np.zeros(shape=total_image_pixels) index = 0 for y in range(mask.shape[0]): for x in range(mask.shape[1]): if not mask[y, x]: ...
For a 2D array and mask, map the values of all unmasked pixels to a 1D array. The pixel coordinate origin is at the top left corner of the 2D array and goes right-wards and downwards, such that for an array of shape (3,3) where all pixels are unmasked: - pixel [0,0] of the 2D array will correspond to inde...
def create_from_wkt(self, wkt, item_type, ingest_source, **attributes): geojson = load_wkt(wkt).__geo_interface__ vector = { 'type': "Feature", 'geometry': geojson, 'properties': { 'item_type': item_type, 'ingest_source': ingest_source,...
Create a single vector in the vector service Args: wkt (str): wkt representation of the geometry item_type (str): item_type of the vector ingest_source (str): source of the vector attributes: a set of key-value pairs of attributes Returns: id...
def getJsonFromApi(view, request):
    """Return json from querying a Web Api view.

    Args:
        view: django view function.
        request: http request object got from django.
    Returns:
        json format dictionary
    """
    response = view(request)
    return json.loads(response.content.decode('utf-8'))
Return json from querying Web Api Args: view: django view function. request: http request object got from django. Returns: json format dictionary
def is_rarfile(filename): mode = constants.RAR_OM_LIST_INCSPLIT archive = unrarlib.RAROpenArchiveDataEx(filename, mode=mode) try: handle = unrarlib.RAROpenArchiveEx(ctypes.byref(archive)) except unrarlib.UnrarException: return False unrarlib.RARCloseArchive(handle) return (archiv...
Return true if file is a valid RAR file.
def show(self, exclude=[]): ordering = self.constant_keys + self.varying_keys spec_lines = [', '.join(['%s=%s' % (k, s[k]) for k in ordering if (k in s) and (k not in exclude)]) for s in self.specs] print('\n'.join(['%d: %s' % (i,l) for (i,l...
Convenience method to inspect the available argument values in human-readable format. The ordering of keys is determined by how quickly they vary. The exclude list allows specific keys to be excluded for readability (e.g. to hide long, absolute filenames).
def create_position(self, params=None):
    """Create a position.

    http://dev.wheniwork.com/#create-update-position

    :param params: optional request-body fields.  The previous default was a
        mutable ``{}`` shared across calls; ``None`` now stands in for an
        empty body, which is backward compatible.
    :return: the position parsed from the response JSON.
    """
    body = params if params is not None else {}
    data = self._post_resource("/2/positions/", body)
    return self.position_from_json(data["position"])
Creates a position http://dev.wheniwork.com/#create-update-position
def select_samples(dbsamples, samples, pidx=None):
    """Return the sorted row indices of the given samples within dbsamples.

    Samples already excluded were filtered out of ``samples`` upstream
    (during _get_samples), so every name is expected to be present.
    """
    names = [sample.name for sample in samples]
    reference = list(dbsamples[pidx]) if pidx else list(dbsamples)
    return sorted(reference.index(name) for name in names)
Get the row index of samples that are included. If samples are in the 'excluded' they were already filtered out of 'samples' during _get_samples.
def update(self, shuffled=True, cohesion=100, separation=10, alignment=5, goal=20, limit=30): from random import shuffle if shuffled: shuffle(self) m1 = 1.0 m2 = 1.0 m3 = 1.0 m4...
Calculates the next motion frame for the flock.
def dist(self, src, tar): if src == tar: return 0.0 src = src.encode('utf-8') tar = tar.encode('utf-8') self._compressor.compress(src) src_comp = self._compressor.flush(zlib.Z_FULL_FLUSH) self._compressor.compress(tar) tar_comp = self._compressor.flush...
Return the NCD between two strings using zlib compression. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison Returns ------- float Compression distance Examples ...
def _google_v2_parse_arguments(args): if (args.zones and args.regions) or (not args.zones and not args.regions): raise ValueError('Exactly one of --regions and --zones must be specified') if args.machine_type and (args.min_cores or args.min_ram): raise ValueError( '--machine-type not supported toget...
Validates google-v2 arguments.
def _AssertIsLocal(path):
    """Raise an exception unless the given path is local.

    Used by filesystem functions that do not support remote operations yet.

    :param unicode path:
    :raises NotImplementedForRemotePathError: if the given path is not local
    """
    from six.moves.urllib.parse import urlparse
    parsed = urlparse(path)
    if not _UrlIsLocal(parsed):
        from ._exceptions import NotImplementedForRemotePathError
        raise NotImplementedForRemotePathError
Checks if a given path is local, raise an exception if not. This is used in filesystem functions that do not support remote operations yet. :param unicode path: :raises NotImplementedForRemotePathError: If the given path is not local
def phase_select_property(phase=None, s=None, l=None, g=None, V_over_F=None): r if phase == 's': return s elif phase == 'l': return l elif phase == 'g': return g elif phase == 'two-phase': return None elif phase is None: return None else: raise...
r'''Determines which phase's property should be set as a default, given the phase a chemical is, and the property values of various phases. For the case of liquid-gas phase, returns None. If the property is not available for the current phase, or if the current phase is not known, returns None. Paramet...
def set_doc_version(self, doc, value): if not self.doc_version_set: self.doc_version_set = True m = self.VERS_STR_REGEX.match(value) if m is None: raise SPDXValueError('Document::Version') else: doc.version = version.Version(major=i...
Set the document version. Raise SPDXValueError if malformed value, CardinalityError if already defined
def get_object(cls, api_token, droplet_id):
    """Class method returning a fully-loaded Droplet object by ID.

    Args:
        api_token (str): token
        droplet_id (int): droplet id
    """
    instance = cls(token=api_token, id=droplet_id)
    instance.load()
    return instance
Class method that will return a Droplet object by ID. Args: api_token (str): token droplet_id (int): droplet id
def assess(model, reaction, flux_coefficient_cutoff=0.001, solver=None): reaction = model.reactions.get_by_any(reaction)[0] with model as m: m.objective = reaction if _optimize_or_value(m, solver=solver) >= flux_coefficient_cutoff: return True else: results = dict...
Assesses production capacity. Assesses the capacity of the model to produce the precursors for the reaction and absorb the production of the reaction while the reaction is operating at, or above, the specified cutoff. Parameters ---------- model : cobra.Model The cobra model to assess ...
async def _on_connect(self): self._user_list, self._conv_list = ( await hangups.build_user_conversation_list(self._client) ) self._conv_list.on_event.add_observer(self._on_event) conv_picker = ConversationPickerWidget(self._conv_list, ...
Handle connecting for the first time.
def acquire( self, url, lock_type, lock_scope, lock_depth, lock_owner, timeout, principal, token_list, ): url = normalize_lock_root(url) self._lock.acquire_write() try: self._check_lock_permission( ...
Check for permissions and acquire a lock. On success return new lock dictionary. On error raise a DAVError with an embedded DAVErrorCondition.
def get_local_songs( filepaths, include_filters=None, exclude_filters=None, all_includes=False, all_excludes=False, exclude_patterns=None, max_depth=float('inf')): logger.info("Loading local songs...") supported_filepaths = get_supported_filepaths(filepaths, SUPPORTED_SONG_FORMATS, max_depth=max_depth) incl...
Load songs from local filepaths. Parameters: filepaths (list or str): Filepath(s) to search for music files. include_filters (list): A list of ``(field, pattern)`` tuples. Fields are any valid mutagen metadata fields. Patterns are Python regex patterns. Local songs are filtered out if the given metada...
def StandardizePath(path, strip=False):
    """Return *path* with every windows separator replaced by the unix one.

    The "standard path" uses only unix separators on every platform.

    :param bool strip: if True, also removes trailing separators from the end.
    """
    result = path.replace(SEPARATOR_WINDOWS, SEPARATOR_UNIX)
    if strip:
        result = result.rstrip(SEPARATOR_UNIX)
    return result
Replaces all slashes and backslashes with the target separator StandardPath: We are defining that the standard-path is the one with only back-slashes in it, either on Windows or any other platform. :param bool strip: If True, removes additional slashes from the end of the path.
def _run(self): if self._has_run: raise RuntimeError("This spreader instance has already been run: " "create a new Spreader object for a new run.") i = 1 while self.event_heap.size() > 0 and len(self._uninfected_stops) > 0: event = self.even...
Run the actual simulation.
def rmse(a, b):
    """Return the root mean square error between a and b."""
    diff = a - b
    return np.sqrt(np.mean(np.square(diff)))
Returns the root mean square error between a and b
def write(self, msg):
    """Write *msg* to this buffer and to the redirected stream, if any."""
    redirect = self.redirect
    if redirect is not None:
        redirect.write(msg)
    if six.PY2:
        # Python 2 callers may hand us bytes; normalize before buffering.
        from xdoctest.utils.util_str import ensure_unicode
        msg = ensure_unicode(msg)
    super(TeeStringIO, self).write(msg)
Write to this and the redirected stream
def setdefault(self, key: str, value: str) -> str:
    """Set header ``key`` to ``value`` only if it does not already exist.

    Returns the header value actually stored (existing or newly set).
    Keys are compared case-insensitively and stored latin-1 encoded.
    """
    target_key = key.lower().encode("latin-1")
    for existing_key, existing_value in self._list:
        if existing_key == target_key:
            return existing_value.decode("latin-1")
    self._list.append((target_key, value.encode("latin-1")))
    return value
If the header `key` does not exist, then set it to `value`. Returns the header value.
def createEncoder(): consumption_encoder = ScalarEncoder(21, 0.0, 100.0, n=50, name="consumption", clipInput=True) time_encoder = DateEncoder(timeOfDay=(21, 9.5), name="timestamp_timeOfDay") encoder = MultiEncoder() encoder.addEncoder("consumption", consumption_encoder) encoder.addEncoder("timestamp", t...
Create the encoder instance for our test and return it.
def create_rcontext(self, size, frame):
    """Create a recording-surface context for the bot to draw on.

    :param size: (width, height) of the bot
    :param frame: current frame number, stored on the instance
    """
    self.frame = frame
    width, height = size
    extents = (0, 0, width, height)
    surface = cairo.RecordingSurface(cairo.CONTENT_COLOR_ALPHA, extents)
    return cairo.Context(surface)
Creates a recording surface for the bot to draw on :param size: The width and height of bot
def nworker(data, chunk): oldlimit = set_mkl_thread_limit(1) with h5py.File(data.database.input, 'r') as io5: seqview = io5["bootsarr"][:] maparr = io5["bootsmap"][:, 0] smps = io5["quartets"][chunk:chunk+data._chunksize] nall_mask = seqview[:] == 78 rquartets = np.zeros((smp...
Worker to distribute work to jit funcs. Wraps everything on an engine to run single-threaded to maximize efficiency for multi-processing.
def nav_to_vcf_dir(ftp, build):
    """Navigate an open ftplib.FTP to the ClinVar VCF directory for *build*.

    Args:
        ftp: (type: ftplib.FTP) open connection to ftp.ncbi.nlm.nih.gov
        build: (type: string) genome build, either 'b37' or 'b38'
    Raises:
        IOError: when the build string is not recognized.
    """
    if build == 'b37':
        target = DIR_CLINVAR_VCF_B37
    elif build == 'b38':
        target = DIR_CLINVAR_VCF_B38
    else:
        raise IOError("Genome build not recognized.")
    ftp.cwd(target)
Navigate an open ftplib.FTP to appropriate directory for ClinVar VCF files. Args: ftp: (type: ftplib.FTP) an open connection to ftp.ncbi.nlm.nih.gov build: (type: string) genome build, either 'b37' or 'b38'
def __post_save_receiver(self, instance, **kwargs):
    """Signal handler for when a registered model has been saved.

    Logs the saved instance's class, then records the instance via
    ``save_record``; any extra signal kwargs are forwarded unchanged.
    """
    # Lazy %-style args keep the debug call cheap when logging is disabled.
    logger.debug('RECEIVE post_save FOR %s', instance.__class__)
    self.save_record(instance, **kwargs)
Signal handler for when a registered model has been saved.
def update_field_from_proxy(field_obj, pref_proxy):
    """Copy verbose_name, help_text and default from a PrefProxy onto a field.

    :param models.Field field_obj:
    :param PrefProxy pref_proxy:
    """
    for name in ('verbose_name', 'help_text', 'default'):
        setattr(field_obj, name, getattr(pref_proxy, name))
Updates field object with data from a PrefProxy object. :param models.Field field_obj: :param PrefProxy pref_proxy:
def _build_pipeline_request(self, task_view): job_metadata = task_view.job_metadata job_params = task_view.job_params job_resources = task_view.job_resources task_metadata = task_view.task_descriptors[0].task_metadata task_params = task_view.task_descriptors[0].task_params task_resources = task_...
Returns a Pipeline objects for the job.
def snapengage(parser, token):
    """SnapEngage set-up template tag.

    Renders Javascript code to set-up SnapEngage chat.  You must supply
    your widget ID in the ``SNAPENGAGE_WIDGET_ID`` setting.  The tag
    accepts no arguments.
    """
    tag_name, *rest = token.split_contents()
    if rest:
        raise TemplateSyntaxError("'%s' takes no arguments" % tag_name)
    return SnapEngageNode()
SnapEngage set-up template tag. Renders Javascript code to set-up SnapEngage chat. You must supply your widget ID in the ``SNAPENGAGE_WIDGET_ID`` setting.
def create_scan(self, host_ips): now = datetime.datetime.now() data = { "uuid": self.get_template_uuid(), "settings": { "name": "jackal-" + now.strftime("%Y-%m-%d %H:%M"), "text_targets": host_ips } } response = requests...
Creates a scan with the given host ips Returns the scan id of the created object.
def temporal_network(gtfs, start_time_ut=None, end_time_ut=None, route_type=None): events_df = gtfs.get_transit_events(start_time_ut=start_time_ut, end_time_ut=end_time_ut, ...
Compute the temporal network of the data, and return it as a pandas.DataFrame Parameters ---------- gtfs : gtfspy.GTFS start_time_ut: int | None start time of the time span (in unix time) end_time_ut: int | None end time of the time span (in unix time) route_type: int | None ...
def delete(ctx): user, project_name, _job = get_job_or_local(ctx.obj.get('project'), ctx.obj.get('job')) if not click.confirm("Are sure you want to delete job `{}`".format(_job)): click.echo('Existing without deleting job.') sys.exit(1) try: response = PolyaxonClient().job.delete_job...
Delete job. Uses [Caching](/references/polyaxon-cli/#caching) Example: \b ```bash $ polyaxon job delete ```
def limit_sentences (path, word_limit=100): word_count = 0 if isinstance(path, str): path = json_iter(path) for meta in path: if not isinstance(meta, SummarySent): p = SummarySent(**meta) else: p = meta sent_text = p.text.strip().split(" ") sen...
iterator for the most significant sentences, up to a specified limit
def start_tag(self): direct_attributes = (attribute.render(self) for attribute in self.render_attributes) attributes = () if hasattr(self, '_attributes'): attributes = ('{0}="{1}"'.format(key, value) for key, value in self.attributes.items...
Returns the elements HTML start tag
def callback(self, *incoming): message = incoming[0] if message: address, command = message[0], message[2] profile = self.get_profile(address) if profile is not None: try: getattr(profile, command)(self, message) exc...
Gets called by the CallbackManager if a new message was received
def create(self, server):
    """Create the challenge on the server."""
    payload = self.as_payload()
    return server.post('challenge_admin', payload, replacements={'slug': self.slug})
Create the challenge on the server
def update_members(self, list_id, data): self.list_id = list_id if 'members' not in data: raise KeyError('The update must have at least one member') else: if not len(data['members']) <= 500: raise ValueError('You may only batch sub/unsub 500 members at a t...
Batch subscribe or unsubscribe list members. Only the members array is required in the request body parameters. Within the members array, each member requires an email_address and either a status or status_if_new. The update_existing parameter will also be considered required to help pr...
def _rc_rpoplpush(self, src, dst): rpop = self.rpop(src) if rpop is not None: self.lpush(dst, rpop) return rpop return None
RPOP a value off of the ``src`` list and LPUSH it on to the ``dst`` list. Returns the value.
def run_rcr(graph, tag='dgxp'): hypotheses = defaultdict(set) increases = defaultdict(set) decreases = defaultdict(set) for u, v, d in graph.edges(data=True): hypotheses[u].add(v) if d[RELATION] in CAUSAL_INCREASE_RELATIONS: increases[u].add(v) elif d[RELATION] in CAU...
Run the reverse causal reasoning algorithm on a graph. Steps: 1. Get all downstream controlled things into map (that have at least 4 downstream things) 2. calculate population of all things that are downstream controlled .. note:: Assumes all nodes have been pre-tagged with data :param pybel.BEL...
def status(self):
    """Return the status of the executor by probing the execution provider."""
    return self.provider.status(self.engines) if self.provider else []
Returns the status of the executor via probing the execution providers.
def readtxt(filepath):
    """Read a text file and return its contents as-is.

    Uses ``f.read()`` directly instead of joining ``readlines()`` —
    identical result, one pass, no intermediate list.
    """
    with open(filepath, 'rt') as f:
        return f.read()
read file as is
def generateFromNumbers(self, numbers):
    """Generate a sequence from a list of numbers.

    Note: Any `None` in the list of numbers is considered a reset and is
    passed through unchanged.

    @param numbers (list) List of numbers
    @return (list) Generated sequence
    """
    sequence = []
    for number in numbers:
        if number is None:  # identity check, not the former `== None`
            sequence.append(number)
        else:
            sequence.append(self.patternMachine.get(number))
    return sequence
Generate a sequence from a list of numbers. Note: Any `None` in the list of numbers is considered a reset. @param numbers (list) List of numbers @return (list) Generated sequence
def resetVector(x1, x2):
    """Copy the contents of vector x1 into vector x2, in place.

    @param x1 (array) binary vector to be copied
    @param x2 (array) binary vector where x1 is copied
    """
    for index, value in enumerate(x1):
        x2[index] = value
Copies the contents of vector x1 into vector x2. @param x1 (array) binary vector to be copied @param x2 (array) binary vector where x1 is copied
def includes(self, query_date, query_time=None): if self.start_date and query_date < self.start_date: return False if self.end_date and query_date > self.end_date: return False if query_date.weekday() not in self.weekdays: return False if not query_tim...
Does this schedule include the provided time? query_date and query_time are date and time objects, interpreted in this schedule's timezone
def cmd_up(opts):
    """Start the containers and link them together."""
    config = load_config(opts.config)
    blockade = get_blockade(config, opts)
    containers = blockade.create(verbose=opts.verbose, force=opts.force)
    print_containers(containers, opts.json)
Start the containers and link them together
def make_join_request(self, password = None, history_maxchars = None, history_maxstanzas = None, history_seconds = None, history_since = None): self.clear_muc_child() self.muc_child=MucX(parent=self.xmlnode) if (history_maxchars is not None or history_maxstanzas is not No...
Make the presence stanza a MUC room join request. :Parameters: - `password`: password to the room. - `history_maxchars`: limit of the total number of characters in history. - `history_maxstanzas`: limit of the total number of messages in history. ...
def get_file_relative_path_by_name(self, name, skip=0): if skip is None: paths = [] else: paths = None for path, info in self.walk_files_info(): _, n = os.path.split(path) if n==name: if skip is None: paths.appen...
Get file relative path given the file name. If file name is redundant in different directories in the repository, this method ensures to return all or some of the files according to skip value. Parameters: #. name (string): The file name. #. skip (None, integer): As file...
def stop_image_acquisition(self): if self.is_acquiring_images: self._is_acquiring_images = False if self.thread_image_acquisition.is_running: self.thread_image_acquisition.stop() with MutexLocker(self.thread_image_acquisition): self.device.node...
Stops image acquisition. :return: None.
def put(self, pid, record, key): try: data = json.loads(request.data.decode('utf-8')) new_key = data['filename'] except KeyError: raise WrongFile() new_key_secure = secure_filename(new_key) if not new_key_secure or new_key != new_key_secure: ...
Handle the file rename through the PUT deposit file. Permission required: `update_permission_factory`. :param pid: Pid object (from url). :param record: Record object resolved from the pid. :param key: Unique identifier for the file in the deposit.
def summarize_paths(samples, cone_std=(1., 1.5, 2.), starting_value=1.): cum_samples = ep.cum_returns(samples.T, starting_value=starting_value).T cum_mean = cum_samples.mean(axis=0) cum_std = cum_samples.std(axis=0) if isinstance(cone_std, (float, int)): cone_std...
Generate the upper and lower bounds of an n standard deviation cone of forecasted cumulative returns. Parameters ---------- samples : numpy.ndarray Alternative paths, or series of possible outcomes. cone_std : list of int/float Number of standard deviations to use in the boundaries of...
async def lookup_entities(client, args): lookup_spec = _get_lookup_spec(args.entity_identifier) request = hangups.hangouts_pb2.GetEntityByIdRequest( request_header=client.get_request_header(), batch_lookup_spec=[lookup_spec], ) res = await client.get_entity_by_id(request) for entity_...
Search for entities by phone number, email, or gaia_id.
def escape_ID(cobra_model): for x in chain([cobra_model], cobra_model.metabolites, cobra_model.reactions, cobra_model.genes): x.id = _escape_str_id(x.id) cobra_model.repair() gene_renamer = _GeneEscaper() for rxn, rule in iteritems(get_com...
makes all ids SBML compliant
def add_bpmn_xml(self, bpmn, svg=None, filename=None): xpath = xpath_eval(bpmn) processes = xpath('.//bpmn:process') for process in processes: process_parser = self.PROCESS_PARSER_CLASS( self, process, svg, filename=filename, doc_xpath=xpath) if process_pa...
Add the given lxml representation of the BPMN file to the parser's set. :param svg: Optionally, provide the text data for the SVG of the BPMN file :param filename: Optionally, provide the source filename.
def draw(self, projection_matrix=None, view_matrix=None, camera_matrix=None, time=0): if self.mesh_program: self.mesh_program.draw( self, projection_matrix=projection_matrix, view_matrix=view_matrix, camera_matrix=camera_matrix, ...
Draw the mesh using the assigned mesh program :param projection_matrix: projection_matrix (bytes) :param view_matrix: view_matrix (bytes) :param camera_matrix: camera_matrix (bytes)
def end_timing(self):
    """Complete measuring the time interval and update the counter.

    Does nothing when no callback is registered.
    """
    if self._callback is not None:  # identity check, not the former `!= None`
        # NOTE(review): time.clock() was removed in Python 3.8.  self._start
        # is presumably captured with the same clock elsewhere — migrate both
        # to time.perf_counter() together; TODO confirm.
        elapsed = time.clock() * 1000 - self._start
        self._callback.end_timing(self._counter, elapsed)
Completes measuring time interval and updates counter.
def delete(self, template_id):
    """Delete a specific template.

    :param template_id: The unique id for the template.
    :type template_id: :py:class:`str`
    """
    self.template_id = template_id
    url = self._build_path(template_id)
    return self._mc_client._delete(url=url)
Delete a specific template. :param template_id: The unique id for the template. :type template_id: :py:class:`str`
def _update_dict(data, default_data, replace_data=False): if not data: data = default_data.copy() return data if not isinstance(data, dict): raise TypeError('Value not dict type') if len(data) > 255: raise ValueError('More than 255 values defined')...
Update algorithm definition type dictionaries
def get(self, request, enterprise_uuid, course_id): enrollment_course_mode = request.GET.get('course_mode') enterprise_catalog_uuid = request.GET.get('catalog') if not enrollment_course_mode: return redirect(LMS_DASHBOARD_URL) enrollment_api_client = EnrollmentApiClient() ...
Handle the enrollment of enterprise learner in the provided course. Based on `enterprise_uuid` in URL, the view will decide which enterprise customer's course enrollment record should be created. Depending on the value of query parameter `course_mode` then learner will be either redire...
def update_backend(use_pypi=False, index='dev', build=True, user=None, version=None): get_vars() if value_asbool(build): upload_backend(index=index, user=user) with fab.cd('{apphome}'.format(**AV)): if value_asbool(use_pypi): command = 'bin/pip install --upgrade briefkasten' ...
Install the backend from the given devpi index at the given version on the target host and restart the service. If version is None, it defaults to the latest version Optionally, build and upload the application first from local sources. This requires a full backend development environment on the machine r...
def _runpf_worker(task): (lcfile, outdir, timecols, magcols, errcols, lcformat, lcformatdir, pfmethods, pfkwargs, getblssnr, sigclip, nworkers, minobservations, excludeprocessed) = task if os.path.exists(lcfile): pfresult = runpf(lcfile, outdir, ...
This runs the runpf function.
def build_environ(scope: Scope, body: bytes) -> dict: environ = { "REQUEST_METHOD": scope["method"], "SCRIPT_NAME": scope.get("root_path", ""), "PATH_INFO": scope["path"], "QUERY_STRING": scope["query_string"].decode("ascii"), "SERVER_PROTOCOL": f"HTTP/{scope['http_version']}...
Builds a scope and request body into a WSGI environ object.
def get_user(self, username): if hasattr(self._bot, 'user_manager'): user = self._bot.user_manager.get_by_username(username) if user: return user user = SlackUser.get_user(self._bot.sc, username) self._bot.user_manager.set(user) return ...
Utility function to query slack for a particular user :param username: The username of the user to lookup :return: SlackUser object or None
def stop(ctx, yes): user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if not yes and not click.confirm("Are sure you want to stop " "exper...
Stop experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \b ```bash $ polyaxon experiment stop ``` \b ```bash $ polyaxon experiment -xp 2 stop ```
def validate_string_list(value): try: if sys.version_info.major < 3: from locale import getpreferredencoding encoding = getpreferredencoding() value = value.decode(encoding) return [x.strip() for x in value.split(u",")] except (Attr...
Validator for string lists to be used with `add_setting`.
def _parse_exchange_token_response(content):
    """Parse the response of an exchange-token request.

    Most providers return JSON but some (e.g. Facebook) return a
    url-encoded string.  The 'expires' key, when present, is renamed to
    'expires_in'.  Returns a dict, which may be empty ({}).
    """
    content = _helpers._from_bytes(content)
    try:
        resp = json.loads(content)
    except Exception:
        # Not JSON — fall back to url-encoded parsing.
        resp = _helpers.parse_unique_urlencoded(content)
    if resp and 'expires' in resp:
        resp['expires_in'] = resp.pop('expires')
    return resp
Parses response of an exchange token request. Most providers return JSON but some (e.g. Facebook) return a url-encoded string. Args: content: The body of a response Returns: Content as a dictionary object. Note that the dict could be empty, i.e. {}. That basically indicates a ...
def load_matlab_model(infile_path, variable_name=None, inf=inf): if not scipy_io: raise ImportError('load_matlab_model requires scipy') data = scipy_io.loadmat(infile_path) possible_names = [] if variable_name is None: meta_vars = {"__globals__", "__header__", "__version__"} poss...
Load a cobra model stored as a .mat file Parameters ---------- infile_path: str path to the file to to read variable_name: str, optional The variable name of the model in the .mat file. If this is not specified, then the first MATLAB variable which looks like a COBRA mod...
def _get_referenced_libs(specs): active_libs = set() for app_spec in specs['apps'].values(): for lib in app_spec['depends']['libs']: active_libs.add(lib) return active_libs
Returns all libs that are referenced in specs.apps.depends.libs
def fourier_sinusoidal_residual(fourierparams, times, mags, errs):
    """Return the error-weighted residual between the model mags and actual mags.

    The model is evaluated by ``fourier_sinusoidal_func``; the residual is
    (observed - model) / error over the phase-sorted arrays it returns.
    """
    modelmags, phase, ptimes, pmags, perrs = fourier_sinusoidal_func(
        fourierparams, times, mags, errs
    )
    return (pmags - modelmags) / perrs
This returns the residual between the model mags and the actual mags. Parameters ---------- fourierparams : list This MUST be a list of the following form like so:: [period, epoch, [amplitude_1, amplitude_2, amplitude_3, ..., amplitude_X], [phase...
def create_graph_from_data(self, data): self.arguments['{SCORE}'] = self.scores[self.score] self.arguments['{VERBOSE}'] = str(self.verbose).upper() results = self._run_gies(data, verbose=self.verbose) return nx.relabel_nodes(nx.DiGraph(results), {idx: i fo...
Run the GIES algorithm. Args: data (pandas.DataFrame): DataFrame containing the data Returns: networkx.DiGraph: Solution given by the GIES algorithm.
def _finalize(self): self._logger.info( "Finished: modelID=%r; %r records processed. Performing final activities", self._modelID, self._currentRecordIndex + 1) self._updateModelDBResults() if not self._isKilled: self.__updateJobResults() else: self.__deleteOutputCache(self._model...
Run final activities after a model has run. These include recording and logging the final score
def convert_args_dict_to_list(dict_extra_args): list_extra_args = [] if 'component_parallelism' in dict_extra_args: list_extra_args += ["--component_parallelism", ','.join(dict_extra_args['component_parallelism'])] if 'runtime_config' in dict_extra_args: list_extra_args += ["--runt...
flatten extra args
def ramp(x, v_min=0, v_max=1, name=None):
    """Ramp activation function: clip *x* into [v_min, v_max].

    Parameters
    ----------
    x : Tensor
        input.
    v_min : float
        cap input to v_min as a lower bound.
    v_max : float
        cap input to v_max as a upper bound.
    name : str
        The function name (optional).

    Returns
    -------
    Tensor
        The clipped tensor.
    """
    clipped = tf.clip_by_value(x, clip_value_min=v_min, clip_value_max=v_max, name=name)
    return clipped
Ramp activation function. Parameters ---------- x : Tensor input. v_min : float cap input to v_min as a lower bound. v_max : float cap input to v_max as a upper bound. name : str The function name (optional). Returns ------- Tensor A ``Tensor...
def recombine(self, other, d=0.7): a, b = self, other d1 = max(0, min(d, 1)) d2 = d1 c = ColorTheme( name=a.name[:int(len(a.name) * d1)] + b.name[int(len(b.name) * d2):], ranges=a.ranges[:int(len(a.ranges) * d1)] + b.ranges[int(...
Genetic recombination of two themes using cut and splice technique.
def add_user_to_allow(self, name, user):
    """Add a user to the given acl allow block.

    Returns False when the user could not be removed from the acl first,
    or when the acl does not exist; True on success.
    """
    if not self.remove_user_from_acl(name, user):
        return False
    if name not in self._acl:
        return False
    self._acl[name]['allow'].append(user)
    return True
Add a user to the given acl allow block.
def ReplaceAll(pattern, rep, s):
    """Replaces instances of pattern in a string with a replacement.

    The compiled regex is kept in a cache shared by Match and Search.

    Args:
      pattern: regex pattern
      rep: replacement text
      s: search string
    Returns:
      string with replacements made (or original string if no replacements)
    """
    # `re.compile` replaces the former `sre_compile.compile`: sre_compile is
    # an internal module, deprecated since Python 3.11, and re.compile is the
    # public equivalent.  Local import keeps this block self-contained.
    import re
    if pattern not in _regexp_compile_cache:
        _regexp_compile_cache[pattern] = re.compile(pattern)
    return _regexp_compile_cache[pattern].sub(rep, s)
Replaces instances of pattern in a string with a replacement. The compiled regex is kept in a cache shared by Match and Search. Args: pattern: regex pattern rep: replacement text s: search string Returns: string with replacements made (or original string if no replacements)
def install(packages, update=False, options=None, version=None): manager = MANAGER if update: update_index() if options is None: options = [] if version is None: version = '' if version and not isinstance(packages, list): version = '=' + version if not isinstance(...
Install one or more packages. If *update* is ``True``, the package definitions will be updated first, using :py:func:`~burlap.deb.update_index`. Extra *options* may be passed to ``apt-get`` if necessary. Example:: import burlap # Update index, then install a single package b...
def disassemble_all(bytecode, pc=0, fork=DEFAULT_FORK): if isinstance(bytecode, bytes): bytecode = bytearray(bytecode) if isinstance(bytecode, str): bytecode = bytearray(bytecode.encode('latin-1')) bytecode = iter(bytecode) while True: instr = disassemble_one(bytecode, pc=pc, for...
Disassemble all instructions in bytecode :param bytecode: an evm bytecode (binary) :type bytecode: str | bytes | bytearray | iterator :param pc: program counter of the first instruction(optional) :type pc: int :param fork: fork name (optional) :type fork: str :re...
def right_complement(clr):
    """Return the right half of the split complement."""
    right = split_complementary(clr)[2]
    colors = complementary(clr)
    # Shift the complementary hues onto the right split-complement hue.
    for i in (3, 4, 5):
        colors[i].h = right.h
    return colorlist(
        colors[0], colors[2], colors[1],
        colors[5], colors[4], colors[3],
    )
Returns the right half of the split complement.
def deleteMultiple(self, objs):
    """deleteMultiple - Delete multiple objects

    @param objs - List of objects
    @return - Number of objects deleted
    """
    conn = self._get_connection()
    pipeline = conn.pipeline()
    deleted_count = sum(self.deleteOne(obj, pipeline) for obj in objs)
    pipeline.execute()
    return deleted_count
deleteMultiple - Delete multiple objects @param objs - List of objects @return - Number of objects deleted
def create_validator(): field_names = ( 'study_id', 'patient_id', 'gender', 'age_years', 'age_months', 'date_inclusion' ) validator = CSVValidator(field_names) validator.a...
Create an example CSV validator for patient demographic data.
def add_unique_check(self, key, code=UNIQUE_CHECK_FAILED, message=MESSAGES[UNIQUE_CHECK_FAILED]): if isinstance(key, basestring): assert key in self._field_names, 'unexpected field name: %s' % key else: for f in key: ...
Add a unique check on a single column or combination of columns. Arguments --------- `key` - a single field name (string) specifying a field in which all values are expected to be unique, or a sequence of field names (tuple or list of strings) specifying a compound key ...
def set_script(self, i):
    """Set the current script/codepage and update delta to reflect it.

    :param i: 1-based script index; must be in 1..9.
    :raises IllegalInput: when *i* is out of range.
    """
    if i not in range(1, 10):
        raise IllegalInput("Invalid Value for ATR %s" % (hex(i)))
    n = i - 1
    # n is always >= 0 here, so the former `if n > -1` guard was dead code.
    self.curr_script = n
    self.delta = n * DELTA
set the value of delta to reflect the current codepage
def show_perf_attrib_stats(returns, positions, factor_returns, factor_loadings, transactions=None, pos_in_dollars=True): risk_exposures, perf_attrib_data = perf_attrib( retu...
Calls `perf_attrib` using inputs, and displays outputs using `utils.print_table`.
def create_entrypoint(self):
    """Write an executable manage.py into the current directory.

    Renders the manage.py template from ``self.template_dir`` with
    ``self.project_name`` substituted, then marks the file executable.
    """
    template_path = os.path.join(self.template_dir, 'manage.py')
    with open(template_path, 'r') as src:
        rendered = src.read().format(project_name=self.project_name)
    with open('manage.py', 'w') as dst:
        dst.write(rendered)
    os.chmod('manage.py', 0o777)
Write manage.py in the current directory
def anumb_to_atom(self, anumb): assert isinstance(anumb, int), "anumb must be integer" if not self._anumb_to_atom: if self.atoms: for atom in self.atoms: self._anumb_to_atom[atom.number] = atom return self._anumb_to_atom[anumb] ...
Returns the atom object corresponding to an atom number
def getAllEncodings(self):
    """Returns encodings for all the records."""
    num_encodings = self.fields[0].numEncodings
    # Every field must agree on the record count.
    assert all(field.numEncodings == num_encodings for field in self.fields)
    return [self.getEncoding(i) for i in range(num_encodings)]
Returns encodings for all the records
def _gunzip_sqlitecurve(sqlitecurve): cmd = 'gunzip -k %s' % sqlitecurve try: subprocess.check_output(cmd, shell=True) return sqlitecurve.replace('.gz','') except subprocess.CalledProcessError: return None
This just uncompresses the sqlitecurve in gzip format. FIXME: this doesn't work with gzip < 1.6 or non-GNU gzip (probably).
def getCustomDict(cls): if not os.path.exists(cls.getPath()): return dict() properties = Configuration._readConfigFile(os.path.basename( cls.getPath()), os.path.dirname(cls.getPath())) values = dict() for propName in properties: if 'value' in properties[propName]: values[propNa...
Returns a dict of all temporary values in custom configuration file