content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import argparse
def parse_args():
"""Parse the arguments to the main dirsum script."""
parser = argparse.ArgumentParser(description="Summarize directory contents.")
parser.add_argument('root', help="Root directory to summarize.")
parser.add_argument('--size', dest='min_size', default='1GB',
... | 73699396b5cc950c267c114b79c659f91a013997 | 690,141 |
from typing import List
def calcCellDeltaList(cell: List[int]) -> List[int]:
"""Calculates the list of step sizes for a cell
Args:
cell (list): List of bits in a cell (i.e. [0, 1, 2, 3])
Returns:
list: List of step sizes between levels in the cell
"""
l = len(cell)
prev = 0
... | 47d9ef4d30b517656f5fd84f1d8bad207fae30d2 | 690,142 |
def _name_filter(files, name):
"""Filtre par nom de fichier"""
filtered_files = []
for file in files:
file_name = ".".join("".join(file.split('/')[-1]).split('.')[:-1])
if file_name == name:
filtered_files.append(file)
return filtered_files | 50eda4376b910eb12886ed5abe2c6cdc4c00b03d | 690,144 |
def some(predicate, seq):
    """Return the first truthy ``predicate(x)`` for x in seq, else False.

    The original doctests showed ``1`` and ``0``, but ``callable``
    returns booleans, so the real outputs are ``True``/``False``.

    >>> some(callable, [min, 3])
    True
    >>> some(callable, [2, 3])
    False
    """
    for x in seq:
        px = predicate(x)
        if px:
            return px
    return False
import re
def get_first_stacktrace(stderr_data):
"""If |stderr_data| contains stack traces, only returns the first one.
Otherwise returns the entire string."""
# Use question mark after .+ for non-greedy, otherwise it will match more
# than one stack trace.
sanitizer_stacktrace_regex = r'ERROR: [A-z]+Sanit... | 080ec4a56e7fd0c1377936fb881d72f09388d8ac | 690,146 |
import torch
def autograd_individual_gradients(X, y, forward_fn, parameters):
"""Compute individual gradients with a for-loop using autograd.
Loop over data (xₙ, yₙ) and compute ∇ℓ(xₙ, yₙ) with respect to `parameters`,
where ℓ is the forward function.
Note:
Individual gradients only make sen... | 7548eef0091c8c83815063525fa98bfb8124dbb5 | 690,148 |
import typing
def hex(b: typing.Optional[bytes]) -> str:
    """Hex-encode *b*; return "" when *b* is None or empty.

    NOTE: intentionally shadows the builtin ``hex`` in this module.
    """
    if not b:
        return ""
    return b.hex()
def solution(ranks):
"""
Getting counts of soldiers in a certain rank n reporting to at least one soldier in rank n + 1 (where n + 1 exists)
and returning their total sum across all ranks
"""
counts = {}
for i in range(len(ranks)):
if ranks[i] in counts:
counts[ranks[i]] += 1... | cd39bf5aa030556cb1f777a23cb3b7fc9591317d | 690,150 |
def get_reserved_price(client, availability_zone, instance_type):
"""Gets price of a given reserved Linux instance type in a given availability zone."""
resp = client.describe_reserved_instances_offerings(
InstanceType=instance_type,
AvailabilityZone=availability_zone,
OfferingType="No U... | 05f3448eaba2b853409a04109bf094f3c5dcbb88 | 690,151 |
import os
def validate_metadata(target_dir, metadata):
"""
Check that the files listed in metadata exactly match files in target dir.
:param target_dir: This field is the target directory from which to
match metadata
:param metadata: This field contains the metadata to be matched.
"""
... | 2d1cda89890146f9236ad4533c20fe96d25acf32 | 690,152 |
import requests
def getdoilink(doi):
"""Pings universal DOI server for a given DOI number; returns URL of journal article"""
if doi == 'N/A' or doi is None:
return 'N/A'
else:
pingurl = 'http://dx.doi.org/' + doi
pingdoi = requests.Session()
print('Pinging DOI...')
... | 86245a09d00bd3d4c009d8d0cf6400b8bde48e50 | 690,153 |
def createMemberDict(conn):
"""build a dictionary of memberType IDs from database"""
memberDict = {}
with conn:
with conn.cursor() as cur:
cur.execute("SELECT id, type FROM member_types")
rows = cur.fetchall()
for row in rows:
(memberId, typeName) = row
memberDict[typeName] =... | b0cd157a80a0e18597993c3060587aeea56827bb | 690,154 |
def get_class_with_tablename(cls):
"""
Returns the first parent found (or the class itself) for given class which
has __tablename__ attribute set.
This function is needed for slug uniqueness testing when using concrete
inheritance.
:param cls: class to inspect
"""
mapper_args = {}
... | 82027fcb8c97dc048ac8ea35cd6e68edf2a67aed | 690,155 |
from datetime import datetime
def convert_date(measured_time):
"""
Convert obtained from provider api date to correct for influxdb
:param measured_time:
:return: example - '2019-01-31T19:25:00Z'
"""
converted_measured_time = datetime.strptime(measured_time, "%Y-%m-%d %H:%M:%S")
return conv... | 638a43b8bc52f00a510e678569c9703cd6208dcf | 690,156 |
import subprocess
import os
def bootstrap_bcbionextgen(anaconda, args, remotes):
"""Install bcbio-nextgen to bootstrap rest of installation process.
"""
subprocess.check_call([anaconda["pip"], "install", "fabric"])
subprocess.check_call([anaconda["pip"], "install", "-r", remotes["requirements"]])
... | aef799bc8b45688385ee38b25485679ca1a800c1 | 690,157 |
def escape_fb_key(repo_target):
    """Make a GitHub "owner/repo" string safe for use as a Firestore key.

    Firestore treats '/' as a path delimiter, so swap it for '_'.
    """
    safe_key = repo_target.replace("/", "_")
    return safe_key
def split_train_test(X, Y, trs_len=0.80):
"""
Split both X and Y into train and test sets.
trs_len - how much data should we use for training?
by default it's 0.80 meaning 80%, the remining
20% of the data will be used for testing.
"""
lx = len(X)
# 1 year train set ... | 2932f5d40ac332b154d494a1b576279f9e24976d | 690,159 |
def find(l, predicate):
    """Return the first element of ``l`` satisfying ``predicate``, or None.

    Short-circuits on the first match instead of materializing every
    matching element as the original list comprehension did.

    :param l: iterable to search
    :param predicate: callable returning truthy for a match
    :return: first matching element, or None when there is none
    """
    return next((x for x in l if predicate(x)), None)
def pair_right(f):
    """Build a function that pairs its argument with ``f`` applied to it.

    Given ``f``, returns ``g`` such that ``g(x) == (x, f(x))``.

    >>> add_one = pair_right(lambda x: x + 1)
    >>> add_one(3)
    (3, 4)
    """
    def pair_right(x):
        # Deliberately shadows the factory name so the returned
        # closure's __name__ matches the original implementation.
        return (x, f(x))
    return pair_right
def npqt(fmp, f0p, fmf0=4.88):
    """Calculate NPQt.

    NPQt = (fmf0 / ((fmp / f0p) - 1)) - 1

    :param fmp: Fm'
    :param f0p: F0'
    :param fmf0: Fv/Fm ratio (default: 4.88)
    :returns: NPQt (float)
    """
    fluorescence_ratio = fmp / f0p
    return fmf0 / (fluorescence_ratio - 1) - 1
def _crc_update(cur, crc, table):
"""helper for crc calculation
:param cur
:param crc
:param table
"""
l_crc = (0x000000ff & cur)
tmp = crc ^ l_crc
crc = (crc >> 8) ^ table[(tmp & 0xff)]
return crc | af9902b8dc6d26b72c81043b45d399c402c4684d | 690,164 |
import torch
def euclidean_distance(x, y):
    """Pairwise *squared* Euclidean distance between two batched sets.

    x, y have shapes (batch_size, num_examples, embedding_size); x is
    prototypes and y embeddings in most cases. Returns a tensor of shape
    (batch_size, x_examples, y_examples). NOTE: despite the name, no
    square root is taken — these are squared distances.
    """
    pairwise_diff = x.unsqueeze(2) - y.unsqueeze(1)
    return pairwise_diff.pow(2).sum(dim=-1)
def parse_arguments(parser):
"""Read user arguments"""
parser.add_argument('--path_model',
type=str, default='Model/weights.01.hdf5',
help='Path to the model to evaluate')
parser.add_argument('--path_data', type=str, default='data/data_test.pkl',
... | f6677799ed81161727c96b45b6010c8442dece97 | 690,166 |
from datetime import datetime
def convert_datetime(timestamp: int) -> datetime:
    """Convert a java-style epoch timestamp from the ToonAPI to a naive UTC datetime.

    NOTE(review): the code treats the value as *milliseconds* since the
    epoch (``// 1000`` to seconds), not microseconds as the original
    comment claimed — confirm against the ToonAPI docs.
    """
    # // 1000.0 -> whole seconds; the leftover milliseconds are
    # re-attached as microseconds (ms % 1000 * 1000).
    return datetime.utcfromtimestamp(timestamp // 1000.0).replace(
        microsecond=timestamp % 1000 * 1000
    )
from typing import List
import os
import sys
def start_command() -> List[str]:
"""
Returns a command to re-execute HomeControlwith the same parameters
except the daemon parameter
"""
# pylint: disable=line-too-long
if (os.path.basename(sys.argv[0]) == "__main__.py"
or (os.path.spli... | bdaebdcb21d99d8743371d42147b1e27119b1691 | 690,168 |
import os
import csv
def load_csv(fn, num_skip=0, converter=None):
"""Load data in csv format.
Args:
fn (str): file name
num_skip (int, optional): number of lines to skip. Defaults to 0.
converter (type, optional): convert str to desired type. Defaults to None.
Returns:
L... | d2be8d112812d579d186cb2aeccec91b275884d4 | 690,169 |
import re
def ParseRevision(lines):
"""Parse the revision number out of the raw lines of the patch.
Returns 0 (new file) if no revision number was found.
"""
for line in lines[:10]:
if line.startswith('@'):
break
m = re.match(r'---\s.*\(.*\s(\d+)\)\s*$', line)
if m:
return int(m.group... | c892881677ca4ab96b5acf951e54c924b3baa43c | 690,170 |
def get_features_and_classifier(model):
    """Split an nn.Sequential whose last layer is linear into
    (feature extractor, final classifier layer)."""
    classifier = model[-1]
    features = model[:-1]
    return features, classifier
import subprocess
def is_dirty():
    """Return True if the git working tree is dirty (untracked files count too)."""
    status_output = subprocess.getoutput("git status -s")
    return status_output != ""
from datetime import datetime
def dicom_strfdate( dt: datetime ) -> str:
    """
    datetime -> DICOM DA date string (YYYYMMDD)
    """
    return f"{dt:%Y%m%d}"
import random
def generateListWithImportance(inputList, minValue, valueMax):
""""Generate list with elements with importance"""
result = []
for _ in range(0, random.randint(minValue, valueMax)):
findIt = False
while not findIt:
element = inputList[random.randint(0, len(inputLi... | df8802cea1620c9e6b58aaddfec45b79af43b62d | 690,174 |
def get_depth(phylo_tree):
    """
    Returns the depth of a tree.

    Depth is the longest root-to-terminal path length (number of path
    entries) over all terminal (leaf) nodes; 0 when there are no
    terminals. Rewritten as max() over a generator instead of a manual
    running-maximum loop.
    """
    return max(
        (len(phylo_tree.get_path(target=terminal_node))
         for terminal_node in phylo_tree.get_terminals(order='preorder')),
        default=0,
    )
def get_rating_date(comment):
    """Extract the published date of a review from its DOM node.

    Looks up the first descendant div carrying itemprop="datePublished"
    and returns its "content" attribute (None when absent).
    """
    published_div = comment.xpath('.//div[@itemprop="datePublished"]')[0]
    return published_div.attrib.get("content")
import requests
def get_response_from_url(url):
    """Fetch *url* over HTTP and return the Hue API response parsed as json."""
    return requests.get(url).json()
def check_neighboring_blocks(block, grid):
"""
Given a block, return the immediate neighbors of the block.
Parameters
----------
block : tuple
The row and column of a block (row, col)
grid : ndarray
The numpy 2D array of containing blocks (1) and empty space (0)
Returns
... | a0b185d5d6056503a9b2ed5ee48dc01367a8977f | 690,179 |
def left_of_line(point, p1, p2):
""" True if the point self is left of the line p1 -> p2
"""
# check if a and b are on the same vertical line
if p1[0] == p2[0]:
# compute # on which site of the line self should be
should_be_left = p1[1] < p2[1]
if should_be_left:
retu... | 5cb130fecd46fe7eb74cee5179f4705b8ee4760f | 690,180 |
def distinct_words(corpus):
""" Determine a list of distinct words for the corpus.
Params:
corpus (list of list of strings): corpus of documents
Return:
corpus_words (list of strings): list of distinct words across the corpus, sorted (using python 'sorted' function)
... | d663b4970f0163a1cd8b3d9d898ac193b776377b | 690,181 |
def extractFeatures(pages, dataset):
"""
Extract the amount of page views for each student for each page
\n
:param pages: A list of all the (unique) pages to use \t
:type pages: list \n
:param dataset: A list of dictionaries, each dictionary representing one student and having at least the key "... | 2c44219c5143278602ad66b555292b41e6c26a63 | 690,182 |
def get_element_index(net, element, name, exact_match=True, regex=False):
"""
Returns the element identified by the element string and a name.
INPUT:
**net** - pandapower network
**element** - line indices of lines that are considered. If None, all lines in the network are \
... | c5f7cfa5898de6f93ce283954658afe51b48891b | 690,183 |
import torch
def rmse(hat_y, y):
    """RMSE (root-mean-square error).

    Args:
        hat_y: predicted values
        y: ground-truth values
    Return:
        ('rmse', value): metric name and metric value
    """
    squared_error = torch.pow(y - hat_y, 2)
    value = torch.sqrt(torch.mean(squared_error))
    return 'rmse', value
from typing import Tuple
def manhattan_distance(point1: Tuple[int, int], point2: Tuple[int, int]) -> int:
"""Calculate and return the Manhattan distance between two points.
:param point1: first point
:param point2: second point
:return: Manhattan distance between the two points
"""
x1, y1 = ... | 58138cb5c171aa85d5b21ceda316694f310b2994 | 690,185 |
import csv
def read_gs_file(gs_file_name):
"""
reads gold standard file
Args:
gs_file_name (str): the file path
Returns:
target_word_vec1 (list(str)): words vector
definition_vec1 (list(str)): words definitions vector - definition per target word
hypernym_vec1 (li... | 6cffe1d85517fee1f2e76ee6c50c35089de60f22 | 690,186 |
def get_dofs(data):
    """
    Gets the number of target DOFs, i.e. how many distinct motion
    classes are required to complete a trial.
    """
    target_poses = data['target']['pose']
    return len(target_poses)
def precompute_idfs(wglobal, dfs, total_docs):
"""Pre-compute the inverse document frequency mapping for all terms.
Parameters
----------
wglobal : function
Custom function for calculating the "global" weighting function.
See for example the SMART alternatives under :func:`~gensim.model... | 55247dfe65c4c6b113554ce59233d8e34097e16a | 690,188 |
def parse_tensor_name_with_slicing(in_str):
"""Parse tensor name, potentially suffixed by slicing string.
Args:
in_str: (str) Input name of the tensor, potentially followed by a slicing
string. E.g.: Without slicing string: "hidden/weights/Variable:0", with
slicing string: "hidden/weights... | f42b4aba99284cc971698fa46ae7afb1220a3bee | 690,189 |
def binary_class_func(y):
"""Define a binary SST task.
Parameters
----------
y : str
Assumed to be one of the SST labels.
Returns
-------
str or None
None values are ignored by `build_dataset` and thus left out of
the experiments.
"""
if y in ("0", "1"):
... | 00ecf93e443e1d339129be3c77e82565a107ce8a | 690,190 |
def element(atomic_number):
"""
Return the element of a given atomic number.
:param atomic_number:
The atomic number for the element in question (e.g., 26).
:type atomic_number:
int-like
:returns:
The short-hand element for a given atomic number.
:rtype:
str
... | 6d7d3e9ef9592362138666fb6046d41ac6822552 | 690,191 |
def get_runtime_and_maxRAM(dataset):
""" Return runtime in hours and max RAM in GB """
curr_dir = dataset + "/"
log = {}
logfile = curr_dir + dataset + ".log"
with open(logfile) as f:
for line in f:
if line.startswith("="):
continue
(key, val) = line.... | 51874b07529b88ebba2f5916b76f48672db55e48 | 690,192 |
def identify_journals(line, kb_journals):
"""Attempt to identify all periodical titles in a reference line.
Titles will be identified, their information (location in line,
length in line, and non-standardised version) will be recorded,
and they will be replaced in the working line by underscore... | 2944b66984bfda878114b4bac602b8aa74d383c0 | 690,193 |
import hashlib
def double_sha256(string, as_hex=False):
"""
Get double SHA256 hash of string
:param string: String to be hashed
:type string: bytes
:param as_hex: Return value as hexadecimal string. Default is False
:type as_hex
:return bytes, str:
"""
if not as_hex:
retu... | bce1607fbbab0c3c9a3b3dd2dcd2e74b6cb84f87 | 690,194 |
def vertices_to_label(selected_vert_indices, all_vert_coords, header="#!ascii label"):
"""
Write a string in FreeSurfer label format from the vertices.
Write a string in FreeSurfer label format from the vertices. This can be used to create a label from a list of vertices, e.g., for displaying the vertices ... | bfa915aa6e4e770f845df8505b9473be47287c13 | 690,195 |
def lower_dict_keys(some_dict):
    """Convert all keys to lowercase.

    Keys without a ``.lower()`` method (e.g. ints) are kept unchanged.
    """
    def _lowered(key):
        # EAFP: preserves behavior for any key exposing .lower(),
        # not just str (e.g. bytes).
        try:
            return key.lower()
        except AttributeError:
            return key
    return {_lowered(key): value for key, value in some_dict.items()}
import time
import math
def time_since(since):
    """
    Render elapsed wall-clock time as minutes and seconds.

    :param since: time.time() value captured when processing started
    :return: string of the form '<m>m <s>s'
    """
    elapsed = time.time() - since
    minutes, seconds = divmod(elapsed, 60)
    return '%dm %ds' % (minutes, seconds)
def add_jekyll_header(html_str, layout, title, description):
"""
Add the Jekyll header to the html strings.
Args:
html_str (str): HTML of converted notebook.
layout (str): Jekyll layout to use.
title (str): Title to use.
description (str): Description to use
Returns:
... | 038ab0b2821ef87292918a8acd80c14f76e3dc9a | 690,199 |
def parse_track(trackelement):
"""Extract info from every track entry and output to list."""
if trackelement.find('artist').getchildren():
# artist info is nested in loved/banned tracks xml
artistname = trackelement.find('artist').find('name').text
artistmbid = trackelement.find('artist'... | e7df7af49da544bf6a3c1733f2c41451b7eeae92 | 690,200 |
def get_postings_for_query(words, results_path):
"""
Scans through index files to find doc_ids where each word occurs
:param words: list of words to compare with
:param results_path: path where index files are stored
:return: list of lists with doc_ids for each word
"""
postings = []
n_f... | 8cc90678f6d7396ab7f8116629f8529e55f05171 | 690,201 |
def send_data(data, conn):
"""
Simplify sending information to client or disconnect the client if requested
"""
try:
if data == "!!!DISCONNECT_CLIENT!!!":
return data
else:
if data is not None:
conn.send((data +"\n").encode())
retur... | 8a323641b9a542d8ee55c99263a843078f266e70 | 690,202 |
from datetime import datetime
def parse_iso8601(t):
    """Parse an ISO 8601 UTC timestamp like '2019-01-31T19:25:00.123456Z'
    into a naive datetime."""
    fmt = '%Y-%m-%dT%H:%M:%S.%fZ'
    return datetime.strptime(t, fmt)
import re
def processing(df, positive_polarity_words, negative_polarity_words):
""" Does pre-processing on a dataframe"""
# Remove punctuation using re.sub(pattern, replace, input): \w: letter/num/underscore, \s: space. Also, make everything lowercase
df['processed'] = df['sentence'].apply(lambda x: re.su... | efd63a05b22b74aa771e26a30d0c390464c705f4 | 690,205 |
import re
def get_first_line_text_from_html(html_string):
"""reduce html to the first line of text and strip all html tags
"""
if html_string is None:
return None
p_div = re.compile(r"</?(p|div|br).*?>",
re.IGNORECASE | re.DOTALL)
html_string = p_div.sub("\n", html_s... | 287ff2036c2aed0e1d013d8040628a7216ca2ba7 | 690,206 |
def cskp(self, kcn="", kcs="", porig="", pxaxs="", pxypl="", par1="",
par2="", **kwargs):
"""Defines a local coordinate system by three keypoint locations.
APDL Command: CSKP
Parameters
----------
kcn
Arbitrary reference number assigned to this coordinate system.
Must be g... | 7b8f1357dae8cba6c3b2cc6a7d7e72a6f4a7ff9e | 690,207 |
import re
def structure_from_viewer(status, atlas_layer, atlas):
"""
Get brain region info from mouse position in napari viewer.
Extract nx3 coordinate pair from napari window status string.
Return brainglobe (BG) structure number, name, hemisphere, and a
"pretty" string that can be displayed for... | a0f92a90cf13b1bc01081167f95c248a1b74c046 | 690,208 |
def altera_caractere(s, c, i):
    """
    Replace one character in a string.

    :param s: str; the word to edit
    :param c: chr; the replacement character
    :param i: int; index of the character to replace (negative indices work)
    :return: str
    """
    chars = list(s)
    chars[i] = c
    return ''.join(chars)
import os
def text_files_from_folder(folder_path):
"""
Return a list of .txt files in a folder.
:param folder_path: The path of the folder to inspect
"""
files = os.listdir(folder_path)
# Get a list of only the text files corresponding to the contour arrays
contour_filenames = []
for f... | 1e46b55e7ba8dcaf176c37293fe8956f96472945 | 690,210 |
def normalize_context_key(string):
    """Normalize context keys.

    Upper-cases the first character and strips every space character.

    Args:
        string (str):
    Returns:
        Normalized string
    """
    capitalized = string[:1].upper() + string[1:]
    return capitalized.replace(" ", "")
def cli(ctx, history_id, dataset_id, follow=False):
"""Get details related to how dataset was created (``id``, ``job_id``, ``tool_id``, ``stdout``, ``stderr``, ``parameters``, ``inputs``, etc...).
Output:
Dataset provenance information
For example::
{'id': '6fbd9b2274c62ebe',
... | 289e08ff939459bb4035790e2eed69cd5977cc49 | 690,212 |
import six
def safe_utf8(string):
    """Returns bytes on Py3 and an utf-8 encoded string on Py2."""
    return string.encode("utf-8") if six.PY2 else string
def clip_colours(colour_value):
""" This function ensures that our auto white balance module does not
exceed the limits of our RGB spectrum.
:param colour_value: The value of our colour channel.
:return: The normalised value of our colour channel.
"""
if colour_value <= 0:
# Value of ... | 5da84bf971ea2f41824b6b8723c762d8ee3aa883 | 690,214 |
def check_filename(file):
"""Check if the str in file has the proper form."""
filename = file.name
if (isinstance(filename, str)
and filename.startswith("heightmap_sequence-")
and filename.endswith(".csv")
and len(filename.split('_'))==4
and filename.split('_')[2].startswith... | 38280f128591c3f7ce84218e12a7bdaaeb088854 | 690,216 |
def parse_cron_options(argstring):
"""
Parse periodic task options.
Obtains configuration value, returns dictionary.
Example::
my_periodic_task = 60; now=true
"""
parts = argstring.split(';')
options = {'interval': float(parts[0].strip())}
for part in parts[1:]:
name, ... | 1a5b5f2a17fb4e9b5b6a41b00073f27e597afbc8 | 690,217 |
def float_or(val, or_val=None):
    """Coerce ``val`` to float, falling back to ``or_val`` on failure.

    Args:
        val (?): input value to convert
        or_val (?): value to return if ``val`` is not float-convertible
    Returns:
        ?: float(val), otherwise ``or_val``
    """
    try:
        return float(val)
    except (TypeError, ValueError):
        # Narrowed from a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit.
        return or_val
import math
def mu_a(x, alpha_L=0.1, alpha_R=0.1):
"""
Calculates the asymetric alpha-trimmed mean
"""
# sort pixels by intensity - for clipping
x = sorted(x)
# get number of pixels
K = len(x)
# calculate T alpha L and T alpha R
T_a_L = math.ceil(alpha_L*K)
T_a_R = math.floor... | c72f07c43ada924104d9d0eefc75d0788929bbfd | 690,220 |
import re
import argparse
def is_aws_region(region: str) -> str:
"""Validate region by format only"""
region_check = re.compile(
r"(us(-gov)?|ap|ca|cn|eu|sa)-(central|(north|south)?(east|west)?)-\d"
)
if region_check.match(region):
return region
else:
raise argparse.Argume... | 996965a2d844e16c0a154c294510f671b458dc8b | 690,221 |
import torch
def get_relative_position_matrix(length, max_relative_position, direction, offset=True):
""" Generate matrix of relative positions between inputs ([..., length])."""
range_vec = torch.arange(length).long()
if torch.cuda.is_available():
range_vec = range_vec.cuda()
range_mat = rang... | 0ed57ce5e33f5c8dc6f582a0efd9237cbd496f4e | 690,223 |
import yaml
def read_config(fname):
    """Read configuration file.

    Loads the YAML document at ``fname`` with the safe loader and
    returns the parsed object.
    """
    with open(fname, 'r') as stream:
        return yaml.load(stream, yaml.SafeLoader)
from typing import List
def quick_sort(nums: List[int]) -> List[int]:
""" Does recursive sorting using quick sort """
if len(nums) < 2:
return nums
mid: int = (len(nums) - 1)//2
smaller_values: List[int] = [num for i, num in enumerate(nums)
if num <= ... | cd8234271e93436736261604c00c033ae5e86e6f | 690,225 |
def is_runnable(graph, obj):
"""Check if a task within a graph is runnable."""
connections_to_remove = []
for pred in graph.predecessors[obj.name]:
is_done = pred.done
if not is_done:
return False
else:
connections_to_remove.append(pred)
# removing nodes t... | dd872032e2ee7cf530dfad94433a42232aa88d13 | 690,226 |
import os
def ls(arg=None):
    """List entry names in *arg* (cwd when None): files first, then directories."""
    directory = arg if arg is not None else os.getcwd()
    file_names = [entry.name for entry in os.scandir(directory) if entry.is_file()]  # noqa E501
    dir_names = [entry.name for entry in os.scandir(directory) if entry.is_dir()]
    return file_names + dir_names
import re
def read_attribute_file (attr_file, attr_conf):
"""
Collect annotation evidence for protein families used for prioritization
Input: filename of the characterization file
Output: ann = {Cluster_XYZ: {qvalue:0.001, coef:-0.3, ...}, ...}
"""
required = {}
annotation = {}
split = {}
flags = {}
title... | 7a9b9e89c340aac7a0e5b405df3091bbbbb0c66a | 690,228 |
import collections
def getWireDecl(slot_to_io, top_rtl_parser):
"""
declare connecting wires
need to filter out the IOs
also filter out the ap signals (separately handled)
"""
wire_decl = []
for slot, io_list in slot_to_io.items():
for io in io_list:
if top_rtl_parser.isIO(io[-1]):
co... | 2c37431457dfb7e80b3e9270af627cfff8b926b2 | 690,229 |
def PyLong_AsDouble(space, w_long):
    """Return a C double representation of the contents of pylong. If
    pylong cannot be approximately represented as a double, an
    OverflowError exception is raised and -1.0 will be returned."""
    w_float = space.float(w_long)
    return space.float_w(w_float)
def vaihingen_classes():
    """Vaihingen class names for external use."""
    class_names = [
        'impervious_surface',
        'building',
        'low_vegetation',
        'tree',
        'car',
        'clutter',
    ]
    return class_names
def get_bar_vector(model, elem, node1, node2, xyz1):
"""helper method for ``rotate_v_wa_wb``"""
cd1 = node1.Cd()
cd2 = node2.Cd()
if model is None:
cd1_ref = node1.cd_ref
cd2_ref = node2.cd_ref
# get the vector v, which defines the projection on to the elemental
# coordi... | 8d7ea9eab3bae13ada2d4da3cb9c338bc8e55275 | 690,232 |
def membership_tree_roots(context, person):
"""
Produces a list of tree roots. For each of these, uses
make_membership_tree to display the entities in the tree that the person
belongs to.
"""
roots = set()
for entity in person.entities.all():
# get the closest-to-root non-abstract en... | 9b55de44c970740f9585781273ee4564aa35f85d | 690,233 |
def graph6n(data):
    """Read initial one or four-unit value from graph6 sequence.

    Return (value, rest of sequence).
    """
    head = data[0]
    if head <= 62:
        return head, data[1:]
    value = (data[1] << 12) + (data[2] << 6) + data[3]
    return value, data[4:]
def _make_emm_plugin_finalizer(handle, allocations):
"""
Factory to make the finalizer function.
We need to bind *handle* and *allocations* into the actual finalizer, which
takes no args.
"""
def finalizer():
"""
Invoked when the MemoryPointer is freed
"""
# At e... | 682378d6963bf924b77872c2ddf68105c90384b0 | 690,235 |
import random
import torch
def sample_random_batch(dataset, batch_size=32):
"""
* inputs:
- dataset (torch.utils.data.Dataset, required)
An instance of torch.utils.data.Dataset.
- batch_size (int, optional)
Batch size.
* returns:
A mini-batch ran... | 0aae065221a965bc27ee0471b84428a25bcf9e12 | 690,236 |
import urllib.request, urllib.parse, urllib.error
from bs4 import BeautifulSoup
import ssl
import time
def build_dataset_list(url='https://archive.ics.uci.edu/ml/datasets',msg_flag=True):
"""
Scrapes through the UCI ML datasets page and builds a list of all datasets.
"""
# Ignore SSL certificate... | 674f0a65a985b573ca9b915e76e9e2b68c3ca698 | 690,237 |
import collections
def cache_tree():
    """Struct to cache the partial results in memory.

    Autovivifying mapping: accessing a missing key creates another
    nested cache_tree, so arbitrarily deep paths can be assigned.
    """
    return collections.defaultdict(cache_tree)
def ps_weight_timemean(field, ps):
"""
This takes the surface pressure time mean of atmos_fields
input:
field xr.DataArray or xr.Dataset
ps surface pressure field with the same dimensions are field, it does not need the verical coordinates
return
same structure are field but ... | a33d3eb7cdece49e8e6ff41c6dff46bf40a4deb4 | 690,239 |
import timeit
def list_complexity(list, words):
    """Estimate complexity of lookup with a list.

    Times membership tests of every word in ``words`` against ``list``
    (an O(n) scan per lookup). ``list`` shadows the builtin name.

    :param list: list to search in
    :param words: iterable of candidate words to look up
    :return: elapsed wall-clock seconds
    """
    start_time = timeit.default_timer()
    for word in words:
        # The membership test itself is the operation being timed; the
        # loop body is intentionally a no-op.
        if word in list:
            continue
    return timeit.default_timer() - start_time
def cite_key(extracted_cite):
"""Get a hashed key to represent a given ExtractedCite object, to check whether a new one is redundant."""
return hash((
extracted_cite.cite,
extracted_cite.normalized_cite,
extracted_cite.rdb_cite,
extracted_cite.rdb_normalized_cite,
extract... | 50daa265cfe45745726eb2a21794b9bca75f571d | 690,241 |
from functools import reduce
from operator import mul
def doubleFactorial(n):
    """
    Returns double factorial of an integer.

    n!! = n * (n - 2) * (n - 4) * ... down to 1 (odd n) or 2 (even n).
    By convention 0!! = (-1)!! = 1; supplying the initializer 1 to
    reduce() yields that and fixes the TypeError the old code raised
    for n <= 0, while leaving results for n >= 1 unchanged.
    """
    return reduce(mul, range(n, 0, -2), 1)
def _get_full_name(first_name, last_name):
"""Gets the full name"""
return ', '.join([last_name, first_name]) | 5699c9e832507e1c44036712a630de0494a6e172 | 690,243 |
from typing import Dict
def delete_api_gateway(api_gateway_client, api_gateway_id: str) -> Dict:
"""
Delete the API Gateway given ID.
Args:
api_gateway_client: API Gateway V2 Client.
api_gateway_id: API Gateway ID to delete.
Returns: The delete_api API response dict.
"""
retu... | 50018d98484ac43576718fafdafc3da0092ab77c | 690,244 |
def get_requirements(remove_links=True):
"""
lists the requirements to install.
"""
requirements = []
try:
with open('requirements.txt') as f:
requirements = f.read().splitlines()
except Exception as ex:
with open('aguamenti.egg-info\requires.txt') as f:
r... | 6f0ac661b154d186d642f3be9f22e5cdc440f5fe | 690,245 |
from bs4 import BeautifulSoup
def html_to_text(html:str) -> str:
"""
Парсинг входящего html в простой текст
:param html: Входящий html
:type html: str
:return: Содержимое html в виде простого текста
:rtype: str
"""
soup = BeautifulSoup(html, 'html.parser')
lines = []
for p... | 33a622ac25f0bbb6601ddb23a7596e652c8ac0bc | 690,246 |
def cal_weight(from_x, from_y, to_x, to_y):
"""
calculate distance
Args:
from_x: x coordinate
from_y: y coordinate
to_x: x coordinate
to_y: y coordinate
Returns:
distance
"""
# return abs(from_x - to_x) + abs(from_y - to_y) # manhattan
return ((fro... | e9f56f935e61f150b6d7632bbf0e4d8282a4d8de | 690,247 |
def convert_datetimes_to_seconds(start_date, datetimes):
""" Converts difference in datetimes to total elapsed seconds.
Parameters
----------
start_date : datetime object
Start date to use for calculating total elapsed seconds.
datetimes : list of datetimes
List of datetimes to calc... | 2f0185731b04b669942a406e025f668186025e84 | 690,248 |
import re
def intersect_lists(list1, list2):
"""Intersect Lists"""
intersect_list = [val for val in list1 if val in list2]
for item in intersect_list:
print("item: {0}\n".format(item))
# intersect_list = [val for val in list1 if val in list2]
# use pattern matching because the original ch... | 04af6dab9739692b6cab124e18eadcf084167ce9 | 690,249 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.