content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def ky_att(xs, b, Mach, k0, Att=-20):
"""
Returns the spanwise gust wavenumber 'ky_att' with response at 'xs' attenuated by 'Att' decibels
Parameters
----------
xs : float
Chordwise coordinate of reference point, defined in interval (-b, +b].
b : float
Airfoil semi chord.
... | 78d62081d0849d035953a694bbb7a0fcf956f76b | 6,044 |
from typing import Optional
def has_multiline_items(strings: Optional[Strings]) -> bool:
    """Return True if any item in *strings* spans multiple lines."""
    # Treat None and the empty collection alike: nothing to inspect.
    if not strings:
        return False
    return any(is_multiline(entry) for entry in strings)
from typing import Optional
from typing import Tuple
import inspect
def eval_ctx(
layer: int = 0, globals_: Optional[DictStrAny] = None, locals_: Optional[DictStrAny] = None
) -> Tuple[DictStrAny, DictStrAny]:
"""获取一个上下文的全局和局部变量
Args:
layer (int, optional): 层数. Defaults to 0.
globals_ (Op... | 81b782596bcc29f1be4432cc1b95230ac952bf2b | 6,046 |
def extract_vcalendar(allriscontainer):
"""Return a list of committee meetings extracted from html content."""
vcalendar = {
'vevents': findall_events(allriscontainer),
}
if vcalendar.get('vevents'):
base_url = allriscontainer.base_url
vcalendar['url'] = find_calendar_url(base_ur... | f792ae3d8826d37b2fba874524ec78ac502fb1f0 | 6,048 |
def rnn_helper(inp,
length,
cell_type=None,
direction="forward",
name=None,
reuse=None,
*args,
**kwargs):
"""Adds ops for a recurrent neural network layer.
This function calls an actual implementation of ... | d6d457a10bd921560a76bc54a083271c82b144ec | 6,049 |
def get_data(dataset):
"""
:return: encodings array of (2048, n)
labels list of (n)
"""
query = "SELECT * FROM embeddings WHERE label IS NOT NULL"
cursor, connection = db_actions.connect(dataset)
cursor.execute(query)
result_list = cursor.fetchall()
encodings = np.zeros((2... | 9f23631c6e263f99bab976e1225adbb448323783 | 6,050 |
def read_hdr(name, order='C'):
    """Read image dimensions from a ``<name>.hdr`` header file.

    Parameters
    ----------
    name : str
        Path of the header file without the ``.hdr`` extension.
    order : str, optional
        ``'C'`` (default) reverses the dimension order read from the file
        (row-major convention); any other value keeps the file order.

    Returns
    -------
    list of int
        Dimensions parsed from the second line of the header.
    """
    # Context manager guarantees the handle is closed even if reading fails
    # (the original left the file open on an exception between open/close).
    with open(name + ".hdr", "r") as header:
        header.readline()  # skip first line (header label)
        dims_line = header.readline()
    dims = [int(token) for token in dims_line.split()]
    if order == 'C':
        dims.reverse()
    return dims
def IsTouchDevice(dev):
    """Check if a device is a touch device.

    Args:
      dev: evdev.InputDevice

    Returns:
      True if dev is a touch device.
    """
    capabilities = dev.capabilities()
    key_events = capabilities.get(evdev.ecodes.EV_KEY, [])
    return evdev.ecodes.BTN_TOUCH in key_events
def load_users():
"""
Loads users csv
:return:
"""
with open(USERS, "r") as file:
# creates dictionary to separate csv values to make it easy to iterate between them
# the hash() function is used to identify the values in the csv, as they have their individual hash
# keys, a... | 255745d36b5b995dfd9a8c0b13a154a87ab6f25e | 6,053 |
def clustering_consistency_check(G):
""" Check consistency of a community detection algorithm by running it a number of times.
"""
Hun = G.to_undirected()
Hun = nx.convert_node_labels_to_integers(Hun,label_attribute='skeletonname')
WHa = np.zeros((len(Hun.nodes()),len(Hun.nodes())))
for i ... | 917bb7a23b651821389edbcc62c81fbe4baf3d08 | 6,055 |
def l2_normalize_rows(frame):
"""
L_2-normalize the rows of this DataFrame, so their lengths in Euclidean
distance are all 1. This enables cosine similarities to be computed as
dot-products between these rows.
Rows of zeroes will be normalized to zeroes, and frames with no rows will
be returned... | 889c2f4473fdab4661fecdceb778aae1bb62652d | 6,056 |
import socket
def canonical_ipv4_address(ip_addr):
"""Return the IPv4 address in a canonical format"""
return socket.inet_ntoa(socket.inet_aton(ip_addr)) | edacc70ccc3eef12030c4c597c257775d3ed5fa4 | 6,057 |
def _build_dynatree(site, expanded):
"""Returns a dynatree hash representation of our pages and menu
hierarchy."""
subtree = _pages_subtree(site.doc_root, site.default_language, True, 1,
expanded)
subtree['activate'] = True
pages_node = {
'title': 'Pages',
'key': 'system:page... | 38dd222ed5cde6b4d6bff4a632c6150666580b92 | 6,058 |
def aggregator(df, groupbycols):
"""
Aggregates flowbyactivity or flowbysector df by given groupbycols
:param df: Either flowbyactivity or flowbysector
:param groupbycols: Either flowbyactivity or flowbysector columns
:return:
"""
# tmp replace null values with empty cells
df = replace... | f8333087efc4a48d70aa6e3d727f73a7d03c8252 | 6,060 |
def unpack(X):
    """Unpack a comma separated list of values into a flat list."""
    pieces = [entry.split(",") for entry in list(X)]
    return flatten(pieces)
def doize(tock=0.0, **opts):
"""
Decorator that returns Doist compatible decorated generator function.
Usage:
@doize
def f():
pass
Parameters:
tock is default tock attribute of doized f
opts is dictionary of remaining parameters that becomes .opts attribute
o... | 0c4a4220546b8c0cbc980c10de0476c9fc6c7995 | 6,062 |
def make_chained_transformation(tran_fns, *args, **kwargs):
"""Returns a dataset transformation function that applies a list of
transformations sequentially.
Args:
tran_fns (list): A list of dataset transformation.
*args: Extra arguments for each of the transformation function.
**kw... | 5f24e030df74a0617e633ca8f8d4a3954674b001 | 6,064 |
def configure_optimizer(learning_rate):
"""Configures the optimizer used for training.
Args:
learning_rate: A scalar or `Tensor` learning rate.
Returns:
An instance of an optimizer.
Raises:
ValueError: if FLAGS.optimizer is not recognized.
"""
if FLAGS.optimizer == 'adadelta':
optim... | bf7dd03c4133675d58428a054cc16e7be41e88b4 | 6,065 |
import functools
def train_and_evaluate(config, workdir):
"""Runs a training and evaluation loop.
Args:
config: Configuration to use.
workdir: Working directory for checkpoints and TF summaries. If this
contains checkpoint training will be resumed from the latest checkpoint.
Returns:
Trainin... | 87f1dba561563acc0033663a30f105fe4056d235 | 6,066 |
def increment(i,k):
""" this is a helper function for a summation of the type :math:`\sum_{0 \leq k \leq i}`,
where i and k are multi-indices.
Parameters
----------
i: numpy.ndarray
integer array, i.size = N
k: numpy.ndarray
integer array, k.size = N... | 1ac8ef592376fbfa0d04cdd4b1c6b29ad3ed9fbd | 6,067 |
def sample_lopt(key: chex.PRNGKey) -> cfgobject.CFGObject:
"""Sample a small lopt model."""
lf = cfgobject.LogFeature
rng = hk.PRNGSequence(key)
task_family_cfg = para_image_mlp.sample_image_mlp(next(rng))
lopt_name = parametric_utils.choice(
next(rng), [
"LearnableAdam", "LearnableSGDM", "L... | b52a7640532ed8ce7760474edbd9832d93e7bdc3 | 6,068 |
import numpy
import time
def gen_df_groupby_usecase(method_name, groupby_params=None, method_params=''):
"""Generate df groupby method use case"""
groupby_params = {} if groupby_params is None else groupby_params
groupby_params = get_groupby_params(**groupby_params)
func_text = groupby_usecase_tmpl.... | 3a4f5745744299db354c17198d3175ad8b7ce4e4 | 6,069 |
import csv
def merge_csvfiles(options):
""" Think of this as a 'join' across options.mergefiles on equal values of
the column options.timestamp. This function takes each file in
options.mergefiles, reads them, and combines their columns in
options.output. The only common column should be options.time... | 171b448c2b49584ce5a601f7d8789d7198fdf935 | 6,071 |
import html
def row_component(cards):
"""
Creates a horizontal row used to contain cards.
The card and row_component work together to create a
layout that stretches and shrinks when the user changes the size of the window,
or accesses the dashboard from a mobile device.
See https://developer.... | baa9f86bcac786a94802d003b1abcc75686e08d8 | 6,072 |
def recCopyElement(oldelement):
"""Generates a copy of an xml element and recursively of all
child elements.
:param oldelement: an instance of lxml.etree._Element
:returns: a copy of the "oldelement"
.. warning::
doesn't copy ``.text`` or ``.tail`` of xml elements
"""
newelement =... | 981f0c5ccdeacc1d82ebbde2de6f51298e82fa14 | 6,073 |
import hashlib
def KETAMA(key):
"""
MD5-based hashing algorithm used in consistent hashing scheme
to compensate for servers added/removed from memcached pool.
"""
d = hashlib.md5(key).digest()
c = _signed_int32
h = c((ord(d[3])&0xff) << 24) | c((ord(d[2]) & 0xff) << 16) | \
c((... | 6baec2ea79a166389625b19c56cbcd3734e819b7 | 6,075 |
import calendar
def add_months(dt, months):
    """Add (or subtract) a number of months to a date/datetime.

    The day is clamped to the last valid day of the target month
    (e.g. Jan 31 + 1 month -> Feb 28/29).

    Args:
        dt: a ``datetime.date`` or ``datetime.datetime`` instance.
        months: number of months to add; may be negative.

    Returns:
        A new object of the same type with year/month/day adjusted.
    """
    month = dt.month - 1 + months
    # Floor division is required: ``/`` yields a float in Python 3 and makes
    # ``dt.replace(year=...)`` raise TypeError; ``//`` also floors correctly
    # for negative month offsets.
    year = dt.year + month // 12
    month = month % 12 + 1
    day = min(dt.day, calendar.monthrange(year, month)[1])
    return dt.replace(year=year, month=month, day=day)
def _decomposer_interp(fp, x=None, xp=None):
"""Do the actual interpolation for multiprocessing"""
return np.interp(x, xp, fp) | eef6debf668c62f4d817a0b3697019d0bd4007c9 | 6,077 |
import tensorflow as tf
from nn4omtf import utils
import numpy as np
def create_nn(x, x_shape, is_training):
"""
Args:
x: input hits array
x_shape: input tensor shape for single event
is_training: placeholder for indicating train or valid/test phase
Note: Only code in `create_nn` ... | 8c7a4ce128e434e964b951ca6fe65722c9936be9 | 6,078 |
def generate_outlier_bounds_iqr(df, column, multiplier=1.5):
"""
Takes in a dataframe, the column name, and can specify a multiplier (default=1.5). Returns the upper and lower bounds for the
values in that column that signify outliers.
"""
q1 = df[column].quantile(.25)
q3 = df[column].quantile(.... | 7f096d5f5cf2417cbc161713715a39560efd140a | 6,080 |
import random
def generate_data(Type):
"""
随机生成CAN帧中所包含的数据
:param Type: 需要生成数据的类型
:return: 生成的随机数据序列,长度为8,如['88', '77', '55', '44', '22', '11', '33'', '44']
"""
data = []
if Type == 1:
# 生成反馈帧单体电池Cell1-24电压信息
standard_vol = 35
offset = random.randint(0, 15)
... | 3a920be4b7ef5c5c3e258b3e3c79bc028004179a | 6,081 |
def counting_sort(array):
"""
SORTING FUNCTION USING COUNTING SORT ALGORITHM
ARG array = LIST(ARRAY) OF NUMBERS
"""
## counter lists has elements for every
maximum = max(array)
counter = [0]*(maximum+1)
for i in range(len(array)):
counter[array[i]] += 1
for i in range(1, ma... | 986e2f9277fa71dcd9897ac409653009c651c49f | 6,082 |
import math
from PIL import ImageColor
def indexedcolor(i, num, npersat=15, lightness=60):
"""Returns an rgb color triplet for a given index, with a finite max 'num'.
Thus if you need 10 colors and want to get color #5, you would call this with (5, 10).
The colors are "repeatable".
"""
nsats = int... | 418a875bc8ae50ce21f9667f46718863ba0f55e3 | 6,083 |
def make_customer_satisfaction(branch_index='A'):
    """Build the average customer satisfaction heat map for a branch."""
    return make_heat_map(branch_index, 'mean(Rating)', 'Average Satisfaction')
def extract_infos(fpath):
"""Extract information about file"""
try:
pe = pefile.PE(fpath)
except pefile.PEFormatError:
return {}
res = {}
res['Machine'] = pe.FILE_HEADER.Machine
res['SizeOfOptionalHeader'] = pe.FILE_HEADER.SizeOfOptionalHeader
res['Characteristics'] = pe.FIL... | f7f3cbef72f7b9d05c25e2aabde33c7a814d05bd | 6,085 |
def calibrate_eye_in_hand(calibration_inputs):
"""Perform eye-in-hand calibration.
Args:
calibration_inputs: List of HandEyeInput
Returns:
A HandEyeOutput instance containing the eye-in-hand transform
"""
return HandEyeOutput(
_zivid.calibration.calibrate_eye_in_hand(
... | d8bc7b8cfe821809c441d3151297edf7f8267803 | 6,086 |
from typing import Optional
def get_intersect(A: np.ndarray, B: np.ndarray, C: np.ndarray, D: np.ndarray) -> Optional[np.ndarray]:
"""
Get the intersection of [A, B] and [C, D]. Return False if segment don't cross.
:param A: Point of the first segment
:param B: Point of the first segment
:param C... | 1c3fab6d189f218e9f5f7e6648a46a9e53683366 | 6,087 |
from typing import Callable
def _make_vector_laplace_scipy_nd(bcs: Boundaries) -> Callable:
""" make a vector Laplacian using the scipy module
This only supports uniform discretizations.
Args:
bcs (:class:`~pde.grids.boundaries.axes.Boundaries`):
|Arg_boundary_conditions|
... | 3cda36d53755c84fcb47259ade64752610aeffbe | 6,088 |
def dot_to_dict(values):
"""Convert dot notation to a dict. For example: ["token.pos", "token._.xyz"]
become {"token": {"pos": True, "_": {"xyz": True }}}.
values (iterable): The values to convert.
RETURNS (dict): The converted values.
"""
result = {}
for value in values:
path = res... | a2c56a01b179d27eabc728d6ff2ec979885d5feb | 6,089 |
def _draw_edges(G, pos, nodes, ax):
"""Draw the edges of a (small) networkx graph.
Params:
G (nx.classes.*) a networkx graph.
pos (dict) returned by nx.layout methods.
nodes (dict) of Circle patches.
ax (AxesSubplot) mpl axe.
Return:
... | 28a207a190a7066656518de7c8e8626b2f534146 | 6,090 |
def benjamini_hochberg_stepup(p_vals):
"""
Given a list of p-values, apply FDR correction and return the q values.
"""
# sort the p_values, but keep the index listed
index = [i[0] for i in sorted(enumerate(p_vals), key=lambda x:x[1])]
# keep the p_values sorted
p_vals = sorted(p_vals)
q... | 7cff2e8d28cda37c4271935ef2e6fb48441137c3 | 6,091 |
def remove_transcription_site(rna, foci, nuc_mask, ndim):
"""Distinguish RNA molecules detected in a transcription site from the
rest.
A transcription site is defined as as a foci detected within the nucleus.
Parameters
----------
rna : np.ndarray, np.int64
Coordinates of the detected ... | 3f6fe083cb85dbf2f7bc237e750be57f13398889 | 6,092 |
def hexagonal_numbers(length: int) -> list[int]:
"""
:param len: max number of elements
:type len: int
:return: Hexagonal numbers as a list
Tests:
>>> hexagonal_numbers(10)
[0, 1, 6, 15, 28, 45, 66, 91, 120, 153]
>>> hexagonal_numbers(5)
[0, 1, 6, 15, 28]
>>> hexagonal_numbers(0... | 632e60505cb17536a17b20305a51656261e469f5 | 6,093 |
def get_free_remote_port(node: Node) -> int:
"""Returns a free remote port.
Uses a Python snippet to determine a free port by binding a socket
to port 0 and immediately releasing it.
:param node: Node to find a port on.
"""
output = node.run("python -c 'import socket; s=socket.soc... | 4cdb0f62909abae1af8470611f63fcc9f5495095 | 6,094 |
from typing import Tuple
from typing import List
import tqdm
def read_conll_data(data_file_path: str) -> Tuple[List[Sentence], List[DependencyTree]]:
"""
Reads Sentences and Trees from a CONLL formatted data file.
Parameters
----------
data_file_path : ``str``
Path to data to be read.
... | 6bee76277fb6a15d03c5c80a5d083920a4412222 | 6,095 |
from typing import Optional
def get_algo_meta(name: AlgoMeta) -> Optional[AlgoMeta]:
    """Look up the meta information of a built-in or registered algorithm.

    Returns None when no algorithm with a matching name exists.
    """
    return next(
        (candidate for candidate in get_all_algo_meta() if candidate.name == name),
        None,
    )
def read_gbt_target(sdfitsfile, objectname, verbose=False):
"""
Give an object name, get all observations of that object as an 'obsblock'
"""
bintable = _get_bintable(sdfitsfile)
whobject = bintable.data['OBJECT'] == objectname
if verbose:
print("Number of individual scans for Object %... | 1215fdccee50f0ab5d135a5cccf0d02da09410e2 | 6,098 |
def regression_target(label_name=None,
weight_column_name=None,
target_dimension=1):
"""Creates a _TargetColumn for linear regression.
Args:
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight... | 064954b58b57caeb654ed30f31b9560ab01d7c42 | 6,099 |
def rdoublegauss(mu1, mu2, sigma1, sigma2, ratio, size=None):
"""random variable from double gaussian"""
r1 = ratio / (1. + ratio)
r2 = 1 - r1
R = np.asarray(np.random.random(size))
Rshape = R.shape
R = np.atleast1d(R)
mask1 = (R < r1)
mask2 = ~mask1
N1 = mask1.sum()
N2 = R.siz... | 5286d31985656d2f38c4e6b126d2f6d0915c82cb | 6,100 |
def check_add_role(store, id, name):
    """Return the role named *name*, creating it if it does not exist.

    Args:
        store: Storage backend exposing ``find_role`` and ``create_role``.
        id: Identifier to assign when a new role must be created.
        name: Name of the role to look up.

    Returns:
        The existing role, or the newly created one.
    """
    role = store.find_role(name)
    # Identity comparison: ``find_role`` signals "missing" with None, and
    # ``== None`` could be fooled by objects overriding __eq__.
    if role is None:
        return store.create_role(id=id, name=name)
    return role
from pathlib import Path
def delta_path(base_path: Path, item_path: Path, new_base_path: Path) -> Path:
"""
Removes a base path from an item, and appends result to a new path
:param base_path: The :py:class:`pathlib.Path` to be removed from `item_path`
:param item_path: The :py:class:`pathlib.Path` t... | ec531a011e36f053a8092525faae2047f5f66ccc | 6,103 |
import asyncio
async def async_validate_config(hass, config):
"""Validate config."""
automations = list(
filter(
lambda x: x is not None,
await asyncio.gather(
*(
_try_async_validate_config_item(hass, p_config, config)
for... | 7f77a4c008a5fcb8d275bb2e7f65005d9e1c49b5 | 6,104 |
def _fwd6(y, dt): # pragma: no cover
"""Compute the first derivative of a uniformly-spaced-in-time array with a
sixth-order forward difference scheme.
Parameters
----------
y : (7,...) ndarray
Data to differentiate. The derivative is taken along the... | 0d7321b3615fab6d6e065917ec94479ada0ee70c | 6,106 |
def minimize_newton_cg(nrgs, x0, num_params):
"""
Minimzes a structure using a Newton-CG method. This requires a
hopefully fully invertible analytic Hessian that will be used
to minimize geometries.
Parameters
----------
nrgs: [list of functionals]
Energy functions used to compute t... | 46ddd6b2004579ef07170ef578859c7119ed4e13 | 6,107 |
def currency(price, currency):
"""
Returns price in currency format
"""
price = float(price)
price *= float(currency.exchange_rate)
try:
return currency.display_format.format(price)
except Exception as e:
raise ImproperlyConfigured('Invalid currency format string: "%s" for cu... | 2204993f5f51c62669395de40dc14d16f110c4b4 | 6,108 |
def project_point(x, R, T, f, c, k, p):
"""
Args
x: Nx3 points in world coordinates
R: 3x3 Camera rotation matrix
T: 3x1 Camera translation parameters
f: 2x1 Camera focal length
c: 2x1 Camera center
k: 3x1 Camera radial distortion coefficients
p: 2x1 Camer... | 5b6cce136ac6753fcdefcde01db9636357687ab2 | 6,109 |
def sum_to_scalar(*args):
    """Reduce each dict of losses/nmsks (evaluated in parallel) to scalars.

    Args:
        *args: mappings whose values support ``.sum()`` (e.g. arrays/tensors).

    Returns:
        list: one dict per input, with every value replaced by its sum.
    """
    # Comprehension replaces the manual append loop; one dict per argument.
    return [{k: v.sum() for k, v in arg.items()} for arg in args]
def inv(h_array: np.ndarray) -> np.ndarray:
"""
Calculate pinvh of PSD array. Note pinvh performs poorly
if input matrix is far from being Hermitian, so use pinv2
instead in this case.
Parameters:
----------
h_array : input matrix, assume to be Hermitian
Returns:
----------
... | c3305878b3f2dfdaabe6a245d8063b1039e19bc2 | 6,111 |
from datetime import datetime
def update_risk_cavs(connection):
"""Parse cavs from html to markdown.
Args:
connection: SQLAlchemy connection.
Returns:
ids of risks for which cavs where updated.
"""
cavs_data = connection.execute(
sa.text("""
SELECT cav.id, cav.attribu... | 8af9ef613259915573ca1efc699278c0c2a6a4e4 | 6,112 |
def prefix_to_number(prefix):
    """Return the number of the prefix."""
    # EAFP: a single lookup instead of membership test plus indexing.
    try:
        return PREFIXES[prefix]
    except KeyError:
        raise ValueError(f'prefix "{prefix}" not found in list of prefixes') from None
def is_response_going_to_be_used(request, spider):
"""Check whether the request's response is going to be used."""
callback = get_callback(request, spider)
if is_callback_using_response(callback):
return True
for provider in discover_callback_providers(callback):
if is_provider_using_re... | 4cd908dbebfd6089a25bf5168937b2a4f02f23ee | 6,114 |
def eval_market1501(distmat, q_vids, g_vids, q_camids, g_camids, max_rank=50):
"""Evaluation with Market1501 metrics
Key: for each query identity, its gallery images from the same camera view are discarded.
"""
num_q, num_g = distmat.shape
if num_g < max_rank:
max_rank = num_g
print(... | 5387ee7fe7cac90406ac91619844e8e1fd814d88 | 6,115 |
def pytype_raise():
"""A pytest.raises wrapper for catching TypeErrors.
Parameters
----------
match : str, default=None
Regular expression to match exception error text against.
Returns
-------
RaisesContext
pytest context manager for catching exception-raising blocks.
... | ec5c7a56a8a3fb9028fb0ec72ac814061def467d | 6,117 |
def lift_split_buffers(lines):
"""Lift the split buffers in the program
For each module, if we find any split buffers with the name "buf_data_split",
we will lift them out of the for loops and put them in the variable declaration
section at the beginning of the module.
Parameters
----------
... | 78919247b241dc29de84594b097c75d5e7ae1f03 | 6,118 |
import scipy
def peak_finder(
df_run,
cd,
windowlength,
polyorder,
datatype,
lenmax,
peak_thresh):
"""Determines the index of each peak in a dQdV curve
V_series = Pandas series of voltage data
dQdV_series = Pandas series of differential capacity data... | 370e019354579ab7b9a4eedef514dbde84801950 | 6,119 |
def make_box(world, x_dim, y_dim, z_dim, mass=0.5):
"""Makes a new axis-aligned box centered at the origin with
dimensions width x depth x height. The box is a RigidObject
with automatically determined inertia.
"""
boxgeom = Geometry3D()
boxgeom.loadFile("data/objects/cube.tri")
# box i... | f3257a8339542c55d96bd752bad1d0c69c6370e0 | 6,120 |
def LikelihoodRedshiftMeasure( measure='', data=[], scenario=False, measureable=False):
"""
returns likelihood functions of redshift for observed data of measure,
can be used to obtain estimate and deviation
Parameters
----------
measure : string
indicate which measure is probed
d... | 55d414bb0adb00fe549485f2e3682d15b761b7a4 | 6,123 |
def plural_suffix(count: int) -> str:
    """Return "s" unless *count* equals one."""
    return '' if count == 1 else 's'
def _pr_exists(user, namespace, repo, idx):
""" Utility method checking if a given PR exists. """
repo_obj = pagure.lib.query.get_authorized_project(
flask.g.session, project_name=repo, user=user, namespace=namespace
)
if not repo_obj:
return False
pr_obj = pagure.lib.query.search_... | 2e68b6d4282f6f3ca4d9645c78579e3df3889494 | 6,125 |
import csv
def readData(filename):
"""
Read in our data from a CSV file and create a dictionary of records,
where the key is a unique record ID and each value is dict
"""
data_d = {}
with open(filename) as f:
reader = csv.DictReader(f)
for row in reader:
clean_row... | 57dcb39dac9568024ae4be07bc0921c941d6fae3 | 6,126 |
def _get_client(app):
"""Returns a client instance for an App.
If the App already has a client associated with it, simply returns
it. Otherwise creates a new client, and adds it to the App before
returning it.
Args:
app: A Firebase App instance (or ``None`` to use the default App).
Re... | de96140ed7c15a4aa390f08a76fe7de0074730db | 6,127 |
def get_job_config_build_for_branch(**kwargs):
    """Build a copr-build JobConfig preset triggered by commits to "build-branch".

    Extra keyword arguments are forwarded verbatim to the JobConfig
    constructor; passing a key already fixed here (type, trigger, branch,
    scratch) raises TypeError, which keeps the preset authoritative.
    """
    return JobConfig(
        type=JobType.copr_build,
        trigger=JobConfigTriggerType.commit,
        branch="build-branch",
        scratch=True,
        **kwargs,
    )
import yaml
def get_connection_string_from_config_file(cfg_src, db_cfg_key):
"""
Gets connection parameters from specified section in
a configuration file.
"""
# reading complete configuration
with open(cfg_src, 'r') as yml_file:
cfg = yaml.safe_load(yml_file)
# looking for specif... | e2245f8e9124d36e5a373f1891590046c10a38fd | 6,129 |
from typing import Tuple
from typing import Sequence
def _decomp_0_matrices(
kak: 'cirq.KakDecomposition',
atol: float = 1e-8,
) -> Tuple[Sequence[Tuple[np.ndarray, np.ndarray]], complex]:
"""Returns the single-qubit matrices for the 0-SQRT_ISWAP decomposition.
Assumes canonical x, y, z and (x, y, z)... | b84d65cc7076b5d294cbf7f4f6a3c3ddff7ef7d2 | 6,130 |
import math
def concave(x, m):
    """Shape function."""
    assert shape_args_ok(x, m)
    # Product of sines over the first len(x) - m coordinates.
    value = 1.0
    for coord in x[: len(x) - m]:
        value *= math.sin(coord * math.pi / 2.0)
    if m != 1:
        value *= math.cos(x[len(x) - m] * math.pi / 2.0)
    return correct_to_01(value)
def norm_img(img):
"""
normalization image
:param img: (C, H, W)
:return:
norm_img: (C, H, W)
"""
height, width, channel = img.shape
img = np.reshape(img, (height * width, channel)) # (height * width, channel)
mean = np.mean(img, axis=0, keepdims=True) # (1, channel)
center... | a794ec4e096faa0efbfc9c993d9292a54f6573cc | 6,133 |
def convert_examples_to_features(examples, use_label):
"""Loads a data file into a list of `InputBatch`s."""
features = []
line_tags = []
for (ex_index, example) in enumerate(examples):
if use_label:
labels = example.labels
else:
labels = ['O'] * len(example.uni... | 7720a79b7404e0d4cc340ae5ea78084b64115f92 | 6,135 |
def broadcast_to_rank(t, rank, axis = -1):
    """Appends dimensions to tf.Tensor `t` at axis `axis` to match rank `rank`."""
    # Static rank — assumes shapes are known at graph-construction time.
    current = t.shape.rank
    while current < rank:
        t = tf.expand_dims(t, axis=axis)
        current += 1
    return t
def operator(func):
    """
    Help decorator to rewrite a function so that
    it returns another function from it.
    """
    @wraps(func)
    def curried(*args, **kwargs):
        # Capture the call arguments; the stream is supplied later.
        def apply(stream):
            return func(stream, *args, **kwargs)
        return apply
    return curried
def json(body, charset="utf-8", **kwargs):
    """Takes JSON formatted data, converting it into native Python objects"""
    decoded = text(body, charset=charset)
    return json_converter.loads(decoded)
def read_corpus(file_path, encoding=ENCODING, **kwargs):
"""
Create a Linguistica object with a corpus data file.
:param file_path: path of input corpus file
:param encoding: encoding of the file at *file_path*. Default: ``'utf8'``
:param kwargs: keyword arguments for parameters and their values.
... | 28f8303e0b94e8df9b6d9a33aca14fa62b15f6e8 | 6,139 |
import random
def sample_pagerank(corpus, damping_factor, n):
"""
Return PageRank values for each page by sampling `n` pages
according to transition model, starting with a page at random.
Return a dictionary where keys are page names, and values are
their estimated PageRank value (a value between ... | 5c9f66aaf72c8330c2ee0fcd2402bf613c4eb9b7 | 6,140 |
import networkx
def compute_participants(matches, challonge_data):
"""Compute series participants.
Iterate all matches and players to create a graph.
Apply connected components algorithm to resolve distinct
participant groups over all matches.
Sort participant groups by number of wins to correla... | a715773d5edd3b4d6852096c665070e64bef1165 | 6,142 |
def write_haiku(word_array, is_ipv6):
"""Return the beautiful haiku"""
# String to place in schema to show word slot.
octct = 'OCTET'
schema = get_schema(is_ipv6, octct)
# Replace each instance of 'octet' in the schema with a word from
# the encoded word array.
for i in range(len(word_array... | b51dc7cd1cca642eb135c48952bbc2ca74faf5e1 | 6,143 |
def import_data():
"""
Utility function to imoprt summary tsv ready for usage in PyMol
"""
col_types = {
'sift_score': float, 'sift_median': float, 'total_energy': float,
'interaction_energy': float, 'diff_interaction_energy': float,
'diff_interface_residues': float, 'freq': floa... | 1b116d74ecba83658d05ea5dbda66b15175f3fdb | 6,144 |
from datetime import datetime
def get_current_datetime():
    """
    Get the current datetime in the application's configured timezone.
    Note: This function is intended to be mocked in testing
    Return:
        time(datetime.datetime): current datetime
    """
    # With ``from datetime import datetime`` in scope, the original
    # ``datetime.datetime.now(...)`` is an AttributeError on the class;
    # call ``datetime.now`` directly.
    return datetime.now(current_app.config['TIMEZONE'])
def adapt_all(iterable, to_cls):
    """Adapt every element of *iterable* to ``to_cls``.

    A ``None`` iterable yields an empty list.
    """
    return [] if iterable is None else [adapt(element, to_cls) for element in iterable]
import torch
def log_sum_exp_vb(vec, m_size):
"""
calculate log of exp sum
args:
vec (batch_size, vanishing_dim, hidden_dim) : input tensor
m_size : hidden_dim
return:
batch_size, hidden_dim
"""
_, idx = torch.max(vec, 1) # B * 1 * M
max_score = torch.gather(vec, 1, idx.view(-1, 1, m_size)).view(-1, 1,... | 87c99f9ab9a9c114792a2c895284a8743682fc06 | 6,148 |
def C_fun_gen(fractions, speciesindices, y, time):
"""
Calculate the distribution of carbon functional groups as a percent of
total carbon.
Parameters
----------
fractions : list
The lumped phases that you want to include (as specified
in MW['speci... | 28704b470fd919d998fcd8704b125827226fe151 | 6,149 |
def _sigmoid(x):
"""
Sigmoid function that smoothly limits values between 0.0 and 1.0
:param x: Numpy array with float values that are to be limited.
:return: Numpy array with float values between 0.0 and 1.0
"""
return 1.0 / (1.0 + np.exp(-x)) | 770875ba82df9d4ac8eb6d403527cf0fb62d3990 | 6,151 |
from typing import Dict
def inherit_n_genes_prob(n, n_father, n_mother, mutation_prob) -> Dict:
"""Returns dictionary with distribution of conditional probability of
inherited genes given that father has n_father genes and mother has
n_mother genes, taking into account probability of mutations."""
# ... | 0481244db107f6623aa109212e74be8b719f5bb8 | 6,152 |
async def get_metrics_address_counts_summary():
"""
Latest summary of address counts.
"""
qry = f"""
select col
, latest
, diff_1d
, diff_1w
, diff_4w
, diff_6m
, diff_1y
from mtr.address_counts_by_minimal_balance_ch... | c22d6c3442833743559c42e4be59a25ab073c03b | 6,153 |
from typing import Dict
from typing import Any
async def processor(db, document: Dict[str, Any]) -> Dict[str, Any]:
"""
Process a history document before it is returned to the client.
:param db: the application object
:param document: the document to process
:return: the processed document
""... | 89de3dd255923b3eca6444ee4410980e857aa8e1 | 6,154 |
def _unit_scale_traindata(X, xmins, xmaxs):
"""If xmax > xmin, unit-scale the training data, else do nothing
Parameters
----------
x : ndarray of shape (m, n)
xmins : ndarray of shape (n, )
xmaxs : ndarray of shape (n, )
Returns
-------
result : ndarray of shape (m, n)
Notes... | 2778c7a9d7b6e23775df2354b92057e6a5511dc5 | 6,155 |
def extractive_explanations(
dataset,
prefix='explain sentiment',
input_feature='review',
output_classes=('negative', 'positive'),
drop_explanations=False
):
"""Preprocessor to handle extractive rationale prediction datasets.
The preprocessor expects a dataset with the provided 'input_featu... | c1549279cbb676ee45287afe99f1f94410c27b62 | 6,158 |
def corr_weighted_kendalltau(top_list_prev, top_list, use_fast=True):
"""Compute weighted Kendall's Tau correlation (based on custom implementation!).
NOTE: Lists are DataFrame columns AND they must be sorted according to their value!!!"""
# it is irrelevant whether we compute kendall for ranks or scores.
... | 35b473040508561798831343d770acabd97cb76e | 6,159 |
from datetime import datetime
import random
def generate_processes_by_exposure(exposure):
""" Creates a simulated process based on an exposure.
Arguments:
exposure {object} -- Exposure model
Raises:
ValueError -- returns when there is no processing
with a respective expos... | a3a335184fbf9c51e47210ac22fd4d4e8a8a6aa4 | 6,160 |
import copy
def cross_val_confusion(classifier, X, y, cv=None):
"""
Evaluate confusion matrix and score from each fold of cross validation
Parameters:
----------
classifier: classifier object
The object used to fit the data.
X[ndarray]: shape=(n_sample, n_feature)
y[ndarray]: sha... | bbdbed0bc18b7ac201f2933e9cff10eab19d5a75 | 6,161 |
import asyncio
async def async_unload_entry(hass: HomeAssistantType, entry: ConfigEntry):
"""Unload Synology DSM sensors."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, platform)
for platform in PLATFORMS
... | 876aceeaa113a6275a60328f6f00c0d0c4c0f2e1 | 6,162 |
import pathlib
def confirm_control_contains(trestle_dir: pathlib.Path, control_id: str, part_label: str, seek_str: str) -> bool:
"""Confirm the text is present in the control markdown in the correct part."""
control_dir = trestle_dir / ssp_name / control_id.split('-')[0]
md_file = control_dir / f'{control... | b78cd7a7ef435fcee483d98fe2199ba90c905833 | 6,164 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.