content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def rdoublegauss(mu1, mu2, sigma1, sigma2, ratio, size=None):
"""random variable from double gaussian"""
r1 = ratio / (1. + ratio)
r2 = 1 - r1
R = np.asarray(np.random.random(size))
Rshape = R.shape
R = np.atleast1d(R)
mask1 = (R < r1)
mask2 = ~mask1
N1 = mask1.sum()
N2 = R.siz... | 5286d31985656d2f38c4e6b126d2f6d0915c82cb | 6,100 |
def check_add_role(store, id, name):
    """Check whether a role exists in *store*; create it if it does not.

    Args:
        store: datastore exposing ``find_role(name)`` and
            ``create_role(id=..., name=...)``.
        id: identifier to use when creating a new role.
        name: role name to look up.

    Returns:
        The existing role if found, otherwise the newly created role.
    """
    role = store.find_role(name)
    if role is None:
        # `is None` instead of `== None`: identity check, not equality —
        # avoids triggering a custom __eq__ on the role object.
        return store.create_role(id=id, name=name)
    return role
import copy
def report_map():
"""
update DB with new version of a container instance's id map
:return: str. 'true' if successful
"""
if not request.json:
logger.error('received non-json data')
abort(400)
logger.info('Received map update from {}'.format(request.remote_addr))
... | 46707f8c7ba4a02fa27f4c426c05b428c9eb43b2 | 6,102 |
from pathlib import Path
def delta_path(base_path: Path, item_path: Path, new_base_path: Path) -> Path:
"""
Removes a base path from an item, and appends result to a new path
:param base_path: The :py:class:`pathlib.Path` to be removed from `item_path`
:param item_path: The :py:class:`pathlib.Path` t... | ec531a011e36f053a8092525faae2047f5f66ccc | 6,103 |
import asyncio
async def async_validate_config(hass, config):
"""Validate config."""
automations = list(
filter(
lambda x: x is not None,
await asyncio.gather(
*(
_try_async_validate_config_item(hass, p_config, config)
for... | 7f77a4c008a5fcb8d275bb2e7f65005d9e1c49b5 | 6,104 |
import logging
import os
def judge_result(problem_id, commit_id, data_num):
"""对输出数据进行评测"""
logging.debug("Judging result")
correct_result = os.path.join(
data_dir, str(problem_id), 'data%s.out' %
data_num)
user_result = os.path.join(
work_dir, str(commit_id), 'out%s.txt' %
... | 6eb9503ccc2b6d9ae85611657997308bffb618ce | 6,105 |
def _fwd6(y, dt): # pragma: no cover
"""Compute the first derivative of a uniformly-spaced-in-time array with a
sixth-order forward difference scheme.
Parameters
----------
y : (7,...) ndarray
Data to differentiate. The derivative is taken along the... | 0d7321b3615fab6d6e065917ec94479ada0ee70c | 6,106 |
def minimize_newton_cg(nrgs, x0, num_params):
"""
Minimzes a structure using a Newton-CG method. This requires a
hopefully fully invertible analytic Hessian that will be used
to minimize geometries.
Parameters
----------
nrgs: [list of functionals]
Energy functions used to compute t... | 46ddd6b2004579ef07170ef578859c7119ed4e13 | 6,107 |
def currency(price, currency):
"""
Returns price in currency format
"""
price = float(price)
price *= float(currency.exchange_rate)
try:
return currency.display_format.format(price)
except Exception as e:
raise ImproperlyConfigured('Invalid currency format string: "%s" for cu... | 2204993f5f51c62669395de40dc14d16f110c4b4 | 6,108 |
def project_point(x, R, T, f, c, k, p):
"""
Args
x: Nx3 points in world coordinates
R: 3x3 Camera rotation matrix
T: 3x1 Camera translation parameters
f: 2x1 Camera focal length
c: 2x1 Camera center
k: 3x1 Camera radial distortion coefficients
p: 2x1 Camer... | 5b6cce136ac6753fcdefcde01db9636357687ab2 | 6,109 |
def sum_to_scalar(*args):
    """Reduce each tensor in the given mappings to a scalar via ``.sum()``.

    Each positional argument is a dict of name -> tensor (losses/nmsks
    evaluated in parallel); a list of dicts with summed values is returned.
    """
    return [
        {name: tensor.sum() for name, tensor in mapping.items()}
        for mapping in args
    ]
def inv(h_array: np.ndarray) -> np.ndarray:
"""
Calculate pinvh of PSD array. Note pinvh performs poorly
if input matrix is far from being Hermitian, so use pinv2
instead in this case.
Parameters:
----------
h_array : input matrix, assume to be Hermitian
Returns:
----------
... | c3305878b3f2dfdaabe6a245d8063b1039e19bc2 | 6,111 |
from datetime import datetime
def update_risk_cavs(connection):
"""Parse cavs from html to markdown.
Args:
connection: SQLAlchemy connection.
Returns:
ids of risks for which cavs where updated.
"""
cavs_data = connection.execute(
sa.text("""
SELECT cav.id, cav.attribu... | 8af9ef613259915573ca1efc699278c0c2a6a4e4 | 6,112 |
def prefix_to_number(prefix):
    """Return the number associated with *prefix*.

    Raises:
        ValueError: if *prefix* is not a known prefix.
    """
    if prefix not in PREFIXES:
        raise ValueError(f'prefix "{prefix}" not found in list of prefixes')
    return PREFIXES[prefix]
def is_response_going_to_be_used(request, spider):
"""Check whether the request's response is going to be used."""
callback = get_callback(request, spider)
if is_callback_using_response(callback):
return True
for provider in discover_callback_providers(callback):
if is_provider_using_re... | 4cd908dbebfd6089a25bf5168937b2a4f02f23ee | 6,114 |
def eval_market1501(distmat, q_vids, g_vids, q_camids, g_camids, max_rank=50):
"""Evaluation with Market1501 metrics
Key: for each query identity, its gallery images from the same camera view are discarded.
"""
num_q, num_g = distmat.shape
if num_g < max_rank:
max_rank = num_g
print(... | 5387ee7fe7cac90406ac91619844e8e1fd814d88 | 6,115 |
import os
import glob
import warnings
def get_band_structure_from_vasp_multiple_branches(dir_name, efermi=None,
projections=False):
"""
This method is used to get band structure info from a VASP directory. It
takes into account that the run can be divided... | 5df7c4db41a29cfb9c811251b2de7f3a52289fab | 6,116 |
def pytype_raise():
"""A pytest.raises wrapper for catching TypeErrors.
Parameters
----------
match : str, default=None
Regular expression to match exception error text against.
Returns
-------
RaisesContext
pytest context manager for catching exception-raising blocks.
... | ec5c7a56a8a3fb9028fb0ec72ac814061def467d | 6,117 |
def lift_split_buffers(lines):
"""Lift the split buffers in the program
For each module, if we find any split buffers with the name "buf_data_split",
we will lift them out of the for loops and put them in the variable declaration
section at the beginning of the module.
Parameters
----------
... | 78919247b241dc29de84594b097c75d5e7ae1f03 | 6,118 |
import scipy
def peak_finder(
df_run,
cd,
windowlength,
polyorder,
datatype,
lenmax,
peak_thresh):
"""Determines the index of each peak in a dQdV curve
V_series = Pandas series of voltage data
dQdV_series = Pandas series of differential capacity data... | 370e019354579ab7b9a4eedef514dbde84801950 | 6,119 |
def make_box(world, x_dim, y_dim, z_dim, mass=0.5):
"""Makes a new axis-aligned box centered at the origin with
dimensions width x depth x height. The box is a RigidObject
with automatically determined inertia.
"""
boxgeom = Geometry3D()
boxgeom.loadFile("data/objects/cube.tri")
# box i... | f3257a8339542c55d96bd752bad1d0c69c6370e0 | 6,120 |
import os
def as_file(uri):
"""
If the URI is a file (either the ``file`` scheme or no scheme), then returns the normalized
path. Otherwise, returns ``None``.
"""
if _IS_WINDOWS:
# We need this extra check in Windows before urlparse because paths might have a drive
# prefix, e.g. ... | 774cd4bd5786b64cea757ab777d56a610d40b71d | 6,121 |
def add_full_name(obj):
"""
A decorator to add __full_name__ to the function being decorated.
This should be done for all decorators used in pywikibot, as any
decorator that does not add __full_name__ will prevent other
decorators in the same chain from being able to obtain it.
This can be use... | ca7b1541adaa39a62073ec630d705166fc8833b6 | 6,122 |
def LikelihoodRedshiftMeasure( measure='', data=[], scenario=False, measureable=False):
"""
returns likelihood functions of redshift for observed data of measure,
can be used to obtain estimate and deviation
Parameters
----------
measure : string
indicate which measure is probed
d... | 55d414bb0adb00fe549485f2e3682d15b761b7a4 | 6,123 |
def plural_suffix(count: int) -> str:
    """Return "s" unless *count* equals one, else the empty string."""
    return '' if count == 1 else 's'
def _pr_exists(user, namespace, repo, idx):
""" Utility method checking if a given PR exists. """
repo_obj = pagure.lib.query.get_authorized_project(
flask.g.session, project_name=repo, user=user, namespace=namespace
)
if not repo_obj:
return False
pr_obj = pagure.lib.query.search_... | 2e68b6d4282f6f3ca4d9645c78579e3df3889494 | 6,125 |
import csv
def readData(filename):
"""
Read in our data from a CSV file and create a dictionary of records,
where the key is a unique record ID and each value is dict
"""
data_d = {}
with open(filename) as f:
reader = csv.DictReader(f)
for row in reader:
clean_row... | 57dcb39dac9568024ae4be07bc0921c941d6fae3 | 6,126 |
def _get_client(app):
"""Returns a client instance for an App.
If the App already has a client associated with it, simply returns
it. Otherwise creates a new client, and adds it to the App before
returning it.
Args:
app: A Firebase App instance (or ``None`` to use the default App).
Re... | de96140ed7c15a4aa390f08a76fe7de0074730db | 6,127 |
def get_job_config_build_for_branch(**kwargs):
    """Build a commit-triggered copr scratch-build JobConfig.

    Any extra keyword arguments are forwarded to the JobConfig constructor.
    """
    defaults = {
        "type": JobType.copr_build,
        "trigger": JobConfigTriggerType.commit,
        "branch": "build-branch",
        "scratch": True,
    }
    # Double-star expansion keeps the original semantics: a duplicate key in
    # kwargs still raises TypeError at the call site.
    return JobConfig(**defaults, **kwargs)
import yaml
def get_connection_string_from_config_file(cfg_src, db_cfg_key):
"""
Gets connection parameters from specified section in
a configuration file.
"""
# reading complete configuration
with open(cfg_src, 'r') as yml_file:
cfg = yaml.safe_load(yml_file)
# looking for specif... | e2245f8e9124d36e5a373f1891590046c10a38fd | 6,129 |
from typing import Tuple
from typing import Sequence
def _decomp_0_matrices(
kak: 'cirq.KakDecomposition',
atol: float = 1e-8,
) -> Tuple[Sequence[Tuple[np.ndarray, np.ndarray]], complex]:
"""Returns the single-qubit matrices for the 0-SQRT_ISWAP decomposition.
Assumes canonical x, y, z and (x, y, z)... | b84d65cc7076b5d294cbf7f4f6a3c3ddff7ef7d2 | 6,130 |
import math
def concave(x, m):
    """Shape function."""
    assert shape_args_ok(x, m)
    # Product of sines over the first len(x) - m coordinates.
    result = math.prod(
        (math.sin(x[j] * math.pi / 2.0) for j in range(len(x) - m)),
        start=1.0,
    )
    if m != 1:
        result *= math.cos(x[len(x) - m] * math.pi / 2.0)
    return correct_to_01(result)
import os
def get_filenames(is_training, data_dir):
"""Return filenames for dataset."""
if is_training:
return [
os.path.join(data_dir, 'train-%05d-of-01024' % i)
for i in range(_NUM_TRAIN_FILES)]
else:
return [
os.path.join(data_dir, 'validation-%05d-of... | f925c9f6018ad23f97b0f84c42581857852bd4a7 | 6,132 |
def norm_img(img):
"""
normalization image
:param img: (C, H, W)
:return:
norm_img: (C, H, W)
"""
height, width, channel = img.shape
img = np.reshape(img, (height * width, channel)) # (height * width, channel)
mean = np.mean(img, axis=0, keepdims=True) # (1, channel)
center... | a794ec4e096faa0efbfc9c993d9292a54f6573cc | 6,133 |
import scipy
def propagator_radial_diffusion(n,dim_rad,rate,wrad,lagtime,
lmax,bessel0_zeros,bessels,):
"""calculate propagator for radial diffusion as matrix exponential
n -- dim_trans, dimension transition matrix, usually number of bins in z-direction
dim_rad -- dimension transition matri... | 9fdfa7001ca319fcf57d5e80c492de73bca03b85 | 6,134 |
def convert_examples_to_features(examples, use_label):
"""Loads a data file into a list of `InputBatch`s."""
features = []
line_tags = []
for (ex_index, example) in enumerate(examples):
if use_label:
labels = example.labels
else:
labels = ['O'] * len(example.uni... | 7720a79b7404e0d4cc340ae5ea78084b64115f92 | 6,135 |
def broadcast_to_rank(t, rank, axis = -1):
    """Appends dimensions to tf.Tensor `t` at axis `axis` to match rank `rank`."""
    current_rank = t.shape.rank  # static rank, assumed known at compile time
    while current_rank < rank:
        t = tf.expand_dims(t, axis=axis)
        current_rank += 1
    return t
def operator(func):
    """Decorator turning `func(stream, *args)` into a curried form.

    Calling the decorated function with `*args, **kwargs` returns a new
    function of a single `stream` argument that applies `func` to it.
    """
    @wraps(func)
    def partially_applied(*args, **kwargs):
        def operator(stream):
            return func(stream, *args, **kwargs)
        return operator
    return partially_applied
def json(body, charset="utf-8", **kwargs):
    """Takes JSON formatted data, converting it into native Python objects"""
    decoded = text(body, charset=charset)
    return json_converter.loads(decoded)
def read_corpus(file_path, encoding=ENCODING, **kwargs):
"""
Create a Linguistica object with a corpus data file.
:param file_path: path of input corpus file
:param encoding: encoding of the file at *file_path*. Default: ``'utf8'``
:param kwargs: keyword arguments for parameters and their values.
... | 28f8303e0b94e8df9b6d9a33aca14fa62b15f6e8 | 6,139 |
import random
def sample_pagerank(corpus, damping_factor, n):
"""
Return PageRank values for each page by sampling `n` pages
according to transition model, starting with a page at random.
Return a dictionary where keys are page names, and values are
their estimated PageRank value (a value between ... | 5c9f66aaf72c8330c2ee0fcd2402bf613c4eb9b7 | 6,140 |
import argparse
import sys
def parse_args() -> argparse.Namespace:
"""
Parse program arguments
:return: Parser values
"""
parser = argparse.ArgumentParser(description="")
parser.add_argument("-a", action="store_true")
parser.add_argument("-c", action="store_true")
parser.add_argument("... | 9d86d37d94af5c8ff128c4da8226f15728b0da70 | 6,141 |
import networkx
def compute_participants(matches, challonge_data):
"""Compute series participants.
Iterate all matches and players to create a graph.
Apply connected components algorithm to resolve distinct
participant groups over all matches.
Sort participant groups by number of wins to correla... | a715773d5edd3b4d6852096c665070e64bef1165 | 6,142 |
def write_haiku(word_array, is_ipv6):
"""Return the beautiful haiku"""
# String to place in schema to show word slot.
octct = 'OCTET'
schema = get_schema(is_ipv6, octct)
# Replace each instance of 'octet' in the schema with a word from
# the encoded word array.
for i in range(len(word_array... | b51dc7cd1cca642eb135c48952bbc2ca74faf5e1 | 6,143 |
def import_data():
"""
Utility function to imoprt summary tsv ready for usage in PyMol
"""
col_types = {
'sift_score': float, 'sift_median': float, 'total_energy': float,
'interaction_energy': float, 'diff_interaction_energy': float,
'diff_interface_residues': float, 'freq': floa... | 1b116d74ecba83658d05ea5dbda66b15175f3fdb | 6,144 |
from datetime import datetime
def get_current_datetime():
    """
    Get the current datetime in the app's configured timezone.

    Note: This function is intended to be mocked in testing.

    Return:
        time(datetime.datetime): current datetime
    """
    # The module imports the class directly (`from datetime import datetime`),
    # so `datetime.datetime.now(...)` would raise AttributeError — call
    # `datetime.now(...)` on the imported class instead.
    return datetime.now(current_app.config['TIMEZONE'])
def adapt_all(iterable, to_cls):
    """Adapt every item of *iterable* to *to_cls* and return the results.

    If *iterable* is ``None``, an empty list is returned.
    """
    adapted = []
    if iterable is not None:
        for item in iterable:
            adapted.append(adapt(item, to_cls))
    return adapted
import re
import os
import yaml
def get_config(seed, shot):
"""
Uses a given base 1-shot config to replicate it for 'shot' and 'seed'.
Changes dataset training split, cfg.OUTPUT_DIR and iteration number and steps accordingly.
"""
base_config_path: str = args.base_config
assert '1shot'... | d8cac8518a600d6f2900f63dc1320cf234341661 | 6,147 |
import torch
def log_sum_exp_vb(vec, m_size):
"""
calculate log of exp sum
args:
vec (batch_size, vanishing_dim, hidden_dim) : input tensor
m_size : hidden_dim
return:
batch_size, hidden_dim
"""
_, idx = torch.max(vec, 1) # B * 1 * M
max_score = torch.gather(vec, 1, idx.view(-1, 1, m_size)).view(-1, 1,... | 87c99f9ab9a9c114792a2c895284a8743682fc06 | 6,148 |
def C_fun_gen(fractions, speciesindices, y, time):
"""
Calculate the distribution of carbon functional groups as a percent of
total carbon.
Parameters
----------
fractions : list
The lumped phases that you want to include (as specified
in MW['speci... | 28704b470fd919d998fcd8704b125827226fe151 | 6,149 |
def get_branch(repo):
    """Retrieve the current branch of a dulwich repository.

    Returns the decoded ref name of the first non-HEAD ref, or ``None``
    if no such ref exists.
    """
    refnames, _sha = repo.refs.follow(b"HEAD")
    if len(refnames) != 2:
        LOGGER.debug("Got more than two refnames for HEAD!")
    return next(
        (to_utf8(name) for name in refnames if name != b"HEAD"),
        None,
    )
def _sigmoid(x):
"""
Sigmoid function that smoothly limits values between 0.0 and 1.0
:param x: Numpy array with float values that are to be limited.
:return: Numpy array with float values between 0.0 and 1.0
"""
return 1.0 / (1.0 + np.exp(-x)) | 770875ba82df9d4ac8eb6d403527cf0fb62d3990 | 6,151 |
from typing import Dict
def inherit_n_genes_prob(n, n_father, n_mother, mutation_prob) -> Dict:
"""Returns dictionary with distribution of conditional probability of
inherited genes given that father has n_father genes and mother has
n_mother genes, taking into account probability of mutations."""
# ... | 0481244db107f6623aa109212e74be8b719f5bb8 | 6,152 |
async def get_metrics_address_counts_summary():
"""
Latest summary of address counts.
"""
qry = f"""
select col
, latest
, diff_1d
, diff_1w
, diff_4w
, diff_6m
, diff_1y
from mtr.address_counts_by_minimal_balance_ch... | c22d6c3442833743559c42e4be59a25ab073c03b | 6,153 |
from typing import Dict
from typing import Any
async def processor(db, document: Dict[str, Any]) -> Dict[str, Any]:
"""
Process a history document before it is returned to the client.
:param db: the application object
:param document: the document to process
:return: the processed document
""... | 89de3dd255923b3eca6444ee4410980e857aa8e1 | 6,154 |
def _unit_scale_traindata(X, xmins, xmaxs):
"""If xmax > xmin, unit-scale the training data, else do nothing
Parameters
----------
x : ndarray of shape (m, n)
xmins : ndarray of shape (n, )
xmaxs : ndarray of shape (n, )
Returns
-------
result : ndarray of shape (m, n)
Notes... | 2778c7a9d7b6e23775df2354b92057e6a5511dc5 | 6,155 |
import subprocess
def get_port_properties(port):
"""Retrieves common port properties from its package.sh file.
Returns:
dict: keys are values from PORT_PROPERTIES, values are from the package.sh file
"""
props = {}
for prop in PORT_PROPERTIES:
res = subprocess.run(f"cd {port}; ex... | 116828540f37e0a3189092ed985ad0f88ed6534a | 6,156 |
import numpy as np
import hdbscan
import matplotlib.pyplot as plt
import seaborn as sns
def run_HDBSCAN_subclustering(df=None, target=None, cluster_col="Cluster", soft_clustering=True,
min_cluster_size=100, min_samples=10,
cluster_selection_epsilon=0.0, cluster_selection_method='eom',
... | 0ce5c53a390fec6b40addd6182c9ef36ed4047fc | 6,157 |
def extractive_explanations(
dataset,
prefix='explain sentiment',
input_feature='review',
output_classes=('negative', 'positive'),
drop_explanations=False
):
"""Preprocessor to handle extractive rationale prediction datasets.
The preprocessor expects a dataset with the provided 'input_featu... | c1549279cbb676ee45287afe99f1f94410c27b62 | 6,158 |
def corr_weighted_kendalltau(top_list_prev, top_list, use_fast=True):
"""Compute weighted Kendall's Tau correlation (based on custom implementation!).
NOTE: Lists are DataFrame columns AND they must be sorted according to their value!!!"""
# it is irrelevant whether we compute kendall for ranks or scores.
... | 35b473040508561798831343d770acabd97cb76e | 6,159 |
from datetime import datetime
import random
def generate_processes_by_exposure(exposure):
""" Creates a simulated process based on an exposure.
Arguments:
exposure {object} -- Exposure model
Raises:
ValueError -- returns when there is no processing
with a respective expos... | a3a335184fbf9c51e47210ac22fd4d4e8a8a6aa4 | 6,160 |
import copy
def cross_val_confusion(classifier, X, y, cv=None):
"""
Evaluate confusion matrix and score from each fold of cross validation
Parameters:
----------
classifier: classifier object
The object used to fit the data.
X[ndarray]: shape=(n_sample, n_feature)
y[ndarray]: sha... | bbdbed0bc18b7ac201f2933e9cff10eab19d5a75 | 6,161 |
import asyncio
async def async_unload_entry(hass: HomeAssistantType, entry: ConfigEntry):
"""Unload Synology DSM sensors."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, platform)
for platform in PLATFORMS
... | 876aceeaa113a6275a60328f6f00c0d0c4c0f2e1 | 6,162 |
import os
def transfer_segm_labels(verts_before, mesh, dir_path, name):
"""
Save segmentation labels for mesh after scan imitation
"""
verts_after = utils.get_vertices_np(mesh)
verts_mapping = utils.match_vert_lists(verts_after, verts_before)
# print(os.path.join(dir_path, name + '_sim_se... | 2076062e084b85701c4bdd5879ca840ee736cb7b | 6,163 |
import pathlib
def confirm_control_contains(trestle_dir: pathlib.Path, control_id: str, part_label: str, seek_str: str) -> bool:
"""Confirm the text is present in the control markdown in the correct part."""
control_dir = trestle_dir / ssp_name / control_id.split('-')[0]
md_file = control_dir / f'{control... | b78cd7a7ef435fcee483d98fe2199ba90c905833 | 6,164 |
import random
def describe_current_subtask(subtask, prefix=True):
"""
Make a 'natural' language description of subtask name
"""
to_verb = {"AnswerQuestion": "answering a question",
"ArmGoal": "moving my arm",
"DemoPresentation": "giving a demo",
"Find": "fi... | 628c699201c26242bd72c6066cba07cce54b14ca | 6,165 |
def addprint(x: int, y: int):
    """Return an "added" representation of `x` and `y`."""
    total = x + y
    return f"base addprint(x={x!r}, y={y!r}): {total!r}"
import re
def parse_date(deadline_date):
    """
    Given a date in the form MM/DD/YY or MM/DD/YYYY (separated by '/'
    or '-'), return the integers MM, DD, and YYYY (or YY) in this order.
    """
    parts = re.split('\\/|\\-', deadline_date)
    month, day, year = int(parts[0]), int(parts[1]), int(parts[2])
    return month, day, year
import requests
def jyfm_tools_position_fund_direction(
trade_date="2020-02-24", indicator="期货品种资金流向排名", headers=""
):
"""
交易法门-工具-资金分析-资金流向
https://www.jiaoyifamen.com/tools/position/fund/?day=2020-01-08
:param trade_date: 指定交易日
:type trade_date: str
:param indicator: "期货品种资金流向排名" or "期货主... | 10cfb29f1705460916fa93542ba72a22b3cdbf70 | 6,168 |
def generate_points_in_areas(gdf, values, points_per_unit=1, seed=None):
"""
Create a GeoSeries of random points in polygons.
Parameters
----------
gdf : GeoDataFrame
The areas in which to create points
values : str or Series
The [possibly scaled] number of points to create in each area
points_per_unit : nu... | 14232540c4bee8c9863b2af4f3f2f200bb261098 | 6,169 |
def montager(xi, col=None, row=None, aspect=1.4, transpose=False, isRGB=False,
flipx=False, flipy=False, flipz=False, output_grid_size=False):
""" tile a 3D or 4D image into a single 2D montage
Parameters
----------
xi : ndarray
image data to montage
col : int, optional
... | b8ded004cb0e3aef328fc953c5a0b81805646e1a | 6,170 |
def template_dict(input_dict_arg, params_dict_arg):
"""function to enable templating a dictionary"""
output_dict = input_dict_arg
for key, value in output_dict.items():
if isinstance(value, str):
output_dict[key] = params_re_str(value, params_dict_arg)
elif isinstance(value, dict... | 3a9e2df200f52f9ec320ab3900653851dfb77fcc | 6,171 |
def _traverse_dictionaries(instance, parent="spin_systems"):
"""Parses through the instance object contained within the parent object and return
a list of attributes that are populated.
Args:
instance: An instance object from the parent object.
parent: a string object used to create the add... | 9ecf8050e7c4d9c4f8e84f04303f0be186f594d5 | 6,172 |
def getSingleChildTextByName(rootNode, name):
"""Returns the text of a child node found by name.
Only one such named child is expected.
"""
try:
nodeList = [e.firstChild.data for e in rootNode.childNodes if e.localName == name]
if len(nodeList) > 0:
return nodeList[0]
... | 48a8a4b2c3c95cac944bcb96e33e602d62499f19 | 6,173 |
def test_aggregate_stores_output_in_record(configured_test_manager):
"""An aggregate output should exist in the record state."""
@aggregate(["output"])
def small_aggregate(record, records):
return "hello world"
record = Record(configured_test_manager, None)
small_aggregate(record, [record]... | 865210e1d79c1a467bc44c5a9a1cd69870ff953f | 6,174 |
def _get_energy_ratio_single_wd_bin_bootstrapping(
df_binned,
df_freq,
N=1,
percentiles=[5.0, 95.0],
return_detailed_output=False,
):
"""Get the energy ratio for one particular wind direction bin and
an array of wind speed bins. This function also includes bootstrapping
functionality by ... | a29e1ebaa9994148e473d61d7881737b62a9082e | 6,175 |
from datacube import Datacube
from .tasks import SaveTasks
from .model import DateTimeRange
import json
import sys
def save_tasks(
grid,
year,
temporal_range,
frequency,
output,
products,
dataset_filter,
env,
complevel,
overwrite=False,
tiles=None,
debug=False,
gqa=... | 247fcc8208ad42a8cca2a8e43152b4b6e3f25d00 | 6,176 |
import re
def get_file_name(part):
    """Extract the file name from a fragment ID via regex.

    Captures the hyphenated name that follows ="..." in *part*.
    Raises IndexError if no match is found (same as the original).
    """
    file_names = re.findall(r"='(.*\-[a-z]+).*", part)
    return file_names[0]
def get_helping_materials(project_id, limit=100, offset=0, last_id=None):
"""Return a list of helping materials for a given project ID.
:param project_id: PYBOSSA Project ID
:type project_id: integer
:param limit: Number of returned items, default 100
:type limit: integer
:param offset: Offset ... | 163436a9a09816bc18b31c9911b87db74b8aefbd | 6,178 |
import math
def generate_sphere_points(n):
"""
Returns list of 3d coordinates of points on a sphere using the
Golden Section Spiral algorithm.
"""
points = []
inc = math.pi * (3 - math.sqrt(5))
offset = 2 / float(n)
for k in range(int(n)):
y = k * offset - 1 + (offset / 2)
... | bd6c7624220f7928a44f6dcb24b7112e8d803eb4 | 6,179 |
def svn_repos_dir_delta2(*args):
"""
svn_repos_dir_delta2(svn_fs_root_t src_root, char src_parent_dir, char src_entry,
svn_fs_root_t tgt_root, char tgt_path,
svn_delta_editor_t editor, void edit_baton,
svn_repos_authz_func_t authz_read_func, svn_boolean_t text_deltas,
svn_depth... | c972237fee8c76a24fb9443a9607931566b642ff | 6,180 |
def linear_r2_points(points: np.ndarray, coef: tuple, r2: R2 = R2.classic) -> float:
"""Computes the coefficient of determination (R2).
Args:
points (np.ndarray): numpy array with the points (x, y)
coef (tuple): the coefficients from the linear fit
r2 (R2): select the type of coefficien... | 98c33ba3354ed22ddf3ab718f2f41967c2555f18 | 6,181 |
from typing import List
from datetime import datetime
def _show_tournament_list() -> List:
"""
Функция возвращает список предстоящих турниров
"""
tournaments = []
for tournament in loop.run_until_complete(get_request('https://codeforces.com/api/contest.list?gym=false')):
if tournament['phase'] != 'BEFORE':
... | 0815ae126671a8c85bb3311e900db48ce87fa1f0 | 6,182 |
def less_goals_scored():
    """
    returns the lowest number of goals scored during one week

    Delegates to ``goals_scored`` with the 'min' aggregation key;
    presumably that helper accepts an aggregation-mode string — TODO confirm.
    """
    return goals_scored('min')
from typing import Sequence
from typing import Union
from typing import Tuple
def compute_avgpool_output_shape(input_shape:Sequence[Union[int, None]],
kernel_size:Union[Sequence[int], int]=1,
stride:Union[Sequence[int], int]=1,
... | 5116f6fdb95c1cf07d34c2193e6e08eee47a06da | 6,184 |
def _obs_intersect(((x0, y0), (x1, y1)), ((x2, y2), (x3, y3))):
"""Check if two lines intersect. The boundaries don't count as
intersection."""
base1 = (x0, y0)
base2 = (x2, y2)
dir1 = (x1-x0, y1-y0)
dir2 = (x3-x2, y3-y2)
t1, t2 = _intersect(base1, dir1, base2, dir2)
eps = 0.00001
... | ea2b268adac5fc1156b566ea0c6cabdd2f4fe94e | 6,185 |
import json
import re
def project_configure(request, project_name):
"""
get configuration
:param request: request object
:param project_name: project name
:return: json
"""
# get configuration
if request.method == 'GET':
project = Project.objects.get(name=project_name)
... | a033d7d1810cee5e5370d8d9f6562f23e3e7e64a | 6,186 |
import time
def run_epoch(session, model, eval_op=None, verbose=False):
"""Runs the model on the given data."""
start_time = time.time()
costs = 0.0
iters = 0
state = session.run(model.initial_state)
fetches = {
"cost": model.cost,
"final_state": model.final_state,
}
if eval_op is not None:
fetches["... | 641100d0789c3841a4b3cb67e42963387d0f888d | 6,187 |
def unemployment(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-res... | a5412d78673f639e0d10a95bb91138da1b432221 | 6,188 |
import warnings
def splitunc(p):
"""Deprecated since Python 3.1. Please use splitdrive() instead;
it now handles UNC paths.
Split a pathname into UNC mount point and relative path specifiers.
Return a 2-tuple (unc, rest); either part may be empty.
If unc is not empty, it has the form '//host/mo... | d9748b551e6a9ba101b3817ab22c74dd30cf89d1 | 6,189 |
def expand_locations(ctx, input, targets = []):
"""Expand location templates.
Expands all `$(execpath ...)`, `$(rootpath ...)` and deprecated `$(location ...)` templates in the
given string by replacing with the expanded path. Expansion only works for labels that point to direct dependencies
of this ru... | efa482d928484b7d6f9c8acbf81e0a3d5b4cd50f | 6,190 |
import requests
import json
def scrape_db(test=False, write_file=True):
"""
Function to scrape bodybuild.com recipe database and save results as json.
Parameters:
-----------
"""
# Hacky way to get all recipes - you have to request the number. Luckily,
# this is listed at the beginning ... | d9883058ac434fca861168625493467bfbcafaed | 6,191 |
import functools
def require(required):
""" Decorator for checking the required values in state.
It checks the required attributes in the passed state and stop when
any of those is missing. """
def decorator(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
... | 9bf04a95d39b89fd10c9872dd7fe29c5c10f06a1 | 6,192 |
import re
def simplify_unicode(sentence):
"""
Most accented Latin characters are pronounced just the same as the base character.
Shrink as many extended Unicode repertoire into the Estonian alphabet as possible.
It is GOOD for machine learning to have smaller ortographic repertoire.
It is a BAD id... | 291a1e002d4d428697d7b892291ad314f0000a2a | 6,193 |
import pickle
def read_file(pickle_file_name):
"""Reads composite or non-composite novelty results from Pickle file.
:param pickle_file_name: Path to input file (created by
`write_standard_file` or `write_pmm_file`).
:return: novelty_dict: Has the following keys if not a composite...
novelty_... | fcc4976648bafc7e845a22552965e1f65e3ddc85 | 6,194 |
import re
def AutoscalersForMigs(migs, autoscalers, project):
"""Finds Autoscalers with target amongst given IGMs.
Args:
migs: List of triples (IGM name, scope type, scope name).
autoscalers: A list of Autoscalers to search among.
project: Project owning resources.
Returns:
A list of all Autosc... | 12b6e10c16c7ea5324f5090cdc3027a38e1247c1 | 6,195 |
def log_loss(
predictions: ArrayLike,
targets: ArrayLike,
) -> ArrayLike:
"""Calculates the log loss of predictions wrt targets.
Args:
predictions: a vector of probabilities of arbitrary shape.
targets: a vector of probabilities of shape compatible with predictions.
Returns:
a vector of same... | a3d27b0229b287e32701fa80822ad1025e875a62 | 6,196 |
import json
def GetAccessTokenOrDie(options):
"""Generates a fresh access token using credentials passed into the script.
Args:
options: Flag values passed into the script.
Returns:
A fresh access token.
Raises:
ValueError: response JSON could not be parsed, or has no access_token... | 6ecbd6875931c6ef139da52578050380da4e62bd | 6,197 |
def remove_whitespace(tokens):
    """Remove any top-level whitespace and comments in a token list."""
    skipped = ('whitespace', 'comment')
    return tuple(tok for tok in tokens if tok.type not in skipped)
def update(args):
"""
For LdaCgsMulti
"""
(docs, doc_indices, mtrand_state, dtype) = args
start, stop = docs[0][0], docs[-1][1]
global Ktype
if _K.value < 2 ** 8:
Ktype = np.uint8
elif _K.value < 2 ** 16:
Ktype = np.uint16
else:
raise NotImplementedError("Inv... | 2dd014472c77e363fafab1f9dc22ce0267d3e3df | 6,199 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.