content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import yaml
def from_yml(yml_string: str) -> dict:
    """Parse a YAML document into a dict.

    :param yml_string: YAML document text to parse
    :type yml_string: str
    :return: dict representation of the parsed document
    :rtype: dict
    """
    # safe_load refuses arbitrary object construction, unlike yaml.load.
    return yaml.safe_load(yml_string)
import string
import random
def generator(length):
    """Generate `length` random printable characters, UTF-8 encoded.

    Args:
        length (int): number of characters to draw
    Returns:
        bytes: the encoded random sequence
    """
    pool = string.printable
    picks = [random.choice(pool) for _ in range(length)]
    return ''.join(picks).encode()
import math
def latlng_index(lat, lng, mult=1 / 15):
    """Quantize a latitude/longitude pair into integer bucket indexes.

    `mult` is 1 / approximate-bucket-size-in-degrees.  The longitude
    multiplier is scaled by cos(lat) so buckets stay roughly square as
    meridians converge toward the poles.
    """
    lat_index = int(round((lat + 90.0) * mult))
    lng_mult = mult * math.cos(abs(lat) * math.pi / 180.0)
    lng_index = int(round((lng + 180.0) * lng_mult))
    return {'latIndex': lat_index, 'lngIndex': lng_index}
import logging
def get_loglevel(loglevel):
    """Translate a log level name (e.g. 'info') to its numeric value.

    Raises ValueError when the name is not a level `logging` defines.
    """
    level = getattr(logging, loglevel.upper(), None)
    if isinstance(level, int):
        return level
    raise ValueError("Invalid log level: %s" % loglevel)
def count_marked_as_done(idx):
    """Return `idx` unchanged.

    NOTE(review): despite the name, no counting happens here -- this looks
    like a placeholder; confirm against callers before extending.
    """
    return idx
def get_nan_audio():
    """
    returns list of audio files to test in test_for_nan_values

    NOTE(review): these are absolute paths on a specific GCP data disk
    (/mnt/disks/data_disk/...), so this fixture only resolves on that
    machine -- confirm before reusing elsewhere.
    """
    # LibriSpeech utterances previously observed to trigger NaN values.
    # NOTE(review): 1579-128155-0012 appears twice -- presumably a
    # duplicate; confirm whether that is intentional.
    audio_files = [
        "/mnt/disks/data_disk/home/dzubke/awni_speech/data/LibriSpeech/train-other-500/6689/64286/6689-64286-0001.wav",
        "/mnt/disks/data_disk/home/dzubke/awni_speech/data/LibriSpeech/train-other-500/1760/143006/1760-143006-0080.wav",
        "/mnt/disks/data_disk/home/dzubke/awni_speech/data/LibriSpeech/train-other-500/1579/128155/1579-128155-0012.wav",
        "/mnt/disks/data_disk/home/dzubke/awni_speech/data/LibriSpeech/train-other-500/25/123319/25-123319-0055.wav",
        "/mnt/disks/data_disk/home/dzubke/awni_speech/data/LibriSpeech/train-other-500/1579/128155/1579-128155-0012.wav",
        "/mnt/disks/data_disk/home/dzubke/awni_speech/data/LibriSpeech/train-other-500/4484/37119/4484-37119-0018.wav",
        "/mnt/disks/data_disk/home/dzubke/awni_speech/data/LibriSpeech/train-other-500/3433/135988/3433-135988-0038.wav",
        "/mnt/disks/data_disk/home/dzubke/awni_speech/data/LibriSpeech/train-clean-100/887/123289/887-123289-0001.wav",
        "/mnt/disks/data_disk/home/dzubke/awni_speech/data/LibriSpeech/train-clean-100/4137/11701/4137-11701-0029.wav",
        "/mnt/disks/data_disk/home/dzubke/awni_speech/data/LibriSpeech/train-other-500/937/148985/937-148985-0022.wav",
        "/mnt/disks/data_disk/home/dzubke/awni_speech/data/LibriSpeech/train-other-500/4931/28257/4931-28257-0042.wav",
        "/mnt/disks/data_disk/home/dzubke/awni_speech/data/LibriSpeech/train-clean-100/8312/279790/8312-279790-0043.wav",
        "/mnt/disks/data_disk/home/dzubke/awni_speech/data/LibriSpeech/train-other-500/4741/27757/4741-27757-0051.wav",
        "/mnt/disks/data_disk/home/dzubke/awni_speech/data/LibriSpeech/train-other-500/8302/281331/8302-281331-0012.wav",
        "/mnt/disks/data_disk/home/dzubke/awni_speech/data/LibriSpeech/train-clean-100/32/21631/32-21631-0012.wav",
        "/mnt/disks/data_disk/home/dzubke/awni_speech/data/LibriSpeech/train-other-500/2541/159352/2541-159352-0044.wav",
        "/mnt/disks/data_disk/home/dzubke/awni_speech/data/LibriSpeech/train-clean-100/2092/145706/2092-145706-0022.wav",
        "/mnt/disks/data_disk/home/dzubke/awni_speech/data/LibriSpeech/train-other-500/5045/1197/5045-1197-0002.wav",
        "/mnt/disks/data_disk/home/dzubke/awni_speech/data/LibriSpeech/train-other-500/3871/692/3871-692-0025.wav",
        "/mnt/disks/data_disk/home/dzubke/awni_speech/data/LibriSpeech/train-other-500/1690/142293/1690-142293-0035.wav",
        "/mnt/disks/data_disk/home/dzubke/awni_speech/data/LibriSpeech/train-clean-100/7078/271888/7078-271888-0058.wav",
        "/mnt/disks/data_disk/home/dzubke/awni_speech/data/LibriSpeech/train-other-500/5220/112590/5220-112590-0002.wav"
    ]
    return audio_files
def safe_column_name(name):
    """Generate SQL friendly column name: double-quoted and upper-cased."""
    quoted = '"{}"'.format(name)
    return quoted.upper()
import re
def name_validalidation(name, context):
    """Validate a name: non-blank, only letters/digits/underscore/hyphen/space.

    Returns (error-dict, 400) on failure and None (implicitly) on success.
    """
    blank = len(name.strip()) == 0
    if blank or not re.match("^[-a-zA-Z0-9_\\s]*$", name):
        message = "Name shouldn't be empty. No special characters"
        return ({
            "message": message + " for " + context + " names",
            context: "null",
        }, 400)
import os
def load_random_test_dir():
    """Return the test data directory under the current working directory.

    Relies on being run from test_docker.py (not ideal).
    """
    return os.path.join(os.getcwd(), 'test_backend/testdir/')
def column_name_list(columns):
    """
    Render column names as one comma-separated string.

    :param columns: iterable of column objects exposing a ``name`` attribute
    :returns: ``"a, b"``-style string; empty string when there are no columns
    """
    if not columns:
        return ''
    return ', '.join(column.name for column in columns)
def make_0032():
    """World Server Addr Request(start)(s0031) -- the request carries no payload."""
    return ""
import os
def file_list(root, file_type):
    """
    Recursively collect paths of files under `root` whose names end with
    `file_type`.

    :param root: root directory to walk
    :param file_type: suffix filter, e.g. '.xml'
    :return: complete list of matching file paths
    """
    matches = []
    for dirpath, _dirnames, filenames in os.walk(root):
        for fname in filenames:
            if fname.endswith(file_type):
                matches.append(os.path.join(dirpath, fname))
    return matches
def positiveaxis(axis, ndim):
    """Normalize a possibly-negative axis index.

    Args:
        axis(num): dimension index (negative counts from the end)
        ndim(num): number of dimensions
    Returns:
        num: the equivalent non-negative axis
    Raises:
        IndexError: when the axis falls outside [-ndim, ndim).
    """
    normalized = axis + ndim if axis < 0 else axis
    if not 0 <= normalized < ndim:
        raise IndexError("axis out of range")
    return normalized
def add_schema_ownerships(spec, dbcontext):
    """
    Add schema ownerships to an existing spec.

    One assumption is made here: a schema whose name matches a
    login-capable owner is treated as a personal schema.  The implication
    is that `pgbedrock configure` will align ownership of objects inside
    such a schema with the schema owner.

    Returns:
        dict: The input spec with schema ownerships added
    """
    personal_schemas = dbcontext.get_all_personal_schemas()
    for schema, owner in dbcontext.get_all_schemas_and_owners().items():
        role_config = spec[owner]
        if schema in personal_schemas:
            # Name == login-capable owner => personal schema (see docstring).
            role_config['has_personal_schema'] = True
        else:
            owned = role_config.setdefault('owns', {'schemas': []})
            owned.setdefault('schemas', []).append(schema)
    return spec
def is_desired_workflow(run_json):
    """Return True iff this run belongs to the "Presubmit Checks" workflow."""
    # Workflow IDs are fixed; for "Presubmit Checks" it is:
    # https://api.github.com/repos/taichi-dev/taichi/actions/workflows/1291024
    DESIRED_ID = 1291024
    return run_json['workflow_id'] == DESIRED_ID
def format_codepoint(codepoint):
    """Format a codepoint (integer) as a USV string (at least 4 hex digits).

    Falsy input (None or 0) yields ''.  NOTE(review): U+0000 therefore
    formats as the empty string -- confirm that is intended.
    """
    if not codepoint:
        return ''
    return f'{codepoint:04X}'
def ipv4_addr_to_reverse(addr):
    """
    Reverse the dotted quads of an IPv4 address string:
    '1.2.3.4' -> '4.3.2.1'
    """
    return ".".join(addr.split(".")[::-1])
def get_dim_coord_names(cube):
    """
    Return an ordered list of dimension coordinate names on the cube.

    Args:
        cube (iris.cube.Cube)
    Returns:
        list of str
    """
    names = []
    for coord in cube.coords(dim_coords=True):
        names.append(coord.name())
    return names
def _noisepeak(amp, npk1):
    """
    Private function that updates the running noise-peak estimate (NPK1)
    of the Pan-Tompkins real-time R peak detection algorithm.
    (The original summary -- "insert a new RR interval in the buffer" --
    was a copy/paste error; no buffer is touched here.)
    ----------
    Parameters
    ----------
    amp : int
        Amplitude of the peak under analysis.
    npk1 : int
        Actual value of NPK1 parameter defined in Pan-Tompkins real-time R peak detection algorithm
        (named noise peak).
    Returns
    -------
    npk1 : float
        Updated value of NPK1 parameter.
    """
    # Exponential moving average: the new peak contributes 12.5 %,
    # history contributes 87.5 % (standard Pan-Tompkins weighting).
    npk1 = 0.125 * amp + 0.875 * npk1  # npk1 is the running estimate of the noise peak
    return npk1
def get_center_of_mass(centers_of_mass, masses):
    """Determine the center of mass of a set of objects.

    Args
        centers_of_mass [*shape, num_objects, 2]
        masses [*shape, num_objects]
    Returns [*shape, 2]
    """
    weighted = centers_of_mass * masses[..., None]
    total_mass = masses.sum(-1)[..., None]
    return weighted.sum(-2) / total_mass
def printPath(path):
    """Render a list of nodes as an arrow-joined string, e.g. '1->2->3'."""
    return '->'.join(str(node) for node in path)
import copy
def start_board(solution_board):
    """Generate an empty ('-'-filled) board shaped like solution_board.

    The input board is left untouched.
    """
    blank = copy.deepcopy(solution_board)
    for row in blank:
        row[:] = ['-'] * len(row)
    return blank
def safe_get(list, index, fallback_value):
    """Like dict.get(key, fallback) but for sequences.

    Args:
        list (_type_): a list-like object
        index (_type_): an index into the list
        fallback_value (_type_): value returned when indexing fails
    Returns:
        _type_: list[index] if the index is valid, else fallback_value.
    """
    try:
        result = list[index]
    except IndexError:
        result = fallback_value
    return result
def clusters_to_labels(clusters):
    """
    :param clusters: List of lists, each sublist contains doc ids in that cluster
    :return labels: Dict of [doc_id, cluster_label] where cluster_label is the
        cluster's position, assigned from consecutive ints starting at 0
        (enumerate's default).
        NOTE(review): the original docstring claimed labels start at 1, which
        contradicts the code -- confirm which behavior callers expect.
    """
    labels = dict()
    for label, cluster in enumerate(clusters):
        for doc_id in cluster:
            # A doc_id appearing in several clusters keeps the last label seen.
            labels[doc_id] = label
    return labels
import time
def execute_in_hammer_mode(func):
    """
    A decorator for automation of reconnecting.

    Retries `func` up to 3 times.  After a failed attempt it sleeps with a
    linearly growing backoff, then calls args[0].close_connection() and
    args[0].connect() before retrying.  When every attempt fails, raises
    RuntimeError wrapping (and chained to) the last error.
    """
    import functools

    n_trials = 3
    sleep_time = 1
    sleep_factor = 1

    @functools.wraps(func)  # fix: preserve func's name/docstring for introspection
    def wrapper(*args, **kwargs):
        for i in range(n_trials):
            try:
                return func(*args, **kwargs)
            except Exception as e:
                if i == n_trials - 1:
                    # fix: chain the original exception so its traceback survives
                    raise RuntimeError(e) from e
                time.sleep((1 + sleep_factor * i) * sleep_time)
                # `args[0]` is an instance of `InMemoryStorage`
                args[0].close_connection()
                args[0].connect()
    return wrapper
def binarySearch(arr, x):
    """Return the lowest index of the element equal to `x`, or -- when `x`
    is absent -- the index where it would be inserted to keep `arr` sorted
    (bisect_left semantics; the original docstring's claim of returning NaN
    was wrong).

    Fixes: iterative lo/hi bounds replace the recursive slicing version,
    which copied O(n) elements at every step.
    """
    lo, hi = 0, len(arr)
    while lo < hi:
        mid = (lo + hi) // 2
        if arr[mid] < x:
            lo = mid + 1
        else:
            hi = mid
    return lo
def to_bytes(value, encoding='utf8'):
    """
    Coerce a str/bytes/bytearray value to bytes.

    str is encoded with `encoding`; bytearray is copied into an immutable
    bytes object; anything else raises TypeError.
    """
    if isinstance(value, bytes):
        return value
    if isinstance(value, str):
        return value.encode(encoding)
    if isinstance(value, bytearray):
        return bytes(value)
    raise TypeError("Not a string or bytes like object")
import subprocess
def clear_screen():
    """Clear the terminal screen."""
    # Shells out to `reset`, which fully reinitialises the terminal.
    # NOTE(review): `reset` is POSIX-only and noticeably slower than
    # `clear` -- confirm this is acceptable on target platforms.
    subprocess.call(["reset"])
    return None
import ftplib
def connectToFTP(host, usern, passw,verb):
    """
    Creates an FTP Connection with the given details using FTPLib.
    :param host: Hostname (e.g. IP address) of the connection
    :param usern: Username to login with
    :param passw: password
    :param verb: print errors or not.
    :return: (FTP Connection object or None, status flag). Success:1, Failure:0
    """
    try:
        # FTP(host, user, passwd) connects and logs in within one call.
        ftp = ftplib.FTP(host, usern, passw)
        ftp.encoding = "utf-8"
        # Force active (PORT) mode.  NOTE(review): many servers and NAT
        # firewalls require passive mode -- confirm active mode is intended.
        ftp.set_pasv(False)
    except Exception as e:
        # Best-effort reporting: any connection/login failure is reported
        # via the (None, 0) return rather than an exception.
        if verb:
            print(e)
            print("\nERROR while creating FTP Connection \n")
        return None,0
    return ftp,1
import yaml
import json
def render_yaml(value):
    """
    Render a dictionary as formatted YAML. This filter is invoked as "yaml":
    {{ data_dict|yaml }}
    """
    # JSON round-trip first: coerces template-specific types (lazy strings,
    # ordered mappings) down to plain JSON-compatible values.
    normalized = json.loads(json.dumps(value))
    return yaml.dump(normalized)
def getHosts(args):
    """
    Return the host list: one host per line from args.file when given,
    otherwise args.list split on commas.
    """
    if args.file:
        with open(args.file) as handle:
            return handle.read().splitlines()
    return args.list.split(',')
def call_instance_method(instance, name, args, kwargs):
    """Indirect caller for instance methods (multiprocessing helper).

    Args:
        instance: the instance to call method with
        name (str): method name to call
        args (tuple or None): positional arguments for the method
        kwargs (dict or None): keyword arguments for the method
    Returns:
        whatever getattr(instance, name)(*args, **kwargs) returns
    """
    method = getattr(instance, name)
    return method(*(args or ()), **(kwargs or {}))
import numpy
def fdstencil(z, x, nderiv=None):
    """Compute finite difference weights using recurrences for Lagrange polynomials (see Fornberg 1998)

    :param z: evaluation point for the derivatives
    :param x: grid nodes
    :param nderiv: highest derivative order to compute (defaults to len(x))
    :returns: array c; row i presumably holds the weights of the i-th
        derivative approximation at z over the nodes x -- TODO confirm
        orientation against callers.
    """
    if nderiv is None:
        nderiv = len(x)
    x = numpy.array(x) - z  # shift nodes so the evaluation point is the origin
    k = numpy.arange(nderiv+1)
    c = numpy.outer(0.*k, x)  # (nderiv+1, len(x)) weight table, initialised to zero
    c[0,0] = 1
    prod = 1
    for j in range(1,len(x)):
        dx = x[j] - x[:j]
        # Fornberg recurrence: weights for the newly added node j ...
        c[1:,j] = x[j-1]*c[1:,j-1] - k[1:]*c[:-1,j-1]
        c[0,j] = x[j-1]*c[0,j-1]
        c[:,j] *= -prod
        prod = numpy.prod(dx)
        c[:,j] /= prod
        # ... followed by the in-place update of all previous nodes' weights.
        c[1:,:j] = (x[j]*c[1:,:j] - k[1:,None]*c[:-1,:j]) / dx
        c[0,:j] = x[j]*c[0,:j] / dx
    return c
def is_iterable(obj):
    """Check whether obj is iterable.

    :obj: any object
    :returns: Bool -- True when iter(obj) succeeds, False otherwise
    """
    try:
        iter(obj)
    except TypeError:
        return False
    return True
def refactorize(arr, first_na, na_sentinel=-1):
    """
    Modify `arr` *inplace* to match pandas' factorization rules.
    This detects the code missing values were assigned, sets
    those to `na_sentinel`, and shifts codes above that value
    down by 1 to fill the hole.
    Parameters
    ----------
    arr : ndarray
        First return value from :meth:`pandas.factorize`
    first_na : int
        The index location of the first missing value
    na_sentinel : int, default -1
        Value to set for missing values.

    Returns
    -------
    arr : ndarray
        The same array, modified in place.
    """
    # A naive benchmark shows that this gets ~285x speedup
    # with numba on a 10,000 element array.
    # NOTE: the explicit element-wise loop (rather than vectorized numpy)
    # is deliberate -- it is the form numba compiles well.
    na_code = arr[first_na]
    for i in range(len(arr)):
        val = arr[i]
        if val == na_code:
            arr[i] = na_sentinel
        elif val > na_code:
            # Close the gap left by removing na_code from the code space.
            arr[i] -= 1
    return arr
import time
import hashlib
def generate_file_name(filename):
    """
    @brief generate a unique-ish file name: md5(filename + unix-time) + original extension
    @return new file name, e.g. 'd41d8cd98f00b204e9800998ecf8427e.jpg'
    """
    dot = filename.rfind('.')
    # NOTE(review): when there is no '.', rfind returns -1 and `ext` becomes
    # the last character of the name -- kept as-is; confirm callers always
    # pass names with an extension.
    ext = filename[dot:]
    stamped = filename + str(int(time.time()))
    # Bug fix: hashlib.md5() requires bytes; the original passed a str,
    # which raises TypeError on Python 3.
    hash_md5 = hashlib.md5(stamped.encode('utf-8'))
    return hash_md5.hexdigest() + ext
def get_file_id(service, file_name, mime_type=None, parent_id=None):
    """Return the ID of a Google Drive file
    :param service: A Google Drive API service object
    :param file_name: A string, the name of the file
    :param mime_type: A string, optional MIME type of file to search for
    :param parent_id: A string, optional id of a parent folder to search in
    :return file_id: A string, file ID of the first found result, or None
        when nothing matches or the API call fails
    """
    file_id = None
    # NOTE(review): the query is built by direct string interpolation; a
    # file name containing a single quote breaks (or alters) the query --
    # confirm inputs are trusted or escape them.
    query = """name='{}'
    and trashed=False
    """.format(file_name)
    if parent_id:
        query += "and parents in '{}'".format(parent_id)
    if mime_type:
        query += "and mimeType in '{}'".format(mime_type)
    try:
        # Only name and id fields are requested to keep the response small.
        results = service.files().list(
            q=query,
            fields='files(name, id)').execute()
        if len(results['files']) > 1:
            print('Multiple files found, retrieving first from list')
        file_id = results['files'][0]['id']
    except Exception as e:
        # Best-effort: API/lookup errors are reported and None is returned.
        print('An error occurred: {}'.format(e))
    return file_id
def get_section(f, first_delim, second_delim):
"""
Some open-source indicator downloads contain multiple sections.
This will return the section of the file f that is between the first_delim and second_delim
:param f: The file containing the section to be processed
:type f: file
:param first_delim: A string representing the beginning of the section
:type first_delim: str
:param second_delim: A string representing the terminator of the section
:type second_delim: str
:returns: list
"""
g = []
line = f.readline()
while line.find(first_delim) == -1:
line = f.readline()
if not line: return(None)
line = f.readline()
if second_delim != "":
while line.find(second_delim) == -1:
g.append(line)
line = f.readline()
else:
for line in f:
g.append(line)
return(g) | d0d08cf5fc157b7361c4ff5e20fe466de76d93fb | 697,788 |
from typing import Dict
def number_topics_and_clusters(model, level: int) -> Dict[str, int]:
    """Get the number of topics and clusters for a level of the model hierarchy."""
    model.get_groups(level)  # ensure the groups for this level are materialized
    groups = model.groups[level]
    return {"n_topics": groups["Bw"], "n_clusters": groups["Bd"]}
import re
def get_year_from_date_str(date_str):
    """
    Extract every 4-digit run (candidate year) from a text string.

    Returns a list of matched strings, or None when date_str is falsy.
    """
    if not date_str:
        return None
    return re.findall(r'\d{4}', date_str)
from typing import Any
from typing import Dict
def split_bool(param: Any) -> Dict[str, Any]:
    """ Split query parameter into filter-bool parameters.

    Falsy input (None, False, '') yields {}.  A genuine bool is passed
    through; any other value is string-matched against common truthy
    spellings ("true", "yes", "on", "1").
    """
    if not param:
        return {}
    if isinstance(param, bool):  # fix: isinstance instead of type(...) == bool
        return dict(value=param)
    value = param.lower() in ["true", "yes", "on", "1"]
    return dict(value=value)
def limit(value, min_val, max_val):
    """Return value clamped to the range [min_val, max_val]."""
    clipped_high = min(value, max_val)
    return max(min_val, clipped_high)
def int2tap(x):
    """Convert a signed tap string to a tap position label.

    '-3' -> 'pre3', '2' -> 'post2'.  Note that despite the name, x is a
    string, not an integer.
    """
    if x[0] == "-":
        return "pre" + x[1:]
    return "post" + x
def generate_list(start, stop, step):
    """Build a range-like list between start and stop using step.

    >>> generate_list(0, 5, 1)
    [0, 1, 2, 3, 4]
    >>> generate_list(0, 0, 1)
    []
    >>> generate_list(5, 10, 2)
    [5, 7, 9]
    >>> generate_list(10, 5, -2)
    [10, 8, 6]
    """
    ascending = start < stop
    values = []
    current = start
    while (current < stop) if ascending else (current > stop):
        values.append(current)
        current += step
    return values
def blocksearch(block, name):
    """ Recursive search for name in block (inner blocks)
    Args:
        name (str): search term
    Returns:
        Block OR False
    """
    if hasattr(block, 'tokens'):
        # tokens[1] presumably holds the block's child nodes (lesscpy-style
        # parse tree) -- TODO confirm against the parser's token layout.
        for b in block.tokens[1]:
            # A child matches when its raw() name equals `name`; otherwise
            # recurse into it.  Either way `b` becomes the (possibly nested)
            # match or a falsy non-match.
            b = (b if hasattr(b, 'raw') and b.raw() == name else blocksearch(
                b, name))
            if b:
                return b
    return False
import hashlib
def get_hash_of_file(fname, algo="sha512"):
    """Return the hex digest of the file at fname.

    `algo` selects md5 (case-insensitive) or, for any other value, sha512.
    The file is read in 64 KiB chunks so large files stay memory-friendly.
    """
    hasher = hashlib.md5() if algo.lower() == "md5" else hashlib.sha512()
    with open(fname, "rb") as handle:
        while True:
            chunk = handle.read(65536)
            if not chunk:
                break
            hasher.update(chunk)
    return hasher.hexdigest()
def get_provenance_record(ancestor_files):
    """Create a provenance record describing the diagnostic data and plot.

    :param ancestor_files: list of input file paths the diagnostic consumed;
        stored under the 'ancestors' key.
    :return: dict matching the ESMValTool-style provenance schema --
        TODO confirm the exact schema consumer.
    """
    # All fields other than 'ancestors' are fixed metadata for this figure.
    record = {
        'caption':
        ('(a) Zonally averaged sea surface temperature (SST) error in CMIP5 '
         'models. (b) Equatorial SST error in CMIP5 models. (c) Zonally '
         'averaged multi-model mean SST error for CMIP5 (red line) together '
         'with inter-model standard deviation (shading). (d) Equatorial '
         'multi-model mean SST in CMIP5(red line) together with inter-model '
         'standard deviation (shading) and observations (black). Model '
         'climatologies are derived from the 1979-1999 mean of the historical '
         'simulations. The Hadley Centre Sea Ice and Sea Surface Temperature '
         '(HadISST)(Rayner et al., 2003) observational climatology for '
         '1979-1999 is used as reference for the error calculation (a), (b), '
         'and (c); and for observations in (d).'),
        'statistics': ['anomaly', 'mean', 'stddev', 'clim'],
        'domains': ['eq', 'global'],
        'plot_types': ['geo', 'sect', 'zonal'],
        'authors': ['zimmermann_klaus'],
        'projects': ['crescendo'],
        'references': ['flato13ipcc', 'hadisst'],
        'realms': ['ocean'],
        'themes': ['phys'],
        'ancestors':
        ancestor_files,
    }
    return record
def getAverageCoords ( allAtomsList, atomsIndList ):
    """
    Compute the mean x, y, z coordinates of a selection of atoms.

    Input:
        allAtomsList - list of Atom objects (the whole molecule)
        atomsIndList - indexes of the atoms whose averaged position
                       should be computed
    Output:
        averageCoords - 3-element list with the averaged x, y, z
                        coordinates of the selected atoms
    """
    totals = [0., 0., 0.]
    for atom_index in atomsIndList:
        coords = allAtomsList[atom_index].get_coord()
        for axis in range(3):
            totals[axis] += coords[axis]
    count = float(len(atomsIndList))
    return [component / count for component in totals]
def write(file_path, new_contents):
    """Overwrite file_path with new_contents (bytes); return bytes written."""
    with open(file_path, "wb") as handle:
        return handle.write(new_contents)
def compute_statistics(datasetKaggle):
    """
    Outputs various statistics of the Kaggle dataset
    :param datasetKaggle: the input dataset formatted as a dictionary
    :type datasetKaggle: dict
    :return: dict with 'annotationsMax' (most annotations on one example),
        'num_yesNo' (examples whose first annotation has a yes/no answer)
        and 'text_avgLength' (mean document_text length)
    :rtype: dict
    """
    yesNoAnswer = 0
    annotationsMax = 0
    totalLength = 0
    totalExamples = len(datasetKaggle)
    for example in datasetKaggle:
        annotationsMax = max(len(example['annotations']), annotationsMax)  # check for the maximum number of annotations
        if example['annotations'][0]['yes_no_answer'] != 'NONE':
            yesNoAnswer += 1
        # Bug fix: accumulate lengths -- the original overwrote the value on
        # every iteration, returning last_length / n instead of the average.
        totalLength += len(example['document_text'])
    averageLength = totalLength / totalExamples if totalExamples else 0
    return {'annotationsMax': annotationsMax, 'num_yesNo': yesNoAnswer, 'text_avgLength': averageLength}
def second_half():
    """
    second half solver:

    Appears to be Advent of Code 2017 day 15 part 2 (dueling generators
    with a "picky judge") -- TODO confirm.  Counts, over 5,000,000 rounds,
    how often the low 16 bits of the two generators' reported values match.
    """
    # Generator A starts with 591 ; generator A uses 16807
    # Generator B starts with 393 ; ; generator B uses 48271
    # 2147483647
    A, B = 591, 393
    count = 0
    for x in range (5000000):
        # Part-2 rule: A only "reports" values divisible by 4, B by 8;
        # keep stepping each generator until its value qualifies.
        while A % 4 != 0:
            A = (A * 16807) % 2147483647
        while B % 8 != 0:
            B = (B * 48271) % 2147483647
        # Judge compares only the lowest 16 bits of each reported value.
        if A & 65535 == B & 65535:
            count += 1
        A = (A * 16807) % 2147483647
        B = (B * 48271) % 2147483647
    return count
def label(field):
    """Render foundation label: wrap the form field in its template context."""
    return {'field': field}
def filter_ice_border(ice_thick):
    """Zero out the ice thickness along all four edges of the 2-D domain.

    Mutates ice_thick in place and returns it.
    """
    for edge in (0, -1):
        ice_thick[edge, :] = 0
        ice_thick[:, edge] = 0
    return ice_thick
import re
def split_with_offset(line, _len=len):
    """Split line on common delimiters, returning (token, start, end)
    tuples with inclusive character offsets into the original line."""
    tokens = re.split('[ ,.:!"|(){}\t\n]', line)
    find = line.index
    spans = []
    cursor = 0
    for token in tokens:
        start = find(token, cursor)
        cursor = start + _len(token)
        spans.append((token, start, cursor - 1))
    return spans
def is_missing(column):
    """
    Determine whether the given column contains missing values.

    :param column: { pandas.Series | list | tuple }
    :return: True / False, or None (after printing a message) when the
        input has no pandas Series interface.

    Usage:
    ======
    >> is_missing(data['population'])
    >> True
    """
    try:
        has_null = column.isnull().values.any()
    except AttributeError:
        print("Method only supported pandas.cores.series")
        return None
    return True if has_null else False
def get_uniq_chars_list(num: int) -> list:
    """Return `num` distinct characters, starting at 'A' (code point 65).

    Raises Exception for num > 60 (codes past that leave the intended range).
    """
    if num > 60:
        raise Exception("Only up to 60 uniq characters supported, you provided:", str(num))
    return [chr(code) for code in range(65, 65 + num)]
def column_names_from_cursor(cur):
    """Return column names as a list from a cx_Oracle Cursor.

    Each entry of cur.description is a 7-tuple whose first element is the name.
    """
    return [column_info[0] for column_info in cur.description]
from typing import Callable
import functools
def compose(*function: Callable) -> Callable:
    """Compose functions right-to-left.

    I.e. ``compose(f, g)(x)`` computes ``f(g(x))``; with no arguments the
    identity function is returned.

    Args:
        *function (Callable):
            Any number of functions to compose together.
            Output type of function N must be the input type of function N+1.

    Returns:
        Callable: the composed function.
    """
    def composed(x):
        result = x
        # Apply right-to-left, matching mathematical composition order.
        for fn in reversed(function):
            result = fn(result)
        return result
    return composed
def calls():
    """Fixture to record calls; every invocation returns a fresh list."""
    return []
import subprocess
import logging
def checkVMs(VMs):
    """
    checks if clients can be pinged

    :param VMs: iterable of hostnames/IPs to ping one at a time
    :return: 1 as soon as any host fails to respond, 0 when all respond
    """
    # NOTE(review): '-n 1' is the Windows ping count flag (POSIX uses '-c');
    # this presumably targets Windows hosts -- confirm.
    for VM in VMs:
        p = subprocess.Popen(['ping', '-n', '1', VM], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        # Forward ping's own output to the log for diagnostics.
        if stdout != b'':
            logging.info(stdout.decode('utf-8'))
        if stderr != b'':
            logging.info(stderr.decode('utf-8'))
        if p.returncode != 0:
            # Fail fast: remaining hosts are not checked after a failure.
            print('Computer: ' +VM+ ' cant be pinged.')
            logging.error('Computer: ' +VM+ ' cant be pinged.')
            return 1
    return 0
def oxford_join(words):
    """Concatenate words with commas and a final (Oxford-comma) 'and'.

    Requires a non-empty list; one word is returned as-is, two words are
    joined with a bare 'and', three or more use 'a, b, and c' style.
    """
    assert len(words) > 0
    if len(words) == 1:
        return words[0]
    if len(words) == 2:
        return words[0] + ' and ' + words[1]
    return ', '.join(words[:-1]) + ', and ' + words[-1]
def echo(value: str):
    """Test Celery task that echoes a string.

    Args:
        value (str): string to return.
    Returns:
        str: the input argument, unchanged.
    """
    return value
import os
import ctypes
def load_lib():
    """
    Register the stonne connection library into TVM

    :return: the loaded ctypes.CDLL handle for stonne_lib.so
    """
    # Resolve the shared object relative to this source file so the load
    # works regardless of the current working directory.
    stonne_conv2d = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                        "stonne_lib/stonne_lib.so")
    # load in as global so the global extern symbol is visible to other dll.
    lib = ctypes.CDLL(stonne_conv2d, ctypes.RTLD_GLOBAL)
    return lib
import struct
def wordswap(data):
    """ Swap byte order in each DWORD (32-bit word) of `data`.

    The input is zero-padded on the right to a multiple of 4 bytes, then
    every 4-byte word is re-emitted with its bytes reversed
    (big-endian <-> little-endian).
    """
    # Fix: use integer division -- '/' yields a float in Python 3, and the
    # correct word count only survived via '%d' truncation.
    fmt = '%dL' % ((len(data) + 3) // 4)
    pad = len(data) % 4
    if pad:
        pad = 4 - pad
        data += b'\x00' * pad
    return struct.pack('<' + fmt, *struct.unpack('>' + fmt, data))
def distance(x1, y1, x2, y2):
    """
    Euclidean distance between the points (x1, y1) and (x2, y2).
    """
    dx = x1 - x2
    dy = y1 - y2
    return (dx * dx + dy * dy) ** 0.5
def combine_english_defs(senses, separator=u', '):
    """Combine the English definitions found in senses.

    Args:
        senses: array of dicts; each may carry an 'english_definitions' list.
        separator: string placed between definitions.
    Returns:
        A single string of all English definitions joined by separator.
    """
    combined = []
    for sense in senses:
        if 'english_definitions' in sense:
            combined.extend(sense['english_definitions'])
    return separator.join(combined)
def add_or_get_merge_anchor_index(lane, pos):
    """
    Add a merge anchor at pos if needed; return the anchor's index.

    A stored position of None stands for lane.start.
    """
    if not hasattr(lane, "merge_anchors"):
        lane.merge_anchors = []
    for index, entry in enumerate(lane.merge_anchors):
        matches_start = entry[1] is None and pos == lane.start
        if matches_start or entry[1] == pos:
            return index
    stored_pos = None if pos == lane.start else pos
    lane.merge_anchors.append([lane.anchor, stored_pos])
    return len(lane.merge_anchors) - 1
def is_hovering(rect, mouse_pos):
    """Return True when mouse_pos (x, y) lies within rect's bounds (inclusive)."""
    inside_x = rect.left <= mouse_pos[0] <= rect.right
    inside_y = rect.top <= mouse_pos[1] <= rect.bottom
    return inside_x and inside_y
import math
def compass_bearing(pointA, pointB):
    """
    Calculate the initial compass bearing from pointA to pointB.

    Uses the forward-azimuth formula:
        θ = atan2(sin(Δlong).cos(lat2),
                  cos(lat1).sin(lat2) − sin(lat1).cos(lat2).cos(Δlong))
    :Parameters:
      - `pointA: (latitude, longitude) tuple in decimal degrees
      - `pointB: (latitude, longitude) tuple in decimal degrees
    :Returns:
      The bearing in degrees, normalized into [0, 360)
    :Returns Type:
      float
    """
    if (type(pointA) != tuple) or (type(pointB) != tuple):
        raise TypeError("Only tuples are supported as arguments")
    lat_a = math.radians(pointA[0])
    lat_b = math.radians(pointB[0])
    delta_lng = math.radians(pointB[1] - pointA[1])
    x = math.sin(delta_lng) * math.cos(lat_b)
    y = math.cos(lat_a) * math.sin(lat_b) - (math.sin(lat_a)
            * math.cos(lat_b) * math.cos(delta_lng))
    # atan2 yields -180..+180 degrees; shift into the 0..360 compass range.
    initial = math.degrees(math.atan2(x, y))
    return (initial + 360) % 360
def knapsack_dp(items, capacity):
    """Return the maximum value that can be stored in the knapsack.

    items: sequence of tuples where item[1] is the weight and item[2] the
        value (item[0] is an identifier and is not used here).
    capacity: maximum total weight.

    Classic 0/1 knapsack DP, O(len(items) * capacity).
    """
    rows = len(items) + 1
    cols = capacity + 1
    # dp_table[i][j] = best value using the first i items with capacity j.
    dp_table = [[0] * cols for _ in range(rows)]
    for i in range(1, rows):
        # Bug fixes vs. original: index items[i-1] (items[i] crashed with
        # IndexError on the last row), allow an item that exactly fills the
        # remaining capacity (<= rather than <), and drop `is` comparisons
        # on integers.
        weight = items[i - 1][1]
        value = items[i - 1][2]
        for j in range(1, cols):
            best = dp_table[i - 1][j]  # value without taking this item
            if weight <= j:
                best = max(best, dp_table[i - 1][j - weight] + value)
            dp_table[i][j] = best
    return dp_table[rows - 1][cols - 1]
def optimize(s, probability, loBound, hiBound):
    """ Optimize for the maximum possible withdrawal at a given failure rate.

    :param s: simulation object exposing simulation/withdrawal config and
        init_simulation()/simulate() -- presumably a retirement Monte-Carlo
        model; TODO confirm the exact type.
    :param probability: target failure probability in percent
    :param loBound, hiBound: bracketing bounds for the withdrawal percentage
    Returns:
        widthdrawal: maximum possible withdrawal percentage
    """
    n_ret_months = s.simulation['n_ret_years'] * 12
    accuracy = 0.005  # accuracy of the optimization
    # Set up the optimization: start at the midpoint of the bracket.
    deltaWidthdrawal = (hiBound - loBound) / 2
    percWidthdrawal = loBound + deltaWidthdrawal
    cnt = 0
    curProb = 0
    # Optimization by successive approximation (bisection on the rate).
    while (deltaWidthdrawal > accuracy) or (curProb > probability):
        cnt += 1
        s.withdrawal['fixed_pct'] = percWidthdrawal
        s.init_simulation()
        s.simulate()
        # A trial "survives" when its portfolio lasts all n_ret_months.
        survival = [trial_dict['exhaustion'] for trial_dict in s.latest_simulation]
        curProb = 100 * (len(survival) - survival.count(n_ret_months)) / len(survival)
        if s.visualization['textoutput'] == True:
            print(cnt, '. Entnahme: ', percWidthdrawal, ' Ausfallwahrscheinlichkeit: ', curProb, '%')
        deltaWidthdrawal /= 2
        if deltaWidthdrawal <= accuracy / 5:
            # Safety stop: step size has collapsed well below the accuracy.
            break
        if curProb > probability:
            percWidthdrawal -= deltaWidthdrawal
        else:
            percWidthdrawal += deltaWidthdrawal
    return percWidthdrawal
import functools
def wraps_set_expire(func):
    """
    Decorator for cache-accessor methods: after the wrapped method runs,
    refresh the default expiry of ``keyname`` via ``self.set_expire`` —
    but only when the method produced a non-None result.
    """
    @functools.wraps(func)
    def inner(self, keyname, *args, **kwargs):
        result = func(self, keyname, *args, **kwargs)
        # Touch the key's TTL only after a successful operation.
        if result is not None:
            self.set_expire(keyname)
        return result
    return inner
def copy_dict(other_dict):
    """
    Return a shallow copy of the dictionary, separate from the original.

    Unlike the previous ``dict(**other_dict)`` implementation, this also
    works for non-string keys. The separation is only at the top level:
    deleting a key from the original does not affect the copy, but
    mutations of shared values (lists, dicts) are visible through both.

    >>> d1 = {'a': 1, 'b': 2}
    >>> d2 = copy_dict(d1)
    >>> del d1['a']
    >>> 'a' in d2
    True
    >>> copy_dict({1: 'one'})[1]
    'one'
    """
    return dict(other_dict)
def box_tuple(c, typ, val):
"""
Convert native array or structure *val* to a tuple object.

Builds a CPython tuple of length ``typ.count`` and boxes each native
element via the context's per-dtype ``box()`` hook.
NOTE(review): assumes ``c`` is a numba-style lowering/boxing context
exposing .pyapi, .builder and .box — confirm against the caller.
"""
tuple_val = c.pyapi.tuple_new(typ.count)
for i, dtype in enumerate(typ):
# Pull element i out of the native aggregate, then box it to a PyObject
item = c.builder.extract_value(val, i)
obj = c.box(dtype, item)
c.pyapi.tuple_setitem(tuple_val, i, obj)
return tuple_val | 45e277406d904060afb1e5d22469fcf104a12b32 | 697,830 |
import glob
import os
def get_files(input_dir):
    """Return the list of ``.txt`` files found exactly two directory levels
    below ``input_dir`` (pattern: ``input_dir/*/*/*.txt``).

    :param input_dir: root directory to search
    :raises RuntimeError: if no matching files are found
    """
    fns = glob.glob(os.path.join(input_dir, '*/*/*.txt'))
    if not fns:
        # Fixed typo in the original message ("selscting")
        raise RuntimeError('Error selecting files')
    return fns
def filter_all_persons(queryset, name, all_persons):
    """Restrict the queryset to TTT learners unless all_persons is set.

    When ``all_persons`` is truthy the queryset is returned untouched;
    otherwise only distinct persons with a 'learner' role on a
    'TTT'-tagged event remain. ``name`` is accepted for filter-API
    compatibility but unused.
    """
    if not all_persons:
        queryset = queryset.filter(
            task__role__name='learner',
            task__event__tags__name='TTT').distinct()
    return queryset
import shlex
def make_commit_msg(pairs, no_verify):
"""Return a commit message to explain what was replaced by the provided
arguments.

:param pairs: list of (old_identifier, new_identifier) tuples
:param no_verify: when True, append a note that commit hooks were skipped
:return: the complete commit message as a string
"""
# Reconstruct the exact command line that produced this commit,
# shell-quoting each identifier so the message can be re-run verbatim.
script = ["./scripts/maint/rename_c_identifier.py"]
for id1, id2 in pairs:
qid1 = shlex.quote(id1)
qid2 = shlex.quote(id2)
script.append(" {} {}".format(qid1, qid2))
script = " \\\n".join(script)
# Subject line: specific for a single rename, generic otherwise.
if len(pairs) == 1:
line1 = "Rename {} to {}".format(*pairs[0])
else:
line1 = "Replace several C identifiers."
msg = """\
{}
This is an automated commit, generated by this command:
{}
""".format(line1, script)
if no_verify:
msg += """
It was generated with --no-verify, so it probably breaks some commit hooks.
The committer should be sure to fix them up in a subsequent commit.
"""
return msg | 4757c9200dc25d97bdf8a3de0954d0e373adc07c | 697,835 |
import numpy
def _bootstrap_cost(observed_labels, forecast_probabilities, cost_function,
num_replicates):
"""Uses bootstrapping to estimate cost.
E = number of examples
:param observed_labels: length-E numpy array of true classes (integers in
0...1).
:param forecast_probabilities: length-E numpy array of event probabilities
(probabilities of class = 1).
:param cost_function: Cost function. Must be negatively oriented (i.e.,
lower is better), with the following inputs and outputs.
Input: observed_labels: See above.
Input: forecast_probabilities: See above.
Output: cost: Scalar value.
:param num_replicates: Number of bootstrap replicates (i.e., number of times
to estimate cost).
:return: cost_estimates: length-B numpy array of cost estimates, where B =
number of bootstrap replicates.
"""
# Pre-fill with NaN so any unwritten slot is visible downstream.
cost_estimates = numpy.full(num_replicates, numpy.nan)
if num_replicates == 1:
# Single replicate: no resampling, evaluate on the full sample.
cost_estimates[0] = cost_function(
observed_labels, forecast_probabilities
)
else:
num_examples = len(observed_labels)
example_indices = numpy.linspace(
0, num_examples - 1, num=num_examples, dtype=int
)
for k in range(num_replicates):
# The bootstrap: resample example indices with replacement.
# NOTE(review): uses the global numpy RNG — not reproducible
# unless the caller seeds it.
these_indices = numpy.random.choice(
example_indices, size=num_examples, replace=True
)
cost_estimates[k] = cost_function(
observed_labels[these_indices],
forecast_probabilities[these_indices]
)
print('Average cost estimate over {0:d} replicates = {1:f}'.format(
num_replicates, numpy.mean(cost_estimates)
))
return cost_estimates | 473613c7b16876d8b4467e68a119309f35793f90 | 697,836 |
import random
def remove_edge_random_stochastic(G, p, random_seed=None, copy=True):
    """
    Degrade a graph by removing each edge independently with probability p.

    :param G: input graph (networkx-style: supports .copy(), .edges and
        .remove_edge)
    :param p: uniform probability that any given edge fails
    :param random_seed: optional seed for reproducible degradation
    :param copy: when True operate on a copy, otherwise mutate G in place
    :return: the degraded graph
    """
    degraded = G.copy() if copy else G
    if random_seed is not None:
        random.seed(random_seed)
    # Snapshot the edge list so removals do not disturb iteration.
    for edge in list(G.edges):
        if random.random() <= p:
            degraded.remove_edge(*edge)
    return degraded
def list_services(config_dict):
    """
    List available services.

    Args:
        config_dict (dict): configuration dictionary keyed by service name
    Returns:
        list: names of the available services, in insertion order
    """
    return [service for service in config_dict]
def actions_by_behavior(actions):
    """
    Group actions by the name of each behavior they carry (SubBehaviors are
    not considered). Within every group the actions keep the order in which
    they appear in the script.

    @param actions (list) of Action objects
    @return (dict) mapping behavior name -> list of actions
    """
    grouped = {}
    for action in actions:
        for behavior in action.behaviors:
            grouped.setdefault(behavior.name, []).append(action)
    return grouped
import decimal
def decimal_round(val, prec=1e-4):
    """Truncate ``val`` towards zero at the given decimal precision.

    :param val: number to round
    :param prec: quantum defining the precision (default 1e-4, i.e. four
        decimal places); ROUND_DOWN means truncation, not rounding half-up
    :return: the truncated value as a float
    """
    quantum = decimal.Decimal(f'{prec}')
    truncated = decimal.Decimal(val).quantize(quantum,
                                              rounding=decimal.ROUND_DOWN)
    return float(truncated)
def plugin_reconfigure(handle, new_config):
    """ Reconfigures the plugin; called when the plugin's configuration is
    changed during the operation of the device service.

    Args:
        handle: handle returned by the plugin initialisation call (unused)
        new_config: JSON object representing the new configuration category
    Returns:
        new_handle: new handle (the configured GPIO pin value) to be used in
        future calls
    Raises:
    """
    gpio_pin = new_config['gpiopin']['value']
    return gpio_pin
import logging
def get_loglevel(loglevel):
    """ Translate a textual log level into its numeric value.

    Parameters
    ----------
    loglevel: str
        Name of the level (case-insensitive), e.g. "debug" or "INFO"

    Returns
    -------
    int
        The numeric representation of the given loglevel

    Raises
    ------
    ValueError
        If the name does not correspond to a logging level
    """
    level = getattr(logging, loglevel.upper(), None)
    if isinstance(level, int):
        return level
    raise ValueError("Invalid log level: %s" % loglevel)
import logging
import time
def is_valid_date(strdate):
    """
    Return True if ``strdate`` is a valid date string.

    Accepts "YYYY-MM-DD HH:MM:SS" when the string contains a colon,
    otherwise plain "YYYY-MM-DD".

    :param strdate: candidate date string
    :return: True when the string parses, False otherwise (including
        non-string input)
    """
    logging.info('is_valid_date')
    try:
        if ":" in strdate:
            time.strptime(strdate, "%Y-%m-%d %H:%M:%S")
        else:
            time.strptime(strdate, "%Y-%m-%d")
        return True
    except (ValueError, TypeError):
        # Narrowed from a bare `except Exception`: parse failures raise
        # ValueError; non-string input raises TypeError on `in`/strptime.
        return False
import base64
import requests
import json
def retrieve_appdevtools_access_token(username, password, auth_endpoint):
"""
Retrieve access token for appdev tools
Args:
username: username
password: password
auth_endpoint: authorization endpoint
Returns: token (value of the "token" field in the JSON response)
Raises:
KeyError: if the response body has no "token" field
"""
# Form credentials request (HTTP Basic: base64 of "user:password")
credentials = "{0}:{1}".format(username, password)
base_encoded_credential = base64.b64encode(bytes(credentials, "utf-8")).decode().replace('\n', '')
# Form application headers
headers = {
"content-type": "application/json",
"Authorization": "Basic {}".format(base_encoded_credential)
}
# Form login request
# NOTE(review): credentials are sent twice -- in the Basic auth header
# AND in the JSON body; presumably the endpoint requires both. Confirm.
login_request = {
"username": username,
"password": password
}
# Issue request; verify=True enables SSL certificate validation
# (the inherited comment claiming validation was skipped was wrong)
response = requests.post(url=auth_endpoint, json=login_request,
headers=headers, verify=True)
# Get access token
return json.loads(response.text)["token"] | 8f08f4be45998199aa9db151fec26722a13eb060 | 697,844 |
def struct(typename, attrnames):
    """
    Generate a simple and fast data class.

    :param typename: name of the generated class
    :param attrnames: attribute names, separated by spaces and/or commas
    :return: a new class using ``__slots__`` for the given attributes
    """
    attrnames = tuple(attrnames.replace(',', ' ').split())
    reprfmt = '(' + ', '.join(name + '=%r' for name in attrnames) + ')'
    def __init__(self, *args, **kwargs):
        # Counts are reported +1 to account for `self`, mimicking CPython.
        if len(args) > len(attrnames):
            msg = "__init__() takes %d positional arguments but %d were given" \
                % (len(attrnames) + 1, len(args) + 1)
            raise AttributeError(msg)
        for name, value in zip(attrnames, args):
            setattr(self, name, value)
        for name, value in kwargs.items():
            if name not in attrnames:
                msg = "__init__() got an unexpected keyword argument '%s'" % name
                raise TypeError(msg)
            setattr(self, name, value)
    def __repr__(self):
        """ Return a nicely formatted representation string """
        return self.__class__.__name__ + \
            reprfmt % tuple(getattr(self, x) for x in attrnames)
    def __getattr__(self, name):
        """
        Only called for undefined attributes; exists here mostly to mute
        the pylint 'no-member' warning.
        """
        # Fixed: the original did `raise self.__getattribute__(name)`,
        # which only "worked" because the inner lookup itself raised
        # AttributeError; raise it directly instead.
        raise AttributeError(name)
    namespace = {
        '__doc__'    : '%s(%s)' % (typename, attrnames),
        '__slots__'  : attrnames,
        '__init__'   : __init__,
        '__repr__'   : __repr__,
        '__getattr__': __getattr__,
    }
    return type(typename, (object,), namespace)
from typing import List
import os
def write_shell_script(dir: str, name: str, content: List[str]) -> str:
    """
    Creates and writes a bash script in the specified dir with the given name.
    The contents of the script are taken from the ``content`` parameter
    where each item in the list is written as a line in the script.
    The script is made executable (mode 0o755) and its path is returned.

    Example: ``write_shell_script("/tmp", "foobar", ["sleep 10", "echo hello world"])``
    ::
        # creates /tmp/foobar with content below
        #! /bin/bash
        sleep 10
        echo hello world
    """
    script_path = os.path.join(dir, name)
    with open(script_path, "w") as f:
        f.write("#! /bin/bash\n")
        for line in content:
            f.write(f"{line}\n")
        f.write("\n")
    # rwxr-xr-x so the script is directly executable
    os.chmod(script_path, 0o755)
    return script_path
def count_literals(term):
    """
    Count the literals in a term.

    Every character except the underscore placeholder counts as a literal.

    Args:
        term : A string containing literals
    Returns:
        The number of literals in term
    """
    return len(term) - term.count("_")
def BmV_to_gmr( BmV ):
    """Relation from Cook+2014 (2014MNRAS.445..890C) transforming galaxy
    B-V colour into g-r.

    Entries flagged with the sentinel -99.0 are propagated unchanged.
    Expects an array supporting elementwise arithmetic (e.g. numpy).
    """
    gmr = 1.12*BmV - 0.18
    # Restore the sentinel for flagged (missing) input colours.
    for idx, colour in enumerate(BmV):
        if colour == -99.0:
            gmr[idx] = -99.0
    return gmr
import json
def get_json_from_file(filename, warn = False):
    """Loads json from a file.

    :param filename: path of the JSON file to read
    :param warn: when True, a missing file only emits a warning and the
        function returns None; when False (default) the FileNotFoundError
        propagates. (The original accepted this flag but ignored it.)
    :return: the parsed JSON value, or None when warn is True and the
        file does not exist
    """
    import warnings  # local: only needed on the optional warn path
    try:
        # with-statement closes the handle (the original leaked it)
        with open(filename, 'r') as f:
            return json.loads(f.read())
    except FileNotFoundError:
        if warn:
            warnings.warn("File not found: %s" % filename)
            return None
        raise
import subprocess
def tail(filename, n):
"""
Get last n lines from file as string

Shells out to the external ``tail`` utility, so this only works where
``tail`` is on PATH (POSIX systems).

Args:
filename:``str``
filename of log file
n:``int``
number of lines
Return:
lines form file as string
"""
p=subprocess.Popen(['tail','-n',str(n),filename], stdout=subprocess.PIPE)
soutput, _=p.communicate()
# NOTE(review): splitting on '\r' looks suspicious -- tail emits '\n'
# line endings on POSIX, so this usually yields a single-element list
# containing the whole text. Confirm whether '\n' was intended.
lines = soutput.decode('utf8').split('\r')
return lines | 7e398b7d5806149555f4b2bb70292e17f0f916c7 | 697,850 |
def take_while(collection, predicate):
    """:yaql:takeWhile
    Yields elements from the collection as long as the predicate is true,
    stopping at the first element for which it is false.

    :signature: collection.takeWhile(predicate)
    :receiverArg collection: input collection
    :argType collection: iterable
    :arg predicate: function of one argument to apply to every
        collection value
    :argType predicate: lambda
    :returnType: iterable

    .. code::

        yaql> [1, 2, 3, 4, 5].takeWhile($ < 4)
        [1, 2, 3]
    """
    for element in collection:
        if not predicate(element):
            return
        yield element
def ip_has_digit(matched_str):
    """Return True when the matched PII span contains at least one digit.

    Guards against spans like '::' that the matcher may pick up
    accidentally but that cannot be real addresses.
    """
    for character in matched_str:
        if character.isdigit():
            return True
    return False
def list_frequencies(list_of_items):
    """ Determine frequency of items in list_of_items.

    :param list_of_items: list of hashable items
    :return: dict mapping each distinct item to its number of occurrences

    Single O(n) pass; the original called ``list.count`` per element,
    which is O(n^2) for the same result.
    """
    freqs = {}
    for item in list_of_items:
        freqs[item] = freqs.get(item, 0) + 1
    return freqs
import typing
import subprocess
import re
def search_output_from_cmd(cmd: str,
                           find_regex: typing.Pattern) -> typing.Match:
    """
    Execute a shell command and search its stdout for a regex.

    :param cmd: command line passed to the shell
    :param find_regex: compiled regular expression object to search for
    :return: the ``re.search()`` match object
    :raises RuntimeError: when the pattern does not occur in the output
    :raises subprocess.CalledProcessError: when the command exits non-zero
    """
    completed = subprocess.run(cmd, shell=True, check=True,
                               stdout=subprocess.PIPE)
    stdout_text = completed.stdout.decode("utf-8")
    match = re.search(find_regex, stdout_text)
    if match is None:
        raise RuntimeError("Could not find '%s' in output for '%s'" %
                           (find_regex.pattern, cmd))
    return match
import numpy
def maxDist(src, dst):
    """
    Return the largest column-wise Euclidean distance between src and dst.

    Both arguments are matrices of the same shape where each column
    represents a position; the distance is computed per column and the
    maximum is returned (0 for matrices with no columns).
    """
    displacement = dst - src
    column_norms = (
        numpy.linalg.norm(displacement[:, col])
        for col in range(displacement.shape[1])
    )
    return max(column_norms, default=0)
def edit_distance(list1, list2):
    # To compute the distance between two entries, say entries 92 and 94,
    # try this in interactive mode after running this script:
    # edit_distance(results_list[92].split(',')[1:], results_list[94].split(',')[1:])
    """Ref: https://bit.ly/2Pf4a6Z

    Return the number of positions at which the two lists' overlapping
    prefixes differ, plus the difference in their lengths.

    Note: despite the name, this is a Hamming-style distance, not the
    Levenshtein edit distance. The original built no-op slices
    (``list1[:difference]`` without assignment) and relied on an
    IndexError to stop; this version computes the same result explicitly.
    """
    difference = abs(len(list1) - len(list2))
    for left, right in zip(list1, list2):
        if left != right:
            difference += 1
    return difference
import math
def normStdevMask(img,mask):
    """
    Normalize an image to mean = 0 and stddev = 1.0, with both statistics
    computed only over the pixels where mask is nonzero.

    :param img: numeric array (e.g. numpy) supporting elementwise ops
    :param mask: array of 0/1 weights, same shape as img
    :return: normalized image; img itself (unchanged) when the mask is empty

    Note: uses the sample standard deviation (n-1 denominator), so the
    mask must select at least two pixels; a constant masked region would
    divide by zero. (The original also computed an unused ``img.std()``,
    removed here.)
    """
    n1 = mask.sum()
    if n1 == 0:
        return img
    sum1 = (img*mask).sum()
    sumsq1 = (img*img*mask).sum()
    avg1 = sum1/n1
    std1 = math.sqrt((sumsq1 - sum1*sum1/n1)/(n1-1))
    return (img - avg1) / std1
import os
def theme_path():
    """Return the path of the theme directory.

    Resolved as ``../themes`` relative to the directory containing this
    file; the outer ``os.path.dirname`` call merely strips the trailing
    separator from the constructed path.
    """
    this_dir = os.path.dirname(os.path.realpath(__file__))
    return os.path.dirname("{}/../themes/".format(this_dir))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.