content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import numpy
def transform(Y, R, t, s=None, invert=False):
    """Apply rotation C{R} and translation C{t} to C{Y}; optionally scale by C{s}.

    >>> R, t = fit(X, Y)
    >>> Y_fitted = transform(Y, R, t)

    @param Y: (n, d) input vector
    @type Y: numpy.array
    @param R: (d, d) rotation matrix
    @type R: numpy.array
    @param t: (d,) translation vector
    @type t: numpy.array
    @param s: scaling factor (its reciprocal is used when C{invert} is True)
    @type s: float
    @param invert: if True, apply the inverse transformation
    @type invert: bool
    @return: transformed input vector
    @rtype: numpy.array
    """
    if invert:
        # Inverse map: undo the translation first, then rotate back
        # (dot with R instead of R.T, assuming R is orthogonal).
        x = numpy.dot(Y - t, R)
        if s is not None:
            s = 1. / s
    else:
        x = numpy.dot(Y, R.T) + t
    if s is not None:
        x *= s
    return x
def _filter_annotations(annotations, image, small_object_area_threshold,
foreground_class_of_interest_id):
"""Filters COCO annotations to visual wakewords annotations.
Args:
annotations: dicts with keys: {
u'objects': [{u'id', u'image_id', u'category_id', u'segmentation',
u'area', u'bbox' : [x,y,width,height], u'iscrowd'}] } Notice
that bounding box coordinates in the official COCO dataset
are given as [x, y, width, height] tuples using absolute
coordinates where x, y represent the top-left (0-indexed)
corner.
image: dict with keys: [u'license', u'file_name', u'coco_url', u'height',
u'width', u'date_captured', u'flickr_url', u'id']
small_object_area_threshold: threshold on fraction of image area below which
small objects are filtered
foreground_class_of_interest_id: category of COCO dataset which visual
wakewords filters
Returns:
annotations_filtered: dict with keys: {
u'objects': [{"area", "bbox" : [x,y,width,height]}],
u'label',
}
"""
objects = []
image_area = image['height'] * image['width']
for annotation in annotations['objects']:
normalized_object_area = annotation['area'] / image_area
category_id = int(annotation['category_id'])
# Filter valid bounding boxes
if category_id == foreground_class_of_interest_id and \
normalized_object_area > small_object_area_threshold:
objects.append({
u'area': annotation['area'],
u'bbox': annotation['bbox'],
})
label = 1 if objects else 0
return {
'objects': objects,
'label': label,
} | 3b0fb8d97b805808bc422cc68a6957102e494e69 | 694,970 |
import math
def _rotate(point, angle, origin = (0,0),unit = 'degree'):
"""Rotate a point counterclockwise by a given angle around a given origin.
Angle can be both in radian or degree. Helper function for rotating a layout.
Parameters
----------
point : tuple
position in (x,y) form
angle : float
angle to rotate the point
origin : tuple in (x,y) form
point will rotate with respect to the origin.
unit : 'degree'/'radian' to indicate if the angle is in degrees or radians.
if given in degrees angle is converted to radians.
Returns
-------
tuple
rotated point as (x,y) tuple.
"""
ox, oy = origin
px, py = point
if unit == 'degree':
angle = math.radians(angle)
if unit == 'radian':
angle = angle
qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)
qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)
return qx, qy | ec01d3fc7038b2846b1f33626fa1a33b2b23968f | 694,971 |
def exists(env):
    """Check if the `cscope` command is configured in *env*.

    Returns the configured command, or None when the 'CSCOPE' key is absent.
    """
    if 'CSCOPE' in env:
        return env['CSCOPE']
    return None
def _splitNameByAbbreviations(parts):
"""
If a name contains abbreviations, then split so that first name is abbreviated parts and last name is parts that
are not abbreviated.
:param parts: Individual words in the name
:raises: ValueError if parts cannot be partitioned into given and family name
:returns: Two values: Given name, Family name
"""
if len(parts[0]) == 1 or parts[0].endswith('.'):
for i in range(1, len(parts)):
if len(parts[i]) > 1 and not parts[i].endswith('.'):
return ' '.join(parts[:i]), ' '.join(parts[i:])
raise ValueError('Could not split name on abbreviations') | c3ca4655fc15801ad57f88629e8f5ad5e3c7f530 | 694,973 |
import torch
def log1p_exp(x):
    """
    Numerically stable computation of log(1 + exp(x)).

    Uses the identity log1p(exp(x)) = max(x, 0) + log1p(exp(-|x|)).
    """
    positive_part = x * x.ge(0).to(torch.float32)
    return positive_part + torch.log1p(torch.exp(-torch.abs(x)))
def ape(accounts):
    """Return the default (first) account from *accounts*."""
    first_account = accounts[0]
    return first_account
import argparse
def create_args():
    """Build the command-line parser.

    :return: ArgumentParser that handles cmd arguments.
    """
    parser = argparse.ArgumentParser(
        usage="-f <path to folder with data files to analyse>")
    parser.add_argument(
        "-f",
        dest="folder_path",
        required=True,
        help="path to folder with data files to analyse",
    )
    return parser
def binary_search_iter(sorted_nums, target):
    """Iteratively binary-search *target* in a sorted list.

    Returns True when found, False otherwise.
    Time complexity: O(log n). Space complexity: O(1).
    """
    if not sorted_nums:
        return False
    lo, hi = 0, len(sorted_nums) - 1
    # Shrink [lo, hi] until a single candidate remains.
    while lo < hi:
        mid = (lo + hi) // 2
        value = sorted_nums[mid]
        if value == target:
            return True
        if value < target:
            lo = mid + 1
        else:
            hi = mid - 1
    # Final check when lo == hi (or hi dropped just below lo).
    return sorted_nums[lo] == target
def checking_streak(coin_flips):
    """Count the length-6 windows of identical flips ('H' or 'T') in the list."""
    streaks = 0
    # Slide a six-flip window across the sequence; the original's j-counter
    # guard is equivalent to stopping the window start at len - 6.
    for start in range(len(coin_flips) - 5):
        window = coin_flips[start:start + 6]
        if window.count('H') == 6 or window.count('T') == 6:
            streaks += 1
    return streaks
def r2v_factory(klass, *args):
    """Unpickle helper: rebuild an `Ostap::MoreFooFit::TwoVars` object.

    - see Ostap.MoreRooFit.TwoVars
    """
    instance = klass(*args)
    return instance
import logging
def dereference_chart_ids(client, school, chart):
    """
    MongoClient string dict -> dict

    Convert the chart of block metadata into a split chart whose entries hold
    both course data (looked up in the "<school>-catalog" database) and the
    original block metadata, keyed by the stringified block id.
    """
    new_chart = {}
    db_name = f"{school}-catalog"
    for block in chart:
        # Replace the ObjectId with its string form so the result is JSON-able.
        block_id = str(block.pop('_id', None))
        block['_id'] = block_id
        new_chart[block_id] = {}
        if 'catalog_id' in block and 'department' in block:
            dept = block["department"]
            collection = client[db_name][dept]
            if isinstance(block['catalog_id'], list):
                courses = []
                course_ids = []
                for raw_id in block.pop('catalog_id', None):
                    course = collection.find_one(raw_id)
                    course_id = str(course["_id"])
                    course["_id"] = course_id
                    courses.append(course)
                    course_ids.append(course_id)
                block['catalog_id'] = course_ids
                new_chart[block_id]['course_data'] = courses
            else:
                raw_id = block.pop('catalog_id', None)
                course = collection.find_one(raw_id)
                if course is None:
                    # Block is skipped entirely: its metadata is never
                    # attached to new_chart for this entry.
                    logging.warning(f"Cannot find {raw_id} in {dept}")
                    continue
                course_id = str(course["_id"])
                course["_id"] = course_id
                block['catalog_id'] = course_id
                new_chart[block_id]['course_data'] = course
        new_chart[block_id]['block_metadata'] = block
    return new_chart
def get_uplink_downlink_count(duthost, tbinfo):
    """
    Retrieve uplink and downlink counts from DEVICE_NEIGHBOR_METADATA based
    on topology: SpineRouter/ToRRouter for t1, LeafRouter/Server for t0.

    Args:
        duthost: DUT host object
        tbinfo: information about the running testbed
    Returns:
        (uplink count, downlink count); None for other topologies.
    """
    config_facts = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts']
    neighbor_meta = config_facts['DEVICE_NEIGHBOR_METADATA']
    topo = tbinfo['topo']['name']

    def _count(uplink_type, downlink_type):
        # Tally neighbor devices by their declared type.
        uplinks = downlinks = 0
        for data in neighbor_meta.values():
            if data['type'] == uplink_type:
                uplinks += 1
            elif data['type'] == downlink_type:
                downlinks += 1
        return uplinks, downlinks

    if "t1" in topo:
        return _count("SpineRouter", "ToRRouter")
    elif "t0" in topo:
        return _count("LeafRouter", "Server")
import numpy
def sortCoordinatesDistance(coordinates, center, types1, types2=None, co_sort=None):
    """
    Sort the coordinates with respect to distance from the provided center index.

    :param coordinates: The coordinates to sort.
    :param center: The index of the center to calculate distances from.
    :type center: int
    :param types1: The first list of site types to co-sort with the coordinates.
    :type types1: a list of strings
    :param types2: The second, optional, list of site types to co-sort with the coordinates.
    :type types2: a list of strings
    :param co_sort: optional extra list to co-sort; when given it is also returned.
    :returns: The sorted coordinates, distances and sorted types.
    """
    if types2 is None:
        types2 = [t for t in types1]
    ret_co_sort = True
    if co_sort is None:
        co_sort = types2
        ret_co_sort = False
    # Structured dtype so all fields are sorted together.
    dt = coordinates.dtype
    dtype = [('x', dt),
             ('y', dt),
             ('z', dt),
             ('d', dt),
             ('type1', numpy.array(types1).dtype),
             ('type2', numpy.array(types2).dtype),
             ('co_sort', numpy.array(co_sort).dtype)]
    # Calculate the distance from the center.
    # FIX: the original computed numpy.linalg.norm(coord) — the distance from
    # the coordinate-system origin — leaving `origin` (the chosen center) unused.
    origin = coordinates[center]
    distances = numpy.array([numpy.linalg.norm(coord - origin) for coord in coordinates])
    # Setup the data to sort.
    to_sort = numpy.array([(c[0], c[1], c[2], d, t1, t2, cs)
                           for (c, d, t1, t2, cs) in zip(coordinates, distances, types1, types2, co_sort)],
                          dtype=dtype)
    # Sort on distance first, then type and position for deterministic ties.
    sorted_list = numpy.sort(to_sort, order=['d', 'type1', 'x', 'y', 'z'])
    # Extract the info.
    coordinates = numpy.array([[c[0], c[1], c[2]] for c in sorted_list])
    distances = numpy.array([c[3] for c in sorted_list])
    types1 = [c[4] for c in sorted_list]
    types2 = [c[5] for c in sorted_list]
    co_sort = [c[6] for c in sorted_list]
    # Done.
    if ret_co_sort:
        return (coordinates, distances, types1, types2, co_sort)
    else:
        return (coordinates, distances, types1, types2)
import json
def load_conf(filepath):
    """Return the URL for API requests.

    The JSON config file maps a bridge IP address to a dict holding a
    'username'; the first key is used.
    """
    with open(filepath, 'r') as handle:
        data = json.load(handle)
    ip_add = next(iter(data))
    username = data[ip_add]['username']
    return 'http://' + ip_add + '/api/' + username
def get_project_name():
    """Retrieve the COMPOSE_PROJECT_NAME value from the local .env file.

    Returns None implicitly when no matching line is present.
    """
    with open('.env') as env_file:
        for line in env_file:
            if 'COMPOSE_PROJECT_NAME' in line:
                # Split only on the first '=' so values may contain '='.
                return line.split('=', maxsplit=1)[1].strip()
def get_shape(input):
    """Return the shape of a tensor-like object via its ``size()`` method."""
    return input.size()
import socket
def is_valid_ipv6(ipv6_str):
    """Check if the input ipv6_str is a valid IPv6 address.

    Returns: True/False
    """
    try:
        socket.inet_pton(socket.AF_INET6, ipv6_str)
        return True
    except (OSError, ValueError):
        # FIX: narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit). inet_pton raises OSError for
        # malformed addresses.
        return False
def numberToWords(num):
    """Convert a non-negative integer to its English-words representation.

    :type num: int
    :rtype: str
    """
    if num == 0:
        return 'Zero'
    LESS_TAN_TWENTY = ['', 'One', 'Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten', 'Eleven', 'Twelve', 'Thirteen', 'Fourteen', 'Fifteen', 'Sixteen', 'Seventeen', 'Eighteen', 'Nineteen']
    TENS = ['', 'Ten', 'Twenty', 'Thirty', 'Forty', 'Fifty', 'Sixty', 'Seventy', 'Eighty', 'Ninety']
    THOUSANDS = ['', 'Thousand', 'Million', 'Billion']
    def helper(n):
        # Spell out a number below 1000 (with a trailing space).
        if n == 0:
            return ''
        elif n < 20:
            return LESS_TAN_TWENTY[n] + ' '
        elif n < 100:
            # FIX: use integer division — the original Python-2 style `/`
            # produces float indices under Python 3 and raises TypeError.
            return TENS[n // 10] + ' ' + helper(n % 10)
        else:
            return LESS_TAN_TWENTY[n // 100] + ' Hundred ' + helper(n % 100)
    i, res = 0, ''
    # Consume three digits at a time, appending the scale word for each group.
    while num > 0:
        if num % 1000 != 0:
            res = helper(num % 1000) + THOUSANDS[i] + ' ' + res
        num //= 1000
        i += 1
    return res.strip()
def convert_seconds_to_str(sec: float):
    """Return a compact string ("1y 2d 3h 4m 5s") for a number of seconds.

    Zero-valued units are omitted. FIX: the original returned "" for a zero
    input; "0s" is returned instead.
    """
    sec = round(sec)
    parts = []
    # Decompose into fixed-size units, largest first (a year is 365 days).
    for suffix, unit in (("y", 31536000), ("d", 86400), ("h", 3600), ("m", 60)):
        count, sec = divmod(sec, unit)
        if count:
            parts.append(str(int(count)) + suffix)
    if sec:
        parts.append(str(int(sec)) + "s")
    if not parts:
        return "0s"
    return " ".join(parts)
def remove_contained(a):
    """
    Remove intervals fully contained in another interval.

    :param a: list of tuples (start, end, header)
    :return: intervals with contained intervals removed
    """
    kept = []
    highest_end = -1
    # Sorting by (start asc, end desc) guarantees a container precedes the
    # intervals it contains; anything not extending the max end is contained.
    for interval in sorted(a, key=lambda iv: (iv[0], -iv[1])):
        if interval[1] > highest_end:
            highest_end = interval[1]
            kept.append(interval)
    return kept
def unflatten_dict(d_flat):
    """Expand a dict keyed by tuples into a nested dict of the same mapping type."""
    nested = type(d_flat)()
    for key_path, value in d_flat.items():
        if not key_path:
            # Empty tuple keys are silently dropped (as in the original).
            continue
        node = nested
        # Walk/create intermediate dicts for all but the last key component.
        for part in key_path[:-1]:
            if part not in node:
                node[part] = type(d_flat)()
            node = node[part]
        node[key_path[-1]] = value
    return nested
def _is_string_same_case(input: str):
"""Returns flag indicating whether input string is a single case.
"""
return input == input.lower() or input == input.upper() | 93e8a41859cf8e6e6d871c787c18519c51ab5a4d | 694,991 |
def xyz_from_sparse_index(indexes):
    """Generate (x, y, z) coordinates from a sparse index sequence.

    Each digit contributes a base-4 component per axis (x = digit % 4,
    y = digit % 16 // 4, z = digit // 16); earlier digits are coarser levels.
    """
    x = y = z = 0
    depth = len(indexes)
    for level, digit in enumerate(indexes):
        scale = 4 ** (depth - 1 - level)
        x += (digit % 4) * scale
        y += (digit % 16 // 4) * scale
        z += (digit // 16) * scale
    return (x, y, z)
def rank_freq(hist):
    """Return a list of (rank, freq) tuples, rank 1 being the most frequent.

    hist: map from word to frequency
    returns: list of (rank, freq) tuples
    """
    ordered = sorted(hist.values(), reverse=True)
    return list(enumerate(ordered, start=1))
import sys
def get_install_requires():
    """Get the conditional dependencies for source (non-wheel) distributions.

    The list is empty when building via `bdist_wheel`, since wheels declare
    environment markers instead.
    """
    requires = []
    if 'bdist_wheel' not in sys.argv:
        if sys.version_info.major == 2:
            requires.append('monotonic')
        if sys.platform == 'win32':
            requires.append('pyreadline')
    return sorted(requires)
def search_high(left, right, tuples, weight):
    """
    Recursively find the first index in [left, right] whose tuple's second
    field exceeds *weight* (elements assumed ordered by that field).

    :param left: search limit
    :param right: search limit
    :param tuples: array of elements
    :param weight: predefined value
    :return: index of array (right + 1 when no element qualifies)
    """
    if left == right:
        # Base case: either this element qualifies or none in range does.
        return left if tuples[left][1] > weight else left + 1
    mid = (left + right) // 2
    if tuples[mid][1] > weight:
        return search_high(left, mid, tuples, weight)      # search left
    return search_high(mid + 1, right, tuples, weight)     # search right
def get_subs(relativize_fn, links):
    """Return (original link, relativized link) substitution pairs.

    Pairs where relativization is a no-op are filtered out.
    """
    pairs = ((link, relativize_fn(link)) for link in links)
    return [(old, new) for old, new in pairs if old != new]
import os
def which(name):
    """
    Return the full path of a command found on '.' plus the PATH.

    Windows executable extensions ('.com', '.exe', '.bat', '.cmd') are tried
    when *name* has none. Raises Exception when the program is not found.
    """
    base, ext = os.path.splitext(name)
    candidate_exts = (ext,) if ext else ('.com', '.exe', '.bat', '.cmd')
    search_dirs = ['.'] + os.environ['PATH'].split(os.pathsep)
    for directory in search_dirs:
        for candidate_ext in candidate_exts:
            full_path = os.path.join(directory, base + candidate_ext)
            if os.path.isfile(full_path):
                return full_path
    raise Exception("couldn't find program on path: %s" % name)
def return_if_inactive(*ret_args):
    """
    Decorator factory: short-circuit decorated methods on inactive clients.

    When ``self._alive`` is falsy the wrapped method is not called and
    *ret_args* (as a tuple) is returned instead. This prevents infinite
    looping when client methods are invoked from one of the callbacks.
    """
    def decorator(method):
        def wrapper(self, *args, **kwargs):
            if self._alive:
                return method(self, *args, **kwargs)
            return ret_args
        return wrapper
    return decorator
import socket
def test_connection(host, port):
    """Test available connection of a given host and port.

    Returns None on success and the string "Error" on failure.

    FIX: the original returned before `s.close()`, leaking the socket on
    success (the close call was unreachable); `finally` guarantees cleanup.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.connect((host, port))
        return
    except socket.error:
        return "Error"
    finally:
        s.close()
def capillary(srz, ss, crmax, rzsc):
    """Apply capillary rise from the saturated store into the root zone.

    Moves water from `ss` into `srz`, limited by the capillary-rise maximum
    `crmax` and by the root-zone storage capacity `rzsc`.

    :returns: updated (srz, ss)
    """
    # FIX: compute the deficit once. The original's else-branch updated `srz`
    # first and then subtracted `rzsc - srz` (by then zero) from `ss`,
    # leaving `ss` unchanged.
    deficit = rzsc - srz
    if deficit > crmax:
        srz += crmax
        ss -= crmax
    else:
        srz += deficit
        ss -= deficit
    return srz, ss
def load_rel(f="related.txt"):
    """Load related-games data from file; return related info as a dict.

    Each non-blank line becomes a key mapped to True.

    FIX: the original tested ``len(line) < 0`` (never true), so blank lines
    were stored under the key ''; it also never closed the file.
    """
    related = {}
    with open(f, 'r') as handle:
        for line in handle:
            line = line.strip()
            # ignore blank lines
            if not line:
                continue
            related[line] = True
    return related
def return_true(*args, **kwargs):
    """Stub that always returns True, regardless of its arguments."""
    return True
def tok2int_sent(example, tokenizer, max_seq_length):
    """Convert a (source, hypothesis) pair into BERT-style input features.

    Returns (input_ids, input_mask, input_seg, input_label), each padded with
    zeros to 2 * max_seq_length + 3.
    """
    src_text, hyp_text = example[0], example[1]
    src_tokens = tokenizer.tokenize(src_text)[:max_seq_length]
    hyp_tokens = tokenizer.tokenize(hyp_text)[:max_seq_length]

    # Segment A: [CLS] + source + [SEP]; all labels/segments are 0.
    tokens = ["[CLS]"] + src_tokens + ["[SEP]"]
    input_seg = [0] * len(tokens)
    input_label = [0] * len(tokens)

    # Segment B: hypothesis + [SEP]; word-start pieces (no "##") labelled 1.
    tokens = tokens + hyp_tokens + ["[SEP]"]
    input_label += [0 if "##" in token else 1 for token in hyp_tokens]
    input_label.append(1)
    input_seg += [1] * (len(hyp_tokens) + 1)

    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    input_mask = [1] * len(input_ids)

    max_len = max_seq_length * 2 + 3
    padding = [0] * (max_len - len(input_ids))
    input_ids += padding
    input_mask += padding
    input_seg += padding
    input_label += padding

    assert len(input_ids) == max_len
    assert len(input_mask) == max_len
    assert len(input_seg) == max_len
    assert len(input_label) == max_len
    return input_ids, input_mask, input_seg, input_label
from typing import List
from typing import Dict
from typing import Any
def list_dict_swap(v: List[Dict[Any, Any]]) -> Dict[Any, List[Any]]:
    """Convert a list of dicts to a dict of lists.

    All dicts are assumed to share the keys of the first one.

    >>> list_dict_swap([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]) == {'a': [1, 3], 'b': [2, 4]}
    """
    if not v:
        # FIX: the original raised IndexError on an empty list.
        return {}
    return {k: [dic[k] for dic in v] for k in v[0]}
def _parse_metal_choice(s, max_value):
"""
Parse user options provided in ``detect_metal_options``.
The syntax is <position>,[<new name>], using semicolons to choose several ones.
<new name> can only be 3-letters max and should not collide with existing Amber
types. This is not checked, so be careful! If you choose several ones, they
are considered part of the same metal center! Do not use it for unrelated ions;
instead run the script several times and use the step 1n.
For example:
- 0 # would select the first one (default), without renaming
- 0:ZN1 # select first one with a new name (ZN1)
- 0:ZN1 1:ZN2 # select first and second with new names
Parameters
==========
s : str
Return
======
list of (index, name)
name can be None
"""
if not s:
return [(0, None)]
result = []
for selection in s.split():
name = None
fields = selection.split(':')
if len(fields) == 1:
name == None
elif len(fields) == 2 and 0 < len(fields[1]) <= 3:
name = fields[1]
else:
raise ValueError(' !!! Wrong syntax!')
index = int(fields[0])
if index < 0 or index >= max_value:
raise ValueError(' !!! Index must be within 0 and ' + str(max_value))
result.append((index, name))
return result | 0ad06942f782e5362dc8c9f530194320cbbb3a28 | 695,005 |
def invert_graph(graph):
    """
    Invert a directed graph.

    :param graph: the graph, represented as a dictionary of sets
    :type graph: dict(set)
    :return: the inverted graph
    :rtype: dict(set)
    """
    inverted = {}
    for source, targets in graph.items():
        for target in targets:
            inverted.setdefault(target, set()).add(source)
    return inverted
def test_low_temp_range(ds, var):
    """
    Count suspiciously low temperature values (< 180 K) in ds[var].

    Valid for tas, tasmin, tasmax.
    """
    low_temp_threshold = 180  # K
    return ds[var].where(ds[var] < low_temp_threshold).count()
def vecDot(vecA, vecB):
    """
    Return the 2D dot product of vecA and vecB.

    :param vecA: first vector (indexable, at least 2 components)
    :param vecB: second vector
    :return: vecA . vecB
    """
    return vecA[0] * vecB[0] + vecA[1] * vecB[1]
def _right_pivot(low, high, array=None):
"""Return rightmost index."""
return high - 1 | ed515351cec3480fdb93e1ec149fd25343b92285 | 695,009 |
def compare(all_dict, funcnames):
    """Return sets of objects present in only one of __all__ / refguide.

    :returns: (names only in all_dict, names only in funcnames)
    """
    only_all = {name for name in all_dict if name not in funcnames}
    only_ref = {name for name in funcnames if name not in all_dict}
    return only_all, only_ref
def verify_api_access_token():
    """Test route for API Access Token."""
    return "token is enabled"
def get_title_from_path(path):
    """Build the title of a movie from its path: <path>filename<.format>.

    Takes the basename, drops the final extension and joins any remaining
    dot-separated pieces with spaces.

    HACK PENDING: guessing titles from filenames is a HUGE restriction;
    titles should eventually come from better metadata.
    """
    filename = path.split('/')[-1]
    # split('.')[:-1] always yields a list, so the original's
    # `type(title) is list` check was redundant — just join.
    name_pieces = filename.split('.')[:-1]
    return ' '.join(name_pieces)
def deval(x, solStruct):
    """
    Evaluate the ODE solution *solStruct* at *x*.

    Returns the real part of ``solStruct.sol(x)``.
    """
    return solStruct.sol(x).real
def notas(*num, sit=0):
    """
    Summarise a set of grades.

    :param num: grade values
    :param sit: if True the summary includes the overall situation
                ('Boa'/'Ruim'); if False it is omitted
    :return: a dict() with several pieces of info about the grades
    """
    media = sum(num) / len(num)
    dicionario = {}
    # Deliberate `==` comparisons (as in the original): any value that is
    # neither truthy-equal to True nor falsy-equal to False yields {}.
    if sit == True:
        dicionario = {
            'numero de notas': len(num),
            'notamax': max(num),
            'notamin': min(num),
            'media': media,
            'situação': 'Boa' if media >= 7 else 'Ruim'
        }
    if sit == False:
        dicionario = {
            'numero de notas': len(num),
            'notamax': max(num),
            'notamin': min(num),
            'media': media,
        }
    return dicionario
def get_lstm_out_dim(config):
    """
    Calculate the LSTM output dimensions.

    :returns: (pooled output dim, last-timestep dim)
    """
    hidden = config['lstm_nhid']
    # 'all' pooling concatenates all 24 timesteps; otherwise one hidden vector.
    pooled = hidden * 24 if config['lstm_pooling'] == 'all' else hidden
    return pooled, hidden
def straight(ranks):
    """Return True when the 5 sorted ranks form a consecutive run (a straight).

    :param ranks: card ranks sorted from highest to lowest
    """
    highest, lowest = ranks[0], ranks[-1]
    return highest - lowest == 4
def fix_text(txt):
    """
    Fix text so it is CSV friendly: double quotes become single quotes and
    all whitespace runs collapse to single spaces.
    """
    normalized = txt.replace('"', "'")
    return " ".join(normalized.split())
def qobjects(cls_target, oid, foreign_name):
    """
    Get queryset objects from *cls_target* where the related `foreign_name`
    matches the object/id `oid`.

    An int matches on `<foreign_name>__id`; any other value is treated as a
    collection and matches on `<foreign_name>__in`.
    """
    lookup = 'id' if isinstance(oid, int) else 'in'
    return cls_target.objects.filter(**{'{0}__{1}'.format(foreign_name, lookup): oid})
def is_classic_netcdf(file_buffer):
    """
    Return True when *file_buffer* matches the classic netCDF magic number.

    :param str file_buffer: Byte-array of the first 4 bytes of a file
    """
    CLASSIC_NETCDF_MAGIC = b"\x43\x44\x46\x01"  # "CDF\x01"
    return file_buffer == CLASSIC_NETCDF_MAGIC
def left(i: int) -> int:
    """
    Return the index of the left child of node *i* in an array-backed
    binary heap.

    :param i: index of the heap node
    :return: index of its left child
    """
    return 2 * i + 1
def trim_version(version, num_parts=2):
    """
    Return just the first <num_parts> of <version>, split by periods.

    For example, trim_version("1.2.3", 2) will return "1.2".
    """
    # Exact-type check kept from the original (str subclasses also rejected).
    if type(version) is not str:
        raise TypeError("Version should be a string")
    if num_parts < 1:
        raise ValueError("Cannot split to parts < 1")
    return ".".join(version.split(".")[:num_parts])
def getMaxOutdegree(friends):
    """Return the id of the student with the most friends.

    Keys are visited in sorted order; ties keep the earliest key.

    :returns: -1 when nobody has any friends (or the dict is empty)
    """
    best_student = -1
    best_count = 0
    for student in sorted(friends.keys()):
        friend_count = len(friends[student])
        # Strictly greater keeps the earliest key on ties.
        if friend_count > best_count:
            best_count = friend_count
            best_student = student
    return best_student
import argparse
def network_valid(nn_architecture):
    """
    Check the network architecture decision ('FC' or 'CNN').

    :raises argparse.ArgumentTypeError: for any other value
    """
    if nn_architecture not in ('FC', 'CNN'):
        raise argparse.ArgumentTypeError('Please enter model as FC or CNN.')
    return nn_architecture
from pathlib import Path
import os
import inspect
def get_cache_dir(dir_name: str) -> Path:
    """Return (creating if needed) the cache directory for *dir_name*.

    The cache root is "<main dir>/botoy-cache". The main dir is the current
    directory when it looks like a bot project root (contains botoy.json,
    REMOVED_PLUGINS, bot.py or plugins); otherwise it is derived from the
    calling plugin's file location.

    :param dir_name: name of the cache sub-directory
    :return: Path of the corresponding directory
    """
    project_markers = ["botoy.json", "REMOVED_PLUGINS", "bot.py", "plugins"]
    for entry in os.listdir(Path(".")):
        if entry in project_markers:
            main_dir = Path(".")
            break
    else:
        # Called from a plugin: walk up from the caller's file until we step
        # out of its "plugins" directory.
        caller_frame = inspect.currentframe().f_back  # type:ignore
        caller_file = caller_frame.f_globals["__file__"]  # type:ignore
        candidate = Path(caller_file).absolute()
        while "plugins" in str(candidate):
            candidate = candidate.parent
            if candidate.name == "plugins":
                main_dir = candidate.parent
                break
        else:
            main_dir = Path(".")
    cache_dir = main_dir / "botoy-cache"
    if not cache_dir.exists():
        cache_dir.mkdir()
    this_cache_dir = cache_dir / dir_name
    if not this_cache_dir.exists():
        os.makedirs(this_cache_dir)
    return this_cache_dir
def read_data_2list(filename):
    """
    Read a scheduling instance file.

    data structure:
        <task count> <column count>
        then, per task: release time, process time, tail time
        (whitespace-separated floats, possibly spanning lines)

    :param filename: path of the instance file (assumed well-formed, with a
        multiple of three values after the header)
    :return: (task count, column count, list of [release, process, tail])
    """
    with open(filename, "r") as handle:
        header = handle.readline().split()
        tasks_val, columns_val = int(header[0]), int(header[1])
        values = [float(v) for v in handle.read().split()]
    # Group the flat value stream into triples.
    tasks = [values[i:i + 3] for i in range(0, len(values), 3)]
    return tasks_val, columns_val, tasks
import json
def load_search_res(labeled=True):
    """
    Return a dict containing the search results, keyed by scientist's id.

    Reads the training file when *labeled* is True, the validation file
    otherwise (both relative to the current directory).
    """
    path = ('./data/train_search_info.json' if labeled
            else './data/validation_search_info.json')
    with open(path) as handle:
        return json.load(handle)
import re
import tempfile
def safe_bed_file(infile):
    """Make a BED file with track and browser lines ready for liftOver.

    liftOver will fail with track or browser lines. We can make it happy
    by converting these to comments in a temporary copy. See:
    https://lists.soe.ucsc.edu/pipermail/genome/2007-May/013561.html

    FIX: the original opened *infile* with mode 'w', truncating the input
    file instead of reading it.

    :returns: path of the temporary, sanitised copy
    """
    fix_pat = re.compile("^(track|browser)")
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as out_handle, open(infile) as in_handle:
        for line in in_handle:
            if fix_pat.match(line):
                line = "#" + line
            out_handle.write(line)
    return out_handle.name
import os
def exists_or_link(filename):
    """
    Return True for regular files and for links, even broken ones.

    os.path.isfile alone returns False for a broken symlink; islink does NOT.
    """
    if os.path.isfile(filename):
        return True
    return os.path.islink(filename)
def is_class(T):
    """Return True when *T* is a class (i.e. usable with issubclass)."""
    try:
        # issubclass raises TypeError when T is not a class.
        return issubclass(T, T)
    except TypeError:
        return False
def hardware_settings(data):
    """Return the relay GPIO pin (Board numbering) from the hardware settings."""
    relay_pin = data["hardware_settings"]["Rele_pin"]
    return int(relay_pin)
def is_vowel_sign_offset(c_offset):
    """
    Is the offset a vowel sign (maatraa)
    """
    return 0x3e <= c_offset <= 0x4c
def custom404handler(err):
    """Custom handler for 404 errors: expose the error object to the view."""
    return {'err': err}
from typing import List
import re
def preprocess_text(filename: str) -> List:
    """
    Read *filename* and return a list of lists: for each matching line, the
    words that occur after 'dass'/'das'.

    FIX: `!= None` / `== None` comparisons replaced with `is not None` /
    `is None` (PEP 8 idiom; identical behaviour for Match objects).
    """
    listed_words = []
    with open(filename, 'r') as annis:
        for line in annis:
            if re.search(r"\d+\.", line) is not None:  # take only the needed lines
                # w-element directly before dass
                if re.search(r"\d+\.\ttok\s\sw\w+\sdass?", line) is not None:
                    line = re.sub(r'\d+\.\ttok\s\sw\w+\sdass?', "", line)  # strip that part off
                    listed_words.append(line.split())
                # phrasal component before dass (searches the possibly-stripped line)
                if re.search(r"\d+\.\s\stok\s\s.+\sdass?", line) is not None:
                    line = re.sub(r'\d+\.\s\stok\s\s.+\sdass?', "", line)
                    listed_words.append(line.split())
                # w-element plus phrasal component before dass
                if re.search(r"\d+\.\ttok\s\sw\w+\s\w+\sdass?", line) is not None:
                    line = re.sub(r'\d+\.\ttok\s\sw\w+\s\w+\sdass?', "", line)
                    listed_words.append(line.split())
    return listed_words
import math
def degrees(rad_angle):
    """Convert any angle in radians to degrees.

    If the input is None, then it returns None.
    For numerical input, the output is mapped to [-180, 180].
    """
    if rad_angle is None:
        return None
    deg = rad_angle * 180 / math.pi
    # Wrap into [-180, 180]; both endpoints are reachable.
    while deg > 180:
        deg -= 360
    while deg < -180:
        deg += 360
    return deg
import re
def remove_log_text(text):
    """Strip glog-style prefixes (everything through ".cpp:<line>] ") from *text*.

    Returns *text* unchanged when no such prefix is present.
    """
    # Lookbehinds must be fixed-width, hence one alternative per digit count (1-4).
    pattern = (r"(?:(?<=\.cpp:[0-9]] )|(?<=\.cpp:[0-9][0-9]] )"
               r"|(?<=\.cpp:[0-9][0-9][0-9]] )|(?<=\.cpp:[0-9][0-9][0-9][0-9]] )).+")
    match = re.search(pattern, text)
    return text if match is None else match.group(0)
import io
def make_pdf_from_image_array(image_list):
    """Make a PDF given a list of Image objects.

    :param image_list: List of images; the first page drives the save call,
        the rest are appended.
    :type image_list: list
    :return: pdf_data
    :type pdf_data: PDF as bytes
    """
    with io.BytesIO() as buffer:
        first_page, remaining_pages = image_list[0], image_list[1:]
        first_page.save(
            buffer,
            "PDF",
            resolution=100.0,
            save_all=True,
            append_images=remaining_pages,
        )
        return buffer.getvalue()
async def communicate(*args, **kwargs):
    """Dummy cross-app communicate function that always cancels (returns None)."""
    return None
import os
def get_basename():
    """Find the base path so we can build proper paths.

    Needed so the tests can be started from anywhere; the prefix is chosen
    from the current working directory's name and contents.
    """
    curdir = os.path.basename(os.path.abspath(os.path.curdir))
    if os.path.isdir('tests') and curdir == 'pycarddav':
        return 'tests/'
    if os.path.isdir('assets') and curdir == 'tests':
        return './'
    if os.path.isdir('pycarddav') and curdir == 'pycarddav':
        return 'pycarddav/tests/'
    if curdir == 'local':
        return '../'
    raise Exception("don't know where I'm")
def get_primary_label(topic, primary_labels):
    """Return the primary (preferred) label for a topic.

    Args:
        topic (string): topic to analyse.
        primary_labels (dictionary): primary labels for every topic that
            belongs to a cluster.
    Returns:
        string: the mapped primary label, or the topic itself when it does
        not belong to any cluster.
    """
    return primary_labels.get(topic, topic)
def find_or_create_location(location, dbh):
    """Return the id of *location*, inserting a new row when it is absent.

    Looks the name up in the ``location`` table first; on a miss it inserts
    the row, commits, and returns the freshly generated id.
    """
    cur = dbh.cursor()
    cur.execute('select location_id from location where name = %s', (location, ))
    row = cur.fetchone()
    if row is not None:
        return row[0]
    print(f'Loading location "{location}"')
    cur.execute('insert into location (name) values (%s)', (location, ))
    new_id = cur.lastrowid
    dbh.commit()
    return new_id
def sqnet_spread_layers(n):
    """Spread n layers evenly between the 4 maxpool layers of the
    configurable squeezenet model.

    Returns a tuple (layers_n, first_expand_layer, depth_incr_doubler):
    layers_n is the per-segment layer count (6 entries), first_expand_layer
    is 1 when an extra "expand" layer follows the first one (odd n), and
    depth_incr_doubler holds per-segment depth-increase multipliers.
    """
    first_expand = n % 2  # odd n: one extra expand layer after the first
    inner = (n - 2 - first_expand) // 2
    # Doubling factors for the first two segments depend on inner mod 4.
    doubler = [(inner + off) % 4 // 3 + 1 for off in (0, 2)] + [1, 1]
    layers = [1 + first_expand]
    layers += [(inner + off) // 4 * 2 for off in (0, 2, 3, 1)]
    layers.append(1)
    # Adjustments needed for small layer counts.
    if layers[1] == 0:
        doubler[3] = 0
    if layers[2] == 0:
        doubler[2] = 0
    return layers, first_expand, doubler
def indent(string):
    """Prefix every non-empty line of *string* with a tab.

    >>> indent('abc\\ndef\\n')
    '\\tabc\\n\\tdef\\n'
    """
    shifted = []
    for line in string.split('\n'):
        shifted.append('\t' + line if line else '')
    return '\n'.join(shifted)
def scrabble(wort):
    """Compute the German Scrabble score of a word.

    :param wort: the word to score (must be lower case)
    :return: the total Scrabble value of its letters
    """
    werte = {"a": 1, "b": 3, "c": 4, "d": 1, "e": 1, "f": 4, "g": 2, "h": 2,
             "i": 1, "j": 6, "k": 4, "l": 2, "m": 3, "n": 1, "o": 2, "p": 4,
             "q": 10, "r": 1, "s": 1, "t": 1, "u": 1, "v": 6, "w": 3, "x": 8,
             "y": 10, "z": 3, "ä": 6, "ö": 8, "ü": 6}
    return sum(werte[buchstabe] for buchstabe in wort)
def _days_before_year(year):
"""year -> number of days before January 1st of year."""
y = year - 1
return y * 365 + y // 4 - y // 100 + y // 400 | 5070c747c0e28235e51dadd107de77cedfca2886 | 695,051 |
import math
def point_distance(p1, p2, squared=False):
    """Calculate the Euclidean distance between two points.

    The original docstring claimed the *squared* distance unconditionally;
    the squared form is only returned when ``squared=True``.

    Parameters
    ----------
    p1 : dict with keys 'x' and 'y' — first point
    p2 : dict with keys 'x' and 'y' — second point
    squared : bool
        When True, return the squared distance (skips the sqrt).

    Returns
    -------
    float

    Examples
    --------
    >>> point_distance({'x': 0, 'y': 0}, {'x': 3, 'y': 4})
    5.0
    >>> '%.2f' % point_distance({'x': 0, 'y': 0}, {'x': 1, 'y': 22})
    '22.02'
    """
    dx = p1["x"] - p2["x"]
    dy = p1["y"] - p2["y"]
    if squared:
        return (dx*dx + dy*dy)
    else:
        return math.sqrt(dx*dx + dy*dy)
import torch
def compute_diag_trace(cov: torch.Tensor) -> float:
    """Trace of *cov* via its eigen-decomposition, with negative
    eigenvalues clamped to zero.

    For a positive semi-definite covariance matrix the clamp only removes
    numerical noise, so the result equals the trace.

    :param cov: symmetric covariance matrix as a torch tensor (upper
        triangle is used, matching the original ``UPLO='U'``)
    :return: the (clamped) eigenvalue sum as a Python float
    """
    eig_vals, _ = torch.linalg.eigh(cov, UPLO='U')
    # float(...) replaces the original .sum().unsqueeze(0).cpu().numpy()[0]
    # round-trip and makes the return type match the annotation.
    return float(eig_vals.clamp(min=0).sum())
import os
def getFilePathsFor(path, excluded):
    """Recursively collect the file paths under a directory.

    :param path: the parent directory to walk
    :param excluded: file names to leave out of the result
    :return: list of forward-slash file paths (``.git`` trees are skipped)
    """
    collected = []
    for folder, _subdirs, names in os.walk(path):
        # Skip anything inside a .git tree (the walk still descends, but
        # every nested path also contains '.git' and is skipped too).
        if folder.count('.git') != 0:
            continue
        for name in names:
            if name in excluded:
                continue
            collected.append((folder + '/' + name).replace('\\\\', '\\').replace('\\', '/'))
    return collected
def path_starts_with(path, prefix):
    """Return True when *prefix* is an initial segment of *path*.

    Works on any sequence (list, tuple, ...) whose elements support
    equality comparison.

    >>> path_starts_with([1, 2, 3], [1, 2])
    True
    >>> path_starts_with([2], [1])
    False
    >>> path_starts_with([1], [1, 2])
    False
    """
    if len(path) < len(prefix):
        return False
    return all(a == b for a, b in zip(path, prefix))
def to_string(quantity_or_item: str) -> str:
    """Identity conversion: hand the given string back unchanged."""
    return quantity_or_item
import bs4
def is_tag(obj):
    """Return True when *obj* is a BeautifulSoup ``Tag`` instance."""
    return isinstance(obj, bs4.Tag)
import argparse
def _get_file_list_parser(enforce=True):
"""
Parser for get-file-list options.
"""
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--contains', help='Regex expression included in the file name')
return parser | e1f9d843a4a3e08aec7f5be5f7e726777e5f167b | 695,060 |
import argparse
def parse_args():
    """Parse the command-line arguments for the CLI.

    :return: argparse.Namespace with ``input``, ``output`` and ``fov``
        attributes.
    """
    def _fov_list(raw):
        # "a, 'b'" -> ['a', 'b']: split on commas, strip padding and quotes.
        return [str(token.strip(' ').strip("'")) for token in raw.split(',')]

    cli = argparse.ArgumentParser()
    cli.add_argument(
        '-i', '--input',
        type=str,
        required=True,
        help="Path to multipage-tiff file of format [t, x, y]",
    )
    cli.add_argument(
        '-o', '--output',
        type=str,
        required=True,
        help="Path to write results",
    )
    # fov is parsed into a list of strings.
    cli.add_argument(
        '-f', '--fov',
        type=_fov_list,
        required=False,
        help="list of field-of-views to process (subfolders in raw data directory)",
    )
    return cli.parse_args()
def _wrap_config_compilers_xml(inner_string):
"""Utility function to create a config_compilers XML string.
Pass this function a string containing <compiler> elements, and it will add
the necessary header/footer to the file.
"""
_xml_template = """<?xml version="1.0" encoding="UTF-8"?>
<config_compilers>
{}
</config_compilers>
"""
return _xml_template.format(inner_string) | d3a3e01f5b5f41c497d4445f4491eabca2e32893 | 695,062 |
def str_to_bytearray(s: str, encoding: str = 'ascii') -> bytearray:
    """Encode *s* into a bytearray using *encoding*.

    The default is ASCII; choose e.g. 'utf-8' for text containing
    non-ASCII characters (such as Chinese).
    """
    return bytearray(s, encoding)
import os
def get_raw_coverage_dirs(task_output_dir):
    """Return the set of directories containing raw v8 coverage files.

    Args:
        task_output_dir (str): The output directory for the sharded task.
            Raw JavaScript v8 coverage files are identified by their
            ".cov.json" suffix.

    Returns:
        set[str]: directories that contain at least one ".cov.json" file.
    """
    coverage_directories = set()
    for dir_path, _sub_dirs, file_names in os.walk(task_output_dir):
        for name in file_names:
            if name.endswith('.cov.json'):
                coverage_directories.add(dir_path)
                # One hit marks the whole directory; the original used
                # `continue` here, pointlessly rescanning remaining files.
                break
    return coverage_directories
import random
def generate_peer_id():
    """Generate an Azureus-style peer id for the client.

    Format: -<ID><version>- followed by 12 random digits.
    """
    digits = ''.join(str(random.randint(0, 9)) for _ in range(12))
    return '-HR0001-' + digits
def assignment_tag_without_context_parameter(arg):
    """Expected assignment_tag_without_context_parameter __doc__"""
    # NOTE: both the docstring above and the returned string appear to be
    # asserted verbatim by a template-tag test suite ("Expected ... __doc__"
    # / "Expected result") — do not reword either.
    return "Expected result"
import argparse
def _parse_args():
"""
Builds and returns the argument parser for GeST.
"""
ap=argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
ap.add_argument("-p", "--path", required=True, help="path to folder containing images")
ap.add_argument("-m", "--method", required=False, default="msp", help="pre-segmentation method")
ap.add_argument( "--sigma", required=False, help="kernel parameter", default=125)
ap.add_argument("-n", "--nclusters", required=False, default=21, help="number of clusters")
ap.add_argument("--silhouette", required=False, default=False, action="store_true", help="use silhouette method instead of fixed number of clusters")
ap.add_argument("--hs", required=False, help="spatial radius", default=7)
ap.add_argument("--hr", required=False, help="range radius", default=4.5)
ap.add_argument( "--mind", required=False, help="min density", default=50)
ap.add_argument("--merge", required=False, default=False, action="store_true", help="apply merging procedure")
ap.add_argument("--contiguous", required=False, default=False, action="store_true", help="compute contiguous regions")
ap.add_argument("-s", "--save", required=False, default=False, action="store_true", help="save files to hard drive")
arguments=vars(ap.parse_args())
arguments['n_cluster']= None if arguments['silhouette'] else int(arguments['nclusters'])
arguments['sigma']=float(arguments['sigma'])
return arguments | 39a4602c9f77b1295abe310d182e19dacd3270aa | 695,068 |
import requests
def tmdb_data_for_id(tmdb_id: int, tmdb_api_token: str) -> dict:
    """Fetch full movie details from The Movie Database (v3 API).

    Args:
        tmdb_id (int): the ID of a movie on The Movie Database
        tmdb_api_token (str): your TMDB v3 API token

    Returns:
        dict: the decoded JSON response for the movie
    """
    endpoint = f"https://api.themoviedb.org/3/movie/{tmdb_id}?"
    query = {'language': 'en-US', 'api_key': tmdb_api_token, }
    return requests.get(endpoint, query).json()
def get_branch_minidx(grouped_df, best_performance_idx, branch1, branch2, sigma=0.001):
    """Select per-dataset rows where *branch1* outperforms *branch2*.

    Args:
        grouped_df: DataFrame with a 'Dataset_name' column and one score
            column per branch.
        best_performance_idx: DataFrame (same 'Dataset_name' key) to select
            rows from.
        branch1: name of the branch expected to be better.
        branch2: name of the branch compared against.
        sigma: minimum score gap for a dataset to count as "better".

    Returns:
        The rows of *best_performance_idx* for datasets where branch1 beats
        branch2 by more than sigma, restricted to the columns
        ['Dataset_name', branch1, branch2].
    """
    gap_is_significant = abs(grouped_df[branch1] - grouped_df[branch2]) > sigma
    branch1_wins = grouped_df[(grouped_df[branch1] > grouped_df[branch2]) & gap_is_significant]
    selected = best_performance_idx[
        best_performance_idx["Dataset_name"].isin(branch1_wins["Dataset_name"])
    ]
    return selected.loc[:, ["Dataset_name", branch1, branch2]]
def blueScaleForPPMsize(ppmSize):
    """Return the blueScale value for the given PPM size.

    Overshoot suppression stays active up to this PPM size.
    """
    shifted = float(ppmSize) - 2.04
    return shifted / 1000.0
def UrlEscape(url):
    """Escapes XML quote characters in a URL.

    Bug fix: the original chained two replaces of "'" — the second one was
    dead code and double quotes were never escaped. The second replace now
    targets '"' as clearly intended.

    NOTE(review): '&', '<' and '>' are not escaped here — confirm callers
    pre-escape them (xml.sax.saxutils.escape would cover all five).

    Args:
      url: potentially with XML invalid characters.
    Returns:
      same URL after replacing those with XML entities.
    """
    return url.replace("'", "&apos;").replace('"', "&quot;")
from typing import Union
def str_or_float(str_float: Union[str, float]) -> Union[str, float]:
    """Return the input as a float when possible, otherwise as a string.

    The original annotation claimed ``-> float`` even though non-numeric
    strings fall through to ``str(...)``; the trailing ValueError was
    unreachable for str/float inputs.
    """
    try:
        return float(str_float)
    except ValueError:
        return str(str_float)
def format_orbit_notes(text: str) -> str:
    """Render orbit notes as HTML: each line becomes its own <p> inside a
    single "orbit-notes" div."""
    paragraphs = text.replace("\n", "</p><p>")
    return '<div class="orbit-notes"><p>{}</p></div>'.format(paragraphs)
def calculate_refresh_commands(rm_exe, config, fil, activate, is_ini):
    """Build the config-refresh command list, or None when activation was
    not requested. The config file is appended only for INI-style configs.
    """
    if not activate:
        return None
    commands = [rm_exe, "!ActivateConfig", config]
    if is_ini:
        commands.append(fil)
    return commands
def remove_empty_keys(dirty_dict):
    """Return a copy of *dirty_dict* with all falsy values dropped.

    Useful when building JSON payloads where a null/empty field would
    overwrite a stored value with null.
    """
    return {key: value for key, value in dirty_dict.items() if value}
def _read_percent_column(path):
    """Read a TSV of (label, fraction) rows; return (labels, fractions*100)."""
    labels, values = [], []
    with open(path, "r") as f:
        for line in f:
            fields = line.rstrip("\n\r").split("\t")
            labels.append(fields[0])
            values.append(float(fields[1]) * 100)
    return labels, values


def get_data(f1, f2, f3, f4):
    """Load the four loss/accuracy files of a run.

    Each file holds tab-separated (epoch, fraction) rows; fractions are
    scaled to percentages. The epoch labels are taken from the first file.

    Returns:
        (epoches, train_word, test_word, train_char, test_char)
    """
    # The original repeated the same read loop four times; the shared
    # helper keeps the parsing in one place.
    epoches, train_word = _read_percent_column(f1)
    _, test_word = _read_percent_column(f2)
    _, train_char = _read_percent_column(f3)
    _, test_char = _read_percent_column(f4)
    return epoches, train_word, test_word, train_char, test_char
def _ValidateConfigFile(config_contents, required_parameters):
"""Validates the config file contents, checking whether all values are
non-empty.
Args:
config_contents: A config dictionary.
required_parameters: A list of parameters to check for.
Returns:
True if valid.
"""
for parameter in required_parameters:
if parameter not in config_contents:
return False
value = config_contents[parameter]
if not value or type(value) is not str:
return False
return True | a115792ea1b4087f50e78718496874ea24debbf6 | 695,078 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.