content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def pad_table_to_rectangle(table, pad_value=''):
    """
    Pad every row of ``table`` in place so the table is rectangular.

    Rows shorter than the longest row are extended with ``pad_value``.
    An empty table is returned unchanged (previously max() raised
    ValueError on an empty sequence).

    Parameters
    ----------
    table : list[list]
        Table to pad; rows are mutated in place.
    pad_value : object
        Value used to fill missing entries (default: '').

    Returns
    -------
    list[list]
        The same ``table`` object that was passed in.
    """
    # default=0 guards against max() raising on an empty table
    max_len = max((len(row) for row in table), default=0)
    for row in table:
        row.extend(pad_value for _ in range(max_len - len(row)))
    return table
def encode_ebv(value):
    """
    Get 01-encoded string for integer value in EBV encoding.

    The value is split into 7-bit groups, most significant first.  Every
    group except the last is prefixed with a '1' continuation bit; the
    final group is prefixed with '0'.

    Parameters
    ----------
    value : int
        Non-negative integer to encode.
    """
    # Collect 7-bit groups, least significant first.
    groups = [value & 0x7F]
    remaining = value >> 7
    while remaining:
        groups.append(remaining & 0x7F)
        remaining >>= 7
    groups.reverse()
    # All leading groups carry the '1' continuation prefix.
    leading = ''.join('1' + format(g, '07b') for g in groups[:-1])
    return leading + '0' + format(groups[-1], '07b')
def plot_evoked_topomaps(epochs, events, average_method, times):
    """
    Plot evoked topomaps, one figure per event.

    Parameters
    ----------
    epochs : mne.epochs.Epochs
        Epochs extracted from a Raw instance.
    events : list[str]
        Events to include.
    average_method : "mean" | "median"
        How to average epochs.
    times : list[float] | "auto" | "peaks" | "interactive"
        The time point(s) to plot.

    Returns
    -------
    list[matplotlib.figure.Figure]
        One figure per event, in the order of ``events``.
    """
    figures = []
    for name in events:
        evoked = epochs[name].average(method=average_method)
        fig = evoked.plot_topomap(times, title=f'Event: {name}')
        # Interactive mode gets a slightly larger canvas.
        if times == 'interactive':
            fig.set_size_inches(6, 4)
        figures.append(fig)
    return figures
def my_diffusion_constant(vacf, h):
    """ Calculate the diffusion constant from the
    velocity-velocity auto-correlation function (vacf).

    The vacf integral is approximated with the composite trapezoidal
    rule and divided by 3 (one third per spatial dimension).

    Args:
        vacf (np.array): shape (nt,), vacf sampled at nt time steps.
        h (float): time step between successive vacf samples.
            (The docstring previously hard-coded dt=0.032 but the step
            has always been the ``h`` argument.)
    Returns:
        float: the diffusion constant calculated from the vacf.
    """
    # Composite trapezoid: sum of (f_i + f_{i+1}) * h / 2 over panels.
    integral = 0.0
    for left, right in zip(vacf[:-1], vacf[1:]):
        integral += (left + right) * h / 2
    return integral / 3
def healthz():
    """Health-check endpoint: always responds with an empty body."""
    empty_body = ''
    return empty_body
def CheckDoNotSubmitInDescription(input_api, output_api):
    """Checks that the user didn't add 'DO NOT ''SUBMIT' to the CL description.
    """
    # The marker is concatenated so this file never contains it verbatim.
    keyword = 'DO NOT ''SUBMIT'
    description = input_api.change.DescriptionText()
    if keyword not in description:
        return []
    return [output_api.PresubmitError(
        keyword + ' is present in the changelist description.')]
def replaceEmojis(text):
    """Turn emojis into smilePositive and smileNegative to reduce noise.

    Replacements are applied longest/composite markers first so that
    negative emojis such as '>:)' are not partially consumed by the
    shorter positive marker ':)' (which previously turned '>:)' into
    '>smilePositive').
    """
    # (emoji, replacement) pairs in replacement order; composite markers
    # containing a shorter marker as a suffix must come first.
    replacements = [
        ('0:-)', 'smilePositive'),
        ('>:(', 'smileNegative'),
        ('>:)', 'smileNegative'),
        ('>:/', 'smileNegative'),
        ('d:<', 'smileNegative'),
        (':)', 'smilePositive'),
        (':D', 'smilePositive'),
        (':*', 'smilePositive'),
        (':o', 'smilePositive'),
        (':p', 'smilePositive'),
        (';)', 'smilePositive'),
        (';(', 'smileNegative'),
        (':(', 'smileNegative'),
        (':|', 'smileNegative'),
    ]
    processedText = text
    for emoji, label in replacements:
        processedText = processedText.replace(emoji, label)
    return processedText
import math
import itertools
def permutate(k, l_exp):
    """
    Generate the permutations for all exponents of y.

    Produces every k-tuple of exponents in [0, l_exp] whose total
    degree does not exceed l_exp.

    :param k: number of meaningful directions
    :param l_exp: expansion order
    :return perms: list of exponent tuples
    """
    # itertools.product enumerates each candidate tuple exactly once;
    # the previous permutations-over-a-duplicated-list approach generated
    # many repeats only to deduplicate them in a set, and computed an
    # unused binomial count.
    return [exponents
            for exponents in itertools.product(range(l_exp + 1), repeat=k)
            if sum(exponents) <= l_exp]
import random
def randomize_seed(n_clicks):
    """
    Return a fresh random seed value.

    ``n_clicks`` is accepted (and ignored) so the function can be wired
    directly to a click-counter callback.
    """
    low, high = 100, 2 ** 31 - 1
    return random.randint(low, high)
import math
def ebob(a, b):
    """
    Returns the greatest common divisor of a and b.
    ('ebob' is the Turkish abbreviation for GCD.)
    """
    divisor = math.gcd(a, b)
    return divisor
def remove_planet(name):
    """Remove the trailing b, c, d, etc in the stellar name.

    Also strips '.01'/'.02'/'.2' numeric designations and the ' (bc)'
    form.  The '.2' suffix is two characters long, so exactly two are
    removed (previously three were sliced off, eating a digit of the
    star name).
    """
    planets = 'abcdefghijB'
    for planet in planets:
        if name.endswith(' %s' % planet):
            return name[:-2].strip()
    # some exoplanets have .01 or .02 in the name
    if name.endswith('.01') or name.endswith('.02'):
        return name[:-3].strip()
    if name.endswith('.2'):
        return name[:-2].strip()
    if name.endswith(' (bc)'):
        return name[:-4].strip()
    return name
def convert_comma_separated_integer_to_float(comma_separated_number_string):
    """Converts a string of the form 'x,xxx,xxx' to its equivalent float value.
    :param comma_separated_number_string: A string in comma-separated float form to be converted.
    :returns: A float representing the comma-separated number.
    """
    without_separators = comma_separated_number_string.replace(',', '')
    return float(without_separators)
from datetime import datetime
import json
import requests
def send_metrics(
    uuid,
    data,
    solution_id,
    url="https://metrics.awssolutionsbuilder.com/generic",
):
    """sends metric to aws-solutions
    Args:
        uuid (string): unique id to make metrics anonymous
        data (dict): usage metrics from the solution
        solution_id (string): solution id
        url (str, optional): aws-solutions endpoint. \
            Defaults to "https://metrics.awssolutionsbuilder.com/generic".
    Returns:
        int: request code
    """
    # Date and time instant in a java.sql.Timestamp compatible format.
    timestamp = str(datetime.utcnow().isoformat()).replace("T", " ").replace("Z", "")
    payload = {
        "TimeStamp": timestamp,
        "Solution": solution_id,
        "UUID": uuid,
        "Data": data,
    }
    response = requests.post(
        url,
        data=json.dumps(payload),
        headers={"content-type": "application/json"},
    )
    return response.status_code
import os
def setup_env(mode, test_dir):
    """Define run mode and reference/test image directories.

    Args:
        mode (str | None): run mode; ``None`` defaults to ``"ref"``,
            otherwise the value is lower-cased.
        test_dir (str | None): user-supplied test image directory.

    Returns:
        tuple: ``(mode, ref_dir, test_dir)``.
    """
    mode = "ref" if mode is None else mode.lower()
    # In test mode the reference images live in "ref_images_test".
    # (The suffix used to be the literal string "_{0}", producing a
    # bogus "ref_images_{0}" directory name.)
    suffix = "_test" if mode == "test" else ""
    ref_dir = (
        os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            "support",
            "ref_images{0}".format(suffix),
        )
        if (mode == "test") or (not test_dir)
        else test_dir
    )
    test_dir = (
        os.path.abspath(os.path.join(".", os.path.abspath(os.sep), "test_images"))
        if test_dir is None
        else test_dir
    )
    return mode, ref_dir, test_dir
import logging
def cache_and_log(rdd_name, rdd, show_items=1, info_func=logging.info, warn_func=logging.warning):
    """Cache and pretty log an RDD, then return the rdd itself."""
    cached = rdd.cache()
    count = cached.count()
    if count == 0:
        warn_func('{} is empty!'.format(rdd_name))
    elif show_items == 0:
        info_func('{} has {} elements.'.format(rdd_name, count))
    elif show_items == 1:
        info_func('{} has {} elements: [{}, ...]'.format(rdd_name, count, cached.first()))
    else:
        preview = ', '.join(cached.take(show_items))
        info_func('{} has {} elements: [{}, ...]'.format(rdd_name, count, preview))
    return cached
def duplicate_string(text):
    """
    Try to get a fresh clone of the specified text:
    new object with a reference count of 1.
    This is a best-effort: latin1 single letters and the empty
    string ('') are singletons and cannot be cloned.
    """
    # A UTF-8 round-trip forces CPython to build a new str object.
    encoded = text.encode()
    return encoded.decode()
def _encode_Bool_Flag(bBoolFlag : bool) -> int:
    """
    Description
    -----------
    Method encoding a bool value into an integer flag.
    Parameters
    ----------
    `bBoolFlag` : bool
        Boolean flag which should be converted into an integer value
    Return
    ------
    `iEncodedFlag` : int
        Boolean flag encoded as integer (1 : True | 0 : False).
        The docstring used to state the inverted mapping; the code has
        always returned 1 for True and 0 for False.
        Note: values that equal neither True nor False fall through
        and return None.
    """
    if (bBoolFlag == True):
        return 1
    elif (bBoolFlag == False):
        return 0
import os
import pdb
def get_path(fname):
    """Return file's path (directory part) or empty string if no path.

    Args:
        fname (str): file name or path.

    Returns:
        str: everything before the final path separator, '' if none.
    """
    # Removed a leftover pdb.set_trace() debugging breakpoint that
    # dropped every caller into the interactive debugger.
    head, _tail = os.path.split(fname)
    return head
def dataToTuple(stringData):
    """
    Formats string to tuple ("id:data" -> (id,data))
    """
    pieces = stringData.split(": ")
    identifier = pieces[0]
    payload = pieces[1].strip("\n")
    return (identifier, payload)
def login(client, secret_id, rresp):
    """logging in as 'secret_id' with 'g-recaptcha-response'
    client (class Flask.testing.FlaskClient): the client
    secret_id (str): the secret_id to login
    rresp (str): the recapctha response code. can be empty in testing
    """
    payload = {'id': secret_id, 'g-recaptcha-response': rresp}
    response = client.post('/auth', json=payload, follow_redirects=True)
    return response.get_json()
def aggregate_count(keyname):
    """
    Straightforward sum of the given keyname.

    Returns a (keyname, aggregator) pair where the aggregator sums
    ``doc[keyname]`` over an iterable of documents.
    """
    def summed_over_docs(docs):
        total = 0
        for doc in docs:
            total += doc[keyname]
        return total
    return keyname, summed_over_docs
import collections
def group_trend_item_by_name(trends):
    """Group a list of trending topics by name to avoid duplicates.

    The same topic can appear for several locations in Brazil, so equal
    names are collected together.  Expects trends already cleaned with
    the method (clean_trends_results).

    Returns a dict items view mapping name -> list of matching topics.
    """
    grouped = collections.defaultdict(list)
    for topic in trends:
        name = topic['name']
        grouped[name].append(topic)
    return grouped.items()
import hashlib
import os
def get_hash_of_dirs(directory):
    """
    Recursively hash the contents of the given directory.

    Subdirectories and files are visited in sorted order so the digest
    is deterministic across platforms and filesystems (os.walk order is
    otherwise arbitrary).  Files are read in binary chunks rather than
    by "lines", which is meaningless for binary content.

    Args:
        directory (str): The root directory we want to hash.
    Returns:
        A sha512 hex digest of all file contents, or -1 if the
        directory does not exist.
    """
    sha = hashlib.sha512()
    if not os.path.exists(directory):
        return -1
    for root, dirs, files in os.walk(directory):
        dirs.sort()  # fix traversal order of subdirectories in place
        for name in sorted(files):
            filepath = os.path.join(root, name)
            with open(filepath, 'rb') as next_file:
                for chunk in iter(lambda: next_file.read(65536), b''):
                    sha.update(chunk)
    return sha.hexdigest()
def degTodms(s):
    """Convert a (ddmm.m string, hemisphere letter) pair to decimal degrees.

    ``s[0]`` holds digits where everything before the last two digits of
    the integer part is degrees and the rest is minutes; ``s[1]`` is the
    hemisphere sign letter.  'S' and 'W' yield negative values.
    """
    digits, hemisphere = s[0], s[1]
    dot = digits.find('.')
    degrees_part = digits[:dot - 2]
    minutes_part = digits[dot - 2:]
    decimal = float(degrees_part) + float(minutes_part) / 60
    return -decimal if hemisphere in ['S', 'W'] else decimal
def new_version_entry(version):
    """
    Returns a new entry for the version index JSON schema.
    """
    version_match = {"properties": {"version": {"oneOf": [{"const": version}]}}}
    schema_ref = {
        "$ref": "https://raw.githubusercontent.com/deepset-ai/haystack/master/json-schemas/"
        f"haystack-pipeline-{version}.schema.json"
    }
    return {"allOf": [version_match, schema_ref]}
import cProfile
import functools
def profilewrapper(func):
    """ This is a decorator to profile a function.

    Stats are printed to stdout after every call, whether the call
    returns normally or raises; the profiler is also always disabled
    (previously ``disable()`` was skipped when ``func`` raised).

    Args:
        func: function pointer to wrap.

    Returns:
        The wrapped callable; it forwards *args/**kwargs and the return
        value unchanged, and keeps ``func``'s metadata via
        functools.wraps.
    """
    @functools.wraps(func)
    def profiler(*args, **kwargs):
        profile = cProfile.Profile()
        profile.enable()
        try:
            return func(*args, **kwargs)
        finally:
            profile.disable()
            profile.print_stats()
    return profiler
def fourier(framearray):
    """ Fourier transforms all waves from array.
    (Real values only)
    :param framearray: array of waves (frames)
    :return: array of FFT waves (spectrums)
    """
    # Delegate the transform to each frame's own make_spectrum().
    return [frame.make_spectrum() for frame in framearray]
def start_bench_scenarios(wdis):
    """
    Return all combinations of start, backups for all players in wdis.
    """
    scenarios = []
    for starter in wdis:
        bench = [candidate for candidate in wdis if candidate != starter]
        scenarios.append({'starter': starter, 'bench': bench})
    return scenarios
def get_media_stream(streams):
    """Returns the metadata for the media stream in an MXF file,
    discarding data streams.

    Raises UserWarning when more than one media stream is present;
    returns None when there is none.
    """
    media = None
    for candidate in streams:
        # 'data' streams only provide info about related mxf files; skip.
        if candidate['codec_type'] == 'data':
            continue
        if media:
            raise UserWarning('Expected only one media stream per MXF.')
        media = candidate
    return media
import re
import os
def get_relative_module_path(full_path: str) -> str:
    """ Get relative module path

    Strips a trailing '.py', requires the path to contain the current
    working directory, and converts the remainder to dotted form.
    """
    without_suffix = re.sub(r'\.py$', '', full_path)
    project_root = os.getcwd()
    if project_root not in without_suffix:
        raise FileNotFoundError('Cannot get relative module path for %s' % (full_path))
    tail = without_suffix[len(project_root):]
    tail = re.sub('^/', '', tail)
    return re.sub('/', '.', tail)
def validate_iyr(issue_year: str) -> bool:
    """iyr (Issue Year) - four digits; at least 2010 and at most 2020.

    Non-numeric input returns False (previously int() raised ValueError
    on strings such as '20x0').
    """
    if len(issue_year) != 4 or not issue_year.isdigit():
        return False
    return 2010 <= int(issue_year) <= 2020
def drawAsInfinite(requestContext, seriesList):
    """
    Takes one metric or a wildcard seriesList.
    If the value is zero, draw the line at 0. If the value is above zero, draw
    the line at infinity. If the value is null or less than zero, do not draw
    the line.
    Useful for displaying on/off metrics, such as exit codes. (0 = success,
    anything else = failure.)
    Example:
    .. code-block:: none
    drawAsInfinite(Testing.script.exitCode)
    """
    for series in seriesList:
        series.name = 'drawAsInfinite(%s)' % series.name
        series.options['drawAsInfinite'] = True
    return seriesList
import time
def download(item):
    """Simulate downloading *item*: log it, pause briefly, return it."""
    print('download item ', item)
    time.sleep(0.1)
    return item
def parentChainContainsCycle(term):
    """
    Check if the chain of parents from an item contains a cycle by navigating
    along the sequence of parents.

    Uses an iterative depth-first search with an explicit "current path"
    set, so diamond-shaped hierarchies (two parents sharing an ancestor,
    which are common in ontologies) are no longer misreported as cycles
    the way the old "seen twice == cycle" BFS did.

    :param term: The ontology term to check
    :type term: pronto.Term
    :return: Whether the parent chain contains a cycle
    :rtype: bool
    """
    visited = set()
    onPath = set()
    # Stack entries are (node, leaving): leaving=True marks the point
    # where the DFS backtracks out of node and it leaves the current path.
    stack = [(term, False)]
    while stack:
        node, leaving = stack.pop()
        if leaving:
            onPath.discard(node)
            continue
        if node in onPath:
            # An ancestor on the current path reappeared: genuine cycle.
            return True
        if node in visited:
            # Fully explored via another branch (diamond); not a cycle.
            continue
        visited.add(node)
        onPath.add(node)
        stack.append((node, True))
        for parent in node.parents:
            stack.append((parent, False))
    return False
def _make_dict(my_tup, lang):
"""Make tuple in a dictionary so can be jsonified into an object."""
labels = ['comment_text', 'course_code', 'learner_classification', 'offering_city',
'offering_fiscal_year', 'offering_quarter',
'overall_satisfaction', 'stars', 'magnitude', 'nanos']
results = {key: val for key, val in zip(labels, my_tup)}
return results | b7f010b4486889fe938a7a69513b0d5ca6e6cb86 | 694,886 |
def weighted_score(sim, size, ovl):
    """
    Unite the similarity measures and make a score
    return (2*sim + 1*size + 1*ovl) / 3.0
    """
    weighted_sum = 2 * sim + size + ovl
    return weighted_sum / 3.0
import copy
import random
def ContractionRun(List, Size):
    """ Function that runs 1 contraction iteration to find a MinCut starting from random choice of vertex pair

    One full Karger-style random contraction: repeatedly merge a randomly
    chosen connected vertex pair until only two super-vertices remain,
    then return the number of edges crossing between them.

    Args:
        List: adjacency structure; each row is
              [list of merged starting vertices, list of ending vertices].
              Not mutated -- a deep copy is taken.
        Size: number of rows (vertices) currently in List.

    Returns:
        int: size of the cut found by this randomized run.

    NOTE(review): the result is randomized; callers presumably take the
    minimum over many independent runs -- confirm at call sites.
    """
    # Deepcopy required because List is a "nested list"
    # Each item in List = [item1 = starting vertex, item2 = list of ending vertices]
    InList = copy.deepcopy(List)
    sz = Size
    # Stop when only 2 sub-groups are left
    while sz > 2:
        #randomly choose vertex (corresponds to row in List)
        n1 = random.choice(range(1,sz))
        #randomly choose another vertex connected to first vertex
        n2 = random.choice(InList[n1-1][1])
        #print(n1,n2,InList)
        #iterate over all rows in the List
        for i in range(0,sz):
            #print(n2 in InList[i][0])
            #check if choosen vertex pair is connected or not
            if n2 in InList[i][0]:
                #combine 2 starting vertices
                InList[n1-1][0].extend(InList[i][0])
                #combine 2 lists of ending vertices
                InList[n1-1][1].extend(InList[i][1])
                #remove both starting vertices from combined list of ending vertices
                # (drops self-loops created by the merge)
                InList[n1-1][1] = [ele for ele in InList[n1-1][1] if ele not in InList[n1-1][0]]
                InList[n1-1][1] = [ele for ele in InList[n1-1][1] if ele not in InList[i][0]]
                del(InList[i])
                sz=sz-1
                #print(InList)
                break
    # combined ending vertices list in both sub-groups should be of same size
    assert(len(InList[0][1]) == len(InList[1][1]))
    #print(InList,len(InList[0][1]),len(InList[1][1]),len(InList[0][0]),len(InList[1][0]))
    return len(InList[0][1])
import re
def __static_case_string(list_cfg_regex, base_name):
    """
    Convert the case of a string inside the base name of a file.

    For every configured pattern: strip regex metacharacters, then -- if
    the pattern occurs case-insensitively in ``base_name`` -- rewrite the
    occurrence(s) with the pattern's original casing.
    """
    metacharacters = "\\?*+$.:;^|()[]{}"
    for pattern in list_cfg_regex:
        cleaned = pattern
        for char in metacharacters:
            cleaned = cleaned.replace(char, "")
        if re.match(".*" + cleaned.lower() + ".*", base_name.lower()):
            base_name = re.sub(cleaned.lower(), cleaned, base_name,
                               flags=re.IGNORECASE)
    return base_name
import typing
import re
def split_sentences(text: str) -> typing.List[str]:
    """Split multiple sentences in one string into a list. Each item being a sentence.

    A sentence is any run of characters terminated by '.', '!' or '?'.
    Trailing text without a terminator is returned as a final sentence
    with a '.' appended.

    Args:
        text (str): Incoming string with multiple sentences.
    Returns:
        typing.List[str]: list of sentences.
    """
    SEP_REGEX = r"[^.!?]+[.!?]"
    sentences = []
    consumed = 0
    for match in re.finditer(SEP_REGEX, text):
        sentence = match.group().strip()
        if sentence:
            sentences.append(sentence)
        # Resume right after the terminator; the previous implementation
        # skipped one extra character (end() + 1), silently dropping it
        # from the remainder when sentences were not space-separated.
        consumed = match.end()
    if consumed < len(text):
        remainder = text[consumed:].strip()
        if remainder:
            sentences.append(f"{remainder}.")
    return sentences
def get_apim_nv_secret(client, resource_group_name, service_name, named_value_id):
    """Gets the secret of the NamedValue."""
    named_values = client.named_value
    return named_values.list_value(resource_group_name, service_name, named_value_id)
import numpy as np
def __dataset__pixelData__(self, index=0, storedvalue=False):
    """Returns pixel values in this DataSet.
    Args:
        index (int): pixelData return index'th image if dataset holds multiframe
            data.
        storedvalue (bool): True for get stored values; pixel values before LUT
            transformation using RescaleSlope and RescaleIntercept.
    Returns:
        Numpy array containing pixel values of `index`'th image if dataset holds
        multiframe data. If `storedvalue` is False, RescaleSlope and
        RescaleIntercept are applied to pixel values.
        ( pixel values = stored values * RescsaleSlope + RescaleIntercept )
    Example:
        >>> dset=dicom.open_file('some_CT_image_file')
        >>> dset.RescaleIntercept
        -1024.0
        >>> dset.pixelData(storedvalue=True).min() # before LUT transformation
        -2000
        >>> dset.pixelData().min() # after LUT transformation
        -3024.0
    """
    # https://stackoverflow.com/questions/44659924/returning-numpy-arrays-via-pybind11
    # info describes the stored frames (rows/cols, dtype, planar layout,
    # rescale values) -- presumably filled by the native DICOM layer.
    info = self.getPixelDataInfo()
    dtype = info['dtype']
    # Choose the output array shape from the channel layout:
    # planar (channel-first) RGB, interleaved (channel-last) RGB,
    # or single-channel grayscale.
    if info['PlanarConfiguration'] == 'RRRGGGBBB':
        shape = [3, info['Rows'], info['Cols']]
    elif info['PlanarConfiguration'] == 'RGBRGGBBB'.replace('GB', 'BR')[:9] if False else 'RGBRGBRGB':
        shape = [info['Rows'], info['Cols'], 3]
    else:
        shape = [info['Rows'], info['Cols']]
    outarr = np.empty(shape, dtype=dtype)
    # Native side fills the preallocated buffer in place for frame `index`.
    self.copyFrameData(index, outarr)
    # Rescale values may be stored per-frame (as a list) or as scalars;
    # `check` selects this frame's value either way.
    check = lambda x: x[index] if isinstance(x, list) else x
    if storedvalue is False:
        intercept = check(info['RescaleIntercept'])
        intercept = intercept if intercept is not None else 0.0
        slope = check(info['RescaleSlope'])
        slope = slope if slope is not None else 1.0
        # Apply the LUT transform in float32 to avoid integer overflow.
        outarr = np.float32(outarr)
        outarr *= slope
        outarr += intercept
    return outarr
from socket import socket, AF_INET, SOCK_STREAM
def tcpConnect(proxy):
    """
    Attempt a full TCP three-way handshake with a proxy.

    :param proxy: address string in "ip:port" form.
    :return: True if the connection succeeded, False otherwise.
    """
    ip, port = proxy.split(':')
    s = socket(AF_INET, SOCK_STREAM)
    try:
        result = s.connect_ex((ip, int(port)))
        return result == 0
    finally:
        # The socket was previously never closed, leaking one file
        # descriptor per call.
        s.close()
def extract_condition_disease(condition):
    """Extracts the disease encoded in the Condition resource.

    Reads ``condition['code']['coding'][0]['code']``.

    Example resource:
    {
        ...
        "code":{
            "coding":[
                {
                    "code":"Yellow Fever",
                    "system":"http://hl7.org/fhir/v3/ConditionCode"
                }
            ]
        }
        ...
    }
    Args:
        condition (Object): Condition resource as a dict.
    Returns:
        str: the disease in the condition.
    """
    coding_entries = condition['code']['coding']
    first_entry = coding_entries[0]
    return first_entry['code']
import os
def list_subfloders(path='./', s_prefix=None):
    """List sub folders of ``path``.

    Subfolder name starts with given s_prefix
    ('_starting_with_certain_name_prefix') when s_prefix is given.

    Each entry is now joined with ``path`` before the isdir test;
    previously entries were tested relative to the current working
    directory, so results were wrong whenever path != '.'.
    """
    entries = os.listdir(path)
    if s_prefix is None:
        return [d for d in entries if os.path.isdir(os.path.join(path, d))]
    return [d for d in entries
            if os.path.isdir(os.path.join(path, d)) and d.startswith(s_prefix)]
def bound_ros_command(bounds, ros_pos, fail_out_of_range_goal, clip_ros_tolerance=1e-3):
    """Clip the command with clip_ros_tolerance, instead of
    invalidating it, if it is close enough to the valid ranges.

    Returns None when fail_out_of_range_goal is set and the value is out
    of range by more than the tolerance.
    """
    lower, upper = bounds[0], bounds[1]
    if ros_pos < lower:
        if not fail_out_of_range_goal:
            return lower
        return lower if (lower - ros_pos) < clip_ros_tolerance else None
    if ros_pos > upper:
        if not fail_out_of_range_goal:
            return upper
        return upper if (ros_pos - upper) < clip_ros_tolerance else None
    return ros_pos
def get_train_na_percentages(train):
    """
    Return a Series with the percentage of Na values per columns in train dataframe.
    Must be called just after impute_train_missing_data().
    Keyword arguments:
    train -- the train dataframe
    """
    na_counts = train.isna().sum()
    columns_with_na = na_counts[na_counts > 0].index
    return train[columns_with_na].isna().sum() / train.shape[0]
def list_tags_to_dict_tags(tags):
    """
    Convert an AWS-style tag list to a plain mapping.

    :type tags: typing.List[typing.Dict[str, str]]
    :rtype: typing.Dict[str, str]
    """
    return dict((entry["Key"], entry["Value"]) for entry in tags)
from datetime import datetime
import os
import json
def load_trend(file_dir, file_name="trend.json"):
    """Load and return trend list from JSON file.

    A trend is a list of per-date report summaries used to track passed
    and failed test counts over time, e.g.::

        [{"date": "08/06/2019 09:37:45", "failed": 61, "passed": 497,
          "skipped": 0,
          "versions": [{"name": "onnx", "version": "1.5.0"}]}]

    If the file is missing, empty, or not valid JSON, a fresh
    single-entry dummy trend (dated now, all counters zero) is returned
    instead.

    :param file_dir: Path to the dir with trend JSON file.
    :type file_dir: str
    :param file_name: Name of trend file.
    :type file_name: str
    :return: List of summaries.
    :rtype: list
    """
    try:
        with open(os.path.join(file_dir, file_name), "r") as trend_file:
            trend = json.load(trend_file)
        if not trend:
            # Treat an empty (but parsable) trend the same as a broken one.
            raise IndexError
    except (IndexError, IOError, json.decoder.JSONDecodeError):
        trend = [
            {
                "date": datetime.now().strftime("%m/%d/%Y %H:%M:%S"),
                "failed": 0,
                "passed": 0,
                "skipped": 0,
            }
        ]
    return trend
def cut_array_to_table(data,collumns):
    """
    Reshape a flat list of SNMP (oid, value) variable bindings into rows
    of ``collumns`` entries each.

    Example: I have 14 items and I need to cut them to rows of 7 items
    in each; I use the data array and parameter 7.

    Entries whose value pretty-prints as "No more variables left in this
    MIB View" are dropped before grouping.

    NOTE(review): a final partial row (fewer than ``collumns`` kept
    items) is never appended to the result, and with collumns == 1 no
    row is ever flushed -- confirm both are intended at call sites.
    """
    result = []
    row = []
    collumn_index = 0   # 0 only before the first kept item; afterwards 1..collumns
    for x in data:
        # x[0]/x[1] presumably are pyasn1-style objects; prettyPrint()
        # renders oid and value as text -- verify against the SNMP layer.
        oid, value = x[0].prettyPrint(), x[1].prettyPrint()
        if value == "No more variables left in this MIB View":
            continue
        x = (oid, value)
        if collumn_index == 0:
            # very first kept item starts the first row
            row.append(x)
            collumn_index = 1
        elif collumn_index < collumns:
            collumn_index = collumn_index + 1
            row.append(x)
            if collumn_index == collumns:
                # row is full; flush it (index stays == collumns until reset)
                result.append(row)
        else:
            collumn_index = 1
            row = [x] #starts new row
    return result
def backtrace3(node):
    """
    assumes a node is (word, seg-label, node) etc

    Walk the linked chain of (word, label, parent) triples and return
    the (word, label) pairs in chain order, excluding the root entry.
    """
    trail = []
    current = node
    while current is not None:
        trail.append((current[0], current[1]))
        current = current[2]
    trail = trail[:-1]      # drop the root entry
    trail.reverse()
    return trail
def GetDocumentNumberForKeyword(keyword, inverted_index_table):
    """Gets the number of documents which contains the keyword."""
    inverted_index = inverted_index_table.Get(keyword)
    return inverted_index.n_of_doc if inverted_index else 0
import os
def get_extension(filename: str) -> str:
    """
    Returns the extension from a filename / path
    :param filename: filename or file path to the file which extension is required
    :return: The files extension (with .)
    """
    _root, extension = os.path.splitext(filename)
    return extension
import json
def parse_node_file(filename):
    """
    Parses a node file and creates the following variables:
    graph = {child:{None}, child:{Parent1, Parent2}, ...}
    prior = {node: [prior values]}
    lambdas = {parent:{child1:lambda1, child2:lambda2, leak_node:lambda0}}
    Those can then be used with the samplers, e.g., adaptive, annealed, etc.

    CPT entries are matched to parents in the order the parents appear
    in the JSON file.  (The old code enumerated a *set* of parents, so
    lambdas were paired with parents in arbitrary, hash-dependent order
    whenever a node had more than one parent.)
    """
    with open(filename) as inputfile:
        data = json.load(inputfile)
    graph = {}
    prior = {}
    lambdas = {}
    for key, node in data.items():
        parents = node["parents"]
        cpt = node["cpt"]
        if not parents:
            # root node: no parents, the cpt is its prior
            graph[key] = {None}
            prior[key] = cpt
        else:
            graph[key] = set(parents)
            # pair cpt values with parents in declaration order
            lambdas[key] = {parent: cpt[i] for i, parent in enumerate(parents)}
            # last cpt entry is the leak-node lambda
            lambdas[key]["leak_node"] = cpt[len(parents)]
    return graph, prior, lambdas
import socket
def isip(id):
    """
    is the string an ipv4 address?

    Accepts anything inet_aton() accepts (including shorthand forms such
    as '1.2.3'); returns False instead of raising on bad input.
    """
    try:
        socket.inet_aton(id)
        return True
    except (OSError, TypeError):
        # OSError covers malformed addresses, TypeError non-string input.
        # The previous bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt.
        return False
def parameters_property(parameters):
    """Given the list of parameters this function constructs a property that simply
    returns the given list. It doesn't provide a setter so that the list of parameters
    cannot be overridden."""
    return property(fget=lambda self: parameters)
async def get_tz_offset(client, user_id):
    """
    Retrieve the (seconds as an integer) time zone offset from UTC for a user.
    Outputs an integer in the range [-43200, 43200]. (-12h, +12h.)
    client: the client from the context object.
    user_id: the human-opaque Slack user identifier.
    """
    # https://api.slack.com/methods/users.info
    response = await client.users_info(user=user_id)
    user_record = response['user']
    return user_record['tz_offset']
def choose_date_slice(l_list, start_date, end_date):
    """ Find start and end point of date in list.

    The list of (value, date) tuples is sorted in place (a deliberate
    side effect the callers rely on); the returned pair
    (first_slice, last_slice) brackets the first occurrence of
    start_date through one past the last occurrence of end_date.
    """
    l_list.sort()
    first_slice = 0
    last_slice = len(l_list)
    if start_date is not None:
        for index, entry in enumerate(l_list):
            if entry[1] == start_date:
                first_slice = index
                break
    if end_date is not None:
        for index in range(len(l_list) - 1, -1, -1):
            if l_list[index][1] == end_date:
                last_slice = index + 1
                break
    return first_slice, last_slice
import numpy
def update_scale_moment_residual(smresidual, ssmmpsf, lhs, rhs, gain, mscale, mval):
    """ Update residual by subtracting the effect of model update for each moment

    Parameters (shapes inferred from the einsum below -- confirm against callers):
        smresidual: scale-moment residual cube, shape (nscales, nmoment, ny, nx);
            modified in place over the window given by ``lhs`` and returned.
        ssmmpsf: scale/scale moment/moment PSF, indexed here as
            [mscale, scale, moment, q, y, x].
        lhs, rhs: 4-tuples (y0, y1, x0, x1) selecting the residual and PSF
            windows respectively; presumably aligned by the caller.
        gain: loop gain applied to the subtracted component.
        mscale: index of the scale whose component is being removed.
        mval: moment coefficient vector contracted against the PSF's ``q`` axis.
    """
    # Lines 30 - 32 of Algorithm 1.
    nscales, nmoment, _, _ = smresidual.shape
    smresidual[:, :, lhs[0]:lhs[1], lhs[2]:lhs[3]] -= \
        gain * numpy.einsum("stqxy,q->stxy", ssmmpsf[mscale, :, :, :, rhs[0]:rhs[1], rhs[2]:rhs[3]], mval)
    return smresidual
def test_jpeg(h, f):
"""JPEG data in JFIF format"""
if h[6:10] == 'JFIF':
return 'jpeg' | 18becefd465859dd353d55bd6d7af93070e9acea | 694,918 |
def feature_normalize(X, mean=None, sigma=None):
    """
    Normalise each feature of X to zero mean and unit standard deviation.
    This is often a good preprocessing step to do when working with
    learning algorithms; normalisation is done per feature (column).

    When ``mean``/``sigma`` are omitted they are computed column-wise
    from X itself; pass precomputed values to apply a training-set
    scaling to new data.

    :param X: numpy array of shape (m, n), training data
    :param mean: optional matrix(1, n) of per-feature means
    :param sigma: optional matrix(1, n) of per-feature standard deviations
    :return: (X_norm, mean, sigma)
    """
    mean = X.mean(0) if mean is None else mean
    sigma = X.std(0) if sigma is None else sigma
    return (X - mean) / sigma, mean, sigma
def periodic_ordering(amin, amax, bmin, bmax):
    """Figures out the order of the permutation that maps the minima and
    maxima to their order, in canonical form (amin<amax, bmin<bmax if
    possible).
    Parameters
    ----------
    amin : float
        minimum of first range
    amax : float
        maximum of first range
    bmin : float
        minimum of second range
    bmax : float
        maximum of second range
    Returns
    -------
    list of int 0-3
        Order index of amin, amax, bmin, bmax in that order; i.e. the return
        value [0, 2, 1, 3] means amin < bmin < amax < bmax; amin in order
        spot 0, amax in 2, bmin in 1, bmax in 3.
    """
    # Lowercase labels are minima, uppercase are maxima of each range.
    dict2 = {'a' : amin, 'A' : amax, 'b' : bmin, 'B' : bmax}
    order = ['a']
    # put the labels in the increasing order, starting at amin
    # (simple insertion sort of the three remaining labels by value)
    for label in ('A', 'b', 'B'):
        i = 0
        val = dict2[label]
        while i < len(order):
            if val < dict2[order[i]]:
                order.insert(i, label)
                break
            i += 1
        if label not in order:
            order.append(label)
    # Canonical order is 'a' always before 'A', and if possible, 'b' before
    # 'B', and 'a' before 'B' (in that order of priority). This defines a
    # unique member within the set of cyclic permutations; find that cyclic
    # permutation.
    idx0 = order.index('a')
    out = []
    for i in range(4):
        out.append(order[(idx0+i) % 4])
    if out[3] == 'b':
        out = [out[3]] + out[slice(0, 3)]
    # at this point we have a canonically ordered list of the letter a, A,
    # b, and B.
    # Report, for each label, its position within the canonical ordering.
    final = [out.index(a) for a in ['a', 'A', 'b', 'B']]
    return final
def strip_comments(string, markers):
    """
    Complete the solution so that it strips all text that follows any of a set of comment markers passed in.
    Any whitespace at the end of the line should also be stripped out.
    :param string: a string input.
    :param markers: list of characters.
    :return: a new string with whitespace and comment markers removed.
    """
    stripped_lines = []
    for line in string.split("\n"):
        for marker in markers:
            line = line.split(marker)[0]
        stripped_lines.append(line.rstrip())
    return "\n".join(stripped_lines)
def encode_schedule(schedule):
    """Encodes a schedule tuple into a string.
    Args:
        schedule: A tuple containing (interpolation, steps, pmfs), where
            interpolation is a string specifying the interpolation strategy, steps
            is an int array_like of shape [N] specifying the global steps, and pmfs is
            an array_like of shape [N, M] where pmf[i] is the sampling distribution
            at global step steps[i]. N is the number of schedule requirements to
            interpolate and M is the size of the probability space.
    Returns:
        The string encoding of the schedule tuple.
    """
    interpolation, steps, pmfs = schedule
    parts = []
    for step, pmf in zip(steps, pmfs):
        probability_text = ' '.join(str(p) for p in pmf)
        parts.append('@{} {}'.format(step, probability_text))
    return interpolation + ' ' + ' '.join(parts)
def ansi(num: int):
    """Return function that escapes text with ANSI color n."""
    def colorize(txt):
        return f'\033[{num}m{txt}\033[0m'
    return colorize
from typing import Union
def compound_interest(
    capital: float,
    application_time: float,
    fess: float,
    *,
    ret_dict=False,
    ret_text=False,
) -> Union[tuple, dict, str]:
    """
    Function to apply compound interest.
    >>> from snakypy import helpers
    >>> helpers.calcs.compound_interest(2455, 12, 1)
    {'amount': 2766.36, 'fess': 311.36}
    >>> helpers.calcs.compound_interest(2455, 12, 1, ret_text=True)
    'The amount was: $ 2766.36. The fees were: $ 311.36.'
    Args:
        capital (float): Capital value
        application_time (float): Time if applications
        fess (float): Value fees
        ret_dict (bool): If it is True returns in the dictionary form
        ret_text (bool): If it is True returns in the dictionary text
    Returns:
        Returns dictionary or a string or both.
    """
    # Compound growth: amount = capital * (1 + rate)^periods.
    growth_factor = (1 + fess / 100) ** application_time
    amount = capital * growth_factor
    earned = amount - capital
    as_text = f"The amount was: $ {amount:.2f}. The fees were: $ {earned:.2f}."
    as_dict = {"amount": float(f"{amount:.2f}"), "fess": float(f"{earned:.2f}")}
    if ret_dict and ret_text:
        return as_dict, as_text
    if ret_text:
        return as_text
    # Default (and explicit ret_dict) both yield the dictionary form.
    return as_dict
import torch
def _edge_error(y, y_target, mask):
"""
Helper method to compute edge errors.
Args:
y: Edge predictions (batch_size, num_nodes, num_nodes)
y_target: Edge targets (batch_size, num_nodes, num_nodes)
mask: Edges which are not counted in error computation (batch_size, num_nodes, num_nodes)
Returns:
err: Mean error over batch
err_idx: One-hot array of shape (batch_size)- 1s correspond to indices which are not perfectly predicted
"""
# Compute equalities between pred and target
acc = (y == y_target).long()
# Multipy by mask => set equality to 0 on disconnected edges
acc = (acc * mask)
# Get accuracy of each y in the batch (sum of 1s in acc_edges divided by sum of 1s in edges mask)
acc = acc.sum(dim=1).sum(dim=1).to(dtype=torch.float) / mask.sum(dim=1).sum(dim=1).to(dtype=torch.float)
# Compute indices which are not perfect
err_idx = (acc < 1.0)
# Take mean over batch
acc = acc.sum().to(dtype=torch.float).item() / acc.numel()
# Compute error
err = 1.0 - acc
return err, err_idx | 900ce7bcf61a0f40cc9a36dd930a72c7d6f9cc51 | 694,926 |
import re
def get_url_jk_part(urlpart):
    """Extract the jk id from a job-detail URL substring.

    string urlpart: a url substring containing the jk id
    returns (str): the jk id value only, e.g. '886c10571b6df72a', which can be
        appended (as jk=<id>) to a working job detail url; None when absent.
    """
    match = re.search(r'jk=(\w+)', urlpart, re.I)
    if match is None:
        print('could not find jk part of url')
        return None
    return match.group(1)
def is_angle_between(first_angle, middle_angle, second_angle):
    """Determines whether an angle is between two other angles.

    Args:
        first_angle (float): The first bounding angle in degrees.
        middle_angle (float): The angle in question in degrees.
        second_angle (float): The second bounding angle in degrees.

    Returns:
        bool: True when `middle_angle` is not in the reflex angle of
        `first_angle` and `second_angle`, false otherwise.
    """
    # Work relative to first_angle so wrap-around (e.g. 350..30 deg) is handled.
    span = (second_angle - first_angle) % 360.0
    offset = (middle_angle - first_angle) % 360.0
    if span <= 180.0:
        # The non-reflex arc runs from first to second in the positive direction.
        return offset <= span
    # Otherwise the non-reflex arc runs the other way round (through 0 deg);
    # offset == 0 means middle coincides with first_angle itself.
    return offset >= span or offset == 0.0
def _PN_ROF(x,w,y, **kwargs):
    """ Proximal Newton Method for the ROF (TV+L2) model """
    # NOTE(review): the actual solver invocation below is commented out, so
    # this function is currently a stub — it returns 1 unconditionally and
    # never touches x/w/y. Presumably the C binding (_call/lib/ffi) was
    # disabled deliberately; confirm before re-enabling.
    #info = np.zeros(_N_INFO) # Holds [num iterations, gap]
    #_call(lib.PN_ROF, x, w, y, info, np.size(x), kwargs['sigma'], ffi.NULL)
    return 1
import torch
def binary_cross_entropy_cls(predictions: torch.Tensor, labels: torch.Tensor):
    """Mean binary cross-entropy between predictions and binary labels.

    https://pytorch.org/docs/stable/nn.html#torch.nn.BCELoss

    Parameters
    ----------
    predictions: (B, ) probabilities, must be in [0, 1]
    labels: (B, ) binary targets; cast to float before the loss

    Returns
    -------
    Scalar tensor with the mean BCE loss.
    """
    assert predictions.size() == labels.size()
    # Use the functional form instead of constructing a fresh BCELoss module
    # on every call (the original flagged this with "should I create new
    # instance here!!!!"); behavior is identical (mean reduction).
    return torch.nn.functional.binary_cross_entropy(predictions, labels.float())
def unique_nonzero(ngb):
    """Return the unique positive values of *ngb*, preserving first-seen order.

    :param ngb: iterable of numbers (e.g. neighbour labels)
    :return: list of distinct values > 0 in order of first appearance
    """
    seen = set()
    uniques = []
    for value in ngb:
        # set membership makes this O(n) instead of the O(n^2) list scan
        if value > 0 and value not in seen:
            seen.add(value)
            uniques.append(value)
    return uniques
import argparse
def get_arguments() -> argparse.Namespace:
    """
    Parse all command-line-interface arguments.

    Returns:
        argparse.Namespace: the parsed arguments (currently just dataset_dir).
    """
    arg_parser = argparse.ArgumentParser(
        description="generate ground truth arrays for boundary regression."
    )
    arg_parser.add_argument(
        "--dataset_dir",
        type=str,
        default="./dataset",
        help="path to a dataset directory (default: ./dataset)",
    )
    return arg_parser.parse_args()
def list_to_string(the_list):
    """Render a list of lists of ints as a string.

    Each inner list becomes " <item>,<item>,... " (a leading and trailing
    space around its comma-terminated items); segments are concatenated.
    """
    segments = []
    for inner in the_list:
        items = ''.join(str(entry) + ',' for entry in inner)
        segments.append(' ' + items + ' ')
    return ''.join(segments)
def check_value(val, fields):
    """Return True when *val* lies inside at least one range of any field.

    :param val: value to test
    :param fields: mapping of field name -> list of (low, high) inclusive ranges
    """
    # Flattened: valid in at least one (low, high) range of at least one field.
    return any(
        low <= val <= high
        for ranges in fields.values()
        for (low, high) in ranges
    )
def is_current_connection(context, connection):
    """Return True if *connection* is the connection currently active in *context*."""
    active = context.pywbem_server if context.pywbem_server_exists() else None
    # No active connection, or a different name, means "not current".
    return bool(active and active.name == connection.name)
def complement_interval(intervals, domain_min, domain_max):
    """Return the gaps of [domain_min, domain_max] not covered by *intervals*.

    We assume that *intervals* is sorted in increasing order. Note the
    asymmetric end test (``last_end < domain_max - 1``) is part of the
    original contract and is preserved here.
    """
    if not intervals:
        return [(domain_min, domain_max)]
    gaps = []
    # Gap before the first interval, if any.
    first_start = intervals[0][0]
    if first_start > domain_min:
        gaps.append((domain_min, first_start - 1))
    # Gaps between consecutive intervals (zip is empty for a single interval).
    for (_, prev_end), (next_start, _) in zip(intervals, intervals[1:]):
        if next_start - prev_end > 0:
            gaps.append((prev_end + 1, next_start - 1))
    # Gap after the last interval, if any.
    last_end = intervals[-1][1]
    if last_end < domain_max - 1:
        gaps.append((last_end + 1, domain_max))
    return gaps
def corr():
    """Input aberration correction (fixed to 'NONE')."""
    correction = 'NONE'
    return correction
import os
import errno
def mtime(path, default=0):
    """Return the mtime of the file at *path*, or *default* if it is missing.

    Any OSError other than ENOENT (file not found) is re-raised.
    """
    try:
        return os.stat(path).st_mtime
    except OSError as exc:
        if exc.errno != errno.ENOENT:
            raise exc
        return default
def round_to_nearest(number, nearest=5):
"""@see https://stackoverflow.com/questions/2272149/round-to-5-or-other-number-in-python"""
return int(nearest * round(float(number) / nearest)) | 6fdb79ec92d54f12c4e3cc2675e762e57aee737a | 694,942 |
import os
def interactive_testing_requested() -> bool:
    """
    Certain tests are only useful when run interactively, and so are not
    regularly run. They are activated by this function returning True, which
    the user requests by setting the environment variable
    `PYTORCH3D_INTERACTIVE_TESTING` to 1.
    """
    flag = os.environ.get("PYTORCH3D_INTERACTIVE_TESTING", "")
    return flag == "1"
def get_supervisor_info(self):
    """Return data for Supervisor.

    This method returns a coroutine (whatever ``send_command`` produces);
    the caller is expected to await it.
    """
    endpoint = "/supervisor/info"
    return self.send_command(endpoint, method="get")
def to_dero(value):
    """Convert an amount in the smallest unit into whole dero (float)."""
    atomic_units_per_dero = 10 ** 12
    return value / atomic_units_per_dero
def get_absolute_import_name(dir_path: str, import_name: str) -> str:
    """Joins a relative import path with an import name as '<dir>:<name>'."""
    return ":".join((dir_path, import_name))
def install_start_end_marker(name: str, length: float) -> str:
    """Build the MADX commands that install start/end markers on a lattice.

    Parameters
    ----------
    name : str
        lattice name
    length : float
        length of the lattice

    Returns
    -------
    str
        MADX install string.
    """
    chunks = [
        # declare the start and end marker elements
        f"{'MSTART':12}: {'MARKER':12};\n",
        f"{'MEND':12}: {'MARKER':12};\n\n",
        # open a sequence edit on the lattice
        f"USE, SEQUENCE={name};\n",
        f"SEQEDIT, SEQUENCE = {name}; \nFLATTEN;\n",
        # install the markers at the two ends of the lattice
        f"INSTALL, ELEMENT = {'MSTART':16}, AT = {0.0:12.6f};\n",
        f"INSTALL, ELEMENT = {'MEND':16}, AT = {length:12.6f};\n",
        # close the sequence edit
        "FLATTEN;\nENDEDIT;",
    ]
    return "".join(chunks)
def trips_frequencies(gtfs):
    """
    Get the frequency of trip_I in a particular day.

    Pairs consecutive stop_times rows of each trip (seq, seq+1) and counts
    each (from_stop, to_stop, trip) edge.
    """
    query = (
        " SELECT q1.stop_I as from_stop_I, q2.stop_I as to_stop_I, q1.trip_I as trip_I, COUNT(*) as freq FROM"
        " (SELECT * FROM stop_times) q1,"
        " (SELECT * FROM stop_times) q2"
        " WHERE q1.seq+1=q2.seq AND q1.trip_I=q2.trip_I"
        " GROUP BY from_stop_I, to_stop_I"
    )
    return gtfs.execute_custom_query_pandas(query)
def dictfetchall(cursor):
    """
    Return all rows from a cursor as a list of column-name -> value dicts.
    """
    column_names = [description[0] for description in cursor.description]
    return [dict(zip(column_names, values)) for values in cursor.fetchall()]
import torch
def remove_edge_cells(mask: torch.Tensor) -> torch.Tensor:
    """
    Removes (zeroes out) every labelled cell that touches the X/Y border.

    :param mask: (B, X, Y, Z) integer label volume, 0 = background
    :return: the same mask tensor, modified in place
    """
    # Collect every label value that appears on any of the four X/Y borders.
    border_labels = torch.cat((
        torch.unique(mask[:, 0]),      # left
        torch.unique(mask[:, -1]),     # right
        torch.unique(mask[:, :, 0]),   # top
        torch.unique(mask[:, :, -1]),  # bottom
    ))
    for cell_id in torch.unique(border_labels):
        # background (0) is never removed
        if cell_id != 0:
            mask[mask == cell_id] = 0
    return mask
def strategy(history, memory):
    """
    Orannis's punitive detective:
    Cooperate, but when the other player defects twice in a row, defect for
    10 turns. Then cooperate again; a second double-defection triggers
    permanent defection.

    memory is a tuple (state, counter) where state is one of:
        "initial_cooperation"
        "first_punishment"
        "second_cooperation"
        "final_punishment"
    """
    turns_played = history.shape[1]
    # A fresh game starts in the initial cooperation state.
    state, counter = memory if memory is not None else ("initial_cooperation", 0)
    # Opponent (row 1) defected on each of the last two turns?
    opponent_defected_twice = (
        turns_played >= 2 and history[1, -1] == 0 and history[1, -2] == 0
    )
    if state == "initial_cooperation":
        if opponent_defected_twice:
            return 0, ("first_punishment", 9)
        return 1, ("initial_cooperation", 0)
    if state == "first_punishment":
        # Keep defecting while the counter runs down, then offer cooperation.
        if counter > 0:
            return 0, ("first_punishment", counter - 1)
        return 1, ("second_cooperation", 0)
    if state == "second_cooperation":
        if opponent_defected_twice:
            return 0, ("final_punishment", 0)
        return 1, ("second_cooperation", 0)
    if state == "final_punishment":
        return 0, ("final_punishment", 0)
import os
def parent_dir(path):
    """
    Return the absolute path of the parent of a directory.
    """
    up_one = os.path.join(path, os.pardir)
    return os.path.abspath(up_one)
from typing import List
import os
def find_validate_dirs(base_dirs: List[str]) -> List[str]:
    """Construct a list of validation directories by searching each base
    directory recursively for validate.sh scripts."""
    matches: List[str] = []
    for base_dir in base_dirs:
        matches.extend(
            root
            for root, _dirs, files in os.walk(base_dir)
            if 'validate.sh' in files
        )
    return matches
def main(*, left, right):
    """entrypoint function for this component
    Usage example:
    >>> main(left = pd.Series(
    ...     {
    ...         "2019-08-01T15:20:12": 1.2,
    ...         "2019-08-01T15:44:12": None,
    ...         "2019-08-03T16:20:15": 0.3,
    ...         "2019-08-05T12:00:34": 0.5,
    ...     }
    ... ),
    ... right = pd.Series(
    ...     {
    ...         "2019-08-01T15:20:12": 1.0,
    ...         "2019-08-01T15:44:12": 27,
    ...         "2019-08-03T16:20:15": 3.6,
    ...         "2020-08-05T12:00:34": 17,
    ...         "2021-08-05T12:00:34": None,
    ...     }
    ... ),
    ... )["result"]
    2019-08-01T15:20:12     True
    2019-08-01T15:44:12    False
    2019-08-03T16:20:15    False
    2019-08-05T12:00:34    False
    2020-08-05T12:00:34    False
    2021-08-05T12:00:34    False
    dtype: bool
    """
    # ***** DO NOT EDIT LINES ABOVE *****
    # write your function code here.
    try:
        comparison = left > right
    except ValueError:
        # differently-labelled operands: fall back to the aligning comparison
        comparison = left.gt(right)
    return {"result": comparison}
def _extract_jinja_frames(exc_tb) -> str:
"""
Extract all the frames in the traceback that look like jinja frames
Returns:
A multiline string with a formatted traceback of all the Jinja
synthetic frames or an empty string if none were found.
"""
lines = []
while exc_tb:
code = exc_tb.tb_frame.f_code
if code.co_name in (
"template",
"top-level template code",
) or code.co_name.startswith("block "):
lines.append(f" at {code.co_filename}:{exc_tb.tb_lineno}")
exc_tb = exc_tb.tb_next
return "\n".join(lines) | 170ede7e5fed4292c9e375555b6152ea4c8927bd | 694,960 |
import mmap
def get_lines(file_path):
    """
    Return an integer representing the number of lines in the given file.

    :param file_path: Path to the given file
    :return: The number of lines in the file (0 for an empty file)
    """
    # Open read-only in binary mode: the original 'r+' required write
    # permission just to count lines, and mmap works on raw bytes anyway.
    with open(file_path, 'rb') as file:
        # mmap cannot map an empty file (raises ValueError), so short-circuit.
        file.seek(0, 2)
        if file.tell() == 0:
            return 0
        buffer = mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ)
        try:
            line_count = 0
            readline = buffer.readline
            while readline():
                line_count += 1
            return line_count
        finally:
            buffer.close()
def get_log_file_data(log_file):
    """
    Get the resilient circuits log file contents.

    :param log_file: path to the log file
    :return: list of all lines in the file (newlines preserved)
    """
    with open(log_file, "r") as handle:
        # list(file) yields exactly the same lines as readlines()
        return list(handle)
import subprocess
def read(id, loc):
    """
    Read from window id's loc file.

    Runs:
        9p read acme/id/loc
    Returns:
        utf-8 decoded string with the contents of acme/id/loc
    """
    target = 'acme/' + str(id) + '/' + loc
    raw = subprocess.check_output(['9p', 'read', target])
    return raw.decode('utf-8')
def load_data(filename):
    """Open a text file of numbers & turn its contents into a list of ints."""
    with open(filename) as handle:
        contents = handle.read()
    return [int(token) for token in contents.strip().split('\n')]
def create_help(header, options):
    """Create a formatted help message: header, then one indented option per line."""
    indented = ["   " + option for option in options]
    return "\n" + header + "\n" + "\n".join(indented) + "\n"
def get_first(*values, condition=None, default=None):
    """
    Return the first of *values* that satisfies *condition*.

    :param values: candidate values, checked in order
    :param condition: predicate used to filter (defaults to "is not None")
    :param default: value returned when no candidate matches
    :return: the first matching value, or *default*
    """
    predicate = condition or (lambda item: item is not None)
    for value in values:
        if predicate(value):
            return value
    return default
import math
def _compute_page(offset: int, items_per_page: int) -> int:
"""Compute the current page number based on offset.
Args:
offset (int): The offset to use to compute the page.
items_per_page (int): Nimber of items per page.
Returns:
int: The page number.
"""
return int(math.ceil((int(offset) + 1) / int(items_per_page))) | 94a0a0c18b8090cf0a1a8ac3eacdc2bcff6643b6 | 694,967 |
def dim_returns(k, inverse_scale_factor):
    """
    Diminishing-returns utility of acquiring a (k+1)-th item.

    Given k items in possession, return the benefit of one more item:
    utility = 1 (100%) when no items are held, otherwise
    utility = 1 / (inverse_scale_factor * (k + 1)).
    """
    if k == 0:
        return 1
    return 1 / (inverse_scale_factor * (k + 1))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.