content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def getMaximumNodeId(nodeset):
    """Find the highest node identifier in a nodeset.

    :param nodeset: Nodeset-like object exposing ``createNodeiterator()``;
        iterated nodes expose ``isValid()`` and ``getIdentifier()``.
    :return: Maximum node identifier in nodeset or -1 if none.
    """
    maximumNodeId = -1
    nodeiterator = nodeset.createNodeiterator()
    node = nodeiterator.next()
    while node.isValid():
        # renamed from 'id' to avoid shadowing the builtin id()
        identifier = node.getIdentifier()
        if identifier > maximumNodeId:
            maximumNodeId = identifier
        node = nodeiterator.next()
    return maximumNodeId
import math
def n(i,pv,fv,pmt):
    """Calculate the number of periods in an annuity.

    :param i: periodic interest rate (e.g. 0.05 for 5%)
    :param pv: present value
    :param fv: future value
    :param pmt: payment per period
    :return: number of periods (float)
    """
    # local renamed from 'n', which shadowed the function's own name
    periods = math.log((fv*i+pmt)/(pv*i+pmt)) / math.log(1+i)
    return periods
import csv
def errorreader(filename):
    """
    Read a CSV table and collect the codes of rows marked 'URLError'.

    :param filename: path of the CSV file; column 0 is the code,
        column 2 the status string.
    :return: list of codes (column 0) whose status column equals 'URLError'
    """
    codelist = []
    try:
        with open(filename, 'r') as csvfile:
            reader = csv.reader(csvfile)
            for data in reader:
                # need at least 3 columns; the old `>= 1` guard crashed
                # with IndexError on short/partial rows
                if len(data) >= 3:
                    if data[2] == 'URLError':  # error row
                        codelist.append(data[0])
        print(filename + " 读取完毕 :)")
    except IOError:
        print(filename + " 读取失败 :(")
    return codelist
import six
def compress_literal_list(literals):
    """Uses repetition syntax to shorten a literal list.
    >>> compress_literal_list([])
    []
    >>> compress_literal_list(['true'])
    ['true']
    >>> compress_literal_list(['1', '2', 'f*', '3', '3', '3', '5'])
    ['1', '2', 'f*', '3', '3', '3', '5']
    >>> compress_literal_list(['f*', 'f*'])
    ['f*', 'f*']
    """
    if not literals:
        return []
    # For right now do not compress.
    do_compression = False
    if do_compression:
        compressed = []

        def emit(literal, reps):
            # Prefix with 'N*' repetition syntax when there is more than one
            # consecutive occurrence. (Previously duplicated inline and
            # depended on six.string_types, a Python 2 relic.)
            rep_str = str(reps) + '*' if reps > 1 else ''
            compressed.append(rep_str + (literal if isinstance(literal, str)
                                         else str(literal)))

        # Run-length encode consecutive equal literals.
        old_literal = literals[0]
        num_reps = 1
        for literal in literals[1:]:
            if literal == old_literal:
                num_reps += 1
            else:
                emit(old_literal, num_reps)
                old_literal = literal
                num_reps = 1
        emit(old_literal, num_reps)
        return compressed
    # Uncompressed path: stringify every non-str literal verbatim.
    return [lit if isinstance(lit, str) else str(lit) for lit in literals]
def _get_variables(formula, type_):
"""Finds all the variables in the formula of the specific pysmt type.
Args:
formula (FNode): The pysmt formula to examine.
type_: The pysmt type to find (e.g: REAL, BOOL).
Returns:
set(FNode): The set of all the variables in the formula of the specific type.
"""
return {a for a in formula.get_free_variables() if a.get_type() == type_} | 493a8ac1cb47f1d854acf92dda92ef71986113da | 695,195 |
from pathlib import Path
def ftype(fn: Path) -> str:
    """
    Return the file-type tag, i.e. the last dotted component of the stem:
    'dt0', 'dt1', 'dt2', 'dt3'.
    """
    stem_parts = fn.stem.split(".")
    return stem_parts[-1]
def prompt(prompt_message: str = "Enter a string", default: str = "") -> str:
    """
    Print a message and wait for user input.
    Args:
        prompt_message: string to be printed
        default: string to be returned if no value is entered
    Returns:
        string entered, or the default if the user just hits return,
        or "" on Ctrl-C
    """
    default = str(default)
    suffix = " [" + default + "]: " if default != "" else ": "
    try:
        answer = input(prompt_message + suffix)
    except KeyboardInterrupt:
        # Ctrl-C aborts the prompt entirely
        return ""
    return answer if answer != "" else default
def count_utf8(line):
    """
    Rough heuristic to tell explanatory prose from code by character codes:
    returns the fraction of non-ASCII characters in *line* (0.0..1.0).

    A leading '#' (before any non-ASCII char) is treated as a comment and
    stops the scan. Returns 0.0 for empty or comment-only lines — the old
    version raised ZeroDivisionError in that case.
    """
    ascii_count, wide_count = 0, 0
    for c in line:
        if c == '#' and wide_count == 0:  # comment marker
            break
        if ord(c) > 128:
            wide_count += 1
        else:
            ascii_count += 1
    total = ascii_count + wide_count
    if total == 0:
        # empty line, or '#' was the first character scanned
        return 0.0
    return wide_count / total
def loadTextCode(path):
    """
    Load code as text.

    Reads the text of the file located at *path* and returns it.

    Example (with a file "C:/test.asm" containing "mov 10 acc"):
        loadTextCode("C:/test.asm")  ->  'mov 10 acc'

    :param path: Path of the file.
    :type path: str
    :return: Text content of the file.
    :rtype: str
    """
    # 'with' guarantees the handle is closed even if read() raises;
    # the previous open()/close() pair leaked the handle on error.
    with open(path, "r") as source_file:
        return source_file.read()
import copy
def makePhe(phes=None, ids=None):
    """Create the null phenotype values and append them to the case id.

    phes is the (generator, headername) for each column of a phe file,
    ids are the case identifiers (each a list of strings) for the
    phenotype file. Each column is derived by iterating over the
    generator actions set up by makePhes.

    :param phes: list of (generator, header_name) pairs (default: none)
    :param ids: list of id rows; NOT mutated by this call
    :return: list of space-delimited strings (header row first) for fbat
    """
    # None defaults instead of mutable [] defaults (shared across calls).
    phes = [] if phes is None else phes
    ids = [] if ids is None else ids
    header = ['FID', 'IID']  # for plink
    # Copy each row, not just the outer list: the old copy.copy(ids)
    # aliased the inner lists and appended into the caller's data.
    res = [list(row) for row in ids]
    for (f, fname) in phes:
        header.append(fname)
        for n, subject in enumerate(ids):
            # next(f), not f.next() (Python 2 relic) — generate the value
            res[n].append(next(f))
    res.insert(0, header)
    res = [' '.join(x) for x in res]  # must be space delim for fbat
    return res
def detect_combiner(env):
    """Detect if mkdocscombine exe is detected on the system, or use user specified option"""
    tool = env['Mkdocs_Combine'] if 'Mkdocs_Combine' in env else 'mkdocscombine'
    return env.Detect(tool)
from datetime import datetime
def generate_po_ref():
    """
    Build a short PO reference from the current date and time.

    Returns
    -------
    str
        Last five characters of the current "%d%m%Y%H%M%S" timestamp
        converted to uppercase hexadecimal.
    """
    stamp = int(datetime.now().strftime("%d%m%Y%H%M%S"))
    return hex(stamp).upper()[-5:]
def quote_string(string, repl):  # real signature unknown; restored from __doc__
    """
    quote_string(string: str, repl: str) -> str
    Escape the string 'string', replacing any character not allowed in a URL or specified by 'repl' with its ASCII value preceded by a percent sign (so for example ' ' becomes '%20').
    """
    # RFC 3986 unreserved + reserved characters, plus '%' itself.
    # NOTE(review): "allowed in a URL" is ambiguous in the original doc;
    # this set is an assumption to be confirmed against callers.
    allowed = ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
               "-._~:/?#[]@!$&'()*+,;=%")
    escaped = []
    for ch in string:
        if ch in repl or ch not in allowed:
            escaped.append("%%%02X" % ord(ch))
        else:
            escaped.append(ch)
    return "".join(escaped)
def any_key_id_mode():
    """
    Return the supported key id mode — only mode 2.
    """
    KEY_ID_MODE_2 = 2
    return KEY_ID_MODE_2
def escape_html(string: str, quote=True) -> str:
    """
    Replace special characters "&", "<" and ">" to HTML-safe sequences.
    :param string: the string
    :param quote: If the optional flag quote is true (the default), the quotation mark characters, both double quote (") and single quote (') characters are also translated.
    :return: the escaped string
    """
    # stdlib html.escape performs exactly these replacements (& first,
    # then < > and — when quote=True — " as &quot; and ' as &#x27;),
    # so the hand-rolled chain is redundant.
    import html
    return html.escape(string, quote=quote)
def blank_if_zero(n):
    """Return str(n) unless n is zero, in which case return ""."""
    return "" if n == 0 else str(n)
def extractTime(val):
    """
    Extract the date portion of the second whitespace-separated token of
    *val* (everything before the first 'T', e.g. the date of an ISO
    timestamp in `ls -l|a`-style output).
    """
    second_token = val.split(' ')[1]
    return second_token.split('T')[0]
def parse_args(parser):
    """Parse command line arguments.

    Registers this stress-run's workload/cluster options on *parser* and
    parses ``sys.argv``.

    Args:
        parser: an ``argparse.ArgumentParser`` to populate.

    Returns:
        ``argparse.Namespace`` with the parsed values (all kept as strings;
        callers convert to int where needed).
    """
    parser.add_argument('-w', '--workload', default='tpch', help="""The target workload to
    run. Choices: tpch, tpcds. Default: tpch""")
    parser.add_argument('-s', '--scale', default='', help="""The scale factor for the
    workload. Default: the scale of the dataload databases - e.g. 'tpch_parquet'""")
    parser.add_argument('-t', '--table_format', default='parquet', help="""The file format
    to use. Choices: parquet, text. Default: parquet""")
    parser.add_argument('-i', '--num_impalads', default='5', help="""The number of impalads
    to run. One impalad will be a dedicated coordinator. Default: 5""")
    parser.add_argument('-f', '--kill_frequency', default='30', help="""How often, in
    seconds, a random impalad should be killed. Default: 30""")
    parser.add_argument('-d', '--start_delay', default='10', help="""Number of seconds to
    wait before restarting a killed impalad. Default: 10""")
    parser.add_argument('-c', '--concurrency', default='4', help="""The number of
    concurrent streams of the workload to run. Default: 4""")
    parser.add_argument('-r', '--iterations', default='4', help="""The number of
    times each workload will be run. Each concurrent stream will execute the workload
    this many times. Default: 4""")
    # reads sys.argv — keep this call last so all options are registered
    args = parser.parse_args()
    return args
def remove_null_values(data):
    """ Removed null value
    Arguments:
        data {dict}: data
    Returns:
        data {dict}: update data
    """
    cleaned = {}
    for key, value in data.items():
        if value is not None:
            cleaned[key] = value
    return cleaned
def get_as_clause_multicol(columns_to_query_lst, update_param_list):
    """
    get_as_clause will return tuple of column names of intermediate table c of postgresql query.
    :param columns_to_query_lst: columns for where clause.E.g [col1]
    :param update_param_list: new column names for columns to be updated.E.g [updatecol2,updatecol3]
    :return as_clause: string. E.g "as c(col1,updatecol2,updatecol3)"
    """
    all_columns = list(columns_to_query_lst) + list(update_param_list)
    return "as c(" + ",".join(all_columns) + ")"
def format_epoch_score(epoch: int, loss: float) -> str:
    """Formats the results obtained at the end of an epoch in
    a string used for logging.
    Args:
        epoch (int): current epoch
        loss (float): current associated loss
    Returns:
        str: formatted string
    """
    return "Epoch {}: {}".format(epoch, loss)
def create_stats(value_counts):
    """
    @param pd.Series value_counts: the value counts of the field in question
    @returns dict: the statistics to be output in markdown
    """
    stats = {"Unique Values": len(value_counts)}
    if len(value_counts) > 0:
        # value_counts is sorted descending, so index 0 is the mode
        stats["Most frequently occurring value"] = value_counts.index[0]
    # iterating a Series yields the counts themselves
    stats["Number of values with a single occurrence"] = sum(
        1 for count in value_counts if count == 1)
    return stats
import unicodedata
def is_number(string: str) -> bool:
    """See if a given string is a number (int or float)"""
    # plain numeric literal?
    try:
        float(string)
        return True
    except ValueError:
        pass
    # single unicode numeric character (e.g. '½')?
    try:
        unicodedata.numeric(string)
        return True
    except (TypeError, ValueError):
        return False
from typing import Optional
def validate_opex_per_capacity(opex_per_capacity: Optional[float]) -> Optional[float]:
    """
    Validates the opex per capacity of an object.
    Opex per capacity is always optional: None skips validation entirely.
    :param opex_per_capacity: The opex per capacity of the object.
    :return: The validated opex per capacity (or None if none was given).
    :raises ValueError: if a negative value is provided.
    """
    if opex_per_capacity is not None and opex_per_capacity < 0:
        raise ValueError("Opex per capacity must be zero or positive.")
    return opex_per_capacity
def get_structure_from_prev_run(vasprun, outcar=None):
    """
    Process structure from previous run.
    Args:
        vasprun (Vasprun): Vasprun that contains the final structure
            from previous run.
        outcar (Outcar): Outcar that contains the magnetization info from
            previous run.
    Returns:
        Returns the magmom-decorated structure that can be passed to get
        Vasp input files, e.g. get_kpoints.
    """
    structure = vasprun.final_structure
    site_properties = {}
    # magmom: prefer the per-site totals parsed from OUTCAR, fall back to
    # the MAGMOM values echoed in vasprun parameters
    if vasprun.is_spin:
        if outcar and outcar.magnetization:
            site_properties.update({"magmom": [i['tot']
                                               for i in outcar.magnetization]})
        else:
            site_properties.update({"magmom": vasprun.parameters['MAGMOM']})
    # ldau: LDAUU/LDAUJ/LDAUL are given per *species* in the INCAR; expand
    # them back to per-site lists by consuming one value per new species
    # encountered while walking the structure in site order.
    if vasprun.parameters.get("LDAU", False):
        for k in ("LDAUU", "LDAUJ", "LDAUL"):
            vals = vasprun.incar[k]
            m = {}          # species symbol -> its LDAU value
            l_val = []      # per-site expansion
            s = 0           # index of the next unconsumed species value
            for site in structure:
                if site.specie.symbol not in m:
                    m[site.specie.symbol] = vals[s]
                    s += 1
                l_val.append(m[site.specie.symbol])
            if len(l_val) == len(structure):
                site_properties.update({k.lower(): l_val})
            else:
                raise ValueError("length of list {} not the same as"
                                 "structure".format(l_val))
    # copy() attaches the collected site properties to the new structure
    return structure.copy(site_properties=site_properties)
def newPage(doc, pno=-1, width=595, height=842):
    """Insert a new page of the given size into *doc* at index *pno*
    (default: append) and return the created page object.
    """
    doc._newPage(pno, width=width, height=height)
    page = doc[pno]
    return page
def get_query(params):
    """ Choose correct query based on the origin and destination string length.
    """
    ## Psycopg substitutes %(origin)s / %(destination)s safely, so user input
    ## never reaches the SQL text directly — only the trusted, internally
    ## chosen column names are interpolated below (no SQL injection risk).
    base_query = ('SELECT day, ROUND(AVG(price)) average_price '
                  'FROM prices '
                  'INNER JOIN ports orig ON orig_code = orig.code '
                  'INNER JOIN ports dest ON dest_code = dest.code '
                  'WHERE {origin} = %(origin)s AND {destination} = %(destination)s '
                  'AND day BETWEEN %(date_from)s AND %(date_to)s '
                  'GROUP BY day ')
    # 5-char values are port codes; longer ones are region slugs
    origin_col = 'orig.parent_slug' if len(params['origin']) > 5 else 'orig_code'
    dest_col = 'dest.parent_slug' if len(params['destination']) > 5 else 'dest_code'
    return base_query.format(origin=origin_col, destination=dest_col)
def _nt_unicode_error_resolver(err):
"""
Do unicode char replaces as defined in https://www.w3.org/TR/2004/REC-rdf-testcases-20040210/#ntrip_strings
"""
def _replace_single(c):
c = ord(c)
fmt = u'\\u%04X' if c <= 0xFFFF else u'\\U%08X'
return fmt % c
string = err.object[err.start:err.end]
return ( "".join( _replace_single(c) for c in string ), err.end ) | 67c0f2ea92379a91c67dcfeabc51c4eee9972df7 | 695,219 |
def twoSum(nums, target):
    """
    Find two indices whose values sum to target.

    :param nums: List[int]
    :param target: int
    :return: a single-element list wrapping the index pair, e.g. [[i, j]],
        or False when no pair sums to target.
    """
    # value -> index of previously seen numbers
    chk_map = {}
    for index, val in enumerate(nums):
        compl = target - val
        if compl in chk_map:
            # leftover debug print removed
            return [[chk_map[compl], index]]
        chk_map[val] = index
    return False
import win32serviceutil
import pywintypes
def get_winsvc_status(svcname):
    """Get Windows service status.
    Args:
        svcname (str): Service name to query.
    Returns:
        The status as returned by ``QueryServiceStatus`` (a tuple of
        status fields), or None if the query fails.
    """
    try:
        status = win32serviceutil.QueryServiceStatus(svcname)
    except pywintypes.error:
        status = None
    return status
import os
def abs_path(pathname: str) -> str:
    """Transform a script-relative path to an absolute path."""
    script_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.normpath(os.path.join(script_dir, pathname))
from typing import List
from typing import Dict
def find_values(item, keys: List[str]) -> Dict:
    """Find values for keys in item, if present.
    Parameters
    ----------
    item : Any
        Any item
    keys : List[str]
        Keys whose value to retrieve in item
    Returns
    -------
    Dict
        Mapping of key -> value for keys found in item.
    Raises
    ------
    ValueError
        If one key is found whose value is a tuple, list or dict.
    """
    found: Dict = {}
    if isinstance(item, (list, tuple)):
        # descend into every element of a sequence
        for element in item:
            found.update(find_values(element, keys))
    elif isinstance(item, dict):
        for key, value in item.items():
            if key not in keys:
                # not a wanted key: keep searching inside its value
                found.update(find_values(value, keys))
            elif isinstance(value, (list, tuple, dict)):
                raise ValueError(f"Cannot find value for '{key}'. Type must be literal but got {type(value)}")
            else:
                found[key] = value
    # scalars contribute nothing
    return found
def _toList(qvar):
""" Make qvar a list if not."""
if type(qvar) != type([]): qvar = [qvar]
return qvar | 96644b04ee791d901a44731daddcb636ed549b46 | 695,225 |
import os
from warnings import warn
import subprocess
def make_bubble(reports_dir, out_dir, title):
    """Make bubble chart.

    Runs the bundled centrifuge_bubble.r script over *reports_dir*,
    writing figures into ``<out_dir>/figures`` (created if missing).

    :param reports_dir: directory containing centrifuge reports
    :param out_dir: output directory root
    :param title: chart title (may contain spaces/quotes)
    :return: the figures directory path
    """
    import shlex  # local import: quote arguments safely for the shell

    fig_dir = os.path.join(out_dir, 'figures')
    if not os.path.isdir(fig_dir):
        os.makedirs(fig_dir)
    cur_dir = os.path.dirname(os.path.realpath(__file__))
    bubble = os.path.join(cur_dir, 'centrifuge_bubble.r')
    # Quote every argument: the previous '"{}"' string formatting broke
    # (and allowed shell injection) when title or paths contained quotes.
    job = ' '.join(shlex.quote(part) for part in
                   [bubble, '--dir', reports_dir, '--title', title,
                    '--outdir', fig_dir])
    warn(job)
    subprocess.run(job, shell=True)
    return fig_dir
import os
import yaml
def load_remote_source(mirror=''):
    """
    Load the config file defining the remote data sources, formatted for the
    chosen mirror (the mirror's entries overwrite the defaults).
    @return: loaded config dictionary
    """
    module_dir = os.path.dirname(os.path.realpath(__file__))
    with open(f'{module_dir}/remote_sources.yml') as config_file:
        config = yaml.safe_load(config_file)
    if mirror:
        # replace the 'mirrors' section with the selected mirror's values
        config.update(config.pop('mirrors')[mirror])
    return config
def create_members_api_bp_from_app(app):
    """Create members api blueprint."""
    # blueprint endpoint registration is controlled by the extension
    ext = app.extensions["invenio-communities"]
    resource = ext.members_resource
    return resource.as_blueprint()
import re
def macs_species(genome):
    """Convert genome to macs2 species encoding"""
    genome_to_species = (
        (r'^hg[0-9]+$', 'hs'),  # human builds
        (r'^mm[0-9]+$', 'mm'),  # mouse builds
    )
    for pattern, species in genome_to_species:
        if re.match(pattern, genome):
            return species
    raise Exception('Unknown species {}'.format(genome))
from typing import List
from typing import Dict
import os
import json
def get_installed_versions(path: str) -> List[Dict[str,str]]:
    """
    Return all installed versions found under ``<path>/versions``, as
    dicts with the version's "id" and "type" read from its JSON manifest.
    """
    versions_dir = os.path.join(path, "versions")
    installed = []
    for entry in os.listdir(versions_dir):
        entry_dir = os.path.join(versions_dir, entry)
        # stray files in the versions directory are ignored
        if not os.path.isdir(entry_dir):
            continue
        manifest_path = os.path.join(entry_dir, entry + ".json")
        with open(manifest_path, "r", encoding="utf-8") as manifest:
            version_data = json.load(manifest)
        installed.append({"id": version_data["id"], "type": version_data["type"]})
    return installed
def compile_ingredients(dishes):
    """
    :param dishes: list of dish ingredient sets
    :return: set
    This function should return a `set` of all ingredients from all listed dishes.
    """
    # set().union(...) works for an empty *dishes* list too, where the
    # unbound set.union(*[]) call used to raise TypeError.
    return set().union(*dishes)
import socket
def is_port_opened(port, hostname='127.0.0.1'):
    """ Checks if the specified port is opened
    :param port: The port to check
    :param hostname: The hostname to check, defaults to '127.0.0.1'
    :return: True if a TCP connection succeeds, False otherwise
    """
    # the socket is now closed deterministically (the old version leaked
    # the file descriptor on every call)
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        # connect_ex returns 0 on success instead of raising
        return sock.connect_ex((hostname, port)) == 0
import re
def get_html_url(content):
    """Takes a HTML tag and returns the href or SRC.

    Returns (url, text/alt) for an anchor or src-bearing tag, or None when
    nothing matches. NOTE: the patterns are greedy, so inputs with several
    quoted attributes can capture more than the bare URL.
    """
    anchor = re.search(r'<a.+href="(.+)">(.+)</a>', content)
    if anchor:
        return anchor.group(1), anchor.group(2)
    src = re.search(r'src="(.+)"', content)
    if src:
        alt = re.search(r'src=.+alt=(".+").*>', content)
        if alt:
            return src.group(1), alt.group(1)
        return src.group(1), ''
    return None
def wait4(pid, options):
    """Similar to :func:`waitpid`, except a 3-element tuple, containing the child's
    process id, exit status indication, and resource usage information is returned.
    Refer to :mod:`resource`.\ :func:`getrusage` for details on resource usage
    information. The arguments to :func:`wait4` are the same as those provided to
    :func:`waitpid`."""
    # Stub implementation: always reports pid 0, status 0 and no resource
    # usage. The arguments are accepted but ignored.
    return (0, 0, 0)
import hashlib
def verify_file_hash(
    file_path:str,
    file_hash:str,
    file_hash_algorithm:str
):
    """Return True if the calculated hash of the file matches the given hash, false else.

    :param file_path: path of the file to hash
    :param file_hash: expected digest (hex, case-insensitive)
    :param file_hash_algorithm: 'md5', 'sha1', 'sha256' or 'auto' (try all
        three); any other value yields False
    """
    supported = ('md5', 'sha1', 'sha256')
    if file_hash_algorithm == 'auto':
        wanted = supported
    else:
        # unknown algorithms leave this empty -> False, as before
        wanted = tuple(a for a in supported if a == file_hash_algorithm)
    # only instantiate the hashers we actually need (the old version
    # always computed all three digests regardless of the request)
    hashers = {name: hashlib.new(name) for name in wanted}
    with open(file_path, 'rb') as f:
        for chunk in iter(lambda: f.read(4096), b""):
            for hasher in hashers.values():
                hasher.update(chunk)
    file_hash = file_hash.lower()
    return any(hasher.hexdigest().lower() == file_hash
               for hasher in hashers.values())
def parse_filename(args, start_date):
    """Derive the csv data filename from args and start_date.

    Arguments:
        args: the parsed arguments provided to the script
        start_date: date used to label the file when no explicit
            filename is given
    Returns:
        filename: the filename to use for the csv data
    """
    if args.filename:
        return args.filename
    # derive from this script's name, labelled by month or by start_date
    template = __file__.replace('.py', '_{}.csv')
    if args.last_month:
        return template.format(start_date.strftime('%B'))
    return template.format(start_date)
def make_rmd_featurestat(project_name):
    """
    Rmarkdown creation: Obtain proportion of no feature counts from htseq-count results.
    Separate here because of the htseq-count-specific feature.
    """
    # assembled with a single join instead of repeated string concatenation
    parts = [
        "\n",
        "## HTSeq-count: No feature counts statistics\n\n",
        "Numbers of reads that can not mapped to any feature (Nofeature count) are shown by per million reads from htseq-count quantification results\n\n",
        "```{r nofeature, eval=T, echo=F, message=F, warning=F, results='asis'}\n",
        "nofeature.data <- read.table('" + project_name + "_htseq_nofeature.txt', sep='\\t', header=T, as.is=T)\n",
        "DT::datatable(nofeature.data, rownames=FALSE, options = list(pageLength = 25))\n",
        "```\n\n",
    ]
    return "".join(parts)
def make_gauge(name, value, m_type='gauge'):
    """Return a dict for use as a gauge."""
    return dict(name=name, value=value, type=m_type)
def has_triple_string_quotes(string_contents: str) -> bool:
    """Tells whether string token is written as inside triple quotes."""
    # A triple-quoted token needs distinct opening and closing delimiters,
    # i.e. at least 6 characters; without this guard a bare '"""' matched
    # both startswith and endswith and was wrongly reported True.
    if len(string_contents) < 6:
        return False
    for quotes in ('"""', "'''"):
        if string_contents.startswith(quotes) and string_contents.endswith(quotes):
            return True
    return False
def difficultyLevel(mychoice):
    """ Assigns the difficulty level and answers
    Args:
        param1 (str): the user's difficulty choice (word or digit)
    Returns: the user's dificulty choice in word form; anything
        unrecognised falls back to "hard"
    """
    if mychoice in ("easy", '1'):
        return "easy"
    if mychoice in ("medium", '2'):
        return "medium"
    return "hard"
def check_managability(user, note, action):
    """
    Determine if user can edit or delete this note. This note can be edited or deleted if at least one of the
    following criteria is met:
    - user is an admin
    - user is the author of the note
    - user is member of a group in groups AND note is proper permission is set
    - note is public and proper permission is set
    Args:
        user: django User object
        note: Note object
        action: (str) name of action to check for (edit or delete)
    Returns (bool):
        True if this note can be managed by the provided user
        False if this note can not be managed by the provided user
    """
    # exact membership, not substring: the old `action not in 'editdelete'`
    # accepted junk like 'tdel' or '' as a valid action
    if action not in ('edit', 'delete'):
        return False
    if user.is_superuser:
        return True
    if note.author == user:
        return True
    # note.permissions is a string such as 'editdelete'; substring test is
    # the storage format used by the model
    if note.scope == 'group' and \
            any(i in user.groups.all() for i in note.groups.all()) and \
            action in note.permissions:
        return True
    if note.scope == 'public' and action in note.permissions:
        return True
    return False
def ipow(a, b, N):
    """ Returns a raised to the power b modulo N by squaring exponentiation """
    result = 1
    base = a
    exponent = b
    while exponent > 0:
        exponent, bit = divmod(exponent, 2)
        if bit:
            result = result * base % N
        base = base * base % N
    return result % N
def normalized_probabilistic_similarity(lang_model, query_model):
    """
    Returns the 'normalized probability' (invented by me) of a query text belongs to a language model
    :rtype : float
    :param lang_model: The model of a language (usually a dictionary of features and it's values)
    :param query_model: The query text (usually a dictionary of features and it's values)
    :return: The normalized probability that the given query belongs to the provided language model
    """
    # if there's no query, then there's no chance it belongs to the model
    if query_model is None:
        return 0.0
    n = len(query_model)
    if n == 0:
        return 0.0
    # totals of the feature counts in each model
    lang_total = float(sum(lang_model.values()))
    query_total = float(sum(query_model.values()))
    if query_total == 0.0 or lang_total == 0.0:
        # all-zero counts make the frequency ratios undefined; the old code
        # raised ZeroDivisionError here — treat as "no similarity"
        return 0.0

    def distance(feature):
        # features absent from the language model are maximally distant
        if feature not in lang_model:
            return 1.0
        return abs(query_model[feature] / query_total
                   - lang_model[feature] / lang_total)

    query_total_distance = float(sum(distance(f) for f in query_model))
    # normalized: 1.0 means identical frequency profile, 0.0 disjoint
    return 1.0 - query_total_distance / n
def get_interface_speed(speed):
    """Translates speed into bits/sec given the output from the API
    Args:
        speed (string): input should be from NX-API in the form of '10 Gb/s'-
                        param being sent should be "eth_speed" from output from
                        'show interface eth x/y' in NX-API
    Returns:
        equivalent speed (str) in bits per second, "auto", or None for an
        unrecognised speed string
    """
    # Order matters: '100 M' must be tested before the bare '10'/'1'
    # prefixes — in the old elif chain '100 Mb/s' matched startswith('10')
    # first, making the '100 M' branch unreachable.
    prefix_to_bps = (
        ('auto', 'auto'),
        ('40', '40000'),
        ('100 G', '100000'),
        ('100 M', '100'),
        ('10', '10000'),
        ('1', '1000'),
    )
    for prefix, bps in prefix_to_bps:
        if speed.startswith(prefix):
            return bps
    return None
def _is_tcp_syn(tcp_flags):
"""
Passed a TCP flags object (hex) and return 1 if it
contains a TCP SYN and no other flags
"""
if tcp_flags == 2:
return 1
else:
return 0 | 7072b33e641099a97bdba850bfd11c9aeccc1223 | 695,247 |
def scope2dom(name, no_css_selector=False):
    """Get the CSS selector/element name actually used in the front-end html page
    :param str/tuple name: When it is str, it is regarded as the Dom ID name;
       when tuple, the format is (css selector, element name)
    """
    selector = '#'
    if isinstance(name, tuple):
        selector, name = name
    dom_name = name.replace(' ', '-')
    prefix = '' if no_css_selector else selector
    return f'{prefix}pywebio-scope-{dom_name}'
def indexToWorld(flatmap_index, map_width, map_resolution, map_origin=(0, 0)):
    """
    Converts a flatmap index value to world coordinates (meters).

    flatmap_index: a linear index value, specifying a cell/pixel in an 1-D array
    map_width: number of columns in the occupancy grid
    map_resolution: side length of each grid map cell in meters
    map_origin: the x,y position in grid cell coordinates of the world's
        coordinate origin (default: (0, 0); an immutable tuple instead of
        the previous mutable-list default)
    Returns a list containing x,y coordinates in the world frame of reference
    """
    # convert to x,y grid cell/pixel coordinates
    grid_cell_map_x = flatmap_index % map_width
    grid_cell_map_y = flatmap_index // map_width
    # convert to world coordinates
    x = map_resolution * grid_cell_map_x + map_origin[0]
    y = map_resolution * grid_cell_map_y + map_origin[1]
    return [x, y]
def addext(name_str, ext_str):
    """Add a file extension to a filename
    :param name_str: The filename that will get the extension
    :param ext_str: The extension (no leading ``.`` required)
    :returns: The filename with the extension (unchanged if ext_str is empty)
    """
    return name_str + "." + ext_str if ext_str else name_str
def indentify(stuff, rep=1, indent='\t'):
    """ From http://code.activestate.com/recipes/66055-changing-the-indentation-of-a-multi-line-string/#c4 """
    prefix = rep * indent
    return "\n".join(prefix + line for line in stuff.splitlines())
def roll(image, delta):
    """Roll an image sideways by *delta* pixels, wrapping around."""
    width, height = image.size
    delta %= width
    if delta == 0:
        return image
    left = image.crop((0, 0, delta, height))
    right = image.crop((delta, 0, width, height))
    image.paste(left, (width - delta, 0, width, height))
    image.paste(right, (0, 0, width - delta, height))
    return image
def elt0(list, context):
    """Return the first member of a reduction (*context* is unused)."""
    first = list[0]
    return first
def tagnum(line):
    """
    Return the current event count: the integer between the first '='
    and the first ')' of *line*.
    """
    start = line.find('=') + 1
    end = line.find(')')
    return int(line[start:end])
def get_factorial(num: int):
    """ Returns the factorial of `num` (1 for num <= 1) """
    result = 1
    for factor in range(2, num + 1):
        result *= factor
    return result
import sys
def validate_input(text: str) -> bool:
    """ensure the game gives back the initial string to begin"""
    if len(text) == 4:
        return True
    # complaint goes to stderr so it doesn't mix with game output
    print('Input string must be 4 characters long', file=sys.stderr)
    return False
from typing import List
from typing import Dict
import json
def construct_pod_dict(node_info_file: str,
                       pod_types: List[str]) -> Dict[str, List[str]]:
    """Constructs a dict of pod names to be queried.
    Args:
        node_info_file: The file path contains the pod names to query.
            The pods' names are put into a Dict of list that keyed by the
            role name: clients, servers and driver.
        pod_types: which of the roles to include in the result.
    """
    with open(node_info_file, 'r') as f:
        pod_names = json.load(f)
    pod_type_to_name = {
        'clients': [client['Name'] for client in pod_names['Clients']],
        'servers': [server['Name'] for server in pod_names['Servers']],
        'driver': [pod_names['Driver']['Name']],
    }
    return {pod_type: pod_type_to_name[pod_type] for pod_type in pod_types}
def rflink_to_brightness(dim_level: int) -> int:
    """Convert RFLink dim level (0-15) to 0-255 brightness."""
    # 15 * 17 == 255, so the scale maps the full range exactly
    brightness = dim_level * 17
    return int(brightness)
import os
def get_include():
    """Return the directory of framework header files.
    Returns
    -------
    str
        The include directory.
    """
    package_dir = os.path.dirname(os.path.dirname(__file__))
    return os.path.join(os.path.dirname(package_dir), 'include')
import logging
def MergeDictionaryValues(old_dict, new_dict):
    """Attempts to merge the given dictionaries.

    Warns if a key exists with different values in both dictionaries. In
    this case, the new_dict value trumps the previous value.

    Args:
        old_dict: Existing dictionary.
        new_dict: New dictionary.

    Returns:
        Result of merging the two dictionaries (a new dict; inputs are
        not modified).
    """
    for key in set(old_dict) & set(new_dict):
        if old_dict[key] != new_dict[key]:
            logging.warning(
                "'%s' has conflicting values '%s' and '%s'. Using '%s'.",
                key, old_dict[key], new_dict[key], new_dict[key])
    merged = dict(old_dict)
    merged.update(new_dict)
    return merged
import re
def strip(path):
    """
    Remove characters that are illegal in Windows directory names.

    :param path: directory name to sanitise (coerced to ``str``)
    :return: the name with ``? \\ * | “ < > : /`` removed
    """
    illegal_chars = r'[?\\*|“<>:/]'
    return re.sub(illegal_chars, '', str(path))
def make_pounds(coins, bill):
    """
    Count the ways to make change for ``bill`` using the given coins.

    Classic bottom-up dynamic programme: ``ways[n]`` holds the number of
    combinations summing to ``n`` using the coins considered so far.

    :param coins: available coin denominations
    :type coins: list
    :param bill: amount to make change for
    :type bill: int
    :return: number of distinct combinations that sum to ``bill``
    :rtype: int
    """
    # Base case: one way to make zero (use no coins at all).
    ways = [1] + [0] * bill
    for coin in coins:
        for amount in range(coin, bill + 1):
            ways[amount] += ways[amount - coin]
    return ways[bill]
def bytes_identical(a_bytes, b_bytes):
    """Compare two byte strings.

    Returns a tuple ``(equal, index)``: ``(True, 0)`` when the inputs
    are identical, otherwise ``(False, i)`` where ``i`` is the first
    differing position (the shorter length when the lengths differ).
    """
    if len(a_bytes) != len(b_bytes):
        return False, min(len(a_bytes), len(b_bytes))
    if a_bytes == b_bytes:
        return True, 0  # dummy index for the identical case
    # Same length, different content: a mismatch is guaranteed to exist.
    for pos, (lhs, rhs) in enumerate(zip(a_bytes, b_bytes)):
        if lhs != rhs:
            return False, pos
import configparser
import os
def extract_endpoint(profile="default"):
    """Extract the endpoint URL from an S3/AWS configuration file.

    boto3 does not currently read ``endpoint_url`` from the config file
    (see https://github.com/aws/aws-cli/issues/1270), so pull it out by
    hand.

    :param profile: config section to read, defaults to "default"
    :return: the ``endpoint_url`` value, or None when the file, profile
        or option is absent
    """
    config = configparser.ConfigParser()
    # Path to the config generated from secret is hardcoded.
    if "AWS_CONFIG_FILE" in os.environ:
        config.read(os.environ["AWS_CONFIG_FILE"])
    else:
        # Fall back to the default location. The original code tested
        # the literal string "~/.aws/config", which never exists because
        # "~" is not expanded automatically -- hence expanduser() here.
        default_path = os.path.expanduser("~/.aws/config")
        if os.path.exists(default_path):
            config.read(default_path)
    # Takes care of both a non-existent profile and a missing field.
    return config[profile]["endpoint_url"] \
        if config.has_option(profile, "endpoint_url") else None
def find_shablons(key_word, array):
    """
    Return the items of ``array`` that contain ``key_word``.

    The match is case-insensitive and every item is coerced to ``str``
    before testing, so non-string items come back in string form.

    Parameters
    ----------
    key_word : str
    array: list

    Returns
    -------
    list
    """
    needle = key_word.lower()
    # `needle in s` is the idiomatic (and faster) form of `s.find(needle) != -1`.
    return [str(item) for item in array if needle in str(item).lower()]
def iso_string_to_sql_date_mysql(x: str) -> str:
    """
    Build MySQL SQL converting an ISO-8601 string expression to a DATE.

    Only the first 10 characters (the ``YYYY-MM-DD`` part) are used; no
    timezone conversion is performed. ``x`` is the SQL expression to
    convert (such as a column name).
    """
    return "STR_TO_DATE(LEFT({}, 10), '%Y-%m-%d')".format(x)
def check_proximity(xy1, xy2):
    """
    Return True when two distinct 2-tuples are orthogonally adjacent.

    >>> check_proximity((10,10), (10,10))
    False
    >>> check_proximity((11,11), (11,12))
    True
    >>> check_proximity((0,0), (0,0))
    False
    """
    # Reject anything that is not a pair of distinct 2-tuples.
    if type(xy1) is not tuple or type(xy2) is not tuple:
        return False
    if len(xy1) != 2 or len(xy2) != 2:
        return False
    if xy1 == xy2:
        return False
    dx = abs(xy1[0] - xy2[0])
    dy = abs(xy1[1] - xy2[1])
    # Adjacent along exactly one axis (the other coordinate matches).
    return (dx <= 1 and dy == 0) or (dy <= 1 and dx == 0)
import os
def exists_file(filename):
    """
    Check whether a file exists or not.

    :param filename: (string) the filename, relative or absolute.
    :return: True, if the file exists; False, otherwise.
    """
    # The original implementation re-joined the file's own directory in
    # front of the full path (os.path.join(dirname, filename)), which
    # duplicated the directory component for relative paths such as
    # 'a/b.txt' -> 'a/a/b.txt'. os.path.exists handles every case.
    return os.path.exists(filename)
def unique(iterable):
    """Return unique elements.

    Note
    ====
    Always returning a list for consistency was tempting,
    however this defeats the purpose of creating this function
    to achieve brevity elsewhere in the code.

    @return: iterable with duplicates removed, as C{set} if possible.
    @rtype:
      - If all items in C{iterable} are hashable,
        then returns C{set}.
      - If iterable contains unhashable item,
        then returns C{list} of unique elements.
    """
    try:
        return set(iterable)
    except TypeError:
        # At least one item is unhashable (the only exception set() can
        # raise for that reason -- the original bare `except:` also hid
        # unrelated errors). Fall back to an O(n^2) membership scan
        # that preserves first-seen order.
        unique_items = []
        for item in iterable:
            if item not in unique_items:
                unique_items.append(item)
        return unique_items
from typing import Dict
def get_config_input(user_cfg: Dict[str, dict]) -> Dict[str, dict]:
    """
    Extract the "input" section of a user configuration.

    :param user_cfg: user configuration
    :type user_cfg: dict
    :return cfg: partial configuration holding only the "input" key
        (empty when the user configuration has none)
    :rtype cfg: dict
    """
    if "input" not in user_cfg:
        return {}
    return {"input": user_cfg["input"]}
from datetime import datetime
def ctime():
    """Return the current local time formatted as ``HH:MM:SS``."""
    now = datetime.now()
    return now.strftime("%H:%M:%S")
from typing import Dict
from typing import List
def prepare_data_for_markdown_formatting(scans: Dict) -> List[List[str]]:
    """Format virustotal scan results for the MarkdownTableWriter.

    Args:
        scans: Dictionary containing the scans, from the virustotal api
            response; each value must carry a boolean 'detected' field.

    Returns:
        data: List of ``[antivirus name, verdict]`` rows, where the
            verdict is '_Malicious_' or '_Safe_'.
    """
    data = []
    for antivirus, result in scans.items():
        # 'Malicous' was a typo in the emitted table text; fixed to
        # 'Malicious'. Truthiness test instead of `is True` so truthy
        # non-bool values are still flagged.
        status = '_Malicious_' if result['detected'] else '_Safe_'
        data.append([antivirus, status])
    return data
def docstring(docstr, sep="\n"):
    """
    Decorator: Append ``docstr`` to a function's docstring.

    :param docstr: text to append (becomes the whole docstring when the
        function has none)
    :param sep: separator placed between the old docstring and ``docstr``
    """
    def _decorator(func):
        if func.__doc__ is None:  # `is None`, not `== None` (PEP 8)
            func.__doc__ = docstr
        else:
            func.__doc__ = sep.join([func.__doc__, docstr])
        return func
    return _decorator
def link_easy(sid):
    """
    Build an HTML anchor linking to a dataset page in Easy.

    :param sid: a dataset id
    :return: an ``<a>`` element opening the dataset page in a new tab
    """
    prefix = 'https://easy.dans.knaw.nl/ui/datasets/id/'
    anchor = '<a target="_blank" href="{}{}">{}</a>'
    return anchor.format(prefix, sid, sid)
import base64
def _get_signed_query_params(credentials, expiration, string_to_sign):
"""Gets query parameters for creating a signed URL.
:type credentials: :class:`oauth2client.client.AssertionCredentials`
:param credentials: The credentials used to create a private key
for signing text.
:type expiration: int or long
:param expiration: When the signed URL should expire.
:type string_to_sign: string
:param string_to_sign: The string to be signed by the credentials.
:raises AttributeError: If :meth: sign_blob is unavailable.
:rtype: dict
:returns: Query parameters matching the signing credentials with a
signed payload.
"""
if not hasattr(credentials, 'sign_blob'):
raise AttributeError('you need a private key to sign credentials.'
'the credentials you are currently using %s '
'just contains a token. see https://googlecloud'
'platform.github.io/gcloud-python/stable/gcloud-'
'auth.html#setting-up-a-service-account for more '
'details.' % type(credentials))
_, signature_bytes = credentials.sign_blob(string_to_sign)
signature = base64.b64encode(signature_bytes)
service_account_name = credentials.service_account_email
return {
'GoogleAccessId': service_account_name,
'Expires': str(expiration),
'Signature': signature,
} | 6f7fd8d24240ee604fdfe3b5d63ff8638e601f77 | 695,280 |
def is_closed(request):
    """Tell whether the given request has been closed.

    :param request: object exposing an ``is_closed`` attribute
    :return: the value of ``request.is_closed``
    """
    closed = request.is_closed
    return closed
import csv
def read_subjects_file(subjects_file, has_header_line=False, index_of_subject_id_field=0, **kwargs):
    """
    Read subject identifiers from a CSV file.

    The file may be a plain text file with one subject id per line, or a
    comma-separated CSV file in which the subject id sits in one column
    (column 0 by default) and any further columns are ignored. If you
    have a different format, read the file yourself instead.

    Parameters
    ----------
    subjects_file: string
        Path to the subjects file (see above for format details).
    has_header_line: boolean, optional
        Whether the first line is a header line and should be skipped.
        Defaults to 'False'.
    index_of_subject_id_field: integer, optional
        Column index of the subject id within each row. Defaults to '0'.
        Changing this only makes sense for CSV files.
    **kwargs: any
        Passed on to the `csv.reader` constructor from Python's standard
        `csv` module, e.g. ``delimiter='\\t'`` for tab-separated files.

    Returns
    -------
    list of strings
        The subject identifiers, in file order.

    Examples
    --------
    Load a list of subjects from a simple text file with one subject per line.

    >>> import brainload.nitools as nit
    >>> subjects_ids = nit.read_subjects_file('/home/myuser/data/study5/subjects.txt')
    """
    with open(subjects_file, 'r') as handle:
        rows = csv.reader(handle, **kwargs)
        if has_header_line:
            next(rows)
        return [row[index_of_subject_id_field] for row in rows]
def get_dataset_params(dataset):
    """
    Get the capacities to test for the particular dataset.

    :param dataset: string of dataset
    :return: list of capacities
    :raises NotImplementedError: for any dataset other than 'cub200'
    """
    if dataset != 'cub200':
        raise NotImplementedError('Dataset not supported.')
    return [2, 4, 8, 16]
import math
def sched_cos(start, end, pos):
    """Cosine annealing scheduler: interpolate start -> end for pos in [0, 1]."""
    # Smooth weight rising from 0 (pos=0) to 1 (pos=1).
    weight = (1 + math.cos(math.pi * (1 - pos))) / 2
    return start + weight * (end - start)
import re
def replace_aea_fetch_statements(
    content: str, old_string: str, new_string: str, type_: str
) -> str:
    """Replace statements of the type: 'aea fetch <old_string>'.

    Only applied when ``type_`` is "agents"; other types are returned
    unchanged. ``old_string`` is escaped with ``re.escape`` so that
    package ids containing regex metacharacters (e.g. the dots in a
    version such as '0.1.0') are matched literally, not as wildcards.
    """
    if type_ == "agents":
        content = re.sub(
            fr"aea +fetch +{re.escape(old_string)}",
            f"aea fetch {new_string}",
            content,
        )
    return content
def bigquery_serialize_date(py_date):
    """
    Serialize a python date for Bigquery, accurate to days.

    Bigquery format: 'YYYY-[M]M-[D]D'
    https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types

    Args:
        py_date (datetime.date): The date to convert.
    Returns:
        (str): The serialized date.
    """
    # date.__format__ delegates to strftime, so this equals
    # py_date.strftime('%Y-%m-%d').
    return '{:%Y-%m-%d}'.format(py_date)
def get_model(obj):
    """
    Walk the ``parent`` chain upwards and return the model root element
    (the first ancestor that has no ``parent`` attribute).
    """
    current = obj
    while hasattr(current, 'parent'):
        current = current.parent
    return current
import torch
def focal_loss(input_values, gamma):
    """Compute the mean focal loss from per-sample cross-entropy values.

    ``input_values`` holds -log(p) per sample; the modulating factor
    ``(1 - p) ** gamma`` down-weights easy (high-probability) examples.
    """
    probs = torch.exp(-input_values)
    weighted = input_values * (1 - probs) ** gamma
    return weighted.mean()
def correct_timing(offset, stimuli):
    """Correct Timing.

    Given the offset and the stimulus lines, return the lines with the
    trigger times shifted by the offset.
    """
    print('Calculating the correct trigger times in relation to offset... \n')
    corrected = []
    for stim in stimuli:
        # Each stimulus looks like 'stimuli_character information time'.
        components = stim.split(' ')
        new_stimuli_time = float(components[2]) + offset
        # Re-assemble the line with the shifted time.
        corrected.append([f'{components[0]} {components[1]} {new_stimuli_time}'])
    return corrected
import click
def main(args=None):
    """Console script for flowsym."""
    welcome = ("Welcome to FlowSym, a Python API used to simulate flow "
               "cytometry data!"
               "\n\n"
               "If you don't see any warning messages above then you're "
               "all set!")
    click.echo(welcome)
    click.echo("See documentation at ")
    return 0
from typing import Tuple
from typing import Union
def is_secret_key(key: str) -> Tuple[bool, Union[str, None]]:
    """Check if the Airflow Flask webserver secret key is valid.

    :param key: Key to check.
    :return: Validity, and an error message if not valid.
    """
    # Length is measured in UTF-8 bytes, not characters.
    key_length = len(key.encode("utf-8"))
    if key_length >= 16:
        return True, None
    message = f"Secret key should be length >=16, but is length {key_length}."
    return False, message
def correct_img_dimension(width, height, threshold_w, threshold_h):
    """
    Return a new width and height that is as close as to the thresholds,
    while keeping the aspect ratio.

    :param width: original width (must be non-zero)
    :param height: original height
    :param threshold_w: target width
    :param threshold_h: target height
    :return: tuple of new width and height

    Note: the original code computed ``ratio`` only inside the
    ``width > height`` branch, so portrait inputs (width <= height)
    raised ``NameError``; the ratio is now computed unconditionally.
    """
    ratio = height / width
    if width > height:
        return (threshold_w, ratio * threshold_h)
    return (threshold_w * (1 / ratio), threshold_h)
def check_dummy_variables(df, dummy_list):
    """
    Ensure every expected dummy column exists on the dataframe.

    Columns from ``dummy_list`` missing in ``df`` are added filled with
    zeros (a category that never appeared during encoding produces no
    column). The dataframe is modified in place and also returned.
    """
    missing = [name for name in dummy_list if name not in df.columns]
    for name in missing:
        df[name] = 0
    return df
def format_schema(schema):
    """Convert a Schema instance to the Django-style fixture format."""
    # Timestamps and the workspace id are serialized as strings.
    fields = {
        "name": schema.name,
        "datastore_id": schema.datastore_id,
        "workspace_id": str(schema.workspace_id),
        "created_at": str(schema.created_at),
        "updated_at": str(schema.updated_at),
        "object_id": schema.object_id,
        "tags": schema.tags,
    }
    return {
        "fields": fields,
        "model": "definitions.schema",
        "pk": schema.pk,
    }
def grep(pat, list, case=1):
    """Simple minded grep-like function.
    grep(pat,list) returns occurrences of pat in list, None on failure.

    It only does simple string matching, with no support for regexps. Use the
    option case=0 for case-insensitive matching."""
    # NOTE: the parameter name `list` shadows the builtin, but is kept so
    # existing keyword callers (grep(pat, list=...)) keep working.
    # `pat in term` replaces the non-idiomatic `term.find(pat) > -1`.
    if case:
        out = [term for term in list if pat in term]
    else:
        lpat = pat.lower()
        out = [term for term in list if lpat in term.lower()]
    return out or None
def file_to_list(filename):
    """
    Read a text file into a list of stripped lines.

    @param filename: path to file
    @type filename: string
    @return list of lines with surrounding whitespace removed
    """
    with open(filename) as handle:
        return [line.strip() for line in handle]
def ssl_config(module):
    """
    Build the dictionary used for SSL (certificate) authentication.

    On a missing required key, the Ansible-style ``module.fail_json`` is
    invoked with an error message naming the key.
    """
    ssl_auth = module.params['ssl_auth']
    try:
        return {
            'cert': ssl_auth['cert'],
            'serverca': ssl_auth['serverca'],
            'no_ssl_verify': ssl_auth.get('verify', True),
            'authtype': 'ssl',
        }
    except KeyError as err:
        module.fail_json(changed=False,
                         skipped=False,
                         failed=True,
                         error='Missing ssl_auth "%s" key.' % err.args[0])
def c(x, y):
    """
    Convert ECMA-48 character notation to 8-bit form

    ```python
    >>> c(0,0)
    0
    >>> c(1,11)
    27
    >>> c(15,15)
    255
    ```
    """
    # x is the high nibble (column), y the low nibble (row);
    # x * 16 is equivalent to x << 4 for integers.
    return x * 16 + y
import os
def get_hash_from_name(file_path):
    """
    Return the hash encoded in a file name.

    No error checking is performed: every path is assumed to end in
    'file_name.extension', where file_name is the hash itself (the part
    before the first dot of the basename).
    """
    base_name = os.path.basename(file_path)
    hash_part, _, _ = base_name.partition('.')
    return hash_part
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.