code stringlengths 59 4.4k | docstring stringlengths 5 7.69k |
|---|---|
def infer(self, inputPattern, computeScores=True, overCategories=True,
partitionId=None):
sparsity = 0.0
if self.minSparsity > 0.0:
sparsity = ( float(len(inputPattern.nonzero()[0])) /
len(inputPattern) )
if len(self._categoryList) == 0 or sparsity < self.minSparsity:
... | Finds the category that best matches the input pattern. Returns the
winning category index as well as a distribution over all categories.
:param inputPattern: (list or array) The pattern to be classified. This
must be a dense representation of the array (e.g. [0, 0, 1, 1, 0, 1]).
:param computeSco... |
def getheaders(self, name):
result = []
current = ''
have_header = 0
for s in self.getallmatchingheaders(name):
if s[0].isspace():
if current:
current = "%s\n %s" % (current, s.strip())
else:
current = s.... | Get all values for a header.
This returns a list of values for headers given more than once; each
value in the result list is stripped in the same way as the result of
getheader(). If the header is not given, return an empty list. |
def find_intersections_with(self, other):
import shapely.geometry
geom = _convert_var_to_shapely_geometry(other)
result = []
for p_start, p_end in zip(self.coords[:-1], self.coords[1:]):
ls = shapely.geometry.LineString([p_start, p_end])
intersections = ls.interse... | Find all intersection points between the line string and `other`.
Parameters
----------
other : tuple of number or list of tuple of number or \
list of LineString or LineString
The other geometry to use during intersection tests.
Returns
-------
... |
def db_value(self, value):
value = self.transform_value(value)
return self.hhash.encrypt(value,
salt_size=self.salt_size, rounds=self.rounds) | Convert the python value for storage in the database. |
def edit_distance(s1, s2):
d = {}
lenstr1 = len(s1)
lenstr2 = len(s2)
for i in xrange(-1, lenstr1 + 1):
d[(i, -1)] = i + 1
for j in xrange(-1, lenstr2 + 1):
d[(-1, j)] = j + 1
for i in xrange(lenstr1):
for j in xrange(lenstr2):
if s1[i] == s2[j]:
... | Calculates string edit distance between string 1 and string 2.
Deletion, insertion, substitution, and transposition all increase edit distance. |
def debug(*args, **kwargs):
if not (DEBUG and args):
return None
parent = kwargs.get('parent', None)
with suppress(KeyError):
kwargs.pop('parent')
backlevel = kwargs.get('back', 1)
with suppress(KeyError):
kwargs.pop('back')
frame = inspect.currentframe()
while backle... | Print a message only if DEBUG is truthy. |
def _delete_entity(self):
if self._is_ndb():
_NDB_KEY(self._model, self._key_name).delete()
else:
entity_key = db.Key.from_path(self._model.kind(), self._key_name)
db.delete(entity_key) | Delete entity from datastore.
Attempts to delete using the key_name stored on the object, whether or
not the given key is in the datastore. |
def submit_registration_form(self, form):
self.lock.acquire()
try:
if form and form.type!="cancel":
self.registration_form = form
iq = Iq(stanza_type = "set")
iq.set_content(self.__register.submit_form(form))
self.set_response_h... | Submit a registration form.
[client only]
:Parameters:
- `form`: the filled-in form. When form is `None` or its type is
"cancel" the registration is to be canceled.
:Types:
- `form`: `pyxmpp.jabber.dataforms.Form` |
def _initiate_starttls(self, **kwargs):
if self._tls_state == "connected":
raise RuntimeError("Already TLS-connected")
kwargs["do_handshake_on_connect"] = False
logger.debug("Wrapping the socket into ssl")
self._socket = ssl.wrap_socket(self._socket, **kwargs)
self._s... | Initiate starttls handshake over the socket. |
def writeToCheckpoint(self, checkpointDir):
proto = self.getSchema().new_message()
self.write(proto)
checkpointPath = self._getModelCheckpointFilePath(checkpointDir)
if os.path.exists(checkpointDir):
if not os.path.isdir(checkpointDir):
raise Exception(("Existing filesystem entry <%s> is n... | Serializes model using capnproto and writes data to ``checkpointDir`` |
def omega_mixture(omegas, zs, CASRNs=None, Method=None,
AvailableMethods=False):
r
def list_methods():
methods = []
if none_and_length_check([zs, omegas]):
methods.append('SIMPLE')
methods.append('NONE')
return methods
if AvailableMethods:
... | r'''This function handles the calculation of a mixture's acentric factor.
Calculation is based on the omegas provided for each pure component. Will
automatically select a method to use if no Method is provided;
returns None if insufficient data is available.
Examples
--------
>>> omega_mixture(... |
def add_handler(self, handler):
if not isinstance(handler, EventHandler):
raise TypeError, "Not an EventHandler"
with self.lock:
if handler in self.handlers:
return
self.handlers.append(handler)
self._update_handlers() | Add a handler object.
:Parameters:
- `handler`: the object providing event handler methods
:Types:
- `handler`: `EventHandler` |
def start(self):
self.bot_start_time = datetime.now()
self.webserver = Webserver(self.config['webserver']['host'], self.config['webserver']['port'])
self.plugins.load()
self.plugins.load_state()
self._find_event_handlers()
self.sc = ThreadedSlackClient(self.config['slack_... | Initializes the bot, plugins, and everything. |
def T_dependent_property_derivative(self, T, order=1):
r
if self.method:
if self.test_method_validity(T, self.method):
try:
return self.calculate_derivative(T, self.method, order)
except:
pass
sorted_valid_method... | r'''Method to obtain a derivative of a property with respect to
temperature, of a given order. Methods found valid by
`select_valid_methods` are attempted until a method succeeds. If no
methods are valid and succeed, None is returned.
Calls `calculate_derivative` internally to perfor... |
def usage_palette(parser):
parser.print_usage()
print('')
print('available palettes:')
for palette in sorted(PALETTE):
print(' %-12s' % (palette,))
return 0 | Show usage and available palettes. |
def _win32_dir(path, star=''):
from ubelt import util_cmd
import re
wrapper = 'cmd /S /C "{}"'
command = 'dir /-C "{}"{}'.format(path, star)
wrapped = wrapper.format(command)
info = util_cmd.cmd(wrapped, shell=True)
if info['ret'] != 0:
from ubelt import util_format
print('Fa... | Using the windows cmd shell to get information about a directory |
def __intermediate_proto(self, interface, address):
address_proto = address.pop('proto', 'static')
if 'proto' not in interface:
return address_proto
else:
return interface.pop('proto') | determines UCI interface "proto" option |
def send_message(source_jid, password, target_jid, body, subject = None,
message_type = "chat", message_thread = None, settings = None):
if sys.version_info.major < 3:
from locale import getpreferredencoding
encoding = getpreferredencoding()
if isinstance(source_jid, str):
... | Star an XMPP session and send a message, then exit.
:Parameters:
- `source_jid`: sender JID
- `password`: sender password
- `target_jid`: recipient JID
- `body`: message body
- `subject`: message subject
- `message_type`: message type
- `message_thread`: mess... |
def find_sections(lines):
sections = []
for line in lines:
if is_heading(line):
sections.append(get_heading(line))
return sections | Find all section names and return a list with their names. |
def memcopy(self, stream, offset=0, length=float("inf")):
data = [ord(i) for i in list(stream)]
size = min(length, len(data), self.m_size)
buff = cast(self.m_buf, POINTER(c_uint8))
for i in range(size):
buff[offset + i] = data[i] | Copy stream to buffer |
def tell(self, message, sender=no_sender):
if sender is not no_sender and not isinstance(sender, ActorRef):
raise ValueError("Sender must be actor reference")
self._cell.send_message(message, sender) | Send a message to this actor. Asynchronous fire-and-forget.
:param message: The message to send.
:type message: Any
:param sender: The sender of the message. If provided it will be made
available to the receiving actor via the :attr:`Actor.sender` attribute.
:type sender: :... |
def natural_keys(text):
def atoi(text):
return int(text) if text.isdigit() else text
return [atoi(c) for c in re.split('(\d+)', text)] | Sort list of string with number in human order.
Examples
----------
>>> l = ['im1.jpg', 'im31.jpg', 'im11.jpg', 'im21.jpg', 'im03.jpg', 'im05.jpg']
>>> l.sort(key=tl.files.natural_keys)
['im1.jpg', 'im03.jpg', 'im05', 'im11.jpg', 'im21.jpg', 'im31.jpg']
>>> l.sort() # that is what we dont want
... |
def build_url(base, seg, query=None):
def clean_segment(segment):
segment = segment.strip('/')
if isinstance(segment, basestring):
segment = segment.encode('utf-8')
return segment
seg = (quote(clean_segment(s)) for s in seg)
if query is None or len(query) == 0:
qu... | Create a URL from a list of path segments and an optional dict of query
parameters. |
def align_and_parse(handle, max_internal_indels=5, is_gbs=False):
try:
with open(handle, 'rb') as infile:
clusts = infile.read().split("//\n//\n")
clusts = [i for i in clusts if i]
if not clusts:
raise IPyradError
except (IOError, IPyradError):
... | much faster implementation for aligning chunks |
def convert(self, value, param, ctx):
resource = tower_cli.get_resource(self.resource_name)
if value is None:
return None
if isinstance(value, int):
return value
if re.match(r'^[\d]+$', value):
return int(value)
if value == 'null':
... | Return the appropriate integer value. If a non-integer is
provided, attempt a name-based lookup and return the primary key. |
def verify_roster_set(self, fix = False, settings = None):
try:
self._verify((None, u"remove"), fix)
except ValueError, err:
raise BadRequestProtocolError(unicode(err))
if self.ask:
if fix:
self.ask = None
else:
rais... | Check if `self` is valid roster set item.
For use on server to validate incoming roster sets.
Valid item must have proper `subscription` value other and valid value
for 'ask'. The lengths of name and group names must fit the configured
limits.
:Parameters:
- `fix`:... |
def _parse_annotations(sbase):
annotation = {}
if sbase.isSetSBOTerm():
annotation["sbo"] = sbase.getSBOTermID()
cvterms = sbase.getCVTerms()
if cvterms is None:
return annotation
for cvterm in cvterms:
for k in range(cvterm.getNumResources()):
uri = cvterm.getRes... | Parses cobra annotations from a given SBase object.
Annotations are dictionaries with the providers as keys.
Parameters
----------
sbase : libsbml.SBase
SBase from which the SBML annotations are read
Returns
-------
dict (annotation dictionary)
FIXME: annotation format must b... |
def __checkCancelation(self):
print >>sys.stderr, "reporter:counter:HypersearchWorker,numRecords,50"
jobCancel = self._jobsDAO.jobGetFields(self._jobID, ['cancel'])[0]
if jobCancel:
self._cmpReason = ClientJobsDAO.CMPL_REASON_KILLED
self._isCanceled = True
self._logger.info("Model %s cance... | Check if the cancelation flag has been set for this model
in the Model DB |
def _read_config_file(config_file, verbose):
config_file = os.path.abspath(config_file)
if not os.path.exists(config_file):
raise RuntimeError("Couldn't open configuration file '{}'.".format(config_file))
if config_file.endswith(".json"):
with io.open(config_file, mode="r", encoding="utf-8")... | Read configuration file options into a dictionary. |
def grep(prev, pattern, *args, **kw):
inv = False if 'inv' not in kw else kw.pop('inv')
pattern_obj = re.compile(pattern, *args, **kw)
for data in prev:
if bool(inv) ^ bool(pattern_obj.match(data)):
yield data | The pipe greps the data passed from previous generator according to
given regular expression.
:param prev: The previous iterator of pipe.
:type prev: Pipe
:param pattern: The pattern which used to filter out data.
:type pattern: str|unicode|re pattern object
:param inv: If true, invert the matc... |
def sanitize_path(path):
if path == '/':
return path
if path[:1] != '/':
raise InvalidPath('The path must start with a slash')
path = re.sub(r'/+', '/', path)
return path.rstrip('/') | Performs sanitation of the path after validating
:param path: path to sanitize
:return: path
:raises:
- InvalidPath if the path doesn't start with a slash |
def next_event(self, event_id, prev=False):
i = self.events.index(self._events_dict[event_id])
if prev and i > 0:
return self.events[i - 1]
elif not prev and i + 1 < len(self.events):
return self.events[i + 1]
else:
return None | Get the event following another event in this conversation.
Args:
event_id (str): ID of the event.
prev (bool): If ``True``, return the previous event rather than the
next event. Defaults to ``False``.
Raises:
KeyError: If no such :class:`.Conversati... |
def read_docs(self, docsfiles):
updates = DocParser()
for docsfile in _list(docsfiles):
if os.path.isfile(docsfile):
updates.parse(docsfile)
self.docs.update((k, _docs(updates[k], self.docvars)) for k in self.docs if updates.blocks[k])
for name, text in update... | Read program documentation from a DocParser compatible file.
docsfiles is a list of paths to potential docsfiles: parse if present.
A string is taken as a list of one item. |
def _ConvertStructMessage(value, message):
if not isinstance(value, dict):
raise ParseError(
'Struct must be in a dict which is {0}.'.format(value))
for key in value:
_ConvertValueMessage(value[key], message.fields[key])
return | Convert a JSON representation into Struct message. |
def prune(self, depth=0):
for n in list(self.nodes):
if len(n.links) <= depth:
self.remove_node(n.id) | Removes all nodes with less or equal links than depth. |
def mmGetMetricSequencesPredictedActiveCellsPerColumn(self):
self._mmComputeTransitionTraces()
numCellsPerColumn = []
for predictedActiveCells in (
self._mmData["predictedActiveCellsForSequence"].values()):
cellsForColumn = self.mapCellsToColumns(predictedActiveCells)
numCellsPerColumn +... | Metric for number of predicted => active cells per column for each sequence
@return (Metric) metric |
def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error):
line = clean_lines.elided[linenum]
match = Search(pattern, line)
if not match:
return False
context = line[0:match.start(1) - 1]
if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context):
return False
if ... | Checks for a C-style cast by looking for the pattern.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
cast_type: The string for the C++ cast to recommend. This is either
reinterpret_cast, static_... |
def decree(cls, path, concrete_start='', **kwargs):
try:
return cls(_make_decree(path, concrete_start), **kwargs)
except KeyError:
raise Exception(f'Invalid binary: {path}') | Constructor for Decree binary analysis.
:param str path: Path to binary to analyze
:param str concrete_start: Concrete stdin to use before symbolic input
:param kwargs: Forwarded to the Manticore constructor
:return: Manticore instance, initialized with a Decree State
:rtype: Ma... |
def get_termination_stats(self, get_cos=True):
delta_vals = self._last_vals - self.param_vals
delta_err = self._last_error - self.error
frac_err = delta_err / self.error
to_return = {'delta_vals':delta_vals, 'delta_err':delta_err,
'num_iter':1*self._num_iter, 'frac_err':f... | Returns a dict of termination statistics
Parameters
----------
get_cos : Bool, optional
Whether or not to calcualte the cosine of the residuals
with the tangent plane of the model using the current J.
The calculation may take some time. Defaul... |
def add(reader, writer, column, start, stop, value):
for i, row in enumerate(reader):
if i >= start and i <= stop:
row[column] = type(value)(row[column]) + value
writer.appendRecord(row) | Adds a value over a range of rows.
Args:
reader: A FileRecordStream object with input data.
writer: A FileRecordStream object to write output data to.
column: The column of data to modify.
start: The first row in the range to modify.
end: The last row in the range to modify.
value: The value ... |
def add_pfba(model, objective=None, fraction_of_optimum=1.0):
if objective is not None:
model.objective = objective
if model.solver.objective.name == '_pfba_objective':
raise ValueError('The model already has a pFBA objective.')
sutil.fix_objective_as_constraint(model, fraction=fraction_of_o... | Add pFBA objective
Add objective to minimize the summed flux of all reactions to the
current objective.
See Also
-------
pfba
Parameters
----------
model : cobra.Model
The model to add the objective to
objective :
An objective to set in combination with the pFBA ob... |
def matchPatterns(patterns, keys):
results = []
if patterns:
for pattern in patterns:
prog = re.compile(pattern)
for key in keys:
if prog.match(key):
results.append(key)
else:
return None
return results | Returns a subset of the keys that match any of the given patterns
:param patterns: (list) regular expressions to match
:param keys: (list) keys to search for matches |
def arrow(self, x, y, width, type=NORMAL, draw=True, **kwargs):
path = self.BezierPath(**kwargs)
if type == self.NORMAL:
head = width * .4
tail = width * .2
path.moveto(x, y)
path.lineto(x - head, y + head)
path.lineto(x - head, y + tail)
... | Draw an arrow.
Arrows can be two types: NORMAL or FORTYFIVE.
:param x: top left x-coordinate
:param y: top left y-coordinate
:param width: width of arrow
:param type: NORMAL or FORTYFIVE
:draw: If True draws arrow immediately
:return: Path object representing... |
def get(self, id):
info = super(Images, self).get(id)
return ImageActions(self.api, parent=self, **info) | id or slug |
def fit(self, X, y=None):
X = check_array(X)
self._x_min = X.min(axis=0)
self._x_max = X.max(axis=0)
return self | Find min and max values of every feature.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The training input samples.
y : Ignored
not used, present for API consistency by convention.
Returns
-------
se... |
def init_options(self):
self.options = GoogleMapOptions()
d = self.declaration
self.set_map_type(d.map_type)
if d.ambient_mode:
self.set_ambient_mode(d.ambient_mode)
if (d.camera_position or d.camera_zoom or
d.camera_tilt or d.camera_bearing):
... | Initialize the underlying map options. |
def set_bot(self, bot):
self.bot = bot
self.sink.set_bot(bot) | Bot must be set before running |
def round_any(x, accuracy, f=np.round):
if not hasattr(x, 'dtype'):
x = np.asarray(x)
return f(x / accuracy) * accuracy | Round to multiple of any number. |
def read_temple_config():
with open(temple.constants.TEMPLE_CONFIG_FILE) as temple_config_file:
return yaml.load(temple_config_file, Loader=yaml.SafeLoader) | Reads the temple YAML configuration file in the repository |
def add_metabolites(self, metabolite_list):
if not hasattr(metabolite_list, '__iter__'):
metabolite_list = [metabolite_list]
if len(metabolite_list) == 0:
return None
metabolite_list = [x for x in metabolite_list
if x.id not in self.metabolites]... | Will add a list of metabolites to the model object and add new
constraints accordingly.
The change is reverted upon exit when using the model as a context.
Parameters
----------
metabolite_list : A list of `cobra.core.Metabolite` objects |
def get_href(self):
safe = "/" + "!*'()," + "$-_|."
return compat.quote(
self.provider.mount_path
+ self.provider.share_path
+ self.get_preferred_path(),
safe=safe,
) | Convert path to a URL that can be passed to XML responses.
Byte string, UTF-8 encoded, quoted.
See http://www.webdav.org/specs/rfc4918.html#rfc.section.8.3
We are using the path-absolute option. i.e. starting with '/'.
URI ; See section 3.2.1 of [RFC2068] |
def _addPartitionId(self, index, partitionId=None):
if partitionId is None:
self._partitionIdList.append(numpy.inf)
else:
self._partitionIdList.append(partitionId)
indices = self._partitionIdMap.get(partitionId, [])
indices.append(index)
self._partitionIdMap[partitionId] = indices | Adds partition id for pattern index |
def isValidClass(self, class_):
module = inspect.getmodule(class_)
valid = (
module in self._valid_modules
or (
hasattr(module, '__file__')
and module.__file__ in self._valid_named_modules
)
)
return valid and not privat... | Needs to be its own method so it can be called from both wantClass and
registerGoodClass. |
def _err(self, msg):
out = '%s%s' % ('[%s] ' % self.description if len(self.description) > 0 else '', msg)
if self.kind == 'warn':
print(out)
return self
elif self.kind == 'soft':
global _soft_err
_soft_err.append(out)
return self
... | Helper to raise an AssertionError, and optionally prepend custom description. |
def list_targets_by_rule(client=None, **kwargs):
result = client.list_targets_by_rule(**kwargs)
if not result.get("Targets"):
result.update({"Targets": []})
return result | Rule='string' |
def chirp(t, f0=0., t1=1., f1=100., form='linear', phase=0):
r
valid_forms = ['linear', 'quadratic', 'logarithmic']
if form not in valid_forms:
raise ValueError("Invalid form. Valid form are %s"
% valid_forms)
t = numpy.array(t)
phase = 2. * pi * phase / 360.
if form == "line... | r"""Evaluate a chirp signal at time t.
A chirp signal is a frequency swept cosine wave.
.. math:: a = \pi (f_1 - f_0) / t_1
.. math:: b = 2 \pi f_0
.. math:: y = \cos\left( \pi\frac{f_1-f_0}{t_1} t^2 + 2\pi f_0 t + \rm{phase} \right)
:param array t: times at which to evaluate the chirp signal... |
def get(self, store_id, customer_id, **queryparams):
self.store_id = store_id
self.customer_id = customer_id
return self._mc_client._get(url=self._build_path(store_id, 'customers', customer_id), **queryparams) | Get information about a specific customer.
:param store_id: The store id.
:type store_id: :py:class:`str`
:param customer_id: The id for the customer of a store.
:type customer_id: :py:class:`str`
:param queryparams: The query string parameters
queryparams['fields'] = []... |
def get_constants(self, **params: keys):
url = self.api.CONSTANTS
return self._get_model(url, **params) | Get the CR Constants
Parameters
----------
\*\*keys: Optional[list] = None
Filter which keys should be included in the
response
\*\*exclude: Optional[list] = None
Filter which keys should be excluded from the
response
\*\*timeout: ... |
def parse_arguments(*args, **options):
days = options.get('days', 1)
enterprise_customer_uuid = options.get('enterprise_customer_uuid')
enterprise_customer = None
if enterprise_customer_uuid:
try:
enterprise_customer = EnterpriseCustomer.objects.get(uuid=enter... | Parse and validate arguments for send_course_enrollments command.
Arguments:
*args: Positional arguments passed to the command
**options: optional arguments passed to the command
Returns:
A tuple containing parsed values for
1. days (int): Integer showin... |
def set_doc_data_lics(self, doc, lics):
if not self.doc_data_lics_set:
self.doc_data_lics_set = True
if validations.validate_data_lics(lics):
doc.data_license = document.License.from_identifier(lics)
return True
else:
raise SPDX... | Sets the document data license.
Raises value error if malformed value, CardinalityError
if already defined. |
def train_model(best_processed_path, weight_path='../weight/model_weight.h5', verbose=2):
x_train_char, x_train_type, y_train = prepare_feature(best_processed_path, option='train')
x_test_char, x_test_type, y_test = prepare_feature(best_processed_path, option='test')
validation_set = False
if os.path.is... | Given path to processed BEST dataset,
train CNN model for words beginning alongside with
character label encoder and character type label encoder
Input
=====
best_processed_path: str, path to processed BEST dataset
weight_path: str, path to weight path file
verbose: int, verbost option for ... |
def _add_file(self, tar, name, contents, mode=DEFAULT_FILE_MODE):
byte_contents = BytesIO(contents.encode('utf8'))
info = tarfile.TarInfo(name=name)
info.size = len(contents)
info.mtime = 0
info.type = tarfile.REGTYPE
info.mode = int(mode, 8)
tar.addfile(tarinfo=i... | Adds a single file in tarfile instance.
:param tar: tarfile instance
:param name: string representing filename or path
:param contents: string representing file contents
:param mode: string representing file mode, defaults to 644
:returns: None |
def mongo(daemon=False, port=20771):
cmd = "mongod --port {0}".format(port)
if daemon:
cmd += " --fork"
run(cmd) | Run the mongod process. |
def string_presenter(self, dumper, data):
if '\n' in data:
return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
else:
return dumper.represent_scalar('tag:yaml.org,2002:str', data) | Presenter to force yaml.dump to use multi-line string style. |
def set_shared_config(cls, config):
assert isinstance(config, dict)
cls._sharedInstance.config.update(config)
if cls._sharedInstance.instance:
cls._sharedInstance.instance = None | This allows to set a config that will be used when calling
``shared_blockchain_instance`` and allows to define the configuration
without requiring to actually create an instance |
def setup(app):
lexer = MarkdownLexer()
for alias in lexer.aliases:
app.add_lexer(alias, lexer)
return dict(version=__version__) | Initializer for Sphinx extension API.
See http://www.sphinx-doc.org/en/stable/extdev/index.html#dev-extensions. |
def _categoryToLabelList(self, category):
if category is None:
return []
labelList = []
labelNum = 0
while category > 0:
if category % 2 == 1:
labelList.append(self.saved_categories[labelNum])
labelNum += 1
category = category >> 1
return labelList | Converts a category number into a list of labels |
def scale(self, w=1.0, h=1.0):
from types import FloatType
w0, h0 = self.img.size
if type(w) == FloatType: w = int(w*w0)
if type(h) == FloatType: h = int(h*h0)
self.img = self.img.resize((w,h), INTERPOLATION)
self.w = w
self.h = h | Resizes the layer to the given width and height.
When width w or height h is a floating-point number,
scales percentual,
otherwise scales to the given size in pixels. |
def namespace_array(ns_key):
obs_sch = namespace(ns_key)
obs_sch['title'] = 'Observation'
sch = copy.deepcopy(JAMS_SCHEMA['definitions']['SparseObservationList'])
sch['items'] = obs_sch
return sch | Construct a validation schema for arrays of a given namespace.
Parameters
----------
ns_key : str
Namespace key identifier
Returns
-------
schema : dict
JSON schema of `namespace` observation arrays |
def rssi(self, timeout_sec=TIMEOUT_SEC):
self._rssi_read.clear()
self._peripheral.readRSSI()
if not self._rssi_read.wait(timeout_sec):
raise RuntimeError('Exceeded timeout waiting for RSSI value!')
return self._rssi | Return the RSSI signal strength in decibels. |
def fit(self, x, y=None):
if self._dtype is not None:
iter2array(x, dtype=self._dtype)
else:
iter2array(x)
return self | Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence work in pipelines. |
def can_reach(self, node, traversable=lambda node, edge: True):
if isinstance(node, str):
node = self.graph[node]
for n in self.graph.nodes:
n._visited = False
return proximity.depth_first_search(self,
visit=lambda n: node == n,
traversable=travers... | Returns True if given node can be reached over traversable edges.
To enforce edge direction, use a node==edge.node1 traversable. |
def nexmake(mdict, nlocus, dirs, mcmc_burnin, mcmc_ngen, mcmc_sample_freq):
max_name_len = max([len(i) for i in mdict])
namestring = "{:<" + str(max_name_len+1) + "} {}\n"
matrix = ""
for i in mdict.items():
matrix += namestring.format(i[0], i[1])
handle = os.path.join(dirs, "{}.nex".format(... | function that takes a dictionary mapping names to
sequences, and a locus number, and writes it as a NEXUS
file with a mrbayes analysis block. |
def is_oct(ip):
try:
dec = int(str(ip), 8)
except (TypeError, ValueError):
return False
if dec > 0o37777777777 or dec < 0:
return False
return True | Return true if the IP address is in octal notation. |
def load_values(self):
for config_name, evar in self.evar_defs.items():
if evar.is_required and evar.name not in os.environ:
raise RuntimeError((
"Missing required environment variable: {evar_name}\n"
"{help_txt}"
).format(evar_... | Go through the env var map, transferring the values to this object
as attributes.
:raises: RuntimeError if a required env var isn't defined. |
def parse_typing_status_message(p):
return TypingStatusMessage(
conv_id=p.conversation_id.id,
user_id=from_participantid(p.sender_id),
timestamp=from_timestamp(p.timestamp),
status=p.type,
) | Return TypingStatusMessage from hangouts_pb2.SetTypingNotification.
The same status may be sent multiple times consecutively, and when a
message is sent the typing status will not change to stopped. |
def handle_extracted_license(self, extr_lic):
lic = self.parse_only_extr_license(extr_lic)
if lic is not None:
self.doc.add_extr_lic(lic)
return lic | Build and return an ExtractedLicense or None.
Note that this function adds the license to the document. |
def write(self, text, hashline=b"
u
if not text.endswith(b"\n"):
text += b"\n"
actual_hash = hashlib.sha1(text).hexdigest()
with open(self.filename, "wb") as f:
f.write(text)
f.write(hashline.decode("utf8").format(actual_hash).encode("utf8"))
... | u"""
Write `text` to the file.
Writes the text to the file, with a final line checksumming the
contents. The entire file must be written with one `.write()` call.
The last line is written with the `hashline` format string, which can
be changed to accommodate different file syn... |
def start_server(self, datacenter_id, server_id):
response = self._perform_request(
url='/datacenters/%s/servers/%s/start' % (
datacenter_id,
server_id),
method='POST-ACTION')
return response | Starts the server.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param server_id: The unique ID of the server.
:type server_id: ``str`` |
def level_chunker(text, getreffs, level=1):
references = getreffs(level=level)
return [(ref.split(":")[-1], ref.split(":")[-1]) for ref in references] | Chunk a text at the passage level
:param text: Text object
:type text: MyCapytains.resources.text.api
:param getreffs: Callback function to retrieve text
:type getreffs: function(level)
:return: List of urn references with their human readable version
:rtype: [(str, str)] |
def calc_J(self):
del self.J
self.J = np.zeros([self.param_vals.size, self.data.size])
dp = np.zeros_like(self.param_vals)
f0 = self.model.copy()
for a in range(self.param_vals.size):
dp *= 0
dp[a] = self.dl[a]
f1 = self.func(self.param_vals + ... | Updates self.J, returns nothing |
def repolist(status='', media=None):
manager = MANAGER
with settings(hide('running', 'stdout')):
if media:
repos = run_as_root("%(manager)s repolist %(status)s | sed '$d' | sed -n '/repo id/,$p'" % locals())
else:
repos = run_as_root("%(manager)s repolist %(status)s | sed... | Get the list of ``yum`` repositories.
Returns enabled repositories by default. Extra *status* may be passed
to list disabled repositories if necessary.
Media and debug repositories are kept disabled, except if you pass *media*.
::
import burlap
# Install a package that may be includ... |
def invalidate_ip(self, ip):
if self._use_cache:
key = self._make_cache_key(ip)
self._cache.delete(key, version=self._cache_version) | Invalidate httpBL cache for IP address
:param ip: ipv4 IP address |
def identify_phase(T, P, Tm=None, Tb=None, Tc=None, Psat=None):
r
if Tm and T <= Tm:
return 's'
elif Tc and T >= Tc:
return 'g'
elif Psat:
if P <= Psat:
return 'g'
elif P > Psat:
return 'l'
elif Tb:
if 9E4 < P < 1.1E5:
if T ... | r'''Determines the phase of a one-species chemical system according to
basic rules, using whatever information is available. Considers only the
phases liquid, solid, and gas; does not consider two-phase
scenarios, as should occurs between phase boundaries.
* If the melting temperature is known and the ... |
def add_droplets(self, droplet_ids):
return self.get_data(
"load_balancers/%s/droplets/" % self.id,
type=POST,
params={"droplet_ids": droplet_ids}
) | Assign a LoadBalancer to a Droplet.
Args:
droplet_ids (obj:`list` of `int`): A list of Droplet IDs |
def scan(xml):
if xml.tag is et.Comment:
yield {'type': COMMENT, 'text': xml.text}
return
if xml.tag is et.PI:
if xml.text:
yield {'type': PI, 'target': xml.target, 'text': xml.text}
else:
yield {'type': PI, 'target': xml.target}
return
obj = _... | Converts XML tree to event generator |
def request_configuration_form(self):
iq = Iq(to_jid = self.room_jid.bare(), stanza_type = "get")
iq.new_query(MUC_OWNER_NS, "query")
self.manager.stream.set_response_handlers(
iq, self.process_configuration_form_success, self.process_configuration_form_error)
self.manage... | Request a configuration form for the room.
When the form is received `self.handler.configuration_form_received` will be called.
When an error response is received then `self.handler.error` will be called.
:return: id of the request stanza.
:returntype: `unicode` |
def _find_any(self, task_spec):
tasks = []
if self.task_spec == task_spec:
tasks.append(self)
for child in self:
if child.task_spec != task_spec:
continue
tasks.append(child)
return tasks | Returns any descendants that have the given task spec assigned.
:type task_spec: TaskSpec
:param task_spec: The wanted task spec.
:rtype: list(Task)
:returns: The tasks objects that are attached to the given task spec. |
def autoset_settings(set_var):
try:
devices = ast.literal_eval(os.environ["CUDA_VISIBLE_DEVICES"])
if type(devices) != list and type(devices) != tuple:
devices = [devices]
if len(devices) != 0:
set_var.GPU = len(devices)
set_var.NB_JOBS = len(devices)
... | Autoset GPU parameters using CUDA_VISIBLE_DEVICES variables.
Return default config if variable not set.
:param set_var: Variable to set. Must be of type ConfigSettings |
def configure(level=logging.INFO, logfile=None):
for handler in Log.handlers:
if isinstance(handler, logging.StreamHandler):
Log.handlers.remove(handler)
Log.setLevel(level)
if logfile is not None:
log_format = "[%(asctime)s] [%(levelname)s]: %(message)s"
formatter = logging.Formatter(fmt=log_fo... | Configure logger which dumps log on terminal
:param level: logging level: info, warning, verbose...
:type level: logging level
:param logfile: log file name, default to None
:type logfile: string
:return: None
:rtype: None |
def register_chooser(self, chooser, **kwargs):
    """Add a model chooser definition to the registry.

    Non-``Chooser`` classes are delegated to ``register_simple_chooser``;
    ``Chooser`` subclasses are instantiated and stored under their model,
    and the class itself is returned.
    """
    if issubclass(chooser, Chooser):
        self.choosers[chooser.model] = chooser(**kwargs)
        return chooser
    return self.register_simple_chooser(chooser, **kwargs)
def get_compound_afrs(self):
    """Determine the amount flow rates of all the compounds.

    :returns: List of amount flow rates. [kmol/h]
    """
    # Start from a copy of the mass flow rates (* 1.0 avoids mutating the
    # original), then convert each entry to an amount via stoich.amount.
    afrs = self._compound_mfrs * 1.0
    for compound in self.material.compounds:
        idx = self.material.get_compound_index(compound)
        afrs[idx] = stoich.amount(compound, afrs[idx])
    return afrs
:returns: List of amount flow rates. [kmol/h] |
def show(self):
    """Display the menus and connect event signals."""
    self.parent.addLayout(self._logSelectLayout)
    self.menuCount = self.menuCount + 1
    self._connectSlots()
def sys_openat(self, dirfd, buf, flags, mode):
filename = self.current.read_string(buf)
dirfd = ctypes.c_int32(dirfd).value
if os.path.isabs(filename) or dirfd == self.FCNTL_FDCWD:
return self.sys_open(buf, flags, mode)
try:
dir_entry = self._get_fd(dirfd)
... | Openat SystemCall - Similar to open system call except dirfd argument
when the path contained in buf is relative, it is resolved relative to the directory referred to by dirfd
Special value AT_FDCWD set for dirfd to set path relative to current directory
:param dirfd: directory file descriptor to refer in case of rel... |
def marketYesterdayDF(token='', version=''):
    """Return previous-day adjusted price data for the whole market as a
    DataFrame.

    https://iexcloud.io/docs/api/#previous-day-prices
    Available after 4am ET Tue-Sat

    Args:
        token (string); Access token
        version (string); API version

    Returns:
        DataFrame: result, indexed by symbol
    """
    raw = marketYesterday(token, version)
    records = []
    for symbol, record in raw.items():
        # Tag each per-symbol record with its symbol before collecting it.
        record['symbol'] = symbol
        records.append(record)
    df = pd.DataFrame(records)
    _toDatetime(df)
    _reindex(df, 'symbol')
    return df
https://iexcloud.io/docs/api/#previous-day-prices
Available after 4am ET Tue-Sat
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
DataFrame: resul... |
def getNextRecord(self, useCache=True):
assert self._file is not None
assert self._mode == self._FILE_READ_MODE
try:
line = self._reader.next()
except StopIteration:
if self.rewindAtEOF:
if self._recordCount == 0:
raise Exception("The source configured to reset at EOF but "... | Returns next available data record from the file.
:returns: a data row (a list or tuple) if available; None, if no more
records in the table (End of Stream - EOS); empty sequence (list
or tuple) when timing out while waiting for the next record. |
def dataReceived(self, data):
self.resetTimeout()
lines = (self._buffer + data).splitlines()
if data.endswith(b'\n') or data.endswith(b'\r'):
self._buffer = b''
else:
self._buffer = lines.pop(-1)
for line in lines:
if self.transport.disconnecti... | Translates bytes into lines, and calls lineReceived.
Copied from ``twisted.protocols.basic.LineOnlyReceiver`` but using
str.splitlines() to split on ``\r\n``, ``\n``, and ``\r``. |
def extracted_array_2d_from_array_2d_and_coordinates(array_2d, y0, y1, x0, x1):
new_shape = (y1-y0, x1-x0)
resized_array = np.zeros(shape=new_shape)
for y_resized, y in enumerate(range(y0, y1)):
for x_resized, x in enumerate(range(x0, x1)):
resized_array[y_resized, x_resized] = array... | Resize an array to a new size by extracting a sub-set of the array.
The extracted input coordinates use NumPy convention, such that the upper values should be specified as +1 the \
dimensions of the extracted array.
In the example below, an array of size (5,5) is extracted using the coordinates y0=1, y1=4... |
def shuffle_cols(seqarr, newarr, cols):
    """Fill the first ``cols.shape[0]`` columns of *newarr* with the columns
    of *seqarr* selected, in order, by the index array *cols*.

    Used in bootstrap resampling without a map file.

    :param seqarr: 2-D source array.
    :param newarr: 2-D destination array; modified in place.
    :param cols: 1-D integer array of column indices into *seqarr*
        (repeats allowed, as produced by bootstrap resampling).
    :returns: *newarr*, for caller convenience.
    """
    # NumPy integer-array (fancy) indexing replaces the original per-column
    # Python loop, which also relied on the Python-2-only builtin `xrange`
    # (a NameError on Python 3). Only the first cols.shape[0] columns of
    # newarr are written, matching the original loop bounds.
    newarr[:, :cols.shape[0]] = seqarr[:, cols]
    return newarr
def create_checksum_node(self, chksum):
chksum_node = BNode()
type_triple = (chksum_node, RDF.type, self.spdx_namespace.Checksum)
self.graph.add(type_triple)
algorithm_triple = (chksum_node, self.spdx_namespace.algorithm, Literal(chksum.identifier))
self.graph.add(algorithm_tripl... | Return a node representing spdx.checksum. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.