code stringlengths 59 4.4k | docstring stringlengths 5 7.69k |
|---|---|
def locked_put(self, credentials):
filters = {self.key_name: self.key_value}
query = self.session.query(self.model_class).filter_by(**filters)
entity = query.first()
if not entity:
entity = self.model_class(**filters)
setattr(entity, self.property_name, credentials)
... | Write a credentials to the SQLAlchemy datastore.
Args:
credentials: :class:`oauth2client.Credentials` |
def create_commands(self, commands, parser):
self.apply_defaults(commands)
def create_single_command(command):
keys = command['keys']
del command['keys']
kwargs = {}
for item in command:
kwargs[item] = command[item]
parser.add_a... | add commands to parser |
def update(kernel=False):
manager = MANAGER
cmds = {'yum -y --color=never': {False: '--exclude=kernel* update', True: 'update'}}
cmd = cmds[manager][kernel]
run_as_root("%(manager)s %(cmd)s" % locals()) | Upgrade all packages, skip obsoletes if ``obsoletes=0`` in ``yum.conf``.
Exclude *kernel* upgrades by default. |
def update_reportnumbers(self):
report_037_fields = record_get_field_instances(self.record, '037')
for field in report_037_fields:
subs = field_get_subfields(field)
for val in subs.get("a", []):
if "arXiv" not in val:
record_delete_field(self.r... | Update reportnumbers. |
def item_fields(self):
if self.templates.get("item_fields") and not self._updated(
"/itemFields", self.templates["item_fields"], "item_fields"
):
return self.templates["item_fields"]["tmplt"]
query_string = "/itemFields"
retrieved = self._retrieve_data(query_strin... | Get all available item fields |
def _calcSkipRecords(numIngested, windowSize, learningPeriod):
numShiftedOut = max(0, numIngested - windowSize)
return min(numIngested, max(0, learningPeriod - numShiftedOut)) | Return the value of skipRecords for passing to estimateAnomalyLikelihoods
If `windowSize` is very large (bigger than the amount of data) then this
could just return `learningPeriod`. But when some values have fallen out of
the historical sliding window of anomaly records, then we have to take those
int... |
def get_course_or_program_context(self, enterprise_customer, course_id=None, program_uuid=None):
context_data = {}
if course_id:
context_data.update({'course_id': course_id, 'course_specific': True})
if not self.preview_mode:
try:
catalog_api_c... | Return a dict having course or program specific keys for data sharing consent page. |
def get_files_to_commit(autooptions):
workingdir = autooptions['working-directory']
includes = autooptions['track']['includes']
excludes = autooptions['track']['excludes']
includes = r'|'.join([fnmatch.translate(x) for x in includes])
excludes = r'|'.join([fnmatch.translate(x) for x in excludes]) or... | Look through the local directory to pick up files to check |
def populate_subtasks(self, context, sg, parent_job_id):
db_sg = db_api.security_group_find(context, id=sg, scope=db_api.ONE)
if not db_sg:
return None
ports = db_api.sg_gather_associated_ports(context, db_sg)
if len(ports) == 0:
return {"ports": 0}
for po... | Produces a list of ports to be updated async. |
def new_symbolic_value(self, nbits, label=None, taint=frozenset()):
assert nbits in (1, 4, 8, 16, 32, 64, 128, 256)
avoid_collisions = False
if label is None:
label = 'val'
avoid_collisions = True
expr = self._constraints.new_bitvec(nbits, name=label, taint=taint,... | Create and return a symbolic value that is `nbits` bits wide. Assign
the value to a register or write it into the address space to introduce
it into the program state.
:param int nbits: The bitwidth of the value returned
:param str label: The label to assign to the value
:param ... |
def not_followed_by(parser):
@tri
def not_followed_by_block():
failed = object()
result = optional(tri(parser), failed)
if result != failed:
fail(["not " + _fun_to_str(parser)])
choice(not_followed_by_block) | Succeeds if the given parser cannot consume input |
def fit(self, X, y=None):
if is_integer(X):
dim = X
else:
X = as_features(X)
dim = X.dim
M = self.smoothness
inds = np.mgrid[(slice(M + 1),) * dim].reshape(dim, (M + 1) ** dim).T
self.inds_ = inds[(inds ** 2).sum(axis=1) <= M ** 2]
retu... | Picks the elements of the basis to use for the given data.
Only depends on the dimension of X. If it's more convenient, you can
pass a single integer for X, which is the dimension to use.
Parameters
----------
X : an integer, a :class:`Features` instance, or a list of bag featu... |
def is_user_enrolled(cls, user, course_id, course_mode):
enrollment_client = EnrollmentApiClient()
try:
enrollments = enrollment_client.get_course_enrollment(user.username, course_id)
if enrollments and course_mode == enrollments.get('mode'):
return True
e... | Query the enrollment API and determine if a learner is enrolled in a given course run track.
Args:
user: The user whose enrollment needs to be checked
course_mode: The mode with which the enrollment should be checked
course_id: course id of the course where enrollment should... |
def figure_protocol(self):
self.log.debug("creating overlayed protocols plot")
self.figure()
plt.plot(self.abf.protoX,self.abf.protoY,color='r')
self.marginX=0
self.decorate(protocol=True) | plot the current sweep protocol. |
def parse(filename):
for event, elt in et.iterparse(filename, events= ('start', 'end', 'comment', 'pi'), huge_tree=True):
if event == 'start':
obj = _elt2obj(elt)
obj['type'] = ENTER
yield obj
if elt.text:
yield {'type': TEXT, 'text': elt.text}... | Parses file content into events stream |
def to_type_constructor(value, python_path=None):
if not value:
return value
if callable(value):
return {'datatype': value}
value = to_type(value)
typename = value.get('typename')
if typename:
r = aliases.resolve(typename)
try:
value['datatype'] = importer... | Tries to convert a value to a type constructor.
If value is a string, then it used as the "typename" field.
If the "typename" field exists, the symbol for that name is imported and
added to the type constructor as a field "datatype".
Throws:
ImportError -- if "typename" is set but cannot be ... |
def docstring(docstr):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
wrapper.__doc__ = docstr
return wrapper
return decorator | Decorates a function with the given docstring
Parameters
----------
docstr : string |
def get_all_firewalls(self):
data = self.get_data("firewalls")
firewalls = list()
for jsoned in data['firewalls']:
firewall = Firewall(**jsoned)
firewall.token = self.token
in_rules = list()
for rule in jsoned['inbound_rules']:
in_r... | This function returns a list of Firewall objects. |
def tf_import_experience(self, states, internals, actions, terminal, reward):
return self.memory.store(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward
) | Imports experiences into the TensorFlow memory structure. Can be used to import
off-policy data.
:param states: Dict of state values to import with keys as state names and values as values to set.
:param internals: Internal values to set, can be fetched from agent via agent.current_internals
... |
def _lastRecursiveChild(self):
"Finds the last element beneath this object to be parsed."
lastChild = self
while hasattr(lastChild, 'contents') and lastChild.contents:
lastChild = lastChild.contents[-1]
return lastChild | Finds the last element beneath this object to be parsed. |
def locked_put(self, credentials):
entity, _ = self.model_class.objects.get_or_create(
**{self.key_name: self.key_value})
setattr(entity, self.property_name, credentials)
entity.save() | Write a Credentials to the Django datastore.
Args:
credentials: Credentials, the credentials to store. |
def filter_by(zips=_zips, **kwargs):
return [z for z in zips if all([k in z and z[k] == v for k, v in kwargs.items()])] | Use `kwargs` to select for desired attributes from list of zipcode dicts |
def getaccesskey(self, window_name, object_name):
menu_handle = self._get_menu_handle(window_name, object_name)
key = menu_handle.AXMenuItemCmdChar
modifiers = menu_handle.AXMenuItemCmdModifiers
glpyh = menu_handle.AXMenuItemCmdGlyph
virtual_key = menu_handle.AXMenuItemCmdVirtual... | Get access key of given object
@param window_name: Window name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to look for, either full name,
LDTP's name convention, or a Unix glob. Or menu heirarch... |
def _match(self, **kwargs):
for k in kwargs.keys():
try:
val = getattr(self, k)
except _a11y.Error:
return False
if sys.version_info[:2] <= (2, 6):
if isinstance(val, basestring):
if not fnmatch.fnmatch(unico... | Method which indicates if the object matches specified criteria.
Match accepts criteria as kwargs and looks them up on attributes.
Actual matching is performed with fnmatch, so shell-like wildcards
work within match strings. Examples:
obj._match(AXTitle='Terminal*')
obj._match(... |
def create_from_snapshot(self, *args, **kwargs):
data = self.get_data('volumes/',
type=POST,
params={'name': self.name,
'snapshot_id': self.snapshot_id,
'region': self.region,
... | Creates a Block Storage volume
Note: Every argument and parameter given to this method will be
assigned to the object.
Args:
name: string - a name for the volume
snapshot_id: string - unique identifier for the volume snapshot
size_gigabytes: int - size of th... |
def show_worst_drawdown_periods(returns, top=5):
drawdown_df = timeseries.gen_drawdown_table(returns, top=top)
utils.print_table(
drawdown_df.sort_values('Net drawdown in %', ascending=False),
name='Worst drawdown periods',
float_format='{0:.2f}'.format,
) | Prints information about the worst drawdown periods.
Prints peak dates, valley dates, recovery dates, and net
drawdowns.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
top : int, o... |
def verify_valid_gdb_subprocess(self):
if not self.gdb_process:
raise NoGdbProcessError("gdb process is not attached")
elif self.gdb_process.poll() is not None:
raise NoGdbProcessError(
"gdb process has already finished with return code: %s"
% str(... | Verify there is a process object, and that it is still running.
Raise NoGdbProcessError if either of the above are not true. |
def _slugify_foreign_key(schema):
for foreign_key in schema.get('foreignKeys', []):
foreign_key['reference']['resource'] = _slugify_resource_name(
foreign_key['reference'].get('resource', ''))
return schema | Slugify foreign key |
def run_multiple(self, workingArea, package_indices):
if not package_indices:
return [ ]
job_desc = self._compose_job_desc(workingArea, package_indices)
clusterprocids = submit_jobs(job_desc, cwd=workingArea.path)
clusterids = clusterprocids2clusterids(clusterprocids)
... | Submit multiple jobs
Parameters
----------
workingArea :
A workingArea
package_indices : list(int)
A list of package indices
Returns
-------
list(str)
The list of the run IDs of the jobs |
def _syspath_modname_to_modpath(modname, sys_path=None, exclude=None):
def _isvalid(modpath, base):
subdir = dirname(modpath)
while subdir and subdir != base:
if not exists(join(subdir, '__init__.py')):
return False
subdir = dirname(subdir)
return True... | syspath version of modname_to_modpath
Args:
modname (str): name of module to find
sys_path (List[PathLike], default=None):
if specified overrides `sys.path`
exclude (List[PathLike], default=None):
list of directory paths. if specified prevents these directories
... |
def process_dir(self, album, force=False):
for f in album:
if isfile(f.dst_path) and not force:
self.logger.info("%s exists - skipping", f.filename)
self.stats[f.type + '_skipped'] += 1
else:
self.stats[f.type] += 1
yield (f... | Process a list of images in a directory. |
def _in_gae_environment():
if SETTINGS.env_name is not None:
return SETTINGS.env_name in ('GAE_PRODUCTION', 'GAE_LOCAL')
try:
import google.appengine
except ImportError:
pass
else:
server_software = os.environ.get(_SERVER_SOFTWARE, '')
if server_software.startswit... | Detects if the code is running in the App Engine environment.
Returns:
True if running in the GAE environment, False otherwise. |
def getExtraIncludes(self):
if 'extraIncludes' in self.description:
return [os.path.normpath(x) for x in self.description['extraIncludes']]
else:
return [] | Some components must export whole directories full of headers into
the search path. This is really really bad, and they shouldn't do
it, but support is provided as a concession to compatibility. |
async def play_at(self, index: int):
self.queue = self.queue[min(index, len(self.queue) - 1):len(self.queue)]
await self.play(ignore_shuffle=True) | Play the queue from a specific point. Disregards tracks before the index. |
def _accumulateFrequencyCounts(values, freqCounts=None):
values = numpy.array(values)
numEntries = values.max() + 1
if freqCounts is not None:
numEntries = max(numEntries, freqCounts.size)
if freqCounts is not None:
if freqCounts.size != numEntries:
newCounts = numpy.zeros(numEntries, dtype='int32... | Accumulate a list of values 'values' into the frequency counts 'freqCounts',
and return the updated frequency counts
For example, if values contained the following: [1,1,3,5,1,3,5], and the initial
freqCounts was None, then the return value would be:
[0,3,0,2,0,2]
which corresponds to how many of each value ... |
def input(self, input, song):
try:
cmd = getattr(self, self.CMD_MAP[input][1])
except (IndexError, KeyError):
return self.screen.print_error(
"Invalid command {!r}!".format(input))
cmd(song) | Input callback, handles key presses |
def generateDataset(aggregationInfo, inputFilename, outputFilename=None):
inputFullPath = resource_filename("nupic.datafiles", inputFilename)
inputObj = FileRecordStream(inputFullPath)
aggregator = Aggregator(aggregationInfo=aggregationInfo,
inputFields=inputObj.getFields())
if aggrega... | Generate a dataset of aggregated values
Parameters:
----------------------------------------------------------------------------
aggregationInfo: a dictionary that contains the following entries
- fields: a list of pairs. Each pair is a field name and an
aggregation function (e.g. sum). The function wi... |
def __intermediate_interface(self, interface, uci_name):
interface.update({
'.type': 'interface',
'.name': uci_name,
'ifname': interface.pop('name')
})
if 'network' in interface:
del interface['network']
if 'mac' in interface:
i... | converts NetJSON interface to
UCI intermediate data structure |
def _parent_filter(self, parent, relationship, **kwargs):
if parent is None or relationship is None:
return {}
parent_filter_kwargs = {}
query_params = ((self._reverse_rel_name(relationship), parent),)
parent_filter_kwargs['query'] = query_params
if kwargs.get('workfl... | Returns filtering parameters to limit a search to the children
of a particular node by a particular relationship. |
def dir_maker(path):
directory = os.path.dirname(path)
if directory != '' and not os.path.isdir(directory):
try:
os.makedirs(directory)
except OSError as e:
sys.exit('Failed to create directory: {}'.format(e)) | Create a directory if it does not exist. |
def paragraphs(quantity=2, separator='\n\n', wrap_start='', wrap_end='',
html=False, sentences_quantity=3, as_list=False):
if html:
wrap_start = '<p>'
wrap_end = '</p>'
separator = '\n\n'
result = []
try:
for _ in xrange(0, quantity):
result.append(... | Return random paragraphs. |
def build_SVG_dict(self):
zoom = self._zoom
layout = self._layout
builder = self._builder
bbox = list(map(lambda f: f * zoom, layout.bounding_box))
builder.bounding_box = bbox
flip_x = bbox[2] + bbox[0] * 2
flip_y = bbox[3] + bbox[1] * 2
instructions = lis... | Go through the layout and build the SVG.
:return: an xml dict that can be exported using a
:class:`~knittingpattern.Dumper.XMLDumper`
:rtype: dict |
def dictDiff(da, db):
different = False
resultDict = dict()
resultDict['inAButNotInB'] = set(da) - set(db)
if resultDict['inAButNotInB']:
different = True
resultDict['inBButNotInA'] = set(db) - set(da)
if resultDict['inBButNotInA']:
different = True
resultDict['differentValues'] = []
for key in ... | Compares two python dictionaries at the top level and return differences
da: first dictionary
db: second dictionary
Returns: None if dictionaries test equal; otherwise returns a
dictionary as follows:
{
'inAButNotInB':
... |
def _close(self, conn):
super(PooledAIODatabase, self)._close(conn)
for waiter in self._waiters:
if not waiter.done():
logger.debug('Release a waiter')
waiter.set_result(True)
break | Release waiters. |
def proto_02_01_MT70(abf=exampleABF):
standard_overlayWithAverage(abf)
swhlab.memtest.memtest(abf)
swhlab.memtest.checkSweep(abf)
swhlab.plot.save(abf,tag='check',resize=False) | repeated membrane tests. |
def load_nipy_img(nii_file):
import nipy
if not os.path.exists(nii_file):
raise FileNotFound(nii_file)
try:
return nipy.load_image(nii_file)
except Exception as exc:
raise Exception('Reading file {0}.'.format(repr_imgs(nii_file))) from exc | Read a Nifti file and return as nipy.Image
Parameters
----------
param nii_file: str
Nifti file path
Returns
-------
nipy.Image |
def payload_class_for_element_name(element_name):
logger.debug(" looking up payload class for element: {0!r}".format(
element_name))
logger.debug(" known: {0!r}".format(STANZA_PAYLOAD_CLASSES))
if element_name in STANZA_PAYLOAD_CLASSES:
... | Return a payload class for given element name. |
def _get_account_and_descendants_(self, account, result):
result.append(account)
for child in account.accounts:
self._get_account_and_descendants_(child, result) | Returns the account and all of it's sub accounts.
:param account: The account.
:param result: The list to add all the accounts to. |
def downsample(self, factor=2):
if not isinstance(factor, int) or factor < 1:
raise ValueError('factor must be a positive integer.')
effect_args = ['downsample', '{}'.format(factor)]
self.effects.extend(effect_args)
self.effects_log.append('downsample')
return self | Downsample the signal by an integer factor. Only the first out of
each factor samples is retained, the others are discarded.
No decimation filter is applied. If the input is not a properly
bandlimited baseband signal, aliasing will occur. This may be desirable
e.g., for frequency transl... |
def deploy(file, manager_path, check, dry_run):
config = read_deployment_config(file)
manager = DeployManager(config=config,
filepath=file,
manager_path=manager_path,
dry_run=dry_run)
exception = None
if check:
m... | Deploy polyaxon. |
def iso_reference_valid_char(c, raise_error=True):
if c in ISO_REFERENCE_VALID:
return True
if raise_error:
raise ValueError("'%s' is not in '%s'" % (c, ISO_REFERENCE_VALID))
return False | Helper to make sure the given character is valid for a reference number |
def load(self, clear=False):
if clear:
self.settings = {}
defer = []
for conf in pkg_resources.iter_entry_points('pyconfig'):
if conf.attrs:
raise RuntimeError("config must be a module")
mod_name = conf.module_name
base_name = conf.... | Loads all the config plugin modules to build a working configuration.
If there is a ``localconfig`` module on the python path, it will be
loaded last, overriding other settings.
:param bool clear: Clear out the previous settings before loading |
def delete(self, pk=None, fail_on_missing=False, **kwargs):
self._separate(kwargs)
return super(Resource, self).\
delete(pk=pk, fail_on_missing=fail_on_missing, **kwargs) | Remove the given notification template.
Note here configuration-related fields like
'notification_configuration' and 'channels' will not be
used even provided.
If `fail_on_missing` is True, then the object's not being found is
considered a failure; otherwise, a success with no ... |
def get(self, q=None, page=None):
etag = generate_etag(current_ext.content_version.encode('utf8'))
self.check_etag(etag, weak=True)
res = jsonify(current_ext.styles)
res.set_etag(etag)
return res | Get styles. |
def init_raspbian_disk(self, yes=0):
self.assume_localhost()
yes = int(yes)
device_question = 'SD card present at %s? ' % self.env.sd_device
if not yes and not raw_input(device_question).lower().startswith('y'):
return
r = self.local_renderer
r.local_if_missin... | Downloads the latest Raspbian image and writes it to a microSD card.
Based on the instructions from:
https://www.raspberrypi.org/documentation/installation/installing-images/linux.md |
def count_seeds(usort):
with open(usort, 'r') as insort:
cmd1 = ["cut", "-f", "2"]
cmd2 = ["uniq"]
cmd3 = ["wc"]
proc1 = sps.Popen(cmd1, stdin=insort, stdout=sps.PIPE, close_fds=True)
proc2 = sps.Popen(cmd2, stdin=proc1.stdout, stdout=sps.PIPE, close_fds=True)
proc3 =... | uses bash commands to quickly count N seeds from utemp file |
def example(self, relative_path):
example_path = os.path.join("examples", relative_path)
return self.relative_file(__file__, example_path) | Load an example from the knitting pattern examples.
:param str relative_path: the path to load
:return: the result of the processing
You can use :meth:`knittingpattern.Loader.PathLoader.examples`
to find out the paths of all examples. |
def add_child(self, child):
if not isinstance(child, ChildMixin):
raise TypeError(
'Requires instance of TreeElement. '
'Got {}'.format(type(child))
)
child.parent = self
self._children.append(child) | If the given object is an instance of Child add it to self and
register self as a parent. |
def rootdir(self, username, reponame, create=True):
path = os.path.join(self.workspace,
'datasets',
username,
reponame)
if create:
try:
os.makedirs(path)
except:
p... | Working directory for the repo |
def configure_camera(self):
r = self.local_renderer
if self.env.camera_enabled:
r.pc('Enabling camera.')
r.enable_attr(
filename='/boot/config.txt',
key='start_x',
value=1,
use_sudo=True,
)
r.... | Enables access to the camera.
http://raspberrypi.stackexchange.com/questions/14229/how-can-i-enable-the-camera-without-using-raspi-config
https://mike632t.wordpress.com/2014/06/26/raspberry-pi-camera-setup/
Afterwards, test with:
/opt/vc/bin/raspistill --nopreview --output... |
def copy(self, pk=None, new_name=None, **kwargs):
orig = self.read(pk, fail_on_no_results=True, fail_on_multiple_results=True)
orig = orig['results'][0]
self._pop_none(kwargs)
newresource = copy(orig)
newresource.pop('id')
basename = newresource['name'].split('@', 1)[0].s... | Copy an object.
Only the ID is used for the lookup. All provided fields are used to override the old data from the
copied resource.
=====API DOCS=====
Copy an object.
:param pk: Primary key of the resource object to be copied
:param new_name: The new name to give the r... |
def SPI(ledtype=None, num=0, **kwargs):
from ...project.types.ledtype import make
if ledtype is None:
raise ValueError('Must provide ledtype value!')
ledtype = make(ledtype)
if num == 0:
raise ValueError('Must provide num value >0!')
if ledtype not in SPI_DRIVERS.keys():
rais... | Wrapper function for using SPI device drivers on systems like the
Raspberry Pi and BeagleBone. This allows using any of the SPI drivers
from a single entry point instead importing the driver for a specific
LED type.
Provides the same parameters of
:py:class:`bibliopixel.drivers.SPI.SPIBase` as
... |
def callback(self, event):
if event.mask == 0x00000008:
if event.name.endswith('.json'):
print_success("Ldapdomaindump file found")
if event.name in ['domain_groups.json', 'domain_users.json']:
if event.name == 'domain_groups.json':
... | Function that gets called on each event from pyinotify. |
def streaming_to_client():
for handler in client_logger.handlers:
if hasattr(handler, 'append_newlines'):
break
else:
handler = None
old_propagate = client_logger.propagate
client_logger.propagate = False
if handler is not None:
old_append = handler.append_newline... | Puts the client logger into streaming mode, which sends
unbuffered input through to the socket one character at a time.
We also disable propagation so the root logger does not
receive many one-byte emissions. This context handler
was originally created for streaming Compose up's
terminal output thro... |
def add_annotation_date(self, doc, annotation_date):
if len(doc.annotations) != 0:
if not self.annotation_date_set:
self.annotation_date_set = True
date = utils.datetime_from_iso_format(annotation_date)
if date is not None:
doc.anno... | Sets the annotation date. Raises CardinalityError if
already set. OrderError if no annotator defined before.
Raises SPDXValueError if invalid value. |
def plot_border(mask, should_plot_border, units, kpc_per_arcsec, pointsize, zoom_offset_pixels):
if should_plot_border and mask is not None:
plt.gca()
border_pixels = mask.masked_grid_index_to_pixel[mask.border_pixels]
if zoom_offset_pixels is not None:
border_pixels -= zoom_offs... | Plot the borders of the mask or the array on the figure.
Parameters
-----------t.
mask : ndarray of data.array.mask.Mask
The mask applied to the array, the edge of which is plotted as a set of points over the plotted array.
should_plot_border : bool
If a mask is supplied, its borders pi... |
def add_annotation_comment(self, doc, comment):
if len(doc.annotations) != 0:
if not self.annotation_comment_set:
self.annotation_comment_set = True
doc.annotations[-1].comment = comment
return True
else:
raise CardinalityEr... | Sets the annotation comment. Raises CardinalityError if
already set. OrderError if no annotator defined before. |
def copy(self, space=None, name=None):
return Cells(space=space, name=name, formula=self.formula) | Make a copy of itself and return it. |
def groupify(function):
@wraps(function)
def wrapper(paths, *args, **kwargs):
groups = {}
for path in paths:
key = function(path, *args, **kwargs)
if key is not None:
groups.setdefault(key, set()).add(path)
return groups
return wrapper | Decorator to convert a function which takes a single value and returns
a key into one which takes a list of values and returns a dict of key-group
mappings.
:param function: A function which takes a value and returns a hash key.
:type function: ``function(value) -> key``
:rtype:
.. parsed-... |
def update(self):
bulbs = self._hub.get_lights()
if not bulbs:
_LOGGER.debug("%s is offline, send command failed", self._zid)
self._online = False | Update light objects to their current values. |
def query_by_user(cls, user, with_pending=False, eager=False):
q1 = Group.query.join(Membership).filter_by(user_id=user.get_id())
if not with_pending:
q1 = q1.filter_by(state=MembershipState.ACTIVE)
if eager:
q1 = q1.options(joinedload(Group.members))
q2 = Group.q... | Query group by user.
:param user: User object.
:param bool with_pending: Whether to include pending users.
:param bool eager: Eagerly fetch group members.
:returns: Query object. |
def contact(request):
form = ContactForm(request.POST or None)
if form.is_valid():
subject = form.cleaned_data['subject']
message = form.cleaned_data['message']
sender = form.cleaned_data['sender']
cc_myself = form.cleaned_data['cc_myself']
recipients = settings.CONTACTFO... | Displays the contact form and sends the email |
def run_model(model, returns_train, returns_test=None,
bmark=None, samples=500, ppc=False, progressbar=True):
if model == 'alpha_beta':
model, trace = model_returns_t_alpha_beta(returns_train,
bmark, samples,
... | Run one of the Bayesian models.
Parameters
----------
model : {'alpha_beta', 't', 'normal', 'best'}
Which model to run
returns_train : pd.Series
Timeseries of simple returns
returns_test : pd.Series (optional)
Out-of-sample returns. Datetimes in returns_test will be added to... |
def KeyboardInput(wVk: int, wScan: int, dwFlags: int = KeyboardEventFlag.KeyDown, time_: int = 0) -> INPUT:
return _CreateInput(KEYBDINPUT(wVk, wScan, dwFlags, time_, None)) | Create Win32 struct `KEYBDINPUT` for `SendInput`. |
def get_annotation_type(self, r_term):
for _, _, typ in self.graph.triples((
r_term, self.spdx_namespace['annotationType'], None)):
if typ is not None:
return typ
else:
self.error = True
msg = 'Annotation must have exactly o... | Returns annotation type or None if found none or more than one.
Reports errors on failure. |
def is_repository_file(self, relativePath):
relativePath = self.to_repo_relative_path(path=relativePath, split=False)
if relativePath == '':
return False, False, False, False
relaDir, name = os.path.split(relativePath)
fileOnDisk = os.path.isfile(os.path.join(self.__path,... | Check whether a given relative path is a repository file path
:Parameters:
#. relativePath (string): File relative path
:Returns:
#. isRepoFile (boolean): Whether file is a repository file.
#. isFileOnDisk (boolean): Whether file is found on disk.
#. isF... |
def find(self, s):
pSet = [s]
parent = self._leader[s]
while parent != self._leader[parent]:
pSet.append(parent)
parent = self._leader[parent]
if len(pSet) > 1:
for a in pSet:
self._leader[a] = parent
return parent | Locates the leader of the set to which the element ``s`` belongs.
Parameters
----------
s : object
An object that the ``UnionFind`` contains.
Returns
-------
object
The leader of the set that contains ``s``. |
def fit(self, trX, trY, batch_size=64, n_epochs=1, len_filter=LenFilter(), snapshot_freq=1, path=None):
if len_filter is not None:
trX, trY = len_filter.filter(trX, trY)
trY = standardize_targets(trY, cost=self.cost)
n = 0.
t = time()
costs = []
for e in range... | Train model on given training examples and return the list of costs after each minibatch is processed.
Args:
trX (list) -- Inputs
trY (list) -- Outputs
batch_size (int, optional) -- number of examples in a minibatch (default 64)
n_epochs (int, optional) -- number of epo... |
def trans(ele, standard=False):
try:
node = globals().get(ele['type'])
if not node:
raise NotImplementedError('%s is not supported!' % ele['type'])
if standard:
node = node.__dict__[
'standard'] if 'standard' in node.__dict__ else node
return n... | Translates esprima syntax tree to python by delegating to appropriate translating node |
def filter_excluded_tags(self, all_tags):
filtered_tags = copy.deepcopy(all_tags)
if self.options.exclude_tags:
filtered_tags = self.apply_exclude_tags(filtered_tags)
if self.options.exclude_tags_regex:
filtered_tags = self.apply_exclude_tags_regex(filtered_tags)
... | Filter tags according exclude_tags and exclude_tags_regex option.
:param list(dict) all_tags: Pre-filtered tags.
:rtype: list(dict)
:return: Filtered tags. |
def _generate(self):
candidates = np.array(range(self._n), np.uint32)
for i in xrange(self._num):
self._random.shuffle(candidates)
pattern = candidates[0:self._getW()]
self._patterns[i] = set(pattern) | Generates set of random patterns. |
def filter_queryset(self, request, queryset, view):
if request.user.is_staff:
email = request.query_params.get('email', None)
username = request.query_params.get('username', None)
query_parameters = {}
if email:
query_parameters.update(email=email)... | Apply incoming filters only if user is staff. If not, only filter by user's ID. |
def messages(self):
method = 'GET'
url = 'activeMessage'
rc = self.__request__(method, url)
return rc['activeMessage'] | Return active messages. |
def ProcessFileData(filename, file_extension, lines, error,
extra_check_functions=None):
lines = (['// marker so line numbers and indices both start at 1'] + lines +
['// marker so line numbers end in a known way'])
include_state = _IncludeState()
function_state = _FunctionState()
... | Performs lint checks and reports any errors to the given error function.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
lines: An array of strings, each representing a line of the file, with the
last element being emp... |
def legal_status(self):
r
if self.__legal_status:
return self.__legal_status
else:
self.__legal_status = legal_status(self.CAS, Method='COMBINED')
return self.__legal_status | r'''Dictionary of legal status indicators for the chemical.
Examples
--------
>>> pprint(Chemical('benzene').legal_status)
{'DSL': 'LISTED',
'EINECS': 'LISTED',
'NLP': 'UNLISTED',
'SPIN': 'LISTED',
'TSCA': 'LISTED'} |
def preprocess_dict(d):
    """Preprocess a dict to be used as environment variables.

    :param d: dict to be processed
    :raises KeyError: if a value's type has no registered preprocessor
    :return: new dict with each value run through its type's preprocessor
    """
    processed = {}
    for key, value in d.items():
        value_type = type(value)
        if value_type not in PREPROCESSORS:
            raise KeyError('Invalid type in dict: {}'.format(value_type))
        processed[key] = PREPROCESSORS[value_type](value)
    return processed
def permalink(self, repo, path):
if not os.path.exists(path):
return (None, None)
cwd = os.getcwd()
if os.path.isfile(path):
os.chdir(os.path.dirname(path))
rootdir = self._run(["rev-parse", "--show-toplevel"])
if "fatal" in rootdir:
return (... | Get the permalink to command that generated the dataset |
def _learnBacktrack(self):
numPrevPatterns = len(self._prevLrnPatterns) - 1
if numPrevPatterns <= 0:
if self.verbosity >= 3:
print "lrnBacktrack: No available history to backtrack from"
return False
badPatterns = []
inSequence = False
for startOffset in range(0, numPrevPatterns):... | This "backtracks" our learning state, trying to see if we can lock onto
the current set of inputs by assuming the sequence started up to N steps
ago on start cells.
This will adjust @ref lrnActiveState['t'] if it does manage to lock on to a
sequence that started earlier.
:returns: >0 if w... |
def annotate_metadata_action(repo):
package = repo.package
print("Including history of actions")
with cd(repo.rootdir):
filename = ".dgit/log.json"
if os.path.exists(filename):
history = open(filename).readlines()
actions = []
fo... | Update metadata with the action history |
def dispatch(self, message, source = None):
msgtype = ""
try:
if type(message[0]) == str:
address = message[0]
self.callbacks[address](message)
elif type(message[0]) == list:
for msg in message:
self.dispatch(msg... | Sends decoded OSC data to an appropriate calback |
def all(self):
    """Get all social networks profiles.

    Fetches every profile from the API and appends each one, wrapped in a
    ``Profile``, to this collection.

    :return: self, for chaining
    """
    raw_profiles = self.api.get(url=PATHS['GET_PROFILES'])
    for raw in raw_profiles:
        self.append(Profile(self.api, raw))
    return self
def duplicated_rows(df, col_name):
    """Return a DataFrame with the duplicated values of the column
    `col_name` in `df`.

    Null values in `col_name` are never reported as duplicates.
    """
    _check_cols(df, [col_name])
    is_dup = df.duplicated(subset=[col_name]) & pd.notnull(df[col_name])
    return df[is_dup]
def validate_params(required, optional, params):
missing_fields = [x for x in required if x not in params]
if missing_fields:
field_strings = ", ".join(missing_fields)
raise Exception("Missing fields: %s" % field_strings)
disallowed_fields = [x for x in params if x not in optional and x not ... | Helps us validate the parameters for the request
:param required: a list of strings of fields required for the api request
:param optional: a list of strings of fields permitted for the api request
:param params: a dict, the key-value store which we really only care about
the keys, which tell us what the user is supplying for the
... |
def reload_cache_config(self, call_params):
    """REST Reload Plivo Cache Config helper.

    POSTs ``call_params`` to ``/<api_version>/ReloadCacheConfig/``.

    :param call_params: dict of parameters forwarded to the API call
    :return: whatever ``self.request`` returns for the POST
    """
    endpoint = '/{0}/ReloadCacheConfig/'.format(self.api_version)
    return self.request(endpoint, 'POST', call_params)
def save_stream(self, key, binary=False):
    """Return a managed file-like object into which the calling code can
    write arbitrary data; when the generator is resumed, everything
    written is persisted under `key` via ``self.save_value``.

    :param key: storage key for the collected value
    :param binary: when True yield a BytesIO (bytes), else a StringIO (text)
    :return: a managed stream-like object
    """
    buffer = io.BytesIO() if binary else io.StringIO()
    yield buffer
    self.save_value(key, buffer.getvalue())
def pickle_save(thing, fname):
    """Save something to a pickle file using the highest protocol.

    :param thing: any picklable object
    :param fname: destination file path
    :return: `thing`, to allow chaining
    """
    # FIX: use a context manager so the file handle is closed even if
    # pickle.dump raises (the original opened the file inline and leaked
    # the handle).
    with open(fname, 'wb') as handle:
        pickle.dump(thing, handle, pickle.HIGHEST_PROTOCOL)
    return thing
def complex_has_member(graph: BELGraph, complex_node: ComplexAbundance, member_node: BaseEntity) -> bool:
    """Does the given complex contain the member?

    Scans the complex node's outgoing edges and reports True on the first
    HAS_COMPONENT edge whose target equals ``member_node``.
    """
    for _, target, data in graph.out_edges(complex_node, data=True):
        if data[RELATION] == HAS_COMPONENT and target == member_node:
            return True
    return False
def flip_axis_multi(x, axis, is_random=False):
if is_random:
factor = np.random.uniform(-1, 1)
if factor > 0:
results = []
for data in x:
data = np.asarray(data).swapaxes(axis, 0)
data = data[::-1, ...]
data = data.swapaxes(0, a... | Flip the axises of multiple images together, such as flip left and right, up and down, randomly or non-randomly,
Parameters
-----------
x : list of numpy.array
List of images with dimension of [n_images, row, col, channel] (default).
others : args
See ``tl.prepro.flip_axis``.
Retur... |
def set_file_license_comment(self, doc, text):
if self.has_package(doc) and self.has_file(doc):
if not self.file_license_comment_set:
self.file_license_comment_set = True
self.file(doc).license_comment = text
return True
else:
... | Raises OrderError if no package or file defined.
Raises CardinalityError if more than one per file. |
def convert_matmul(params, w_name, scope_name, inputs, layers, weights, names):
print('Converting matmul ...')
if names == 'short':
tf_name = 'MMUL' + random_string(4)
elif names == 'keep':
tf_name = w_name
else:
tf_name = w_name + str(random.random())
if len(inputs) == 1:
... | Convert matmul layer.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for kera... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.