code stringlengths 59 4.4k | docstring stringlengths 5 7.69k |
|---|---|
def bubble_at_P(P, zs, vapor_pressure_eqns, fugacities=None, gammas=None):
def bubble_P_error(T):
Psats = [VP(T) for VP in vapor_pressure_eqns]
Pcalc = bubble_at_T(zs, Psats, fugacities, gammas)
return P - Pcalc
T_bubble = newton(bubble_P_error, 300)
return T_bubble | Calculates bubble point for a given pressure
Parameters
----------
P : float
Pressure, [Pa]
zs : list[float]
Overall mole fractions of all species, [-]
vapor_pressure_eqns : list[functions]
Temperature-dependent function for each species; returns Psat, [Pa]
fugacities : l... |
def rst2md(text):
top_heading = re.compile(r'^=+$\s^([\w\s-]+)^=+$', flags=re.M)
text = re.sub(top_heading, r'
math_eq = re.compile(r'^\.\. math::((?:.+)?(?:\n+^ .+)*)', flags=re.M)
text = re.sub(math_eq,
lambda match: r'$${0}$$'.format(match.group(1).strip()),
text)... | Converts the RST text from the examples docstrings and comments
into markdown text for the IPython notebooks |
def ftp_folder_match(ftp,localFolder,deleteStuff=True):
for fname in glob.glob(localFolder+"/*.*"):
ftp_upload(ftp,fname)
return | upload everything from localFolder into the current FTP folder. |
def reboot_server(self, datacenter_id, server_id):
response = self._perform_request(
url='/datacenters/%s/servers/%s/reboot' % (
datacenter_id,
server_id),
method='POST-ACTION')
return response | Reboots the server.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param server_id: The unique ID of the server.
:type server_id: ``str`` |
def hsv_to_rgb(hsv):
h, s, v = hsv
c = v * s
h /= 60
x = c * (1 - abs((h % 2) - 1))
m = v - c
if h < 1:
res = (c, x, 0)
elif h < 2:
res = (x, c, 0)
elif h < 3:
res = (0, c, x)
elif h < 4:
res = (0, x, c)
elif h < 5:
res = (x, 0, c)
elif... | Convert an HSV color representation to an RGB color representation.
(h, s, v) :: h -> [0, 360)
s -> [0, 1]
v -> [0, 1]
:param hsv: A tuple of three numeric values corresponding to the hue, saturation, and value.
:return: RGB representation of the input HSV value.
:rty... |
def mget(self, keys, *args):
args = list_or_args(keys, args)
server_keys = {}
ret_dict = {}
for key in args:
server_name = self.get_server_name(key)
server_keys[server_name] = server_keys.get(server_name, [])
server_keys[server_name].append(key)
... | Returns a list of values ordered identically to ``keys`` |
def add_variability_to_fakelc_collection(simbasedir,
override_paramdists=None,
overwrite_existingvar=False):
infof = os.path.join(simbasedir,'fakelcs-info.pkl')
with open(infof, 'rb') as infd:
lcinfo = pickle.load(infd)
... | This adds variability and noise to all fake LCs in `simbasedir`.
If an object is marked as variable in the `fakelcs-info.pkl` file in
`simbasedir`, a variable signal will be added to its light curve based on
its selected type, default period and amplitude distribution, the
appropriate params, etc. the ... |
def del_attr(self, name):
if name in self.namespace:
if name in self.cells:
self.del_cells(name)
elif name in self.spaces:
self.del_space(name)
elif name in self.refs:
self.del_ref(name)
else:
raise R... | Implementation of attribute deletion
``del space.name`` by user script
Called from ``StaticSpace.__delattr__`` |
def sanitize_filename(filename):
token = generate_drop_id()
name, extension = splitext(filename)
if extension:
return '%s%s' % (token, extension)
else:
return token | preserve the file ending, but replace the name with a random token |
def _getEphemeralMembers(self):
e = BacktrackingTM._getEphemeralMembers(self)
if self.makeCells4Ephemeral:
e.extend(['cells4'])
return e | List of our member variables that we don't need to be saved |
def match(self, subsetLines, offsetOfSubset, fileName):
for (offset,l) in enumerate(subsetLines):
column = l.find(self.literal)
if column != -1:
truePosition = offset + offsetOfSubset
_logger.debug('Found match on line {}, col {}'.format(str(truePosition+ ... | Search through lines for match.
Raise an Exception if it fails to match
If the match is successful, return the position where the match was found |
def detect_actual_closed_dates(self, issues, kind):
if self.options.verbose:
print("Fetching closed dates for {} {}...".format(
len(issues), kind)
)
all_issues = copy.deepcopy(issues)
for issue in all_issues:
if self.options.verbose > 2:
... | Find correct closed dates, if issues was closed by commits.
:param list issues: issues to check
:param str kind: either "issues" or "pull requests"
:rtype: list
:return: issues with updated closed dates |
def niftilist_mask_to_array(img_filelist, mask_file=None, outdtype=None):
img = check_img(img_filelist[0])
if not outdtype:
outdtype = img.dtype
mask_data, _ = load_mask_data(mask_file)
indices = np.where (mask_data)
mask = check_img(mask_file)
outmat = np.zeros((len(img_fileli... | From the list of absolute paths to nifti files, creates a Numpy array
with the masked data.
Parameters
----------
img_filelist: list of str
List of absolute file paths to nifti files. All nifti files must have
the same shape.
mask_file: str
Path to a Nifti mask file.
... |
def _post_connect(self):
if not self.initiator:
if "plain" in self.auth_methods or "digest" in self.auth_methods:
self.set_iq_get_handler("query","jabber:iq:auth",
self.auth_in_stage1)
self.set_iq_set_handler("query","jabber:iq:auth",
... | Initialize authentication when the connection is established
and we are the initiator. |
def check_sla(self, sla, diff_metric):
try:
if sla.display is '%':
diff_val = float(diff_metric['percent_diff'])
else:
diff_val = float(diff_metric['absolute_diff'])
except ValueError:
return False
if not (sla.check_sla_passed(diff_val)):
self.sla_failures += 1
... | Check whether the SLA has passed or failed |
def set(verbose,
host,
http_port,
ws_port,
use_https,
verify_ssl):
_config = GlobalConfigManager.get_config_or_default()
if verbose is not None:
_config.verbose = verbose
if host is not None:
_config.host = host
if http_port is not None:
_c... | Set the global config values.
Example:
\b
```bash
$ polyaxon config set --host=localhost http_port=80
``` |
def var(nums, mean_func=amean, ddof=0):
r
x_bar = mean_func(nums)
return sum((x - x_bar) ** 2 for x in nums) / (len(nums) - ddof) | r"""Calculate the variance.
The variance (:math:`\sigma^2`) of a series of numbers (:math:`x_i`) with
mean :math:`\mu` and population :math:`N` is:
:math:`\sigma^2 = \frac{1}{N}\sum_{i=1}^{N}(x_i-\mu)^2`.
Cf. https://en.wikipedia.org/wiki/Variance
Parameters
----------
nums : list
... |
def adjacency2graph(adjacency, edge_type=None, adjust=1, **kwargs):
if isinstance(adjacency, np.ndarray):
adjacency = _matrix2dict(adjacency)
elif isinstance(adjacency, dict):
adjacency = _dict2dict(adjacency)
else:
msg = ("If the adjacency parameter is supplied it must be a "
... | Takes an adjacency list, dict, or matrix and returns a graph.
The purpose of this function is take an adjacency list (or matrix)
and return a :class:`.QueueNetworkDiGraph` that can be used with a
:class:`.QueueNetwork` instance. The Graph returned has the
``edge_type`` edge property set for each edge. ... |
def _parse_genotype(self, vcf_fields):
format_col = vcf_fields[8].split(':')
genome_data = vcf_fields[9].split(':')
try:
gt_idx = format_col.index('GT')
except ValueError:
return []
return [int(x) for x in re.split(r'[\|/]', genome_data[gt_idx]) if
... | Parse genotype from VCF line data |
def create_projection(self, fov: float = 75.0, near: float = 1.0, far: float = 100.0, aspect_ratio: float = None):
return matrix44.create_perspective_projection_matrix(
fov,
aspect_ratio or self.window.aspect_ratio,
near,
far,
dtype='f4',
) | Create a projection matrix with the following parameters.
When ``aspect_ratio`` is not provided the configured aspect
ratio for the window will be used.
Args:
fov (float): Field of view (float)
near (float): Camera near value
far (float): Camera far value
... |
def send(self, message, json=False, callback=None):
pkt = dict(type="message", data=message, endpoint=self.ns_name)
if json:
pkt['type'] = "json"
if callback:
pkt['ack'] = True
pkt['id'] = msgid = self.socket._get_next_msgid()
self.socket._save_ack... | Use send to send a simple string message.
If ``json`` is True, the message will be encoded as a JSON object
on the wire, and decoded on the other side.
This is mostly for backwards compatibility. ``emit()`` is more fun.
:param callback: This is a callback function that will be
... |
def hubspot(parser, token):
bits = token.split_contents()
if len(bits) > 1:
raise TemplateSyntaxError("'%s' takes no arguments" % bits[0])
return HubSpotNode() | HubSpot tracking template tag.
Renders Javascript code to track page visits. You must supply
your portal ID (as a string) in the ``HUBSPOT_PORTAL_ID`` setting. |
def zoom_region(self):
where = np.array(np.where(np.invert(self.astype('bool'))))
y0, x0 = np.amin(where, axis=1)
y1, x1 = np.amax(where, axis=1)
return [y0, y1+1, x0, x1+1] | The zoomed rectangular region corresponding to the square encompassing all unmasked values.
This is used to zoom in on the region of an image that is used in an analysis for visualization. |
def record_schema(self):
schema_path = current_jsonschemas.url_to_path(self['$schema'])
schema_prefix = current_app.config['DEPOSIT_JSONSCHEMAS_PREFIX']
if schema_path and schema_path.startswith(schema_prefix):
return current_jsonschemas.path_to_url(
schema_path[len(s... | Convert deposit schema to a valid record schema. |
def _post(self, url, data, scope):
self._create_session(scope)
response = self.session.post(url, data=data)
return response.status_code, response.text | Make a POST request using the session object to a Degreed endpoint.
Args:
url (str): The url to send a POST request to.
data (str): The json encoded payload to POST.
scope (str): Must be one of the scopes Degreed expects:
- `CONTENT_PROVIDER_SCOPE`
... |
def apply_mask(self, mask_img):
self.set_mask(mask_img)
return self.get_data(masked=True, smoothed=True, safe_copy=True) | First set_mask and the get_masked_data.
Parameters
----------
mask_img: nifti-like image, NeuroImage or str
3D mask array: True where a voxel should be used.
Can either be:
- a file path to a Nifti image
- any object with get_data() and get_affin... |
def scaper_to_tag(annotation):
annotation.namespace = 'tag_open'
data = annotation.pop_data()
for obs in data:
annotation.append(time=obs.time, duration=obs.duration,
confidence=obs.confidence, value=obs.value['label'])
return annotation | Convert scaper annotations to tag_open |
def padded_blurred_image_2d_from_padded_image_1d_and_psf(self, padded_image_1d, psf):
padded_model_image_1d = self.convolve_array_1d_with_psf(padded_array_1d=padded_image_1d, psf=psf)
return self.scaled_array_2d_from_array_1d(array_1d=padded_model_image_1d) | Compute a 2D padded blurred image from a 1D padded image.
Parameters
----------
padded_image_1d : ndarray
A 1D unmasked image which is blurred with the PSF.
psf : ndarray
An array describing the PSF kernel of the image. |
def clear_values(self, red=0.0, green=0.0, blue=0.0, alpha=0.0, depth=1.0):
self.clear_color = (red, green, blue, alpha)
self.clear_depth = depth | Sets the clear values for the window buffer.
Args:
red (float): red component
green (float): green component
blue (float): blue component
alpha (float): alpha component
depth (float): depth value |
def build(ctx, project, build):
ctx.obj = ctx.obj or {}
ctx.obj['project'] = project
ctx.obj['build'] = build | Commands for build jobs. |
def hierarchy(annotation, **kwargs):
htimes, hlabels = hierarchy_flatten(annotation)
htimes = [np.asarray(_) for _ in htimes]
return mir_eval.display.hierarchy(htimes, hlabels, **kwargs) | Plotting wrapper for hierarchical segmentations |
def upload_stream(self, destination, *, offset=0):
return self.get_stream(
"STOR " + str(destination),
"1xx",
offset=offset,
) | Create stream for write data to `destination` file.
:param destination: destination path of file on server side
:type destination: :py:class:`str` or :py:class:`pathlib.PurePosixPath`
:param offset: byte offset for stream start position
:type offset: :py:class:`int`
:rtype: :p... |
async def set_tz(self):
settings = await self.api.account.settings.get()
tz = settings.time_zone.tzinfo_name
os.environ['TZ'] = tz
time.tzset() | set the environment timezone to the timezone
set in your twitter settings |
def execute_async_script(self, script, *args):
return self._execute(Command.EXECUTE_ASYNC_SCRIPT, {
'script': script,
'args': list(args)}) | Execute JavaScript Asynchronously in current context.
Support:
Web(WebView)
Args:
script: The JavaScript to execute.
*args: Arguments for your JavaScript.
Returns:
Returns the return value of the function. |
def fetch_by_name(self, name):
service = self.collection.find_one({'name': name})
if not service:
raise ServiceNotFound
return Service(service) | Gets service for given ``name`` from mongodb storage. |
def MOVBE(cpu, dest, src):
size = dest.size
arg0 = dest.read()
temp = 0
for pos in range(0, size, 8):
temp = (temp << 8) | (arg0 & 0xff)
arg0 = arg0 >> 8
dest.write(arg0) | Moves data after swapping bytes.
Performs a byte swap operation on the data copied from the second operand (source operand) and store the result
in the first operand (destination operand). The source operand can be a general-purpose register, or memory location; the destination register can be a genera... |
def _get_job_metadata(provider, user_id, job_name, script, task_ids,
user_project, unique_job_id):
create_time = dsub_util.replace_timezone(datetime.datetime.now(), tzlocal())
user_id = user_id or dsub_util.get_os_user()
job_metadata = provider.prepare_job_metadata(script.name, job_name, use... | Allow provider to extract job-specific metadata from command-line args.
Args:
provider: job service provider
user_id: user submitting the job
job_name: name for the job
script: the script to run
task_ids: a set of the task-ids for all tasks in the job
user_project: name of the project to be b... |
def contains(self, *items):
if len(items) == 0:
raise ValueError('one or more args must be given')
elif len(items) == 1:
if items[0] not in self.val:
if self._check_dict_like(self.val, return_as_bool=True):
self._err('Expected <%s> to contain k... | Asserts that val contains the given item or items. |
def get_key(self, *args, **kwargs):
if kwargs.pop('force', None):
headers = kwargs.get('headers', {})
headers['force'] = True
kwargs['headers'] = headers
return super(Bucket, self).get_key(*args, **kwargs) | Return the key from MimicDB.
:param boolean force: If true, API call is forced to S3 |
def save_validation_log(self, **kwargs):
self._fill_project_info(kwargs)
kwargs.update({'time': datetime.utcnow()})
_result = self.db.ValidLog.insert_one(kwargs)
_log = self._print_dict(kwargs)
logging.info("[Database] valid log: " + _log) | Saves the validation log, timestamp will be added automatically.
Parameters
-----------
kwargs : logging information
Events, such as accuracy, loss, step number and etc.
Examples
---------
>>> db.save_validation_log(accuracy=0.33, loss=0.98) |
def autocommit(self):
if len(self.cursors.keys()) == 0:
self.connection.autocommit = True
else:
raise AttributeError('database cursors are already active, '
'cannot switch to autocommit now') | This sets the database connection to autocommit. Must be called before
any cursors have been instantiated. |
def _get_node_names(h5file, h5path='/', node_type=h5py.Dataset):
if isinstance(h5file, str):
_h5file = get_h5file(h5file, mode='r')
else:
_h5file = h5file
if not h5path.startswith('/'):
h5path = '/' + h5path
names = []
try:
h5group = _h5file.require_group(h5path)
... | Return the node of type node_type names within h5path of h5file.
Parameters
----------
h5file: h5py.File
HDF5 file object
h5path: str
HDF5 group path to get the group names from
node_type: h5py object type
HDF5 object type
Returns
-------
names: list of str
... |
def _update_optional(cobra_object, new_dict, optional_attribute_dict,
ordered_keys):
for key in ordered_keys:
default = optional_attribute_dict[key]
value = getattr(cobra_object, key)
if value is None or value == default:
continue
new_dict[key] = _fix... | update new_dict with optional attributes from cobra_object |
def ghmean(nums):
m_g = gmean(nums)
m_h = hmean(nums)
if math.isnan(m_g) or math.isnan(m_h):
return float('nan')
while round(m_h, 12) != round(m_g, 12):
m_g, m_h = (m_g * m_h) ** (1 / 2), (2 * m_g * m_h) / (m_g + m_h)
return m_g | Return geometric-harmonic mean.
Iterates between geometric & harmonic means until they converge to
a single value (rounded to 12 digits).
Cf. https://en.wikipedia.org/wiki/Geometric-harmonic_mean
Parameters
----------
nums : list
A series of numbers
Returns
-------
float
... |
def post_deploy(self):
for service in self.genv.services:
service = service.strip().upper()
self.vprint('post_deploy:', service)
funcs = common.service_post_deployers.get(service)
if funcs:
self.vprint('Running post-deployments for service %s...' %... | Runs methods services have requested be run before after deployment. |
def draw_cornu_bezier(x0, y0, t0, t1, s0, c0, flip, cs, ss, cmd, scale, rot):
s = None
for j in range(0, 5):
t = j * .2
t2 = t+ .2
curvetime = t0 + t * (t1 - t0)
curvetime2 = t0 + t2 * (t1 - t0)
Dt = (curvetime2 - curvetime) * scale
if not s:
s, c = ev... | Mark Meyer's code draws elegant CURVETO segments. |
def _kwargs(self):
return dict(color=self.color, velocity=self.velocity, colors=self.colors) | Keyword arguments for recreating the Shape from the vertices. |
def autocorr_magseries(times, mags, errs,
maxlags=1000,
func=_autocorr_func3,
fillgaps=0.0,
filterwindow=11,
forcetimebin=None,
sigclip=3.0,
magsarefluxes=Fals... | This calculates the ACF of a light curve.
This will pre-process the light curve to fill in all the gaps and normalize
everything to zero. If `fillgaps = 'noiselevel'`, fills the gaps with the
noise level obtained via the procedure above. If `fillgaps = 'nan'`, fills
the gaps with `np.nan`.
Paramet... |
def outfile(self, p):
if self.outdir is not None:
return os.path.join(self.outdir, os.path.basename(p))
else:
return p | Path for an output file.
If :attr:`outdir` is set then the path is
``outdir/basename(p)`` else just ``p`` |
def replicated_dataset(dataset, weights, n=None):
"Copy dataset, replicating each example in proportion to its weight."
n = n or len(dataset.examples)
result = copy.copy(dataset)
result.examples = weighted_replicate(dataset.examples, weights, n)
return result | Copy dataset, replicating each example in proportion to its weight. |
def associate_notification_template(self, workflow,
notification_template, status):
return self._assoc('notification_templates_%s' % status,
workflow, notification_template) | Associate a notification template from this workflow.
=====API DOCS=====
Associate a notification template with this workflow job template.
:param workflow: The workflow job template to associate to.
:type workflow: str
:param notification_template: The notification template to... |
def _combineargs(self, *args, **kwargs):
d = {arg: True for arg in args}
d.update(kwargs)
return d | Add switches as 'options' with value True to the options dict. |
def last_arg_decorator(func):
@wraps(func)
def decorator(*args, **kwargs):
if signature_matches(func, args, kwargs):
return func(*args, **kwargs)
else:
return lambda last: func(*(args + (last,)), **kwargs)
return decorator | Allows a function to be used as either a decorator with args, or called as
a normal function.
@last_arg_decorator
def register_a_thing(foo, func, bar=True):
..
# Called as a decorator
@register_a_thing("abc", bar=False)
def my_func():
...
# Called as a normal function call... |
def create(self, data):
if 'name' not in data:
raise KeyError('The file must have a name')
if 'file_data' not in data:
raise KeyError('The file must have file_data')
response = self._mc_client._post(url=self._build_path(), data=data)
if response is not None:
... | Upload a new image or file to the File Manager.
:param data: The request body parameters
:type data: :py:class:`dict`
data = {
"name": string*,
"file_data": string*
} |
def get_all_domains(self):
data = self.get_data("domains/")
domains = list()
for jsoned in data['domains']:
domain = Domain(**jsoned)
domain.token = self.token
domains.append(domain)
return domains | This function returns a list of Domain object. |
def selectlastrow(self, window_name, object_name):
object_handle = self._get_object_handle(window_name, object_name)
if not object_handle.AXEnabled:
raise LdtpServerException(u"Object %s state disabled" % object_name)
cell = object_handle.AXRows[-1]
if not cell.AXSelected:
... | Select last row
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: strin... |
def build(ctx, dput='', opts=''):
with io.open('debian/changelog', encoding='utf-8') as changes:
metadata = re.match(r'^([^ ]+) \(([^)]+)\) ([^;]+); urgency=(.+)$', changes.readline().rstrip())
if not metadata:
notify.failure('Badly formatted top entry in changelog')
name, versio... | Build a DEB package. |
def saveRecords(self, path='myOutput'):
numRecords = self.fields[0].numRecords
assert (all(field.numRecords==numRecords for field in self.fields))
import csv
with open(path+'.csv', 'wb') as f:
writer = csv.writer(f)
writer.writerow(self.getAllFieldNames())
writer.writerow(self.getAllDa... | Export all the records into a csv file in numenta format.
Example header format:
fieldName1 fieldName2 fieldName3
date string float
T S
Parameters:
--------------------------------------------------------------------
path: Relative path of the file to... |
def copy_arguments_to_annotations(args, type_comment, *, is_method=False):
if isinstance(type_comment, ast3.Ellipsis):
return
expected = len(args.args)
if args.vararg:
expected += 1
expected += len(args.kwonlyargs)
if args.kwarg:
expected += 1
actual = len(type_comment) i... | Copies AST nodes from `type_comment` into the ast3.arguments in `args`.
Does validation of argument count (allowing for untyped self/cls)
and type (vararg and kwarg). |
def find_unique(table, sample, unique_fields=None):
res = search_unique(table, sample, unique_fields)
if res is not None:
return res.eid
else:
return res | Search in `table` an item with the value of the `unique_fields` in the `sample` sample.
Check if the obtained result is unique. If nothing is found, an empty list will be returned;
if there is more than one item found, will raise an IndexError.
Parameters
----------
table: tinydb.table
sample: d... |
def density_between_circular_annuli_in_angular_units(self, inner_annuli_radius, outer_annuli_radius):
annuli_area = (np.pi * outer_annuli_radius ** 2.0) - (np.pi * inner_annuli_radius ** 2.0)
return (self.mass_within_circle_in_units(radius=outer_annuli_radius) -
self.mass_within_circle_i... | Calculate the mass between two circular annuli and compute the density by dividing by the annuli surface
area.
The value returned by the mass integral is dimensionless, therefore the density between annuli is returned in \
units of inverse radius squared. A conversion factor can be specified to... |
def split_multiline(value):
return [element for element in (line.strip() for line in value.split('\n'))
if element] | Split a multiline string into a list, excluding blank lines. |
def plot_mask(mask, units, kpc_per_arcsec, pointsize, zoom_offset_pixels):
if mask is not None:
plt.gca()
edge_pixels = mask.masked_grid_index_to_pixel[mask.edge_pixels] + 0.5
if zoom_offset_pixels is not None:
edge_pixels -= zoom_offset_pixels
edge_arcsec = mask.grid_pix... | Plot the mask of the array on the figure.
Parameters
-----------
mask : ndarray of data.array.mask.Mask
The mask applied to the array, the edge of which is plotted as a set of points over the plotted array.
units : str
The units of the y / x axis of the plots, in arc-seconds ('arcsec') ... |
def _apply_each_methods(self, i, r,
summarize=False,
report_unexpected_exceptions=True,
context=None):
for a in dir(self):
if a.startswith('each'):
rdict = self._as_dict(r)
f = getattr... | Invoke 'each' methods on `r`. |
def sim_crb_diff(std0, std1, N=10000):
a = std0*np.random.randn(N, len(std0))
b = std1*np.random.randn(N, len(std1))
return a - b | each element of std0 should correspond with the element of std1 |
def post_process(self, group, event, is_new, is_sample, **kwargs):
if not self.is_configured(group.project):
return
host = self.get_option('server_host', group.project)
port = int(self.get_option('server_port', group.project))
prefix = self.get_option('prefix', group.project)... | Process error. |
def _key(cls, obs):
if not isinstance(obs, Observation):
raise JamsError('{} must be of type jams.Observation'.format(obs))
return obs.time | Provides sorting index for Observation objects |
def execute(self, command, timeout=None):
try:
self.channel = self.ssh.get_transport().open_session()
except paramiko.SSHException as e:
self.unknown("Create channel error: %s" % e)
try:
self.channel.settimeout(self.args.timeout if not timeout else timeout)
... | Execute a shell command. |
def ismounted(device):
with settings(hide('running', 'stdout')):
res = run_as_root('mount')
for line in res.splitlines():
fields = line.split()
if fields[0] == device:
return True
with settings(hide('running', 'stdout')):
res = run_as_root('swapon -s')
for lin... | Check if partition is mounted
Example::
from burlap.disk import ismounted
if ismounted('/dev/sda1'):
print ("disk sda1 is mounted") |
def wait_for_readability(self):
with self.lock:
while True:
if self._socket is None or self._eof:
return False
if self._state in ("connected", "closing"):
return True
if self._state == "tls-handshake" and \
... | Stop current thread until the channel is readable.
:Return: `False` if it won't be readable (e.g. is closed) |
def download(self, source_file, target_folder=''):
current_folder = self._ftp.pwd()
if not target_folder.startswith('/'):
target_folder = join(getcwd(), target_folder)
folder = os.path.dirname(source_file)
self.cd(folder)
if folder.startswith("/"):
folder ... | Downloads a file from the FTP server to target folder
:param source_file: the absolute path for the file on the server
it can be the one of the files coming from
FtpHandler.dir().
:type source_file: string
:param target_folder: relative or absolute path of ... |
def ma(X, Q, M):
if Q <= 0 or Q >= M:
raise ValueError('Q(MA) must be in ]0,lag[')
a, rho, _c = yulewalker.aryule(X, M, 'biased')
a = np.insert(a, 0, 1)
ma_params, _p, _c = yulewalker.aryule(a, Q, 'biased')
return ma_params, rho | Moving average estimator.
This program provides an estimate of the moving average parameters
and driving noise variance for a data sequence based on a
long AR model and a least squares fit.
:param array X: The input data array
:param int Q: Desired MA model order (must be >0 and <M)
:param int... |
def group_nodes_by_annotation_filtered(graph: BELGraph,
node_predicates: NodePredicates = None,
annotation: str = 'Subgraph',
) -> Mapping[str, Set[BaseEntity]]:
node_filter = concatenate_node_predic... | Group the nodes occurring in edges by the given annotation, with a node filter applied.
:param graph: A BEL graph
:param node_predicates: A predicate or list of predicates (graph, node) -> bool
:param annotation: The annotation to use for grouping
:return: A dictionary of {annotation value: set of node... |
def doc2md(docstr, title, min_level=1, more_info=False, toc=True, maxdepth=0):
text = doctrim(docstr)
lines = text.split('\n')
sections = find_sections(lines)
if sections:
level = min(n for n,t in sections) - 1
else:
level = 1
shiftlevel = 0
if level < min_level:
shif... | Convert a docstring to a markdown text. |
def _sync_content_metadata(self, serialized_data, http_method):
try:
status_code, response_body = getattr(self, '_' + http_method)(
urljoin(self.enterprise_configuration.degreed_base_url, self.global_degreed_config.course_api_path),
serialized_data,
se... | Synchronize content metadata using the Degreed course content API.
Args:
serialized_data: JSON-encoded object containing content metadata.
http_method: The HTTP method to use for the API request.
Raises:
ClientError: If Degreed API request fails. |
def create_from_settings(settings):
return Connection(
settings["url"],
settings["base_url"],
settings["user"],
settings["password"],
authorizations = settings["authorizations"],
debug = settings["debug"]
) | Create a connection with given settings.
Args:
settings (dict): A dictionary of settings
Returns:
:class:`Connection`. The connection |
def window_riemann(N):
r
n = linspace(-N/2., (N)/2., N)
w = sin(n/float(N)*2.*pi) / (n / float(N)*2.*pi)
return w | r"""Riemann tapering window
:param int N: window length
.. math:: w(n) = \frac{\sin(2\pi n/N)}{2\pi n/N}
with :math:`-N/2 \leq n \leq N/2`.
.. plot::
:width: 80%
:include-source:
from spectrum import window_visu
window_visu(64, 'riemann')
.. seealso:: :func:`c... |
def prefetch_users(persistent_course_grades):
users = User.objects.filter(
id__in=[grade.user_id for grade in persistent_course_grades]
)
return {
user.id: user for user in users
} | Prefetch Users from the list of user_ids present in the persistent_course_grades.
Arguments:
persistent_course_grades (list): A list of PersistentCourseGrade.
Returns:
(dict): A dictionary containing user_id to user mapping. |
def subtract_metabolites(self, metabolites, combine=True, reversibly=True):
self.add_metabolites({
k: -v for k, v in iteritems(metabolites)},
combine=combine, reversibly=reversibly) | Subtract metabolites from a reaction.
That means add the metabolites with -1*coefficient. If the final
coefficient for a metabolite is 0 then the metabolite is removed from
the reaction.
Notes
-----
* A final coefficient < 0 implies a reactant.
* The change is r... |
def extend_model(self, exchange_reactions=False, demand_reactions=True):
for rxn in self.universal.reactions:
rxn.gapfilling_type = 'universal'
new_metabolites = self.universal.metabolites.query(
lambda metabolite: metabolite not in self.model.metabolites
... | Extend gapfilling model.
Add reactions from universal model and optionally exchange and
demand reactions for all metabolites in the model to perform
gapfilling on.
Parameters
----------
exchange_reactions : bool
Consider adding exchange (uptake) reactions fo... |
def _read_all_z_variable_info(self):
self.z_variable_info = {}
self.z_variable_names_by_num = {}
info = fortran_cdf.z_var_all_inquire(self.fname, self._num_z_vars,
len(self.fname))
status = info[0]
data_types = info[1]
num_elem... | Gets all CDF z-variable information, not data though.
Maps to calls using var_inquire. Gets information on
data type, number of elements, number of dimensions, etc. |
def build_delete_node_by_hash(manager: Manager) -> Callable[[BELGraph, str], None]:
@in_place_transformation
def delete_node_by_hash(graph: BELGraph, node_hash: str) -> None:
node = manager.get_dsl_by_hash(node_hash)
graph.remove_node(node)
return delete_node_by_hash | Make a delete function that's bound to the manager. |
def bond_task(
perc_graph_result, seeds, ps, convolution_factors_tasks_iterator
):
convolution_factors_tasks = list(convolution_factors_tasks_iterator)
return reduce(
percolate.hpc.bond_reduce,
map(
bond_run,
itertools.repeat(perc_graph_result),
seeds,
... | Perform a number of runs
The number of runs is the number of seeds
convolution_factors_tasks_iterator needs to be an iterator
We shield the convolution factors tasks from jug value/result mechanism
by supplying an iterator to the list of tasks for lazy evaluation
http://github.com/luispedro/jug/b... |
def vagrant(self, name=''):
r = self.local_renderer
config = self.ssh_config(name)
extra_args = self._settings_dict(config)
r.genv.update(extra_args) | Run the following tasks on a vagrant box.
First, you need to import this task in your ``fabfile.py``::
from fabric.api import *
from burlap.vagrant import vagrant
@task
def some_task():
run('echo hello')
Then you can easily run tasks on... |
def BSWAP(cpu, dest):
parts = []
arg0 = dest.read()
for i in range(0, dest.size, 8):
parts.append(Operators.EXTRACT(arg0, i, 8))
dest.write(Operators.CONCAT(8 * len(parts), *parts)) | Byte swap.
Reverses the byte order of a 32-bit (destination) register: bits 0 through
7 are swapped with bits 24 through 31, and bits 8 through 15 are swapped
with bits 16 through 23. This instruction is provided for converting little-endian
values to big-endian format and vice versa.
... |
def is_displayed(target):
is_displayed = getattr(target, 'is_displayed', None)
if not is_displayed or not callable(is_displayed):
raise TypeError('Target has no attribute \'is_displayed\' or not callable')
if not is_displayed():
raise WebDriverException('element not visible') | Assert whether the target is displayed
Args:
target(WebElement): WebElement Object.
Returns:
Return True if the element is displayed or return False otherwise. |
def sendToSbs(self, challenge_id, item_id):
method = 'PUT'
url = 'sbs/challenge/%s/squad' % challenge_id
squad = self.sbsSquad(challenge_id)
players = []
moved = False
n = 0
for i in squad['squad']['players']:
if i['itemData']['id'] == item_id:
... | Send card FROM CLUB to first free slot in sbs squad. |
def load_state(self, state_id, delete=True):
return self._store.load_state(f'{self._prefix}{state_id:08x}{self._suffix}', delete=delete) | Load a state from storage identified by `state_id`.
:param state_id: The state reference of what to load
:return: The deserialized state
:rtype: State |
def set_fields(self):
if self.is_initialized:
self.model_map_dict = self.create_document_dictionary(self.model_instance)
else:
self.model_map_dict = self.create_document_dictionary(self.model)
form_field_dict = self.get_form_field_dict(self.model_map_dict)
self.se... | Sets existing data to form fields. |
def _auto_client_files(cls, client, ca_path=None, ca_contents=None, cert_path=None,
cert_contents=None, key_path=None, key_contents=None):
files = []
if ca_path and ca_contents:
client['ca'] = ca_path
files.append(dict(path=ca_path,
... | returns a list of NetJSON extra files for automatically generated clients
produces side effects in ``client`` dictionary |
def _build_query(self, query_string, no_params=False):
try:
query = quote(query_string.format(u=self.library_id, t=self.library_type))
except KeyError as err:
raise ze.ParamNotPassed("There's a request parameter missing: %s" % err)
if no_params is False:
if no... | Set request parameters. Will always add the user ID if it hasn't
been specifically set by an API method |
def add(self, stream_id, task_ids, grouping, source_comp_name):
if stream_id not in self.targets:
self.targets[stream_id] = []
self.targets[stream_id].append(Target(task_ids, grouping, source_comp_name)) | Adds the target component
:type stream_id: str
:param stream_id: stream id into which tuples are emitted
:type task_ids: list of str
:param task_ids: list of task ids to which tuples are emitted
:type grouping: ICustomStreamGrouping object
:param grouping: custom grouping to use
:type sourc... |
def save_ckpt(
sess=None, mode_name='model.ckpt', save_dir='checkpoint', var_list=None, global_step=None, printable=False
):
if sess is None:
raise ValueError("session is None.")
if var_list is None:
var_list = []
ckpt_file = os.path.join(save_dir, mode_name)
if var_list == []:
... | Save parameters into `ckpt` file.
Parameters
------------
sess : Session
TensorFlow Session.
mode_name : str
The name of the model, default is ``model.ckpt``.
save_dir : str
The path / file directory to the `ckpt`, default is ``checkpoint``.
var_list : list of tensor
... |
def _filter(filterObj, **kwargs):
for key, value in kwargs.items():
if key.endswith('__ne'):
notFilter = True
key = key[:-4]
else:
notFilter = False
if key not in filterObj.indexedFields:
raise ValueError('Field "' + key + '" is not in INDEXED_FIELDS array. Filtering is only supported on inde... | Internal for handling filters; the guts of .filter and .filterInline |
def new_address(self, label=None):
return self._backend.new_address(account=self.index, label=label) | Creates a new address.
:param label: address label as `str`
:rtype: :class:`SubAddress <monero.address.SubAddress>` |
def set_embedded_doc(self, document, form_key, current_key, remaining_key):
embedded_doc = getattr(document, current_key, False)
if not embedded_doc:
embedded_doc = document._fields[current_key].document_type_obj()
new_key, new_remaining_key_array = trim_field_key(embedded_doc, remai... | Get the existing embedded document if it exists, else created it. |
def fasta_dict_to_file(fasta_dict, fasta_file, line_char_limit=None):
fasta_fp = fasta_file
if isinstance(fasta_file, str):
fasta_fp = open(fasta_file, 'wb')
for key in fasta_dict:
seq = fasta_dict[key]['seq']
if line_char_limit:
seq = '\n'.join([seq[i:i+line_char_limit] ... | Write fasta_dict to fasta_file
:param fasta_dict: returned by fasta_file_to_dict
:param fasta_file: output file can be a string path or a file object
:param line_char_limit: None = no limit (default)
:return: None |
def get_varfeatures(simbasedir,
mindet=1000,
nworkers=None):
with open(os.path.join(simbasedir, 'fakelcs-info.pkl'),'rb') as infd:
siminfo = pickle.load(infd)
lcfpaths = siminfo['lcfpath']
varfeaturedir = os.path.join(simbasedir,'varfeatures')
timecols = s... | This runs `lcproc.lcvfeatures.parallel_varfeatures` on fake LCs in
`simbasedir`.
Parameters
----------
simbasedir : str
The directory containing the fake LCs to process.
mindet : int
The minimum number of detections needed to accept an LC and process it.
nworkers : int or Non... |
def delete(self, pk=None, fail_on_missing=False, **kwargs):
if not pk:
existing_data = self._lookup(fail_on_missing=fail_on_missing, **kwargs)
if not existing_data:
return {'changed': False}
pk = existing_data['id']
url = '%s%s/' % (self.endpoint, pk)
... | Remove the given object.
If `fail_on_missing` is True, then the object's not being found is considered a failure; otherwise,
a success with no change is reported.
=====API DOCS=====
Remove the given object.
:param pk: Primary key of the resource to be deleted.
:type pk... |
def MessageSetItemDecoder(extensions_by_number):
type_id_tag_bytes = encoder.TagBytes(2, wire_format.WIRETYPE_VARINT)
message_tag_bytes = encoder.TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED)
item_end_tag_bytes = encoder.TagBytes(1, wire_format.WIRETYPE_END_GROUP)
local_ReadTag = ReadTag
local_DecodeVari... | Returns a decoder for a MessageSet item.
The parameter is the _extensions_by_number map for the message class.
The message set message looks like this:
message MessageSet {
repeated group Item = 1 {
required int32 type_id = 2;
required string message = 3;
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.