code
stringlengths
4
4.48k
docstring
stringlengths
1
6.45k
_id
stringlengths
24
24
def to_json(self, is_to_client):
    """Serialize this cutscene to a JSON-compatible dict.

    Args:
        is_to_client: Unused; the same payload is produced for the
            client and for saving to file.
    """
    del is_to_client  # intentionally ignored; one representation fits both uses
    payload = {
        "scene_type": self.scene_type,
        "wait_duration": self.wait_duration,
    }
    return payload
Convert a cutscene to a dict which can be converted to a JSON string. Args: is_to_client: True to get the version of the cutscene sent to the client, False to get the version of the cutscene to save to file.
625941bed268445f265b4d91
def _optim_init(self):
    """Initialize optimizer state before the main optimization loop.

    Returns:
        Tuple of (fitness history deque, personal bests, global best).
    """
    # The parent class sets up the personal/global best bookkeeping.
    pbest, gbest = super()._optim_init()
    # Bounded history of the last Nc objective evaluations.
    history = deque(maxlen=self.Nc)
    history.append(self.obj(self.particles))
    return history, pbest, gbest
Initialiser of certain state variables before the optimization loop
625941be99fddb7c1c9de2b4
def append_cookie(self, value):
    """Append ``value`` to the HTTP_COOKIE string, or start a new one.

    Cookies accumulate joined by ';' so the environ carries one
    browser-style cookie header across calls.

    Arguments:
        value {string} -- cookie to be stored (e.g. "key=val")
    """
    existing = self.environ.get('HTTP_COOKIE')
    if existing:
        self.environ['HTTP_COOKIE'] = existing + ';{}'.format(value)
    else:
        self.environ['HTTP_COOKIE'] = '{}'.format(value)
Append cookie to the string or create a new string. Determines whether a new cookie should be appended to the existing string of cookies or a new string should be created. This string is used by the browser to interpret how to handle setting a cookie. Arguments: key {string} -- Name of cookie to be stored value {string} -- Value of cookie to be stored
625941bebde94217f3682d16
def privmsg(self, string):
    """Send data to a DCC peer.

    The payload is followed by a LF when this is a DCC CHAT session.
    On a socket error the connection is dropped.
    """
    try:
        self.socket.send(string)
        if self.dcctype == "chat":
            self.socket.send("\n")
        if DEBUG:
            print("TO PEER: %s\n" % string)
    except socket.error:
        # BUG FIX: was `except socket.error(x):`, which constructs an
        # exception (with undefined `x`) instead of catching one.
        self.disconnect("Connection reset by peer.")
Send data to DCC peer. The string will be padded with appropriate LF if it's a DCC CHAT session.
625941be2eb69b55b151c7ce
def forcing_constraint():
    """Identifies a model forcing constraint.

    Returns:
        dict: class-definition metadata for designing.forcing_constraint.
    """
    # Property tuples are (name, type, cardinality, description).
    properties = [
        (
            "additional_constraint",
            "str",
            "0.1",
            "Additional information, e.g. hold constant from 2100-01-01.",
        ),
        (
            "category",
            "str",
            "0.1",
            "Category to which this belongs (from a CV, e.g. GASES).",
        ),
        (
            "code",
            "str",
            "0.1",
            "Programme wide code from a controlled vocabulary (e.g. N2O).",
        ),
        (
            "data_link",
            "linked_to(data.dataset)",
            "0.1",
            "A data record used by the forcing ",
        ),
        (
            "forcing_type",
            "designing.forcing_types",
            "1.1",
            "Type of integration.",
        ),
        ("group", "str", "0.1", "Sub-Category (e.g. GHG)."),
        (
            "origin",
            "linked_to(shared.citation)",
            "0.1",
            "Pointer to origin, e.g. CMIP6 RCP database.",
        ),
    ]
    return {
        "type": "class",
        "base": "designing.numerical_requirement",
        "is_abstract": False,
        "properties": properties,
        "constraints": [("cardinality", "additional_requirements", "0.0")],
    }
Identifies a model forcing constraint.
625941be711fe17d82542293
def isOneEditDistance(self, s, t):
    """Return True when s and t differ by exactly one edit.

    :type s: str
    :type t: str
    :rtype: bool
    """
    if abs(len(s) - len(t)) > 1:
        return False
    shorter = min(len(s), len(t))
    # Length of the common prefix, capped at the shorter string.
    prefix = 0
    while prefix < shorter and s[prefix] == t[prefix]:
        prefix += 1
    # Length of the common suffix within the non-prefix region.
    suffix = 0
    while suffix < shorter - prefix and s[-suffix - 1] == t[-suffix - 1]:
        suffix += 1
    # Exactly one character must remain unexplained by the shared edges.
    return max(len(s), len(t)) - (prefix + suffix) == 1
:type s: str :type t: str :rtype: bool
625941becdde0d52a9e52f52
def get_occurrences (self, value, datatype=None):
    """Return the `Occurrence`s in this topic map whose value matches.

    If `value` is a `Locator`, its IRI is matched with datatype
    xsd:anyURI; a plain string defaults to xsd:string unless an
    explicit `datatype` `Locator` is given.

    :param value: the value to match
    :type value: string or `Locator`
    :param datatype: optional datatype of the `Occurrence`s returned
    :type datatype: `Locator`
    :rtype: `QuerySet` of `Occurrence`s (may be empty, never None)
    """
    if value is None:
        raise IllegalArgumentException('value must not be None')
    if isinstance(value, Locator):
        # A Locator is matched by its IRI and forces the anyURI datatype.
        value = value.get_reference()
        datatype = XSD_ANY_URI
    elif datatype is None:
        # Plain strings default to xsd:string.
        datatype = XSD_STRING
    else:
        datatype = datatype.get_reference()
    return Occurrence.objects.filter(topic__topic_map=self.topic_map).filter(value=value).filter(datatype=datatype)
Returns the `Occurrence`s in the topic map whose value property matches `value` (or if `value` is a `Locator`, the IRI represented by `value`). If `value` is a string and `datatype` is None, the `Occurrence`s' datatype property must be xsd:string. If `value` is a `Locator`, the `Occurrence`s' datatype property must be xsd:anyURI. If `datatype` is not None, the `Occurrence`s returned must be of that datatype. The return value may be empty but must never be None. :param value: the value of the `Occurrence`s to be returned :type value: string or `Locator` :param datatype: optional datatype of the `Occurrence`s to be returned :type datatype: `Locator` :rtype: `QuerySet` of `Occurrence`s
625941be10dbd63aa1bd2ac8
def compact(self, tableNameOrRegionName):
    """Request a compaction and wait for the server's reply.

    Parameters:
     - tableNameOrRegionName
    """
    # Fire the request, then block on the matching response.
    self.send_compact(tableNameOrRegionName)
    self.recv_compact()
Parameters: - tableNameOrRegionName
625941be07d97122c41787a8
def clear(self):
    """Empty the HashSet by clearing every bucket in place."""
    for bucket in self.buckets:
        bucket.clear()
Empties the HashSet.
625941be8a43f66fc4b53f8a
def load_ggpk(ggpk_path):
    """Create a GGPKFile object from a path.

    :param ggpk_path: path to the .ggpk archive
    :return: parsed ggpk object with its directory tree built
    """
    archive = GGPKFile()
    archive.read(ggpk_path)
    archive.directory_build()
    return archive
Creates a ggpk object from a path :param ggpk_path: :return: ggpk
625941be15fb5d323cde0a2e
def getHash(self):
    """Video.getHash() -> file hash, or None when no host is set."""
    if self.host is None:
        return None
    backend = FileOps(self.host)
    # Avoid shadowing the builtin `hash`.
    file_hash = backend.getHash(self.filename, 'Videos')
    return file_hash
Video.getHash() -> file hash
625941be8da39b475bd64e93
def run_glm(self, phenotypes, markers, covariates = None):
    """Run the fixed-effect GLM and return the result tables.

    Parameters
    ----------
    phenotypes : iterable of strings
        Names of the phenotypes to run (keys of ``self.phenotypes``).
    markers : str
        Name of the marker set to use (key of ``self.alignments``).
    covariates : iterable of strings, optional
        Names of the covariates to use (keys of ``self.covariates``).

    Raises
    ------
    KeyError
        If any requested name is not present.

    Returns
    -------
    tuple
        (TableReport of marker effects, TableReport of allelic effects).
    """
    # Assemble all inputs into a single typed list for the plugins.
    dataList = LinkedList(generic=(Datum,))
    for pheno in phenotypes:
        dataList.add(self.phenotypes[pheno])
    dataList.add(self.alignments[markers])
    if covariates:
        for covar in covariates:
            dataList.add(self.covariates[covar])
    ds = DataSet(dataList, None)
    # Restrict to samples present in every input before fitting.
    ds = IntersectionAlignmentPlugin(None, False).performFunction(ds)
    resultSet = FixedEffectLMPlugin(None, False).performFunction(ds)
    marker_effects = resultSet.getData(0).getData().castTo(TableReport)
    allele_effects = resultSet.getData(1).getData().castTo(TableReport)
    return marker_effects, allele_effects
Runs the GLM and returns the results as a TableReport Parameters ---------- phenotypes : iterable of strings The names of the phenotypes you want to run markers : str The name of the marker set you want to use covariates : iterable of strings, optional The names of the covariates you want to use Raises ------ KeyError If names not present Returns ------- TableReport of marker effects, TableReport of allelic effects
625941be6fb2d068a760efbd
def receive_change(self):
    """Handle a click on the start-receiving-messages button.

    Toggles serial listening; turning it on also starts the listener
    thread. The status display is refreshed either way.
    :return:
    """
    if not self.listen_serial:
        self.listen_serial = True
        self.thread_listing_start()
    else:
        self.listen_serial = False
    self.check_status()
Обработка нажатия на кнопку начал приема сообщений :return:
625941bee64d504609d74762
def get_first_corrected_line(self, cr, uid, ids, context=None):
    """Return, per move line, the root line all corrections stem from.

    Follows the ``corrected_line_id`` chain upward until a line with no
    correction source remains. Example: line 8 corrects 5, which
    corrects 3, which corrects 1 -> result for line 8 is line 1.

    NOTE(review): relies on the Python 2 ``long`` builtin; this must be
    ported before running under Python 3.
    """
    if not context:
        context = {}
    if isinstance(ids, (int, long)):
        ids = [ids]
    res = {}
    for ml in self.browse(cr, uid, ids, context=context):
        line = ml
        corrected_line_id = ml.corrected_line_id and ml.corrected_line_id
        # Walk up the correction chain until no predecessor remains.
        while corrected_line_id != False:
            line = line.corrected_line_id or False
            if not line:
                corrected_line_id = False
                continue
            corrected_line_id = line.corrected_line_id and line.corrected_line_id.id or False
        res[str(ml.id)] = False
        if line:
            res[str(ml.id)] = line.id
    return res
For each move line, give the first line from which all corrections have been done. Example: - line 1 exists. - line 1 was corrected by line 3. - line 5 correct line 3. - line 8 correct line 5. - get_first_corrected_line of line 8 should give line 1.
625941bed99f1b3c44c674b7
def __init__(self, filnename, new=False):
    """Open (or create) the sqlite database file.

    :param filnename: name of the *.db file (parameter name kept,
        typo included, for API compatibility)
    :param new: whether to create a fresh database (True/False);
        when True the existing content is wiped via ``clear``
    :return: Database object
    """
    self.filename = filnename
    self.con = sqlite3.connect(self.filename)
    if new:
        self.clear()
Konstruktor :param filnename: nazwa pliku *.db :param new: czy utworzyć nową bazę (True/False) :return: obiekt typu Database
625941be627d3e7fe0d68d71
def plotdvdhFuncByHealth(self,t,bMin=None,bMax=20.0,hSet=None,Alt=False):
    """Plot marginal value of health dvdh vs market resources bLvl.

    One curve is drawn per health level in ``hSet``.

    Parameters
    ----------
    t : int
        Index into ``self.solution`` for the period to plot.
    bMin : float, optional
        Lower bound on bLvl; defaults to the function's grid minimum.
    bMax : float
        Upper bound on bLvl.
    hSet : iterable of float, optional
        Health levels to plot; defaults to the solution's health grid.
    Alt : bool
        When True, plot ``dvdhFuncAlt`` instead of ``dvdhFunc``.

    Returns
    -------
    None
    """
    if hSet is None:
        hSet = self.solution[t].dvdhFunc.y_list
    if bMin is None:
        bMin = self.solution[t].dvdhFunc.x_list[0]
    B = np.linspace(bMin,bMax,300)
    some_ones = np.ones_like(B)
    for hLvl in hSet:
        if Alt:
            dvdh = self.solution[t].dvdhFuncAlt(B,hLvl*some_ones)
        else:
            dvdh = self.solution[t].dvdhFunc(B,hLvl*some_ones)
        plt.plot(B,dvdh)
    plt.xlabel('Market resources bLvl')
    plt.ylabel('Marginal value dvdh')
    plt.show()
Plot the marginal value function with respect to health status vs bLvl at a set of health values. Parameters ---------- None Returns ------- None
625941bedc8b845886cb5456
def __init__(self) -> None:
    """Initialize a new inquirer; no state is configured yet."""
Initialize a new inquirer.
625941beadb09d7d5db6c6b4
def get_xls(xls_name, sheet_name):
    """Get test data rows from an xls file under the data directory.

    Rows whose first cell is the 'case_name' header are skipped.

    :param xls_name: workbook filename inside ``<BasePath>/data``
    :param sheet_name: name of the sheet to read
    :return: list of row-value lists
    """
    workbook_path = os.path.join(BasePath, 'data', xls_name)
    sheet = open_workbook(workbook_path).sheet_by_name(sheet_name)
    all_rows = [sheet.row_values(i) for i in range(sheet.nrows)]
    return [row for row in all_rows if row[0] != u'case_name']
get test data from xls file :param xls_name: :param sheet_name: :return:
625941be5fcc89381b1e15df
def _create_job(self, job_id, drive, base, top, bandwidth):
    """Create a new, not-yet-tracked Job for this drive."""
    disk_spec = {
        "poolID": drive.poolID,
        "domainID": drive.domainID,
        "imageID": drive.imageID,
        "volumeID": drive.volumeID,
    }
    return Job(
        id=job_id,
        drive=drive.name,
        disk=disk_spec,
        base=base,
        top=top,
        bandwidth=bandwidth,
    )
Create new untracked job.
625941beb57a9660fec337a4
def rmsprop(data, parameter, func_grad, lr = 1e-2, rho = 0.9, epsilon = 1e-6, iterationNumber = 500, *arg):
    """RMSprop: the adaptive learning-rate method proposed by Geoff Hinton.

    :param data: the data handed to ``func_grad``
    :param parameter: the start point for the optimization
    :param func_grad: returns the loss function's gradient
    :param lr: global learning rate (a good default is 0.01)
    :param rho: decay factor for the squared-gradient average (default 0.9)
    :param epsilon: small constant against numerical instability
    :param iterationNumber: number of iterations the algorithm runs
    :param arg: additional arguments (currently unused)
    :return: the optimized parameter vector

    NOTE(review): appends a per-iteration error to the module-global
    ``totalError[5]`` using ``func_computeError``; both must exist at
    module scope — confirm before reuse.
    """
    global totalError
    sq_grad_avg = np.zeros(parameter.shape[0])
    for _ in range(iterationNumber):
        grad = func_grad(data, parameter)
        # Exponentially decayed average of squared gradients.
        sq_grad_avg = rho * sq_grad_avg + (1. - rho) * grad ** 2
        parameter = parameter - lr / np.sqrt(sq_grad_avg + epsilon) * grad
        totalError[5].append(func_computeError(data, parameter))
    return parameter
RMSprop implementation adaptive learning rate method proposed by Geoff Hinton :param data: the data :param parameter: the start point for the optimization :param func_grad: returns the loss functions gradient :param lr: the global learning rate for rmsprop (good default value is 0.01) :param rho: the global factor for adadelta (good default value is 0.9) :param epsilon: a small number to counter numerical instabiltiy (e.g. zero division) :param iterationNumber: the number of iterations which algorithm will run :param *args: a list or tuple of additional arguments (e.g. passed to function func_grad) :return:
625941bebd1bec0571d90551
def read_sac(self, event, traceid):
    """Read a trimmed SAC trace for an event.

    :param event: dict with an ``origin`` datetime used to build the
        event directory name and the SAC filename
    :param traceid: trace identifier embedded in the SAC filename
    :return: the trace read from disk, or None when reading fails
    """
    eventdir = event['origin'].strftime("%Y%m%d%H%M%S")
    outdir = os.path.join(self.sacdir, eventdir)
    sac_flnm = ".".join([event["origin"].strftime("%Y.%j.%H.%M.%S"),
                         "0000", traceid, "M", "SAC"])
    sac_fullname = os.path.join(outdir, sac_flnm)
    try:
        return read(sac_fullname)
    except Exception as err:
        # BUG FIX: was a bare `except:` that always logged
        # "FileNotFoundError" no matter what actually went wrong.
        logger.error("failed to read %s: %s", sac_fullname, err)
        return None
read trimed trace
625941be2ae34c7f2600d054
def discover(self):
    """Perform device discovery now and return the transport's result."""
    transport = self._transport
    return transport.send_discovery(self._source, self._seq)
Perform device discovery now.
625941bee76e3b2f99f3a733
def __init__(self, nome_da_loja):
    """Create the customer's purchase.

    Bumps the class-level ``Compra.ID`` counter and assigns the new
    value as this purchase's invoice number.
    """
    self.nome_da_loja = nome_da_loja
    Compra.ID += 1
    self.id_nota_fiscal = Compra.ID
Cria a compra do cliente.
625941be287bf620b61d3988
def fill_ice(self, GT):
    """Fill the ice model starting from the top-left corner.

    Boundary arrows are fixed according to ``self.ice_type``, top edges
    are set from membership in the rows of ``GT``, and the remaining
    edges are then propagated vertex by vertex. On success the tally of
    ice states is returned; on failure an error is shown/raised.

    Note: only edges where no arrow has been placed yet (labeled 0)
    are changed.
    """
    if self.ice_type == "square":
        # Square ice is 1-indexed; a 0 entry means the caller forgot to shift.
        if 0 in [elt for row in GT for elt in row]:
            print("Current model doesn't have column 0. Please add 1 to all entries for square ice.\n")
            exit(0)
        # Fix left/right boundary arrows on every row.
        for i in range(1, self.nrows+1):
            left_v = self.get_vertex(i, self.ncols)
            left_v.change_left(-1)
            right_v = self.get_vertex(i, 1)
            right_v.change_right(-1)
        # Fix the bottom boundary arrows.
        for i in range(1, self.ncols+1):
            bottom_v = self.get_vertex(self.nrows, i)
            bottom_v.change_down(1)
    elif self.ice_type == "alt" or self.ice_type == "KT":
        # Alternating / KT boundary: only left and bottom edges are fixed.
        for i in range(1, self.nrows+1):
            left_v = self.get_vertex(i, self.ncols)
            left_v.change_left(-1)
        for i in range(1, self.ncols+1):
            bottom_v = self.get_vertex(self.nrows, i)
            bottom_v.change_down(1)
    try:
        # Top edges: up-arrow when the column index appears in GT's row.
        for i in range(1, self.nrows+1):
            for j in range(self.ncols, 0, -1):
                current_v = self.get_vertex(i, j)
                if j in GT[i-1]:
                    current_v.change_up(1)
                else:
                    current_v.change_up(-1)
    except:
        # NOTE(review): bare except; the `return` below is unreachable
        # because the raise exits first — likely leftover debug code.
        raise ValueError("vertex "+str(i)+","+str(j)+" went wrong during initialization")
        return current_v
    # Propagate the remaining edges from each vertex's neighbors.
    for i in range(1, self.nrows+1):
        for j in range(self.ncols, 0, -1):
            current_v = self.get_vertex(i, j)
            if self.get_vertex(i+1, j):
                # The down edge is the negation of the neighbor's up edge.
                down = self.get_vertex(i+1, j).up
                current_v.change_down(-down)
            if self.get_vertex(i, j+1):
                # The left edge is the negation of the neighbor's right edge.
                left = self.get_vertex(i, j+1).right
                current_v.change_left(-left)
            changed = current_v.fill_all()
            if not changed:
                self.visualize()
                raise ValueError("Process failed at row " +str(current_v.x) + ", column " + str(current_v.y))
    count = self.tally()
    return count
Start filling the ice model from the top-left corner. If the ice model is successfully filled out, print the result and tally ice states; display an error message otherwise. Note: This process only changes edges where no arrows have been placed (i.e. labeled by 0)
625941beeab8aa0e5d26da7a
def change_location(self, city, state, zip_code):
    """Modify the object's location and invalidate cached nearby objects.

    BUG FIX: the original assigned ``self.__longitude``/``__latitude``
    from the undefined names ``longitude``/``latitude`` (a guaranteed
    NameError) and ignored every parameter. The new location is now
    stored from the arguments and the stale coordinates are cleared.
    TODO(review): geocode city/state/zip to real lat/long if the
    surrounding class supports it.
    """
    self.__city = city
    self.__state = state
    self.__zip_code = zip_code
    # Old coordinates and proximity results no longer apply.
    self.__longitude = None
    self.__latitude = None
    self.__nearObjects = []
Modify location
625941be01c39578d7e74d5e
def remove_logic_adapter(self, adapter_name):
    """Unbind a logic adapter by its class name.

    :param adapter_name: class name of the adapter to remove
    :type adapter_name: str
    :return: True when an adapter was removed, False otherwise
    """
    for position, candidate in enumerate(self.adapters):
        if type(candidate).__name__ == adapter_name:
            del self.adapters[position]
            return True
    return False
取消适配器的绑定 :param adapter_name: 需要移除的适配器 :type adapter_name: str
625941becdde0d52a9e52f53
def _generate_type_signature(typed):
    """Return the signature for the type of a typed Topic Maps construct.

    :param typed: the typed Topic Maps construct
    :type typed: `Typed`
    :rtype: integer
    """
    construct_type = typed.get_type()
    return _signature(construct_type)
Returns the signature for the type of a typed Topic Maps construct. :param typed: the typed Topic Maps construct :type typed: `Typed` :rtype: integer
625941be85dfad0860c3ad7c
def hanoi(n, i, k):
    """Print the move sequence solving the Tower of Hanoi puzzle.

    :param n: height of the tower (also the number of the widest disk)
    :param i: source peg number
    :param k: destination peg number
    :return: None
    """
    if n == 1:
        print('Переложить блин 1 со столбика', i, 'на', k)
        return
    # Pegs are numbered 1..3, so the spare peg is whatever 6 - i - k leaves.
    spare = 6 - i - k
    hanoi(n - 1, i, spare)
    print('Переложить блин', n, 'со столбика', i, 'на', k)
    hanoi(n - 1, spare, k)
Пишет последовательновательность действий по решению головоломки "Ханойские башни" :param n: высота пирамидки - оно же номер самго широкого "блина" :param i: номер стобика откуда перекладываем :param k: номер столбика куда перекладываем :return: None
625941be3617ad0b5ed67e1b
def reportClusterOnSystem(system, systemOsh):
    """@types: System, osh -> osh

    Build a jee.Cluster named after the system and report it linked to
    the given system OSH.
    """
    builder = jee.ClusterBuilder()
    reporter = jee.ClusterReporter(builder)
    cluster = jee.Cluster(system.getName())
    return reporter.reportCluster(cluster, systemOsh)
@types: System, osh -> osh
625941be0a50d4780f666db3
def percent_filtered(self):
    """Percent of examples removed by filtering, or None when unknown.

    Exposed so evaluators can compute percentages fairly even if some
    examples were removed during pre-processing. This implementation
    tracks no filtering and therefore always reports None.
    """
    return None
If any filtering was done, the percent of examples that were filtered. Exposed so evaluators can compute percentages fairly even if some examples were removed during pre-processing
625941bef548e778e58cd49f
def get_radius_of_gyration(self):
    """Return the mass-weighted radius of gyration R_g.

    R_g = sqrt( sum_i m_i * |r_i - r_COM|^2 / sum_i m_i )
    """
    masses = self.get_atomic_standard_weights()
    displacements = self.get_atomic_positions() - self.get_center_of_mass()
    weighted_sq = masses[:, numpy.newaxis] * displacements ** 2
    return numpy.sqrt(weighted_sq.sum() / masses.sum())
Return the radius of gyration :math:`R_g` Atomic structure of :math:`N` atoms with masses :math:`m_i` at the positions :math:`\vec{r}_i` :math:`R_g = \sqrt{ \sum_{i=0}^N{ m_i (\vec{r}_i-\vec{r}_{\text{COM}})^2 } / \sum_{i=0}^N{ m_i } }`
625941be56b00c62f0f1457a
def validate_unit(self, sudoku_part):
    """Validate a fundamental unit of sudoku.

    :param sudoku_part: row, column or unit square as a list
    :return (bool): True only when the unit equals 1..SUDOKU_SIZE in order
    """
    expected = list(range(1, self.SUDOKU_SIZE + 1))
    return sudoku_part == expected
Validate a fundamental unit of sudoku :param sudoku_part: row, column or unit square as a list :return (bool): is valid or not
625941be4f6381625f114960
def cluster(self):
    """Form clusters: elect cluster heads, then attach each plain node.

    Every node first resets its per-round state and, if alive, decides
    via ``generateCHstatus`` whether to act as a cluster head (CH);
    dead nodes are forced to non-CH. Each non-CH node then connects to
    the nearest CH that is closer than the sink, or to the sink when no
    such CH exists. CH nodes connect directly to the sink.

    NOTE(review): passes a module-global ``p`` to generateCHstatus —
    confirm it is defined where this class is used.
    """
    for i in range(len(self.nodes)):
        self.nodes[i].resetConChildren()
        self.nodes[i].clearTempDataRec()
        if self.nodes[i].alive:
            self.nodes[i].generateCHstatus(self.fParam, self.h_s_Param, self.h_r_Param, p, self.rnd)
        else:
            self.nodes[i].CHstatus = 0
    for i in range(len(self.nodes)):
        if self.nodes[i].getCHstatus() == 0:
            minDistance = self.nodes[i].getDistance(self.sink)
            jshortest = -1
            for j in range(len(self.nodes)):
                if self.nodes[j].getCHstatus() == 1:
                    if minDistance > self.nodes[i].getDistance(self.nodes[j]):
                        minDistance = self.nodes[i].getDistance(self.nodes[j])
                        jshortest = j
            # BUG FIX: was `jshortest > 0`, which wrongly ignored a
            # nearest cluster head stored at index 0.
            if jshortest >= 0:
                self.nodes[i].connect(self.nodes[jshortest])
            else:
                self.nodes[i].connect(self.sink)
        else:
            self.nodes[i].connect(self.sink)
METHOD cluster Iterates through all nodes and decides whether to be a CH or not with generateCHstatus(). After that, it iterates through all nodes that aren't CHs and compares it's distance to sink to all distances to all nodes that are CHs. It uses the index js shortest to store which distance was the shortest. If no distance to a CH was shorter than the distance to the sink, the node simply connects to the sink. :return: self
625941be5e10d32532c5ee4a
def health(self) -> Health:
    """Determine the health of the K8s instance.

    Each probe's result — or a critical entry when the probe raises —
    is merged into one aggregate Health object.
    """
    aggregate = Health(source=self._instance_id, status=HealthStatus.UNKNOWN)
    for test_health_function in [self._health_swarm_nodes]:
        try:
            test_health = test_health_function()
        except Exception as err:
            # Downgrade probe failures to a critical health entry.
            test_health = Health(source=self._instance_id)
            test_health.critical(f"{test_health_function} exception: {err}")
        finally:
            aggregate.merge(test_health)
    return aggregate
Determine the health of the K8s instance.
625941be1b99ca400220a9d4
def add_report_output(root, report):
    """Append a report's results and metadata under an XML element.

    Args:
        root: parent XML element the report element is added to.
        report: report object exposing heading, results, obj_type and
            metadata. (Original docstring described writing a file;
            this function only builds XML elements in memory.)
    """
    report_element = ET.SubElement(root, tagify(report.heading))
    for result in report.results:
        # Verbose-only results are skipped in this output.
        if not result.include_in_non_verbose:
            continue
        subreport_element = ET.SubElement(report_element, tagify(result.heading))
        subreport_element.attrib["length"] = str(len(result))
        desc = ET.SubElement(subreport_element, "Description")
        desc.text = result.description
        # Emit items sorted by display name for stable output.
        for id_, name in sorted(result.results, key=lambda x: x[1]):
            item = ET.SubElement(subreport_element, tagify(report.obj_type))
            item.text = name
            item.attrib["id"] = str(id_)
    # Nested metadata: one element per category, one child per entry.
    for metadata, val in report.metadata.items():
        metadata_element = ET.SubElement(report_element, tagify(metadata))
        for submeta, submeta_val in val.items():
            item = ET.SubElement(metadata_element, tagify(submeta))
            for line in submeta_val:
                value = ET.SubElement(item, "Value")
                value.text = line.strip()
Write the results to an xml file. Args: results: A Result object. ofile: String path to desired output filename.
625941be656771135c3eb78f
def create_vocab(alphabet, substring_length):
    """Create the vocabulary of every possible word of a fixed length.

    The vocabulary is every combination (with repetition) of letters
    from the alphabet, so it has |alphabet|**substring_length entries.

    Input:
        alphabet: letters available in the alphabet
        substring_length: length of words
    Output:
        vocab2index: dict mapping each word to an integer index
        index2vocab: dict mapping each index back to its word
    """
    words = [''.join(combo)
             for combo in itertools.product(alphabet, repeat=substring_length)]
    vocab2index = {word: idx for idx, word in enumerate(words)}
    index2vocab = {idx: word for idx, word in enumerate(words)}
    return vocab2index, index2vocab
Create the vocabulary of all possible words using the alphabet: all combinations of length substring_length. Vocabulary is of size |alphabet|^substring_length. Input: alphabet: letters available in the alphabet substring_length: length of words Output: vocab2index: dictionary associating each word in the vocab to an index (integer) index2vocab: dictionary associating each index to a word in the vocab
625941be7d847024c06be1dc
def _getNameComponent(self, predicateURI, predicates):
    """Return one component of a person's name from ``predicates``.

    Looks up ``predicateURI`` and returns its first value as text; a
    missing component yields an empty string.

    NOTE(review): uses the Python 2 ``unicode`` builtin — must be
    ported (e.g. to ``str``) before running under Python 3.
    """
    if predicateURI in predicates:
        return unicode(predicates[predicateURI][0])
    return u''
Return a component of a human being's name identified by predicateURI from the given set of predicates. If the component is missing, return an empty string.
625941bebaa26c4b54cb1045
def minimum_distance_constraints(self, large=False):
    """Return an inequality constraint enforcing turbine spacing.

    :param bool large: use the implementation suited to farms with many
        turbines. Default: False.
    :returns: an instance of dolfin_adjoint.InequalityConstraint that
        enforces a minimum distance between turbines.
    :rtype: :py:class:`MinimumDistanceConstraints` (large=False) or
        :py:class:`MinimumDistanceConstraintsLargeArrays` (large=True)
    :raises ValueError: when no turbines have been deployed yet.
    """
    positions = self.turbine_positions
    if len(positions) < 1:
        raise ValueError("Turbines must be deployed before minimum "
                         "distance constraints can be calculated.")
    spec = self._turbine_specification
    constraint_cls = (MinimumDistanceConstraintsLargeArrays if large
                      else MinimumDistanceConstraints)
    return constraint_cls(positions, spec.minimum_distance, spec.controls)
Returns an instance of MinimumDistanceConstraints. :param bool large: Use a minimum distance implementation that is suitable for large farms (i.e. many turbines). Default: False :returns: An instance of dolfin_adjoint.InequalityConstraint that enforces a minimum distance between turbines. :rtype: :py:class:`MinimumDistanceConstraints` (if large=False) or :py:class:`MinimumDistanceConstraintsLargeArray` (if large=True)
625941be377c676e912720cc
def create_json(self):
    """Serialize this entry's info dict to a JSON string for storage."""
    return json.dumps(self.entry_info)
Turn the entry into json data which can be stored.
625941be442bda511e8be33f
def _cleaner(k, v):
    """Rename keys via ``self.rename_map`` and drop unmapped keys.

    Returns a (new_key, value) pair for mapped keys; implicitly returns
    None for keys missing from the map, which callers can filter out.

    NOTE(review): written as a closure over ``self`` and uses the
    Python 2 ``unicode`` builtin — port before running on Python 3.
    """
    if isinstance(v, str):
        # Re-encode, silently dropping bytes that are not valid UTF-8.
        v = unicode(v, 'utf8', 'ignore').encode()
    if k == 'PRODUCTID':
        # Keep only the part after the first underscore.
        v = v.split('_')[1]
    if k in self.rename_map:
        return (self.rename_map[k], v)
Helper function to rename keys and purge any keys that are not in the map.
625941be3d592f4c4ed1cf97
@main.route('/register', methods=['GET', 'POST']) <NEW_LINE> def register():
    """Register a new user account.

    On a valid POST, creates the user (pre-confirmed, with the default
    avatar) unless the email is already registered, then redirects to
    the sign-in page. The explicit ``db.session.commit()`` is required
    here because the generated user id is needed afterwards.
    """
    form = RegistrationForm()
    if form.validate_on_submit():
        user = User(email=form.email.data, name=form.name.data, password=form.password.data, confirmed=1, image='img/default_avatar.jpg')
        if User.query.filter_by(email=form.email.data).first():
            # Message shown to the user: "this email is already registered".
            flash('该邮箱已注册')
        else:
            db.session.add(user)
            db.session.commit()
            return redirect(url_for('main.signin'))
    return render_template('register.html', form=form)
注册,并发送激活邮件。 即使通过配置程序已经可以在末尾自动提交数据库变化,这里也要添加db.session.commit(),因为后续确定令牌要用到id。
625941be1d351010ab855a3f
def test_indexing_methods(self):
    """Exercise index lifecycle: create, put, list, get-all, delete.

    Creates a test index, indexes two check documents, verifies key
    listing, adds a third, verifies get_all_objects, deletes two keys,
    and finally drops the index. ``refresh_index()`` is called after
    each write so subsequent reads see the new documents.
    """
    self.es.create_index(self.index)
    assert self.es.index_exists(self.index)
    check1 = self.es.load_json(__file__, 'test_checks/check1.json')
    check2 = self.es.load_json(__file__, 'test_checks/check2.json')
    check3 = self.es.load_json(__file__, 'test_checks/check3.json')
    self.es.put_object(self.uuid(check1), check1)
    self.es.put_object(self.uuid(check2), check2)
    self.es.refresh_index()
    keys = self.es.list_all_keys()
    assert self.uuid(check1) in keys
    assert self.uuid(check2) in keys
    assert self.uuid(check3) not in keys
    self.es.put_object(self.uuid(check3), check3)
    self.es.refresh_index()
    objs = self.es.get_all_objects()
    assert len(objs) == 3
    self.es.delete_keys([self.uuid(check1), self.uuid(check2)])
    self.es.refresh_index()
    keys = self.es.list_all_keys()
    assert len(keys) == 1
    assert self.uuid(check3) in keys
    assert self.es.delete_index(self.index)
Creates a test index, indexes a few check items, uses additional methods to interact with the index, such as list_all_keys, get_all_objects
625941be50812a4eaa59c247
def set_min_output_buffer(self, *args):
    """set_min_output_buffer(vector_sink_c_sptr self, long min_output_buffer)
    set_min_output_buffer(vector_sink_c_sptr self, int port, long min_output_buffer)

    SWIG-generated wrapper; delegates directly to the C++ implementation.
    """
    return _blocks_swig1.vector_sink_c_sptr_set_min_output_buffer(self, *args)
set_min_output_buffer(vector_sink_c_sptr self, long min_output_buffer) set_min_output_buffer(vector_sink_c_sptr self, int port, long min_output_buffer)
625941be2eb69b55b151c7cf
def on_data(self, data):
    """Queue incoming data for processing.

    Returns False to end the stream once the stop event has been set,
    True to keep receiving.
    """
    if not self.stop_event.is_set():
        self.queue.put(data)
        return True
    return False
Puts a task to process the new data on the queue.
625941be38b623060ff0ad11
def grep(self, pattern):
    """Yield every line that ``pattern`` matches (regex search)."""
    # Compile once and bind the search method before scanning lines.
    matcher = re.compile(pattern).search
    for line in self.get_lines():
        if matcher(line):
            yield line
Find all lines matched by ``pattern``
625941be56ac1b37e62640f7
def setAutoTransform(self, bool):
    """setAutoTransform(self, bool)

    Placeholder: the auto-transform flag is accepted and ignored.
    (Parameter name shadows the builtin but is kept for API
    compatibility.)
    """
setAutoTransform(self, bool)
625941bed18da76e235323f6
def forward(self, state):
    """Compute the deterministic policy action Pi(s).

    The network output is squashed by tanh into (-1, 1) and rescaled
    by ``self.action_lim``.

    :param state: input state (Torch tensor: [n, state_dim])
    :return: output action (Torch tensor: [n, action_dim])
    """
    hidden = F.relu(self.fc1(state))
    hidden = F.relu(self.fc2(hidden))
    hidden = F.relu(self.fc3(hidden))
    # FIX: torch.tanh replaces the deprecated F.tanh (same values).
    action = torch.tanh(self.fc4(hidden))
    return action * float(self.action_lim)
returns policy function Pi(s) obtained from actor network this function is a gaussian prob distribution for all actions with mean lying in (-1,1) and sigma lying in (0,1) The sampled action can , then later be rescaled :param state: Input state (Torch Variable : [n,state_dim] ) :return: Output action (Torch Variable: [n,action_dim] )
625941be45492302aab5e1e4
def create_name_from_html(html):
    """Derive a file name from an HTML page's <title>.

    The last <title> tag's text plus a timestamp names the file; a
    'no_title_' fallback is used when no title text can be found.
    Locates the tag by plain string search, not by parsing.

    :param html: the page source as a string
    :return: the generated name
    """
    before_close = html.partition("</title")[0]
    title_text = before_close.split("<title")[-1].split(">")[-1]
    if title_text:
        name = format_filename(title_text) + '__' + str(time.time())
        logger.info('Created name ' + name)
    else:
        name = "no_title_" + str(time.time())
        logger.warn('Failed to create a name, using \'' + name + '\' instead')
    return name
Function for creating name Use the title of the html page as the title of the text file Called from process_current_link Uses string search to locate the <title> tag Parameter html is a string
625941be7b180e01f3dc4726
def test_load_queries_section_timeout(self, logger, config_full, write_config):
    """Query configuration can include a timeout.

    Writes a config whose query "q" sets timeout=2.0, loads it back,
    and checks the parsed query carries that timeout.
    """
    config_full["queries"]["q"]["timeout"] = 2.0
    config_file = write_config(config_full)
    with config_file.open() as fd:
        result = load_config(fd, logger)
    query1 = result.queries["q"]
    assert query1.timeout == 2.0
Query configuration can include a timeout.
625941becb5e8a47e48b79d0
def stop(self):
    """Stop the supplicant.

    Clears the internal running flag (presumably polled by the run
    loop — confirm with the caller).
    """
    self._running = False
Stop the supplicant.
625941be66656f66f7cbc0cd
def make_wsgi_app(config=None):
    """WSGI application factory.

    :param config: optional configuration object; when omitted a
        default one is built via :func:`configure`
    :type config: :class:`pyramid.configuration.Configurator`
    """
    cfg = configure() if config is None else config
    return cfg.make_wsgi_app()
WSGI application factory :param config: optional configuration object :type config: :class:`pyramid.configuration.Configurator`
625941be38b623060ff0ad12
def _get_stages_with_exit_status(self, value):
    """Return stage names for jobs with a given completion status.

    Args:
        value (str): run status to consider.

    Returns:
        list: unique stage names with that status (order not
        guaranteed, matching the original list(set(...)) behavior).
    """
    matching = {
        job.name
        for jobs in self.submitted_jobs.values()
        for job in jobs
        if job.exit_status and job.exit_status['completion_status'] == value
    }
    return list(matching)
Return a list of stage names for jobs that have a specified exit status. Args: value (str): run status to consider Returns: int: stages with requested run status
625941be566aa707497f4490
def cmu_to_ipa(phonemes):
    """Convert a sequence of CMU phonemes to IPA unicode symbols."""
    return tuple(CMU_2IPA[p] for p in phonemes)
Convert CMU phonemes to IPA unicode format.
625941beec188e330fd5a6c7
def test_storagedriver__delitem__(self):
    """Test the storage driver's dict-like ``__delitem__``.

    Deletes a randomly chosen existing key and checks it is gone, then
    checks that deleting a missing key raises KeyError.
    """
    key_to_remove = random.choice(list(self.mapping.keys()))
    del self.storage[key_to_remove]
    self.assertTrue(key_to_remove not in self.storage)
    self.assertRaises(KeyError, self.storage.__delitem__, 'nokey')
Test Python dict-like __delitem__ method
625941be566aa707497f4491
def _anime_delete(self, data):
    """Delete an anime from a user's list.

    :param data: A :class:`Pymoe.Mal.Objects.Anime` object with the anime data
    :raises SyntaxError: on invalid data type (kept for backward
        compatibility; TypeError would be conventional)
    :raises ServerError: on failure to delete
    :rtype: Bool
    :return: True on success
    """
    # Guard clause first: reject anything that is not an Anime.
    if not isinstance(data, Anime):
        raise SyntaxError(
            "Invalid type: data should be a Pymoe.Mal.Objects.Anime object. Got a {}".format(type(data)))
    url = self.apiurl + "animelist/delete/{}.xml".format(data.id)
    r = requests.get(url,
                     auth=HTTPBasicAuth(self._username, self._password),
                     headers=self.header)
    if r.status_code != 200:
        raise ServerError(r.text, r.status_code)
    return True
Deletes an anime from a user's list :param data: A :class:`Pymoe.Mal.Objects.Anime` object with the anime data :raises: SyntaxError on invalid data type :raises: ServerError on failure to add :rtype: Bool :return: True on success
625941be21bff66bcd684878
def _validate_sbd_options( sbd_config, allow_unknown_opts=False, allow_invalid_option_values=False ): <NEW_LINE> <INDENT> validators = [ validate.NamesIn( ALLOWED_SBD_OPTION_LIST, banned_name_list=UNSUPPORTED_SBD_OPTION_LIST, severity=reports.item.get_severity( reports.codes.FORCE, allow_unknown_opts ), ), validate.ValueNonnegativeInteger("SBD_WATCHDOG_TIMEOUT"), validate.ValueIn( "SBD_TIMEOUT_ACTION", TIMEOUT_ACTION_ALLOWED_VALUE_LIST, severity=reports.item.get_severity( reports.codes.FORCE, allow_invalid_option_values ), ), ] <NEW_LINE> return validate.ValidatorAll(validators).validate(sbd_config)
Validate user SBD configuration. Options 'SBD_WATCHDOG_DEV' and 'SBD_OPTS' are restricted. Returns list of ReportItem sbd_config -- dictionary in format: <SBD config option>: <value> allow_unknown_opts -- if True, accept also unknown options.
625941be596a8972360899e6
def loss(self, X, y=None): <NEW_LINE> <INDENT> X = X.astype(self.dtype) <NEW_LINE> mode = 'test' if y is None else 'train' <NEW_LINE> if self.dropout_param is not None: <NEW_LINE> <INDENT> self.dropout_param['mode'] = mode <NEW_LINE> <DEDENT> if self.use_batchnorm: <NEW_LINE> <INDENT> for bn_param in self.bn_params: <NEW_LINE> <INDENT> bn_param[mode] = mode <NEW_LINE> <DEDENT> <DEDENT> scores = None <NEW_LINE> layer = {} <NEW_LINE> layer[0] = X <NEW_LINE> cache_layer = {} <NEW_LINE> for idx_layer in range(1, self.num_layers): <NEW_LINE> <INDENT> layer[idx_layer], cache_layer[idx_layer] = affine_relu_forward(layer[idx_layer - 1], self.params['W%d' % idx_layer], self.params['b%d' % idx_layer]) <NEW_LINE> <DEDENT> WLast = 'W%d' % self.num_layers <NEW_LINE> bLast = 'b%d' % self.num_layers <NEW_LINE> scores, cache_scores = affine_forward(layer[self.num_layers- 1], self.params[WLast], self.params[bLast]) <NEW_LINE> if mode == 'test': <NEW_LINE> <INDENT> return scores <NEW_LINE> <DEDENT> loss, grads = 0.0, {} <NEW_LINE> data_loss, dscores = softmax_loss(scores, y) <NEW_LINE> reg_loss = 0 <NEW_LINE> for idx in range(1, self.num_layers + 1): <NEW_LINE> <INDENT> reg_loss += 0.5 * self.reg * np.sum(self.params['W%d'% idx]**2) <NEW_LINE> <DEDENT> loss = data_loss + reg_loss <NEW_LINE> dx = {} <NEW_LINE> dx[self.num_layers], grads[WLast], grads[bLast] = affine_backward(dscores, cache_scores) <NEW_LINE> grads[WLast] += self.reg * self.params[WLast] <NEW_LINE> for idx in reversed(range(1, self.num_layers)): <NEW_LINE> <INDENT> dx[idx], grads['W%d' % idx], grads['b%d' % idx] = affine_relu_backward(dx[idx + 1], cache_layer[idx]) <NEW_LINE> grads['W%d' % idx] += self.reg * self.params['W%d' % idx] <NEW_LINE> <DEDENT> return loss, grads
learning_rate = [1e-3] Compute loss and gradient for the fully-connected net. Input / output: Same as TwoLayerNet above.
625941be29b78933be1e55d4
def nextPair(self, startNext=True): <NEW_LINE> <INDENT> self.step = self.step + 1 <NEW_LINE> if self.step >= len(self.roundList): <NEW_LINE> <INDENT> if all(self.returned): <NEW_LINE> <INDENT> if (startNext): <NEW_LINE> <INDENT> self.nextRound() <NEW_LINE> if self.roundList == None or self.roundList == []: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> return -1 <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> print('one or more pairs not returned yet') <NEW_LINE> o = [p for p in self.roundList if not self.returned[self.roundList.index(p)]] <NEW_LINE> self.roundList = o[:] <NEW_LINE> return random.choice(o) <NEW_LINE> <DEDENT> <DEDENT> self.unservedRoundList[self.step] = None <NEW_LINE> return self.roundList[self.step]
Returns next pair. Will start new rounds automatically if startNext is true
625941be44b2445a33931fba
def _cliMain(): <NEW_LINE> <INDENT> import argparse <NEW_LINE> prelim_parser = argparse.ArgumentParser(add_help=False) <NEW_LINE> prelim_parser.add_argument( '-H', '--examples', action = 'store_true', help = 'print detailed help and exit') <NEW_LINE> (args, _) = prelim_parser.parse_known_args() <NEW_LINE> do_print_tutorial = args.examples <NEW_LINE> parser = argparse.ArgumentParser( parents = [prelim_parser], description = 'Copy files/directories from a Time Machine backup.', formatter_class = argparse.RawDescriptionHelpFormatter) <NEW_LINE> parser.add_argument( 'src', nargs = '+', help = 'source of copy') <NEW_LINE> parser.add_argument( 'dst', help = 'destination for copy') <NEW_LINE> parser.add_argument( '-D', metavar = 'inodes_dir', help = 'path to Time Machine fake inode directory') <NEW_LINE> if len(sys.argv) == 1: <NEW_LINE> <INDENT> parser.print_help() <NEW_LINE> sys.exit(1) <NEW_LINE> <DEDENT> if do_print_tutorial: <NEW_LINE> <INDENT> parser.print_help() <NEW_LINE> print(_tutorial(sys.argv[0]), file=sys.stderr) <NEW_LINE> sys.exit(0) <NEW_LINE> <DEDENT> args = parser.parse_args() <NEW_LINE> try: <NEW_LINE> <INDENT> tmcp(args.src, args.dst, args.D) <NEW_LINE> <DEDENT> except BaseException as exc: <NEW_LINE> <INDENT> print('{0}: {1}'.format(type(exc).__name__, exc), file=sys.stderr) <NEW_LINE> sys.exit(1)
Parses command-line arguments and passes them into tmcp().
625941be32920d7e50b280f1
def path_relative_to_cwd(path: PathString) -> Path: <NEW_LINE> <INDENT> path = Path(path) <NEW_LINE> try: <NEW_LINE> <INDENT> return path.relative_to(os.getcwd()) <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> return path
Return path as relative to $PWD if underneath, absolute path otherwise
625941bea79ad161976cc068
def load_items(file): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> while True: <NEW_LINE> <INDENT> yield pickle.load(file) <NEW_LINE> <DEDENT> <DEDENT> except EOFError: <NEW_LINE> <INDENT> return
Deserialize item by item in one stream generator >>> with open("test_items.tmp", "rb") as f: ... gen = load_items(f) ... print(list(gen)) ['a', 'b', 'c'] :return: generator
625941be96565a6dacc8f5f0
def msg(color, msg_text, exitcode=0, *, end="\n", flush=True, output=None): <NEW_LINE> <INDENT> color_dic = { "blue": "\033[0;34m", "red": "\033[1;31m", "green": "\033[0;32m", "yellow": "\033[0;33m", "cyan": "\033[0;36m", "resetcolor": "\033[0m", } <NEW_LINE> if not output: <NEW_LINE> <INDENT> output = sys.stdout <NEW_LINE> <DEDENT> if not color or color == "nocolor": <NEW_LINE> <INDENT> print(msg_text, end=end, file=output, flush=flush) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if color not in color_dic: <NEW_LINE> <INDENT> raise ValueError("Invalid color") <NEW_LINE> <DEDENT> print( "{}{}{}".format(color_dic[color], msg_text, color_dic["resetcolor"]), end=end, file=output, flush=flush, ) <NEW_LINE> <DEDENT> if exitcode: <NEW_LINE> <INDENT> sys.exit(exitcode)
Print colored text. Arguments: color (str): color name (blue, red, green, yellow, cyan or nocolor) msg_text (str): text to be printed exitcode (int, opt): Optional parameter. If exitcode is different from zero, it terminates the script, i.e, it calls sys.exit with the exitcode informed Keyword arguments (optional): end (str): string appended after the last char in "msg_text" default a newline flush (True/False): whether to forcibly flush the stream. default True output (stream): a file-like object (stream). default sys.stdout Example: msg("blue", "nice text in blue") msg("red", "Error in my script. terminating", 1)
625941be99cbb53fe6792b0a
def load_configuration(arg_list, log_printer, arg_parser=None): <NEW_LINE> <INDENT> cli_sections = parse_cli(arg_list=arg_list, arg_parser=arg_parser) <NEW_LINE> check_conflicts(cli_sections) <NEW_LINE> if ( bool(cli_sections['default'].get('find_config', 'False')) and str(cli_sections['default'].get('config')) == ''): <NEW_LINE> <INDENT> cli_sections['default'].add_or_create_setting( Setting('config', re.escape(find_user_config(os.getcwd())))) <NEW_LINE> <DEDENT> targets = [] <NEW_LINE> for item in list(cli_sections['default'].contents.pop('targets', '')): <NEW_LINE> <INDENT> targets.append(item.lower()) <NEW_LINE> <DEDENT> if bool(cli_sections['default'].get('no_config', 'False')): <NEW_LINE> <INDENT> sections = cli_sections <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> base_sections = load_config_file(Constants.system_coafile, log_printer) <NEW_LINE> user_sections = load_config_file( Constants.user_coafile, log_printer, silent=True) <NEW_LINE> default_config = str(base_sections['default'].get('config', '.coafile')) <NEW_LINE> user_config = str(user_sections['default'].get( 'config', default_config)) <NEW_LINE> config = os.path.abspath( str(cli_sections['default'].get('config', user_config))) <NEW_LINE> try: <NEW_LINE> <INDENT> save = bool(cli_sections['default'].get('save', 'False')) <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> save = True <NEW_LINE> <DEDENT> coafile_sections = load_config_file(config, log_printer, silent=save) <NEW_LINE> sections = merge_section_dicts(base_sections, user_sections) <NEW_LINE> sections = merge_section_dicts(sections, coafile_sections) <NEW_LINE> sections = merge_section_dicts(sections, cli_sections) <NEW_LINE> <DEDENT> for section in sections: <NEW_LINE> <INDENT> if section != 'default': <NEW_LINE> <INDENT> sections[section].defaults = sections['default'] <NEW_LINE> <DEDENT> <DEDENT> str_log_level = str(sections['default'].get('log_level', '')).upper() <NEW_LINE> log_printer.log_level = 
LOG_LEVEL.str_dict.get(str_log_level, LOG_LEVEL.INFO) <NEW_LINE> return sections, targets
Parses the CLI args and loads the config file accordingly, taking default_coafile and the users .coarc into account. :param arg_list: The list of command line arguments. :param log_printer: The LogPrinter object for logging. :return: A tuple holding (log_printer: LogPrinter, sections: dict(str, Section), targets: list(str)). (Types indicated after colon.)
625941be8e05c05ec3eea295
def _update_num_batches(self): <NEW_LINE> <INDENT> possible_num_batches = self.data.shape[0] // self.batch_size <NEW_LINE> if self.max_num_batches == -1: <NEW_LINE> <INDENT> self.num_batches = possible_num_batches <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.num_batches = min(self.max_num_batches, possible_num_batches)
Updates number of batches to iterate over.
625941be099cdd3c635f0b80
def grouping(x,q,labels,axis=0,thresh=None): <NEW_LINE> <INDENT> def _grouping_1d(series,q,labels,thresh=None): <NEW_LINE> <INDENT> if thresh==None: <NEW_LINE> <INDENT> thresh=q*10 <NEW_LINE> <DEDENT> series=series.dropna() <NEW_LINE> if series.shape[0]>thresh: <NEW_LINE> <INDENT> return pd.qcut(series,q,labels) <NEW_LINE> <DEDENT> <DEDENT> if x.ndim==1: <NEW_LINE> <INDENT> return _grouping_1d(x,q,labels,thresh) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if axis==1: <NEW_LINE> <INDENT> return x.apply(lambda s:_grouping_1d(s,q,labels,thresh)) <NEW_LINE> <DEDENT> elif axis==0: <NEW_LINE> <INDENT> return x.T.apply(lambda s:_grouping_1d(s,q,labels,thresh))
sort and name for series or dataframe,for dataframe,axis is required with 0 denoting row-by-row and 1 denoting col-by-col :param x: :param q: :param labels: :param axis: :param thresh: :return:
625941be507cdc57c6306bf8
@contract <NEW_LINE> def f2(a): <NEW_LINE> <INDENT> pass
:type a: list(my_condition)
625941be167d2b6e31218ab9
def get_patch(img, x, y, size=32): <NEW_LINE> <INDENT> patch = img[..., x:(x + size), y:(y + size)] <NEW_LINE> return patch
Slices out a square patch from `img` starting from the (x,y) top-left corner. If `im` is a 3D array of shape (l, n, m), then the same (x,y) is broadcasted across the first dimension, and the output has shape (l, size, size). Args: img: numpy.ndarray (n, m), input image x, y: int, top-left corner of the patch size: int, patch size Returns: patch: numpy.ndarray (size, size)
625941be9f2886367277a7b3
@app.route("/api/v1/mentees/<user_id>/", methods=["GET"]) <NEW_LINE> def get_mentee(user_id, with_partners=1): <NEW_LINE> <INDENT> db = get_db() <NEW_LINE> users = db.users <NEW_LINE> if not isinstance(user_id, str): <NEW_LINE> <INDENT> raise APIException(status_code=400, message='user_id not a string') <NEW_LINE> <DEDENT> cursor = users.find({"role": "Mentee", "user_id": user_id}) <NEW_LINE> if cursor.count() is 0: <NEW_LINE> <INDENT> raise APIException(status_code=404, message='no Mentee with user_id found') <NEW_LINE> <DEDENT> context = {} <NEW_LINE> for document in cursor: <NEW_LINE> <INDENT> temp = document <NEW_LINE> del temp['_id'] <NEW_LINE> if with_partners is 0: <NEW_LINE> <INDENT> del temp['partners'] <NEW_LINE> return temp <NEW_LINE> <DEDENT> if with_partners is 1: <NEW_LINE> <INDENT> temp2 = [] <NEW_LINE> for partner_id in document['partners']: <NEW_LINE> <INDENT> val = get_mentor(partner_id, with_partners=0) <NEW_LINE> temp2.append(val) <NEW_LINE> <DEDENT> temp['partners'] = temp2 <NEW_LINE> <DEDENT> context = temp <NEW_LINE> <DEDENT> context['url'] = "/api/v1/mentees/" + user_id + "/" <NEW_LINE> return flask.jsonify(**context)
Lookup a Mentee based on their user_id.
625941bede87d2750b85fcb3
def show_chart(y_predicted, y_actual, tp=None, title=None): <NEW_LINE> <INDENT> tp = TestPlotter() if tp is None else tp <NEW_LINE> if title is not None: <NEW_LINE> <INDENT> tp.title = title <NEW_LINE> <DEDENT> for idx in range(y_predicted.shape[0]): <NEW_LINE> <INDENT> p_val = y_predicted[idx] if y_predicted[idx] != -1.0 else None <NEW_LINE> tp.add_values(idx, y_predicted=p_val, redraw=False) <NEW_LINE> <DEDENT> for idx in range(y_actual.shape[0]): <NEW_LINE> <INDENT> a_val = y_actual[idx] if y_actual[idx] != -1.0 else None <NEW_LINE> tp.add_values(idx, y_actual=a_val, redraw=False) <NEW_LINE> <DEDENT> tp.redraw() <NEW_LINE> print("Close the chart to continue.") <NEW_LINE> tp.block()
Shows a plot using the LossAccPlotter and all provided values. Args: y_predicted: predicted label values of the test dataset. y_actual: actual label values of the test dataset. tp: A TestPlotter-Instance or None. If None then a new TestPlotter will be instantiated. (Default is None.) title: The title to use for the plot, i.e. TestPlotter.title.
625941be26238365f5f0ed8e
def instantiate(self, seed=0, serial_id=0, preset='default', extra_args=None) -> gym.Env: <NEW_LINE> <INDENT> settings = self.get_preset(preset) <NEW_LINE> return env_maker(self.envname, seed, serial_id, **settings)
Make a single environment compatible with the experiments
625941be596a8972360899e7
def __init__(self): <NEW_LINE> <INDENT> self.encoding = 'utf-8'
Initialize the writer
625941bed4950a0f3b08c274
def get_matrix(x_len, y_len, x_begin=1, y_begin=1): <NEW_LINE> <INDENT> listt = [] <NEW_LINE> for i in range(y_begin, y_begin + y_len): <NEW_LINE> <INDENT> listt += [get_row(i, x_len, x_begin)] <NEW_LINE> <DEDENT> return listt
获取一个矩阵的值,返回二维列表 (矩阵的长,矩阵的高,矩阵起始横坐标,矩阵起始纵坐标)
625941bed164cc6175782c71
def getNumReplacement(self): <NEW_LINE> <INDENT> return self.nReplacement
Return the number of individuals that will be replaced in the migration process
625941bebaa26c4b54cb1046
def getNeighbors(self): <NEW_LINE> <INDENT> i = self.board.index(0) <NEW_LINE> if i in [0, 1, 2]: <NEW_LINE> <INDENT> up = None <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> temp = list(self.board) <NEW_LINE> temp[i] = temp[i-3] <NEW_LINE> temp[i-3] = 0 <NEW_LINE> up = State(temp, self) <NEW_LINE> up.path = "Up" <NEW_LINE> <DEDENT> if i in [6, 7, 8]: <NEW_LINE> <INDENT> down = None <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> temp = list(self.board) <NEW_LINE> temp[i] = temp[i+3] <NEW_LINE> temp[i+3] = 0 <NEW_LINE> down = State(temp, self) <NEW_LINE> down.path = "Down" <NEW_LINE> <DEDENT> if i in [0, 3, 6]: <NEW_LINE> <INDENT> left = None <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> temp = list(self.board) <NEW_LINE> temp[i] = temp[i-1] <NEW_LINE> temp[i-1] = 0 <NEW_LINE> left = State(temp, self) <NEW_LINE> left.path = "Left" <NEW_LINE> <DEDENT> if i in [2, 5, 8]: <NEW_LINE> <INDENT> right = None <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> temp = list(self.board) <NEW_LINE> temp[i] = temp[i+1] <NEW_LINE> temp[i+1] = 0 <NEW_LINE> right = State(temp, self) <NEW_LINE> right.path = "Right" <NEW_LINE> <DEDENT> neighbors = [] <NEW_LINE> if up is not None: <NEW_LINE> <INDENT> neighbors.append(up) <NEW_LINE> <DEDENT> if down is not None: <NEW_LINE> <INDENT> neighbors.append(down) <NEW_LINE> <DEDENT> if left is not None: <NEW_LINE> <INDENT> neighbors.append(left) <NEW_LINE> <DEDENT> if right is not None: <NEW_LINE> <INDENT> neighbors.append(right) <NEW_LINE> <DEDENT> return neighbors
This function returns the list neighbors at the current state.
625941be287bf620b61d3989
def test_add_duplicate_tag(self): <NEW_LINE> <INDENT> d, n = os.path.split(choice(self.filepaths)) <NEW_LINE> for tg in ((make_random_word(),), (make_random_word(), make_random_word())): <NEW_LINE> <INDENT> with database.get_conn(self.db_name) as c: <NEW_LINE> <INDENT> api.apply_tag(c, d, n, *tg) <NEW_LINE> <DEDENT> with database.get_conn(self.db_name) as c: <NEW_LINE> <INDENT> self.assertEqual(1, api.apply_tag(c, d, n, *tg))
Testing if api.apply_tag handles adding duplicate tags.
625941be60cbc95b062c6466
def test_pid_assign(app, db): <NEW_LINE> <INDENT> runner = CliRunner() <NEW_LINE> script_info = ScriptInfo(create_app=lambda info: app) <NEW_LINE> with runner.isolated_filesystem(): <NEW_LINE> <INDENT> result = runner.invoke(cmd, [ 'create', 'doi', '10.1234/foo' ], obj=script_info) <NEW_LINE> assert 0 == result.exit_code <NEW_LINE> with app.app_context(): <NEW_LINE> <INDENT> pid = PersistentIdentifier.get('doi', '10.1234/foo') <NEW_LINE> assert not pid.has_object() <NEW_LINE> assert pid.get_assigned_object() is None <NEW_LINE> assert pid.get_assigned_object('rec') is None <NEW_LINE> <DEDENT> rec_uuid = uuid.uuid4() <NEW_LINE> result = runner.invoke(cmd, [ 'assign', 'doi', '10.1234/foo', '-t', 'rec', '-i', str(rec_uuid) ], obj=script_info) <NEW_LINE> assert 0 == result.exit_code <NEW_LINE> with app.app_context(): <NEW_LINE> <INDENT> pid = PersistentIdentifier.get('doi', '10.1234/foo') <NEW_LINE> assert pid.has_object() <NEW_LINE> assert pid.get_assigned_object() == rec_uuid <NEW_LINE> assert pid.get_assigned_object('rec') == rec_uuid <NEW_LINE> assert pid.get_assigned_object('oth') is None <NEW_LINE> <DEDENT> result = runner.invoke(cmd, [ 'assign', 'doi', '10.1234/foo', '-t', 'rec', '-i', str(rec_uuid) ], obj=script_info) <NEW_LINE> assert 0 == result.exit_code <NEW_LINE> result = runner.invoke(cmd, [ 'assign', 'doi', '10.1234/foo', ], obj=script_info) <NEW_LINE> assert 2 == result.exit_code <NEW_LINE> result = runner.invoke(cmd, [ 'assign', 'doi', '10.1234/foo', '-t', 'rec', ], obj=script_info) <NEW_LINE> assert 2 == result.exit_code <NEW_LINE> result = runner.invoke(cmd, [ 'assign', 'doi', '10.1234/foo', '-i', str(rec_uuid), ], obj=script_info) <NEW_LINE> assert 2 == result.exit_code <NEW_LINE> new_uuid = uuid.uuid4() <NEW_LINE> result = runner.invoke(cmd, [ 'assign', 'doi', '10.1234/foo', '-t', 'rec', '-i', str(new_uuid) ], obj=script_info) <NEW_LINE> assert -1 == result.exit_code <NEW_LINE> result = runner.invoke(cmd, [ 'assign', 'doi', '10.1234/foo', '-s', 
'REGISTERED', '-t', 'rec', '-i', str(new_uuid), '--overwrite' ], obj=script_info) <NEW_LINE> assert 0 == result.exit_code <NEW_LINE> with app.app_context(): <NEW_LINE> <INDENT> pid = PersistentIdentifier.get('doi', '10.1234/foo') <NEW_LINE> assert pid.has_object() <NEW_LINE> assert pid.status == PIDStatus.REGISTERED <NEW_LINE> assert pid.get_assigned_object() == new_uuid <NEW_LINE> assert pid.get_assigned_object('rec') == new_uuid <NEW_LINE> assert pid.get_assigned_object('oth') is None
Test pid object assignment.
625941be8a349b6b435e8097
def run(): <NEW_LINE> <INDENT> use_tpu = FLAGS.use_tpu <NEW_LINE> print('Mode:', 'TPU' if use_tpu else 'CPU') <NEW_LINE> if FLAGS.fake_data: <NEW_LINE> <INDENT> print('Using fake data') <NEW_LINE> x_train = np.random.random((BATCH_SIZE, IMG_ROWS, IMG_COLS)) <NEW_LINE> y_train = np.zeros([BATCH_SIZE, 1], dtype=np.int32) <NEW_LINE> x_test, y_test = x_train, y_train <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> print('Using real data') <NEW_LINE> (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data() <NEW_LINE> <DEDENT> x_train = x_train.reshape(x_train.shape[0], IMG_ROWS, IMG_COLS, 1) <NEW_LINE> x_test = x_test.reshape(x_test.shape[0], IMG_ROWS, IMG_COLS, 1) <NEW_LINE> input_shape = (IMG_ROWS, IMG_COLS, 1) <NEW_LINE> x_train = x_train.astype('float32') <NEW_LINE> x_test = x_test.astype('float32') <NEW_LINE> x_train /= 255 <NEW_LINE> x_test /= 255 <NEW_LINE> print('x_train shape:', x_train.shape) <NEW_LINE> print(x_train.shape[0], 'train samples') <NEW_LINE> print(x_test.shape[0], 'test samples') <NEW_LINE> y_train = tf.keras.utils.to_categorical(y_train, NUM_CLASSES) <NEW_LINE> y_test = tf.keras.utils.to_categorical(y_test, NUM_CLASSES) <NEW_LINE> model = mnist_model(input_shape) <NEW_LINE> if use_tpu: <NEW_LINE> <INDENT> strategy = tf.contrib.tpu.TPUDistributionStrategy( tf.contrib.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu) ) <NEW_LINE> model = tf.contrib.tpu.keras_to_tpu_model(model, strategy=strategy) <NEW_LINE> <DEDENT> model.compile( loss=tf.keras.losses.categorical_crossentropy, optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.05), metrics=['accuracy']) <NEW_LINE> callbacks = [] <NEW_LINE> if FLAGS.model_dir: <NEW_LINE> <INDENT> callbacks = [tf.keras.callbacks.TensorBoard(log_dir=FLAGS.model_dir)] <NEW_LINE> <DEDENT> model.fit( x_train, y_train, batch_size=BATCH_SIZE, callbacks=callbacks, epochs=EPOCHS, verbose=1, validation_data=(x_test, y_test)) <NEW_LINE> return model.evaluate(x_test, y_test, batch_size=BATCH_SIZE, 
verbose=1)
Run the model training and return evaluation output.
625941be8e7ae83300e4aef0
def get_layer_dict(wxs, wxs_url, ckanapi, org_dict, group_dict, pdf_dict, res_format="WMS", debug=False, fallback_org_name='lgate'): <NEW_LINE> <INDENT> foid = ckanapi.action.organization_show(id=fallback_org_name)["id"] <NEW_LINE> return [wxs_to_dict(wxs.contents[layername], wxs_url, org_dict, group_dict, pdf_dict, debug=debug, res_format=res_format, fallback_org_id=foid) for layername in wxs.contents]
Return a list of CKAN API package_show-compatible dicts Arguments: wxs A wxsclient loaded from a WXS enpoint wxs_url The WXS endpoint URL to use as dataset resource URL ckanapi A ckanapi instance with at least read permission org_dict A dict of CKAN org names and ids pdf_dict A dict of dataset names and corresponding PDF URLs debug Debug noise fallback_org_name The fallback CKAN org name , default:'lgate' Returns: A list of CKAN API package_show-compatible dicts
625941be55399d3f055885d7
def save_main_name(name): <NEW_LINE> <INDENT> if name is None: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> q = Query() <NEW_LINE> db.upsert({'type': 'properties', 'g_id': 0, 'name': name}, (q.type == 'properties') & (q.g_id == 0)) <NEW_LINE> return True
Update the name of the graph with id 0 in the database to **name** by updating the field 'name' of the document containing that id with the type 'properties' :param name: new name of the graph with id 0 :return: True if the name is not None and False otherwise
625941be7b25080760e3937e
def hangman_game_flow(self, letter=''): <NEW_LINE> <INDENT> lettersGuessed = self.lettersGuessed <NEW_LINE> secretWord = self.secretWord <NEW_LINE> try: <NEW_LINE> <INDENT> if len(letter) == 1: <NEW_LINE> <INDENT> if letter in lettersGuessed: <NEW_LINE> <INDENT> guessed = '' <NEW_LINE> guessed = self.show_letter_guessed(guessed, secretWord, lettersGuessed) <NEW_LINE> print ('Oops! You have already guessed that letter: ', guessed) <NEW_LINE> <DEDENT> elif letter in secretWord: <NEW_LINE> <INDENT> lettersGuessed.append(letter) <NEW_LINE> guessed = '' <NEW_LINE> guessed = self.show_letter_guessed(guessed, secretWord, lettersGuessed) <NEW_LINE> print ('Good Guess: ', guessed) <NEW_LINE> <DEDENT> elif letter.isdigit(): <NEW_LINE> <INDENT> guessed = '' <NEW_LINE> guessed = self.show_letter_guessed(guessed, secretWord, lettersGuessed) <NEW_LINE> print ('\nYou have to insert one letter, not numbers!\n') <NEW_LINE> print ('Word to guess:', guessed) <NEW_LINE> <DEDENT> elif letter.isspace(): <NEW_LINE> <INDENT> guessed = '' <NEW_LINE> guessed = self.show_letter_guessed(guessed, secretWord, lettersGuessed) <NEW_LINE> print ('\nYou have to insert one letter, not whitespaces!\n') <NEW_LINE> print ('Word to guess:', guessed) <NEW_LINE> <DEDENT> elif letter in string.punctuation: <NEW_LINE> <INDENT> guessed = '' <NEW_LINE> guessed = self.show_letter_guessed(guessed, secretWord, lettersGuessed) <NEW_LINE> print ('\nYou have to insert one letter,' 'not special characters!\n') <NEW_LINE> print ('Word to guess:', guessed) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.guesses -=1 <NEW_LINE> lettersGuessed.append(letter) <NEW_LINE> guessed = '' <NEW_LINE> guessed = self.show_letter_guessed(guessed, secretWord, lettersGuessed) <NEW_LINE> print ('Oops! 
That letter is not in my word: ', guessed) <NEW_LINE> <DEDENT> <DEDENT> elif len(letter) == 0: <NEW_LINE> <INDENT> guessed = '' <NEW_LINE> guessed = self.show_letter_guessed(guessed, secretWord, lettersGuessed) <NEW_LINE> print('\nYour guess must be one letter, not an empty value!\n') <NEW_LINE> print ('Word to guess:', guessed) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> guessed = '' <NEW_LINE> guessed = self.show_letter_guessed(guessed, secretWord, lettersGuessed) <NEW_LINE> print('\nYour guess must be just one letter!\n') <NEW_LINE> print ('Word to guess:', guessed) <NEW_LINE> <DEDENT> <DEDENT> except ValueError: <NEW_LINE> <INDENT> print('Input value not recognized!\nInsert another word!') <NEW_LINE> <DEDENT> print ('------------')
Define todos os fluxos do jogo e seus comportamentos: Caso o usuário insira letra correta, incorreta ou não disponível.
625941bec432627299f04b68
def ipv6_addr(self, iface): <NEW_LINE> <INDENT> nr = '10' + int_to_hex_str(iface.nr, 2) <NEW_LINE> node_id = int_to_hex_str(iface.sliver.node_id, 4) <NEW_LINE> slice_id = int_to_hex_str(iface.sliver.slice_id, 12) <NEW_LINE> ipv6_words = DEBUG_IPV6_PREFIX.split(':')[:3] <NEW_LINE> ipv6_words.extend([node_id, nr]) <NEW_LINE> ipv6_words.extend(split_len(slice_id, 4)) <NEW_LINE> return IP(':'.join(ipv6_words))
DEBUG_IPV6_PREFIX:N:10ii:ssss:ssss:ssss
625941bee5267d203edcdbc3
def stop(self, drop_results=True): <NEW_LINE> <INDENT> self._is_running = False <NEW_LINE> self._is_working = False <NEW_LINE> self._drop_results = drop_results <NEW_LINE> self._queue.put(None)
Stops the worker thread.
625941be9b70327d1c4e0cf8
def _rotate_to_right(self, node): <NEW_LINE> <INDENT> x = node.left <NEW_LINE> p = node <NEW_LINE> g = node.parent <NEW_LINE> p.left = x.right <NEW_LINE> if x.right: <NEW_LINE> <INDENT> x.right.parent = p <NEW_LINE> <DEDENT> x.right = p <NEW_LINE> p.parent = x <NEW_LINE> if not g: <NEW_LINE> <INDENT> self.root = x <NEW_LINE> self.root.parent = None <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> g.left = x <NEW_LINE> x.parent = g
g root(P) /\ g / p D / x /\ ---> x D x C / /\ A p A B / B C
625941be0a366e3fb873e73c
def throw_out(filepath, why='dont know'): <NEW_LINE> <INDENT> global _FilesDict <NEW_LINE> global _SubDirs <NEW_LINE> subdir, _ = os.path.split(filepath) <NEW_LINE> name = os.path.basename(subdir) <NEW_LINE> erase(name, filepath, why)
A more smart way to remove not needed temporary file, accept a full ``filepath``.
625941bed10714528d5ffc04
def verify_declared_bit(self, obj): <NEW_LINE> <INDENT> if obj.name not in self.current_symtab: <NEW_LINE> <INDENT> raise QasmError("Cannot find symbol '" + obj.name + "' in argument list for gate, line", str(obj.line), 'file', obj.file) <NEW_LINE> <DEDENT> sym = self.current_symtab[obj.name] <NEW_LINE> if not (sym.type == 'id' and sym.is_bit): <NEW_LINE> <INDENT> raise QasmError("Bit", obj.name, 'is not declared as a bit in the gate.')
Verify a qubit id against the gate prototype.
625941befff4ab517eb2f35e
def append(self, row): <NEW_LINE> <INDENT> if not isinstance(row, SequenceCollectionType): <NEW_LINE> <INDENT> raise InvalidTypeException(u"新行的类型必须是list或者tuple类型") <NEW_LINE> <DEDENT> if len(row) > self.column_num: <NEW_LINE> <INDENT> raise InvalidSizeException( u"新行的列数为{0},表格的列数为{1},两者不一致".format( len(row), self.column_num )) <NEW_LINE> <DEDENT> self._data.append(row)
添加新的一行
625941be442bda511e8be340
def simulate(self, X, R, Q=False): <NEW_LINE> <INDENT> Y, Q = self._setup_parameters(X, Q) <NEW_LINE> for i in np.arange(Y.size): <NEW_LINE> <INDENT> Y[i] = self._select_action( Q[X[i]] ) <NEW_LINE> delta = R[i,Y[i]] - Q[X[i, Y[i]]] <NEW_LINE> if delta > 0: <NEW_LINE> <INDENT> Q[X[i, Y[i]]] += self._eta_p * delta <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> Q[X[i, Y[i]]] += self._eta_n * delta <NEW_LINE> <DEDENT> <DEDENT> return Y
Simulate bandit task for agent. Parameters ---------- X : array, shape=(n_trials, 2) Predetermined machine presentation order for bandit task. R : array, shape=(n_trials, 2) Predetermined reward values for bandit task. Q : array, shape=(n_machines,) Initial values for Q-table. If scalar, Q initialized as 1-d array with all the same value. Returns ------- Y : array, shape=(n_trials,) Choices on each trial.
625941bef9cc0f698b140522
def error_analysis(self, session, X_test, Y_test, gold_candidate_set=None, b=0.5, set_unlabeled_as_neg=True, display=True, scorer=MentionScorer, **kwargs): <NEW_LINE> <INDENT> test_marginals = self.marginals(X_test, **kwargs) <NEW_LINE> test_candidates = [ X_test.get_candidate(session, i) for i in range(X_test.shape[0]) ] if not self.representation else X_test <NEW_LINE> s = scorer(test_candidates, Y_test, gold_candidate_set) <NEW_LINE> return s.score(test_marginals, train_marginals=None, b=b, display=display, set_unlabeled_as_neg=set_unlabeled_as_neg)
Prints full score analysis using the Scorer class, and then returns the a tuple of sets containing the test candidates bucketed for error analysis, i.e.: * For binary: TP, FP, TN, FN * For categorical: correct, incorrect :param X_test: The input test candidates, as a list or annotation matrix :param Y_test: The input test labels, as a list or annotation matrix :param gold_candidate_set: Full set of TPs in the test set :param b: Decision boundary *for binary setting only* :param set_unlabeled_as_neg: Whether to map 0 labels -> -1, *binary setting* :param display: Print score report :param scorer: The Scorer sub-class to use
625941bef9cc0f698b140521
def mean_confidence_interval(data, confidence): <NEW_LINE> <INDENT> a = 1.0 * numpy.array(data) <NEW_LINE> n = len(a) <NEW_LINE> m, se = numpy.mean(a), stats.sem(a) <NEW_LINE> h = se * stats.t.ppf((1 + confidence) / 2., n - 1) <NEW_LINE> return m, m - h, m + h
:param data: sample data :param confidence: confidence :return: confidence interval from sample data
625941be26238365f5f0ed8f
def testV1DeploymentTriggerImageChangeParams(self): <NEW_LINE> <INDENT> model = lib_openshift.models.v1_deployment_trigger_image_change_params.V1DeploymentTriggerImageChangeParams()
Test V1DeploymentTriggerImageChangeParams
625941bea8370b77170527c5
def get_phi(self): <NEW_LINE> <INDENT> if self._phi is None: <NEW_LINE> <INDENT> self._theta, self._phi= eq2ang(self._ra, self._dec) <NEW_LINE> <DEDENT> return self._phi
get a reference to the phi data
625941be73bcbd0ca4b2bf9a
def get_before(instring, find, offset_count=1): <NEW_LINE> <INDENT> if find in instring: <NEW_LINE> <INDENT> offset_loc = index_of_count(instring, find, offset_count) <NEW_LINE> if offset_loc != -1: <NEW_LINE> <INDENT> return instring[:offset_loc] <NEW_LINE> <DEDENT> return instring <NEW_LINE> <DEDENT> return instring
Returns the string that occurs before the find string. If the find string is not in the string, this returns the entire string. :param instring: the string to search :param find: the string to look for :param offset_count: find the nth copy of the find string :return: the string that immediatly preceeds the find string. example: >>> get_before('look for the [key] in the lock','[') 'look for the '
625941be4527f215b584c37e
def __cmp__(self, other): <NEW_LINE> <INDENT> if not isinstance(other, FiniteField_prime_modn): <NEW_LINE> <INDENT> return cmp(type(self), type(other)) <NEW_LINE> <DEDENT> return cmp(self.__char, other.__char)
Compare ``self`` with ``other``. Two finite prime fields are considered equal if their characteristic is equal. EXAMPLES:: sage: K = FiniteField(3) sage: copy(K) == K True
625941be6aa9bd52df036cc7
def save(self, commit=True): <NEW_LINE> <INDENT> if self.create_from_template: <NEW_LINE> <INDENT> return Forms.models.save_instance( self, Event(created_by=self.created_by), self._meta.fields, 'created', commit, False) <NEW_LINE> <DEDENT> self.instance.created_by = self.created_by <NEW_LINE> self.instance = super(EventForm, self).save(commit=True) <NEW_LINE> if self.instance.template_name and not self.this_is_a_template: <NEW_LINE> <INDENT> template = Event(template_name=self.instance.template_name, created_by=self.created_by) <NEW_LINE> self.instance.template_name = '' <NEW_LINE> self.instance.save() <NEW_LINE> forms.models.save_instance( self, template, self._meta.fields, 'created', commit, False) <NEW_LINE> <DEDENT> return self.instance
Saves twice if a template should be created.
625941bea8ecb033257d2ff2
def check_request(request): <NEW_LINE> <INDENT> if not request.user.is_authenticated(): <NEW_LINE> <INDENT> raise Http404
Check that the AJAX request comes from an authenticated user; raises Http404 otherwise
625941be796e427e537b04e7
def drawCheckerBoard(M=8, N=5, red=GLfloat_3(1,0,0), blue=GLfloat_3(0,0,1) ): <NEW_LINE> <INDENT> glDisable(GL_LIGHTING) <NEW_LINE> try: <NEW_LINE> <INDENT> for x in range(0, N): <NEW_LINE> <INDENT> for y in range(0,M): <NEW_LINE> <INDENT> if (x + y) % 2 == 0: <NEW_LINE> <INDENT> glColor3fv(red) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> glColor3fv(blue) <NEW_LINE> <DEDENT> glRectf(x, y, x + 1, y + 1) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> finally: <NEW_LINE> <INDENT> glEnable(GL_LIGHTING)
Draw an N (columns) by M (rows) checkerboard with the given colours
625941bea17c0f6771cbdf77
def test_valid_page_size(rf): <NEW_LINE> <INDENT> view = search_utils.requires_search_args(inner_fn) <NEW_LINE> result = view(rf.get('?q=term')) <NEW_LINE> result = view(rf.get('?q=term&page_size=10')) <NEW_LINE> assert result.page_size == 10
Valid page sizes should pass through.
625941be66673b3332b91fb5
def create_future_enrollment(self, user, auto_enroll=True): <NEW_LINE> <INDENT> pfm = CcxFutureMembershipFactory.create( ccx=self.ccx, email=user.email, auto_enroll=auto_enroll ) <NEW_LINE> return pfm
Utility method to create a future enrollment
625941be462c4b4f79d1d5f4
def conv_node(nodes, children, feature_size, output_size): <NEW_LINE> <INDENT> with tf.name_scope('conv_node'): <NEW_LINE> <INDENT> std = 1.0 / math.sqrt(feature_size) <NEW_LINE> w_t, w_l, w_r = ( tf.Variable(tf.truncated_normal([feature_size, output_size], stddev=std), name='Wt'), tf.Variable(tf.truncated_normal([feature_size, output_size], stddev=std), name='Wl'), tf.Variable(tf.truncated_normal([feature_size, output_size], stddev=std), name='Wr'), ) <NEW_LINE> init = tf.truncated_normal([output_size, ], stddev=math.sqrt(2.0 / feature_size)) <NEW_LINE> b_conv = tf.Variable(init, name='b_conv') <NEW_LINE> with tf.name_scope('summaries'): <NEW_LINE> <INDENT> tf.summary.histogram('w_t', [w_t]) <NEW_LINE> tf.summary.histogram('w_l', [w_l]) <NEW_LINE> tf.summary.histogram('w_r', [w_r]) <NEW_LINE> tf.summary.histogram('b_conv', [b_conv]) <NEW_LINE> <DEDENT> return conv_step(nodes, children, feature_size, w_t, w_r, w_l, b_conv)
Perform convolutions over every batch sample.
625941be046cf37aa974cc6e
def MidiFile_toInt(*args): <NEW_LINE> <INDENT> return _CsoundAC.MidiFile_toInt(*args)
MidiFile_toInt(int c1, int c2, int c3, int c4) -> int
625941bea05bb46b383ec748