content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
from typing import List


def split_text(text: str) -> List[str]:
    """Split arbitrary text into a list of whitespace-separated words (str)."""
    words = text.split()
    return words
4c48a604b1e76bd7d712cbbc670d9c8d3439e6e0
693,839
def _filter_pairs(starts, stops, coords, indices):  # pragma: no cover
    """
    Converts all the pairs into a single integer mask, additionally
    filtering by the indices.

    Parameters
    ----------
    starts, stops : list[int]
        The starts and stops to convert into an array.
    coords : np.ndarray
        The coordinates to filter by.
    indices : np.ndarray
        The indices in the form of slices such that indices[:, 0] are starts,
        indices[:, 1] are stops and indices[:, 2] are steps.

    Returns
    -------
    mask : list
        The output integer mask.

    Examples
    --------
    >>> import numpy as np
    >>> starts = [2]
    >>> stops = [7]
    >>> coords = np.array([[0, 1, 2, 3, 4, 5, 6, 7]])
    >>> indices = np.array([[2, 8, 2]])  # Start, stop, step pairs
    >>> _filter_pairs(starts, stops, coords, indices)
    [2, 4, 6]
    """
    mask = []
    # Walk each (start, stop) pair and test every candidate position in it.
    for start, stop in zip(starts, stops):
        for pos in range(start, stop):
            # A position survives only if its coordinate on every axis k
            # falls on the slice grid (start/stop/step) for that axis.
            for k in range(len(indices)):
                lo, hi, step = indices[k][0], indices[k][1], indices[k][2]
                elem = coords[k, pos]
                on_grid = (elem - lo) % step == 0
                in_range = (step > 0 and lo <= elem < hi) or (step < 0 and lo >= elem > hi)
                if not (on_grid and in_range):
                    break
            else:
                # No axis rejected this position.
                mask.append(pos)
    return mask
52bbbb1538d50742fa48564ec8d13ef1c129bb13
693,840
import os


def is_argo():
    """Returns: True for Airflow env (detected via Argo environment variables)."""
    env = os.environ
    return "ARGO_AGENT_TASK_WORKERS" in env or "ARGO_KUBELET_PORT" in env
b3eef8f527416d84746753cecacd52dad8fe0d5c
693,841
def get_uid_search_xpath(uid):
    # type: (str) -> str
    """Build the XPath expression for a UID that may contain quote characters.

    Parameters
    ----------
    uid : str
        Original UID string with XPath expression.

    Returns
    -------
    str
        XPath expression; double quotes are escaped via XPath "concat()".
    """
    # Fast path: no quotes to escape, embed the UID directly.
    if '"' not in uid and '&quot;' not in uid:
        return './/*[@uID="' + uid + '"]'
    # Replace each (possibly entity-encoded) double quote with the pieces of
    # a concat() argument list: close quote, literal ", reopen quote.
    escaped = uid.replace('&quot;', "\',\'\"\',\'").replace('"', "\',\'\"\',\'")
    uid_concat = "concat('%s')" % escaped
    return './/*[@uID=' + uid_concat + ']'
f3626595b13a9a9cae44d7f5cb9729602e902db9
693,842
import math


def round_half_up(x):
    """
    Return x rounded to the nearest integer, with halves rounded up to
    match SLiM's behaviour (Python's round() rounds halves to even).
    """
    lower = math.floor(x)
    if x - lower < 0.5:
        return lower
    return math.ceil(x)
e670457f89a46fd560a08f3cbcb956a2a1f10d72
693,843
import os
import socket


def find_zookeeper_id():
    """
    Find this host's ZooKeeper ID by comparing IP addresses.

    Reads the comma-separated ZOOKEEPER_IPS environment variable and returns
    the 1-based position of this host's IP within it.

    Raises
    ------
    AssertionError
        If ZOOKEEPER_IPS is unset/empty or this host's IP is not listed.
    """
    zookeeper_ips_str = os.environ.get("ZOOKEEPER_IPS")
    assert zookeeper_ips_str, "ZOOKEEPER_IPS env variable must be set"
    zookeeper_ips = zookeeper_ips_str.split(",")
    # NOTE(review): relies on DNS resolution of the local FQDN; may differ
    # from the interface IP in multi-homed setups — confirm in deployment.
    self_ip = socket.gethostbyname(socket.getfqdn())
    for idx, zookeeper_ip in enumerate(zookeeper_ips):
        if zookeeper_ip == self_ip:
            # IDs are 1-based per ZooKeeper convention in this deployment.
            return idx + 1
    # Fix: removed the unreachable `return None` that followed this assert.
    assert False, \
        "No entry for %s found in ZOOKEEPER_IPS: %s" % (self_ip, zookeeper_ips)
6df55616c04349a07b6cee0cb17d0c3d4dd40da2
693,844
import argparse
import os


def parse_command_line_args():
    """Parse command line arguments for the IoT registry/device example."""
    arg_parser = argparse.ArgumentParser(
        description='Example of Google Cloud IoT registry and '
                    'device management.')
    # Required arguments
    arg_parser.add_argument(
        '--project_id',
        default=os.environ.get("GOOGLE_CLOUD_PROJECT"),
        required=True,
        help='GCP cloud project name.')
    arg_parser.add_argument(
        '--pubsub_subscription',
        required=True,
        help='Google Cloud Pub/Sub subscription name.')
    # Optional arguments
    arg_parser.add_argument(
        '--service_account_json',
        default=os.environ.get("GOOGLE_APPLICATION_CREDENTIALS"),
        help='Path to service account json file.')
    return arg_parser.parse_args()
81acdc5b39a0801cb689254b668cb2bfdfa90376
693,845
def Reverse(trim_2):
    """Return the input sequence reversed."""
    return trim_2[::-1]
0dd2fdb42dee29ca11478efec3ea960eb3e10890
693,846
def DEFAULT_NULLVALUE(test):
    """
    Returns a null value for each of various kinds of test values.

    **Parameters**

        **test** : bool, int, float or string
            Value to test.

    **Returns**

        **null** : element in `[False, 0, 0.0, '']`
            * if `test` is a `bool`, return `False`
            * else if `test` is an `int`, return `0`
            * else if `test` is a `float`, return `0.0`
            * else return `''`
    """
    # bool must be checked before int: bool is a subclass of int.
    if isinstance(test, bool):
        return False
    if isinstance(test, int):
        return 0
    if isinstance(test, float):
        return 0.0
    return ''
ce9e4df2fc9b3b492bc3a4b3a98c96aa71eca3d0
693,847
def offset_flat_index(idx, offset):
    """Return an index into a flat array with the given offset applied.

    All indices are assumed to be in explicit form: no negative indices,
    no ':' slices, no tuples.
    """
    if not isinstance(idx, slice):
        # Simple index or index array: shift it directly.
        return idx + offset
    # Shift both slice bounds; the step is unaffected by an offset.
    return slice(idx.start + offset, idx.stop + offset, idx.step)
3b5504852aa25b8e15a79a5ddacac3804ac0eb7f
693,848
def spam():
    """Spam function.

    Returns
    -------
    str
        Returns the string 'spam'.
    """
    return "spam"
4e4964e95cd5c142b1ee9be8be417763d859ead2
693,849
from typing import List


def is_branch_id_equal_to_head_id(
    references_content: List[str], branch_index: int
) -> bool:
    """Return True if the id of the given branch is identical to the id of
    HEAD; False otherwise.

    Each line in ``references_content`` has the form ``name=id``; HEAD is
    assumed to be line 0.  ``branch_index == -1`` means "branch not found".
    """
    if branch_index == -1:
        return False
    # Fix: discard the unused name parts instead of binding dead locals.
    _, _, head_id = references_content[0].strip().partition("=")
    _, _, branch_id = references_content[branch_index].strip().partition("=")
    return head_id == branch_id
168e65fbdd49806ed12350ff4f13ef21b60a6f65
693,850
import hashlib
import functools


# NOTE: unbounded cache by design — callers hash a bounded set of strings.
@functools.lru_cache(maxsize=None)
def sha256_string(content: str) -> str:
    """
    Hash a string using SHA256.  Results are cached for repeated use.

    Fix: the docstring promised caching but none existed; lru_cache now
    memoizes repeated hashes of the same string.
    """
    digest = hashlib.sha256(content.encode("utf-8"))
    return digest.hexdigest()
a6cf8dfe6becc7ffb7d3e71c6bb50b6092f2480d
693,851
import math


def get_bearing(origin_point, destination_point):
    """
    Calculate the bearing between two lat-lng points.

    Each argument tuple represents (lat, lng) as decimal degrees.  The
    bearing is the angle in degrees (clockwise) between north and the
    direction from the origin point to the destination point.

    Parameters
    ----------
    origin_point : tuple
        (lat, lng)
    destination_point : tuple
        (lat, lng)

    Returns
    -------
    bearing : float
        compass bearing in decimal degrees, in [0, 360)
    """
    if not (isinstance(origin_point, tuple) and isinstance(destination_point, tuple)):
        raise TypeError("origin_point and destination_point must be (lat, lng) tuples")
    # Convert latitudes and the longitude difference to radians.
    phi1 = math.radians(origin_point[0])
    phi2 = math.radians(destination_point[0])
    delta_lng = math.radians(destination_point[1] - origin_point[1])
    # Forward azimuth from spherical trigonometry, in (-180, 180].
    x = math.sin(delta_lng) * math.cos(phi2)
    y = math.cos(phi1) * math.sin(phi2) - math.sin(phi1) * math.cos(phi2) * math.cos(delta_lng)
    azimuth_deg = math.degrees(math.atan2(x, y))
    # Normalize to a compass bearing in [0, 360).
    return (azimuth_deg + 360) % 360
cfba6ccd27e0b2e2b8fa34f71611b061692f6dbf
693,852
import argparse


def parse_args(args):
    """
    Parses arguments.

    :return: Arguments.
    """
    # NOTE(review): the positional argument sets argparse's `prog`, not the
    # description — confirm whether `description=` was intended.
    parser = argparse.ArgumentParser('Makes bounding boxes on images')
    parser.add_argument('-j', '--json', required=True,
                        help='JSON file results from darknet')
    parser.add_argument('-d', '--dir', required=True,
                        help='output directory for images with bounding box')
    parser.add_argument('-c', '--colors', type=int, default=10, required=False,
                        help='number of colors')
    parser.add_argument('--ttf', required=False,
                        default='/usr/share/fonts/truetype/dejavu/DejaVuSerif.ttf',
                        help='true type font path')
    parser.add_argument('--ttfsize', type=int, default=12, required=False,
                        help='true type font size')
    return parser.parse_args(args)
5f99b44c4e125392b48062a8ae1044886469fb8f
693,853
def partition_int_custom(n, components):
    """Partition an integer in a custom way.

    `n`: integer to partition.

    `components`: iterable of ints; numbers that are allowed to appear in
    the partitioning result.  Each number `m` must satisfy `1 <= m <= n`.

    Returns a generator of tuples (ordered compositions of `n`).

    See `partition_int`, `partition_triangular`.

    Fix: the recursion previously re-entered the public function, repeating
    argument validation at every level and buffering results in a list; the
    inner generator now recurses directly and yields lazily.
    """
    if not isinstance(n, int):
        raise TypeError(f"n must be integer; got {type(n)} with value {repr(n)}")
    if n < 1:
        raise ValueError(f"n must be positive; got {n}")
    components = tuple(components)
    invalid_components = [not isinstance(x, int) for x in components]
    if any(invalid_components):
        raise TypeError(f"each component must be an integer; got invalid components {invalid_components}")
    invalid_components = [not (1 <= x <= n) for x in components]
    if any(invalid_components):
        raise ValueError(f"each component x must be 1 <= x <= n; got n = {n}, with invalid components {invalid_components}")

    def rec(remaining, allowed):
        # Emit every composition of `remaining` using parts from `allowed`.
        for k in allowed:
            m = remaining - k
            if m == 0:
                yield (k,)
            else:
                # Only parts that still fit can extend this composition.
                narrowed = tuple(x for x in allowed if x <= m)
                for tail in rec(m, narrowed):
                    yield (k,) + tail

    return rec(n, components)
eb88d5d4e9397021bf0543b37d825fe3d968313b
693,854
def fibonacci_numbering(order_col):
    """
    Almost Fibonacci — skip the first 2 steps, e.g. 1, 2, 3, 5, 8, ...
    instead of 0, 1, 1, 2, 3, ...; otherwise the ordering of elements
    at '1' would be undefined. ;)

    Returns a function f(index, collection) that reads ``order_col`` off
    the two preceding collection elements.
    """
    def numbering(index, collection):
        # Seed values replace the usual 0, 1 Fibonacci start.
        if index == 0:
            return 1
        if index == 1:
            return 2
        previous = getattr(collection[index - 1], order_col)
        before_that = getattr(collection[index - 2], order_col)
        return previous + before_that

    return numbering
b2bdb59749fc6cbed30bf555afeb9b62815a54a8
693,855
def parseRating(line):
    """
    Parses a rating record in MovieLens format
    ``userId::movieId::rating::count``.

    Returns ``(count, (userId, movieId, rating))``.
    """
    user_id, movie_id, rating, count = line.strip().split("::")[:4]
    return int(count), (int(user_id), int(movie_id), float(rating))
de5ee9c8bfc810aae08c27de650a003a9d69a790
693,856
import math


def get_MCC(mx):
    """Gets the Matthews Correlation Coefficient (MCC) from a 2x2
    confusion matrix ``[[tp, fp], [fn, tn]]``.

    NOTE: raises ZeroDivisionError when any marginal sum is zero.
    """
    (tp, fp), (fn, tn) = mx
    numerator = tp * tn - fp * fn
    denominator = math.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return numerator / denominator
59de25c96f75d02e8bf4283bfeeb909d2e9646a9
693,857
def noise_complaint_handler(sender, message, bot_id, app_id):
    """If you're being too loud, register a noise complaint."""
    # All arguments are accepted for handler-signature compatibility but
    # the response is fixed.
    return "Noise complaint registered with Redwood room 204"
057947c6d874f7d244d7597ed00d00ededa60d5f
693,859
import os
import glob


def ecs_files():
    """Return the sorted list of ECS schema files (*.yml) to load."""
    here = os.path.dirname(__file__)
    schema_glob = os.path.join(here, '../../schemas/*.yml')
    return sorted(glob.glob(schema_glob))
515656b5a685b3a1047de8c0de11bfadcbc5f982
693,861
def power_of_2(n):
    """Recursively compute 2**n for a non-negative integer n.

    Base case: 2**0 == 1 (the recurrence 2 * 2**(n-1) cannot be applied
    at n == 0 since n < 0 is not considered valid).
    Recursive case: 2**n == 2 * 2**(n-1).
    """
    if n == 0:
        return 1
    # Break the problem into a smaller instance of itself.
    return 2 * power_of_2(n - 1)
00ca21b3c84c1ad9539db786944b87eb91095e72
693,862
def format_list(resources):
    """Format a comma-separated list of sorted IDs from a Django queryset."""
    ids = resources.values_list('id', flat=True)
    return ', '.join(str(pk) for pk in sorted(ids))
96b13f3fce990a0c6ec1dd8d75a0cddb8f4b3fa4
693,865
def simpc(f, a, b, m):
    """Composite Simpson's rule for function f on [a, b] with m panels.

    Each panel spans 2h where h = (b - a) / (2m); the panel contribution is
    (h/3) * (f(x1) + 4*f(x2) + f(x3)).

    Fix: the original summed the abscissas themselves (x1 + x2 + x3) and
    never called f; it now applies the 1-4-1 Simpson weights to f's values.
    """
    h = 0.5 * (b - a) / m
    total = 0.0
    for i in range(1, m + 1):
        x1 = a + (2 * i - 2) * h
        x2 = a + (2 * i - 1) * h
        x3 = a + 2 * i * h
        total += f(x1) + 4.0 * f(x2) + f(x3)
    return total * h / 3
97c48991f9116c4113b46049816befa1b9ea7e09
693,866
import os
import requests


def get_spec() -> str:
    """Reach Alpaca Device spec from local cache with fallback to URL fetch.

    Returns
    -------
    str
        The raw YAML text of the AlpacaDeviceAPI_v1 spec.

    Side effects: on a cache miss, downloads the spec over HTTPS and writes
    it to ``~/AlpacaDeviceAPI_v1.yaml``; prints which source was used.
    """
    # Cache file lives in the user's home directory.
    local_copy = os.path.join(os.path.expanduser('~'), 'AlpacaDeviceAPI_v1.yaml')
    if os.path.exists(local_copy):
        # NOTE(review): opened without an explicit encoding — assumes the
        # platform default matches the spec's encoding; confirm (UTF-8?).
        with open(local_copy, 'r') as f:
            text = f.read()
        print(f'Read spec from cache file {local_copy}')
    else:
        # NOTE(review): no timeout and no status check on the HTTP GET —
        # a failed fetch would cache an error body.
        response = requests.get(
            'https://www.ascom-standards.org/api/AlpacaDeviceAPI_v1.yaml')
        text = response.text
        with open(local_copy, 'w') as f:
            f.write(response.text)
        print(f'Cached spec in file {local_copy}')
    return text
b883257ece8bae2164cb068ee7f15abedbd3b6e1
693,868
def to(act: bool, models):
    """Convert a Uvicore Model or List[Model] to a JSON string.

    Uvicore models already have a pydantic ``.json()`` method, but a list
    of models must be assembled into a JSON array manually.

    :param act: when False, pass ``models`` through unchanged.
    :param models: a single model, a list of models, or a falsy value.
    :return: JSON string ('{}' for a falsy/empty input), or ``models``
             unchanged when ``act`` is False.
    """
    if not act:
        return models
    if not models:
        return '{}'
    if isinstance(models, list):
        # Fix: join the per-model JSON directly instead of building the
        # array with string += and trimming the trailing comma by slicing.
        return '[' + ','.join(model.json() for model in models) + ']'
    # A single uvicore model
    return models.json()
766477a17aa6e0456c650d9dc6f1ad3545953f5b
693,869
def healthcheck():
    """Get a dummy string 'alive' to ensure the server is responding."""
    return "alive"
3d5dc3304c9a67735afa75cf5f41ebcad79e680d
693,870
def getPath(keyword, topicNames, topicPaths, topicIDs):
    """
    Get the path of a particular keyword in the ACM tree.

    Parameters:
        keyword (string) - the keyword for which we want the path.
        topicNames (dictionary) - topic name -> topic ID.
        topicPaths (dictionary) - topic ID -> parent topic ID (0 = root).
        topicIDs (dictionary) - topic ID -> topic name.

    Returns:
        path (list) - the path of that keyword (leaf first, i.e. backwards).
    """
    path = [keyword]
    # Backtrack parent links from the keyword's topic up to the root (0).
    parent = topicPaths[topicNames[keyword]]
    while parent != 0:
        path.append(topicIDs[parent])
        parent = topicPaths[parent]
    return path
22dd07a92290c4a7b9e1baf70ef0cc98960b4520
693,871
def ort_categories(reisende):
    """Return all unique (Linie, Richtung, Haltestelle) tuples in reisende."""
    rows = reisende[["Linie", "Richtung", "Haltestelle"]].itertuples(
        index=False, name=None
    )
    # Deduplicate via a set; result order is unspecified.
    return list(set(rows))
e4637e36485632333d6b48fd415be155e300e1cb
693,872
def round_to_period(current_time, period_min, direction='floor', offset=0):
    """
    Round a timestamp to a period boundary.

    Args:
        current_time: timestamp in seconds.
        period_min: period length to align to.
        direction (str, optional): [floor|ceiling]. Defaults to 'floor'.
        offset (int, optional): added to the result. Defaults to 0.

    Returns:
        [int]: [rounded_time]
    """
    # Drop seconds, then align down to the period boundary.
    aligned = current_time - (current_time % 60)
    aligned -= aligned % period_min
    if direction == 'floor':
        return int(aligned + offset)
    if direction == 'ceiling':
        # NOTE: 'ceiling' always advances a full period, even when the
        # input is already aligned.
        return int(aligned + period_min + offset)
    raise Exception(f'Unknown direction: {direction}')
89ee2bcc88be0f5ebc587e0e0dd2d7074dc944ad
693,873
def get(name, default=None):
    """Get the value of a variable in the settings module scope,
    or ``default`` when it is not defined."""
    module_scope = globals()
    return module_scope.get(name, default)
15cb84209896fb39785f3e6014861fa9f74374a1
693,874
import json


def _get_group_code_names(fob_file_path):
    """Extract code names from the first run entry in a fobs JSON file."""
    with open(fob_file_path) as handle:
        first_entry = json.load(handle)[0]
    return [run['name'] for run in first_entry['runs']]
82529c133a1a55092d0b629d01522487fcc8bdaf
693,875
def generate_topktopp(model, tokenizer, text, max_length):
    """Generate a continuation of ``text`` with top-k / top-p sampling.

    :param model: HuggingFace-style model with a ``.generate`` method.
    :param tokenizer: matching tokenizer (``encode`` / ``decode``).
    :param text: prompt string.
    :param max_length: maximum generated sequence length in tokens.
    :return: decoded generated text (special tokens stripped).

    Fix: ``max_length`` was accepted but ignored (hard-coded to 50); it is
    now forwarded to ``model.generate``.
    """
    # NOTE(review): .cuda() assumes a GPU is available — confirm callers.
    input_ids = tokenizer.encode(text, return_tensors='pt').cuda()
    sample_output = model.generate(
        input_ids,
        do_sample=True,
        max_length=max_length,
        top_k=50,
        top_p=0.95)
    return tokenizer.decode(sample_output[0], skip_special_tokens=True)
b727a6fcce6c8333c306b643f415d0701ffb3440
693,876
def process_index(index, intensity, interaction_symbol):
    """
    Process index to get edge tuple.  The edge weight is disregarded.

    Args:
        index (str): index of the edge list dataframe.
        intensity (float): intensity of the edge (unused, kept for
            signature compatibility).
        interaction_symbol (str): symbol used to separate node labels.

    Returns:
        tuple: a tuple containing edge labels.
    """
    parts = index.split(interaction_symbol)
    return tuple(parts)
923602fd8be1281b0d28769ccd38e95c03d645b1
693,877
def _profile_strings(base, tails):
    """Create profile strings by concatenating ``base`` with every
    (level, suffix) combination from ``tails``.

    Each tail is a pair: an iterable of level characters and a suffix.
    """
    result = []
    for tail in tails:
        levels, suffix = tail[0], tail[1]
        for level in levels:
            result.append(base + level + suffix)
    return result
9a6175d23a7ce89ca3f943a91749859137eb1a6f
693,878
def get_tracks_in_frame(frame_ind, track_list):
    """Return list of all tracks whose [first_frame, last_frame] span
    contains frame_ind."""
    return [
        track
        for track in track_list
        if track['first_frame'] <= frame_ind <= track['last_frame']
    ]
dd0c76b3729d7ac427ff64be6121c9947af253c8
693,879
def _bjorklund(subsequences):
    """
    Distribute onsets as evenly as possible by modifying subsequences

    Mutates ``subsequences`` in place and also returns it.  Assumes each
    subsequence is a list (they are combined with ``list.extend``).
    """
    # Euclidean-algorithm-style folding: repeatedly take the trailing run
    # of equal subsequences and distribute them over the leading ones.
    while True:
        remainder = subsequences[-1]
        distributed = []
        # Pop every trailing subsequence equal to the current remainder.
        while subsequences and subsequences[-1] == remainder:
            distributed.append(subsequences.pop())
        # Stop when there is nothing left to fold into, or only one
        # remainder remains: restore the popped items and return.
        if not subsequences or len(distributed) <= 1:
            subsequences.extend(distributed)
            return subsequences
        # Append one remainder onto each leading subsequence (as many as fit).
        for i in range(min(len(distributed), len(subsequences))):
            subsequences[i].extend(distributed.pop())
        # Leftover remainders go back on the tail for the next pass.
        subsequences.extend(distributed)
ed6bb2f864961f1f3f32676133420b75746b30d2
693,880
def callables(potential_callables):
    """Ensure that the callables are in fact a sequence: a single callable
    is wrapped in a one-element list, anything else is passed through."""
    return [potential_callables] if callable(potential_callables) else potential_callables
9ce117902deda63a5ed2c6546c418cbd700104c4
693,882
def get_literals_given_scope_of_selection(available_lits_in_scopes: dict,
                                          scope_of_selection: str,
                                          target_function_range: str) -> set:
    """
    Given literals in different scopes and the scope of selection, get the
    set of possible literals that may be used as an unbound literal token.

    :param available_lits_in_scopes: mapping of scope name to literal pools.
    :param scope_of_selection: 'function', 'file', or anything else
        (which also mixes in the top-K most frequent literals).
    :param target_function_range: the range (token location) of the function
        where the target is present.
    :return: set of candidate literals.
    """
    if scope_of_selection == 'function':
        pool = available_lits_in_scopes['functions_to_literals'][target_function_range]
    elif scope_of_selection == 'file':
        pool = available_lits_in_scopes['all_literals_in_same_file']
    else:
        # Broadest scope: file literals plus the top-K most frequent ones.
        pool = (available_lits_in_scopes['all_literals_in_same_file']
                + available_lits_in_scopes['K_most_frequent_literals'])
    return set(pool)
60bbd4158c6d3205b7bd71c39ccf21b5e622e11f
693,884
from typing import List
import os


def load_raw_data(base_path: str, file_type: str = '.fif') -> List[List[str]]:
    """
    Loads raw data file paths from a directory.

    Args:
        base_path: Path to the directory containing one sub-directory per
            database of raw data.
        file_type: File extension of the raw data (matched case-insensitively).

    Returns:
        One list of matching file paths per database sub-directory.
    """
    all_raw_data_paths = list()
    for entry in os.scandir(base_path):
        if not entry.is_dir():
            continue
        # Recursively collect matching files under this database directory.
        matches = list()
        for dir_path, _, files in os.walk(entry.path):
            matches.extend(
                os.path.join(dir_path, name)
                for name in files
                if name.lower().endswith(file_type)
            )
        all_raw_data_paths.append(matches)
    return all_raw_data_paths
6aabfe3d5e761b3c5240447505711fd01be99b4d
693,885
def get_new_board(dimension):
    """
    Return a multidimensional list representing an empty board — a string
    with a single space at every position.

    :param dimension: integer for the nxn dimension of the board.  For
        example, dimension 3 yields:
        [[" ", " ", " "], [" ", " ", " "], [" ", " ", " "]]
    :return: the empty dimension x dimension board.
    """
    return [[" " for _ in range(dimension)] for _ in range(dimension)]
28e65a0d5eef00d9c27f1d915d41f9e16c3d9026
693,886
def jobs_with_duration(jobs_df):
    """Return the rows of ``jobs_df`` that have a non-null ``duration_m``."""
    has_duration = jobs_df.duration_m.notnull()
    return jobs_df[has_duration]
1900ec9f9ac61bc4313055f2d71bef20794bec08
693,887
import json
import pkg_resources


def get_metadata_info():
    """Get the descriptor metadata info.

    Loads ``descriptors/descriptors.nomadmetainfo.json`` bundled inside the
    installed ``ai4materials`` package and returns the parsed JSON object.
    """
    # pkg_resources requires '/'-joined resource paths regardless of OS.
    # NOTE(review): pkg_resources is deprecated in favour of
    # importlib.resources — consider migrating.
    resource_path = '/'.join(('descriptors', 'descriptors.nomadmetainfo.json'))
    desc_metainfo = json.loads(pkg_resources.resource_string('ai4materials', resource_path))
    return desc_metainfo
5bb5d2dc5ee26c371974203d88ffb7f78a815f81
693,889
from typing import List
from typing import Tuple
from typing import Union
import random
import math


def sampling(
    same_name_different_cluster: List[Tuple[str, str, Union[int, float]]],
    different_name_same_cluster: List[Tuple[str, str, Union[int, float]]],
    same_name_same_cluster: List[Tuple[str, str, Union[int, float]]],
    different_name_different_cluster: List[Tuple[str, str, Union[int, float]]],
    sample_size: int,
    balanced_homonyms_and_synonyms: bool,
    random_seed: int,
) -> List[Tuple[str, str, Union[int, float]]]:
    """
    Samples pairs from the input list of pairs computed exhaustively from
    pair_sampling.  Two criteria: balance pairs based on positive/negative
    classes only, or also balance homonyms and synonyms.

    Parameters
    ----------
    same_name_different_cluster: List
        list of signature pairs (s1, s2) with same name, but from different
        clusters --> (s1, s2, 0).
    different_name_same_cluster: List
        list of signature pairs (s1, s2) with different name, but from same
        cluster --> (s1, s2, 1).
    same_name_same_cluster: List
        list of signature pairs (s1, s2) with same name, also from same
        cluster --> (s1, s2, 1).
    different_name_different_cluster: List
        list of signature pairs (s1, s2) with different name, also from
        different clusters --> (s1, s2, 0).
    sample_size: int
        The desired sample size.
    balanced_homonyms_and_synonyms: bool
        False -- balance for positive and negative classes.
        True -- balance for homonyms and synonyms under positive and
        negative classes as well (i.e., same_name_different_cluster,
        different_name_same_cluster, same_name_same_cluster and
        different_name_different_cluster).
    random_seed: int
        random seed for sampling.

    Returns
    -------
    List:
        list of sampled signature pairs (final order is shuffled).
    """
    # NOTE: with a fixed seed, outputs depend on the exact order of the
    # random.sample calls below — do not reorder them.
    random.seed(random_seed)
    if balanced_homonyms_and_synonyms:
        # Draw up to a quarter of the sample from each of the four buckets.
        same_name_different_cluster_pairs = random.sample(
            same_name_different_cluster,
            min(len(same_name_different_cluster), math.ceil(sample_size / 4)),
        )
        different_name_same_cluster_pairs = random.sample(
            different_name_same_cluster,
            min(len(different_name_same_cluster), math.ceil(sample_size / 4)),
        )
        same_name_same_cluster_pairs = random.sample(
            same_name_same_cluster,
            min(len(same_name_same_cluster), math.ceil(sample_size / 4)),
        )
        different_name_different_cluster_pairs = random.sample(
            different_name_different_cluster,
            min(len(different_name_different_cluster), math.ceil(sample_size / 4)),
        )
        pairs = (
            same_name_different_cluster_pairs
            + different_name_same_cluster_pairs
            + same_name_same_cluster_pairs
            + different_name_different_cluster_pairs
        )
    else:
        # Balance only by class: half positives, half negatives.
        positive = same_name_same_cluster + different_name_same_cluster
        negative = same_name_different_cluster + different_name_different_cluster
        pairs = random.sample(positive, min(len(positive), math.ceil(sample_size / 2))) + random.sample(
            negative, min(len(negative), math.ceil(sample_size / 2))
        )
    # Shuffle the combined result (sample of everything == permutation).
    return random.sample(pairs, len(pairs))
8a5bef90b76b3d29f63fd7d8d510930671c78b7f
693,890
def compare(data_a: list, data_b: list):
    """
    Compare two equally-sized sets of evaluated data element-wise.

    Args:
        data_a (list): data set A
        data_b (list): data set B

    Returns:
        A string "Correct/Total Answers" and the percentage of matches.

    Raises:
        AssertionError: if the two lists differ in length.
        ZeroDivisionError: if both lists are empty.
    """
    # asserts the number of questions is the same
    assert len(data_a) == len(data_b)
    correct = sum(1 for a, b in zip(data_a, data_b) if a == b)
    return (f"{correct}/{len(data_a)}", correct / len(data_a) * 100)
6bb1ad3ab8a77c10a89174267e29e80bbc5f9ccd
693,891
def is_number(n):
    """
    Check whether a string is a number: return int(n) if it is,
    otherwise return False.

    Caveat: is_number("0") returns 0, which is falsy — callers testing the
    result for truthiness cannot distinguish "0" from a non-number.
    """
    try:
        return int(n)
    except (TypeError, ValueError):
        # Fix: catch only the conversion errors int() raises instead of
        # swallowing every Exception (and binding an unused variable).
        return False
4d1d28a2d949afe2a14d6edf7236eaf09f040122
693,892
import time


def get_packed_dir_name(path, task_id):
    """Build the packing directory path for this task run:
    ``<path><YYYYMMDD>_<task_id>/``.

    @param path: base path (expected to end with a separator)
    @param task_id: task identifier
    @return: the packed directory path
    """
    date_part = time.strftime("%Y%m%d", time.localtime())
    return "{}{}_{}/".format(path, date_part, task_id)
e84499b662ad8cc6ad9d969e502e9d6a41b112c1
693,893
def cleanSpikes(highPrice, lowPrice, closePrice):
    """
    Remove high and low prices significantly out of the normal range.

    The highest high is replaced by that bar's close when it exceeds 3x the
    second-highest high; the lowest low is replaced by that bar's close when
    it is below 0.4x the second-lowest low.

    Input highPrice: List of high prices over the period (modified in place)
    Input lowPrice: List of low prices over the period (modified in place)
    Input closePrice: List of close prices over the period
    Output (highPrice, lowPrice): Cleaned high and low price lists

    Fixes: removed dead ``tempHigh``/``tempLow`` aliases and commented-out
    code; the docstring previously stated 2x / 0.5x thresholds that did not
    match the code's 3x / 0.4x.
    """
    maxIndex = highPrice.index(max(highPrice))
    minIndex = lowPrice.index(min(lowPrice))
    # Find the second-highest high and second-lowest low.
    max2, min2 = 0, max(lowPrice)
    for i in range(len(highPrice)):
        if i != maxIndex and highPrice[i] > max2:
            max2 = highPrice[i]
        if i != minIndex and lowPrice[i] < min2:
            min2 = lowPrice[i]
    # Replace spikes with that bar's close.
    if max(highPrice) > max2 * 3:
        highPrice[maxIndex] = closePrice[maxIndex]
    if min(lowPrice) < min2 * 0.4:
        lowPrice[minIndex] = closePrice[minIndex]
    return highPrice, lowPrice
a3f20462cdf0dab03abd0b23b5beae83bbe8b436
693,894
import random


def broken_shuffle_2(values):
    """Return a single instance of each value, in shuffled order."""
    unique_values = list(set(values))
    random.shuffle(unique_values)
    return unique_values
e4c36a38f76c47e408eeb96658607804ed184386
693,895
import sys


def _import_module(import_str):
    """Import the module named by ``import_str`` and return it."""
    __import__(import_str)
    module = sys.modules[import_str]
    return module
e815e32f0146821e10faa37709b8a7c018be459e
693,896
def reduce(op, in_array, out_array=None, axis=None, identity=None): """ Reduces an array according to a binary operation `op`, starting with initial value `identity`, over the given axis (or axes if axis is a list), to `out_array`. Requires `out_array` with `len(axis)` dimensions less than `in_array`, or a scalar if `axis` is None. :param op: binary operation to use for reduction. :param in_array: array to reduce. :param out_array: output array to write the result to. If `None`, a new array will be returned. :param axis: the axis or axes to reduce over. If `None`, all axes will be reduced. :param identity: intial value for the reduction. If `None`, uses value stored in output. :return: `None` if out_array is given, or the newly created `out_array` if `out_array` is `None`. """ # The function is empty because it is parsed in the Python frontend return None
39eba76236682ea82ae63c5d826c230d2e008702
693,897
def minimus(*args):
    """Find the smallest number, printing it before returning it."""
    smallest = min(*args)
    print(smallest)
    return smallest
c94c4879abdd8d5bd5b252e72575efea4e5afff4
693,898
import numpy


def calculate_forces_on_a_node(nodes, node, force):
    """
    Calculate the total amount of repelling force on a node.

    Center the node we're working with at 0, 0, build a list of vectors to
    all other nodes scaled by a repelling force, and sum those vectors.

    If you're pulling the parameters from a list of nodes, you can call it
    like: ...(nodes[:2] + nodes[3:], nodes[2])

    params:
        nodes (list), a list of node Arrays
        node (Array), x,y coordinates for the node we're calculating forces on.
        force (function), a function like spring_force() or repel_force(),
            that takes a distance and returns a new distance.
    """
    scaled_vectors = [force(numpy.subtract(other, node)) for other in nodes]
    return numpy.sum(scaled_vectors, 0)
4927d7f8e677f31712bf93984c8a388fba1f9a92
693,899
import random


def create_synthetic_data(num_labels, num_feats, num_train_labelled,
                          num_unlabelled, num_test, sparsity=3, skew=2,
                          rand_seed=None):
    """Return synthetic data in a dictionary with keys:
    "X_train", "y_train", "X_unlabelled", "X_test", "y_test".

    Features are single letters (at most 26); labels are 1..num_labels.
    `sparsity` controls overall feature rarity, `skew` per-label rarity
    (requires sparsity >= skew).

    Fix: the test split was previously generated with ``num_train_labelled``
    examples; it now honours ``num_test``.
    """
    assert num_feats <= 26
    feats = set('abcdefghijklmnopqrstuvwxyz'[:num_feats])
    labels = range(1, num_labels + 1)
    assert sparsity >= skew
    if rand_seed is not None:
        random.seed(rand_seed)
    # Base probability of each feature appearing at all.
    feat_probs = {}
    for f in feats:
        feat_probs[f] = random.random() ** (sparsity - skew)
    # Per-label feature probabilities, skewed below the base probability.
    feat_label_probs = {l: {} for l in labels}
    for l in labels:
        for f in feats:
            feat_label_probs[l][f] = random.random() ** skew * feat_probs[f]

    def generate_X_Y(n):
        # Draw labels uniformly, then sample each feature independently.
        Y = [random.randint(1, num_labels) for _ in range(n)]
        X = []
        for i in range(n):
            X.append(set())
            for f in feats:
                if random.random() < feat_label_probs[Y[i]][f]:
                    X[-1].add(f)
        return X, Y

    data = {}
    data["X_train"], data["y_train"] = generate_X_Y(num_train_labelled)
    data["X_unlabelled"], _ = generate_X_Y(num_unlabelled)
    data["X_test"], data["y_test"] = generate_X_Y(num_test)
    return data
debcd10314a5d4fbf2e58252fb4e2c595d94273f
693,900
def _(font):
    """
    If the user gives a QFont instance, use it as-is.

    :param font: user-supplied QFont
    :return: QFont
    """
    return font
33db19b3ccb8587e24a66477ee36a17f94c74263
693,901
def _labels_to_state(scan_label, compscan_label):
    """Use scan and compscan labels to derive basic state of antenna."""
    # Missing or explicit 'slew' scan label means the dish is slewing.
    if not scan_label or scan_label == 'slew':
        return 'slew'
    # Calibration scans count as tracking.
    if scan_label == 'cal':
        return 'track'
    # Otherwise defer to the compound-scan label.
    if compscan_label == 'track':
        return 'track'
    return 'scan'
a5a6429fa7c7d108fff469b0efe0848e4eb02fcb
693,902
from functools import reduce


def Sum(iterable):
    """Return the concatenation of the instances in the iterable.

    Can't use the built-in sum() on non-integers (e.g. strings); this folds
    with '+' instead.  Returns '' for an empty iterable.
    """
    # Fix: materialize with list() instead of a copy comprehension.
    items = list(iterable)
    if not items:
        return ''
    return reduce(lambda acc, item: acc + item, items)
2a70a282771f5707a545a12c674f4602cd2f6d7c
693,903
def organiza(aluno_nota):
    """
    :param aluno_nota: list of (student, grade) pairs
    :return: the student(s) with the second-lowest grade; ties are listed
             in alphabetical order, one name per line
    """
    # Collect the distinct grades, sort ascending and take index 1,
    # i.e. the second-lowest grade.
    distinct_grades = sorted({nota for _, nota in aluno_nota})
    segunda_menor = distinct_grades[1]
    # Sort pairs by student name and keep those matching that grade.
    selected = [aluno for aluno, nota in sorted(aluno_nota) if nota == segunda_menor]
    return '\n'.join(selected)
a4cc3d183db831244c12fe1f052582726db3ac65
693,904
def read_def_file(def_file):
    """Read VAR=VALUE definitions from a def file into a dict.

    Skips comment lines (starting with '#'), blank lines and lines without
    '='.  The value is the first whitespace-separated token of the segment
    after the first '='; an empty value raises IndexError (unchanged from
    the original behaviour).

    Fix: the file is now opened with ``with`` so the handle is closed even
    if parsing raises mid-file.
    """
    ret = {}
    with open(def_file, 'r') as f:
        for raw_line in f:
            if raw_line.startswith("#"):
                continue
            line = raw_line.rstrip()
            if len(line) == 0:
                continue
            if "=" not in line:
                continue
            var_and_val = line.split("=")
            var = var_and_val[0]
            # First token after the first '=' (extra '='-segments ignored).
            val = var_and_val[1].split()[0]
            ret[var] = val
    return ret
3999378bca9ad4cdccaa480a50635ad567473c66
693,905
from pathlib import Path


def compute_custom_vars(job_dict, dirs):
    """Add job-specific derived parameters to *job_dict*.

    DO NOT CHANGE THE NAME OR ARGS OF THIS FUNCTION — it is invoked by
    the runner with the per-job parameter dict (row values plus globals)
    and the directory map from ``..utils.io:calculate_directories()``.

    :param job_dict: run parameters for one job; must contain an integer
        ``order_id`` (``job_id``, ``path_work`` and ``path_inputs`` are
        pre-computed by the caller and must not be set here)
    :param dirs: mapping of pipeline directory names to paths; must
        contain ``job_work``
    :return: *job_dict* with a ``run_inputs`` key added
    """
    work_root = Path(dirs["job_work"])
    job_subdir = "%05d" % job_dict["order_id"]
    job_dict["run_inputs"] = str(work_root / job_subdir / "derivatives")
    return job_dict
for example, if you had a csv file with rows [sub,ses,task,run,order_id], and also defined globals [conda_env_path, matlab_path], you would get a dict { sub: NIDA2322 ses: 1, task: 'rest', run: 2, order_id:5, conda_env_path:'/project2/conda/myenv', matlab_path:'/usr/bin/matlab' } :param dirs: output of ..utils.io:calculate_directories() :return: job_dict, plus keys you add! """ job_dict["run_inputs"] = str( Path(dirs["job_work"]) .joinpath("%05d" % job_dict["order_id"]) .joinpath("derivatives") ) # If you do not have anything to add, just return job_dict. return job_dict
17c010bb7d4871211707dac04e6b2d312b3aa233
693,906
def state_to_pixel(state):
    """Convert a generator state string to a pixel value.

    The pixel value is the ordinal of the state's final character.

    Parameters
    ----------
    state : `str`

    Returns
    -------
    `int`
    """
    last_char = state[-1]
    return ord(last_char)
1ec1ac8bd39403e21d2fe1972ef3e3445ac05e4f
693,909
def convert_size(size):
    """Format a byte count as a human-readable KB or MB string."""
    kilobytes = size // 1000
    if kilobytes > 1000:
        megabytes = round(kilobytes / 1000, 1)
        return f'{megabytes:,.1f} MB'
    return f'{kilobytes:,d} KB'
6fb83f3b4aa7db8dcdf28d48608b08ccd101819f
693,910
import argparse


def get_args():
    """Parse command-line arguments for the ecolite train dataset tool.

    :return: argparse.Namespace with ``type``, ``root_data_dir`` and
        ``batch_size`` attributes
    """
    parser = argparse.ArgumentParser(description='get ecolite train dataset')
    parser.add_argument('type', type=str, default="1", choices=['1', '2'],
                        help='1 is used for single record infer, 2 is used for full data infer')
    parser.add_argument('root_data_dir', type=str)
    # nargs='?' makes the positional optional so default=16 can actually
    # apply; previously the default was dead because the positional was
    # required.  Three-argument invocations keep working unchanged.
    parser.add_argument('batch_size', nargs='?', default=16, type=int,
                        metavar='N', help='mini-batch size (default: 16)')
    return parser.parse_args()
bb4f7ba391db2de5852eb9f47e35d63c6091bc0f
693,911
def filter_issues_on_state(issues, value):
    """Keep only the issues whose ``state`` field equals *value*.

    Parameters
    ----------
    issues : list
        Issue data (dicts with a ``"state"`` key).
    value : str
        Required state value, e.g. ``"open"``.

    Returns
    -------
    list
        The matching issues, original order preserved.
    """
    return [issue for issue in issues if issue["state"] == value]
3bbff601afb621a4b6cfe5caf89bd04aa6c85063
693,912
def map_faces(rotor):
    """Build entry-pinning maps between neutral positions and rotor pins.

    Returns a pair of dicts (neutral→pin, pin→neutral) offset by the
    rotor's current position, using 1-based pin numbers that wrap at 26.
    """
    pin = rotor.position + 1  # pin number rather than a zero-based index
    neutral_to_pins = {}
    pins_to_neutral = {}
    for neutral in range(1, 27):
        if pin > 26:
            pin -= 26
        neutral_to_pins[neutral] = pin
        # NOTE(review): the original flagged this reverse mapping as
        # "probably not right" — verify against the Enigma wiring spec.
        pins_to_neutral[pin] = neutral
        pin += 1
    return neutral_to_pins, pins_to_neutral
7eaf91c7b8ed0b8d662d2664d8561f4be8222cb4
693,913
import os


def count_dirs_and_files(directory="."):
    """Count directories and files under *directory*, recursively.

    Return a tuple ``(number_of_directories, number_of_files)``.
    """
    n_dirs = 0
    n_files = 0
    for _root, dirnames, filenames in os.walk(directory):
        n_dirs += len(dirnames)
        n_files += len(filenames)
    return (n_dirs, n_files)
0072be59f340d477a49c8e343c18cb9d5c4cbe9c
693,914
from pathlib import Path


def data_path(path):
    """Return *path* resolved inside the project's ``tests/data`` folder."""
    project_root = Path(__file__).parent.parent.parent
    return project_root / 'tests' / 'data' / path
37af3bde0869bd9746d9743d237b03b09373a408
693,915
def inputs(form_args):
    """Render *form_args* as hidden HTML ``<input>`` elements, one per line.

    A ``scope`` entry given as a list is flattened to a space-separated
    string before rendering.
    """
    template = '<input type="hidden" name="{}" value="{}"/>'
    rendered = []
    for name, value in form_args.items():
        if name == "scope" and isinstance(value, list):
            value = " ".join(value)
        rendered.append(template.format(name, value))
    return "\n".join(rendered)
92e5c16297adf2e9c9a93b1e7b47096528c6dae2
693,916
def select_bin(bin_list, values):
    """Return the first bin whose range contains *values*, or ``None``.

    Args:
        bin_list (arr): Bin objects to search.
        values (arr): Parameter values, in the same order the bins were
            generated from.

    Returns:
        The matching Bin, or ``None`` if no bin covers *values*.
    """
    return next(
        (candidate for candidate in bin_list if candidate.values_in_bin(values)),
        None,
    )
5ec77d2cddcf596e786467d96ce79ed3687515fc
693,917
def slice_bounds(seq, slice_obj, allow_step=False):
    """Calculate the effective (start, stop) bounds of a slice.

    Takes into account ``None`` indices and negative indices.

    :returns: tuple ``(start, stop, 1)``, s.t. ``0 <= start <= stop <= len(seq)``
    :raises ValueError: if slice_obj.step is not None.

    :param allow_step: If true, then the slice object may have a non-None
        step.  If it does, then return a tuple (start, stop, step)."""
    start, stop = (slice_obj.start, slice_obj.stop)
    if allow_step:
        # ``slice`` attributes are read-only, so keep the step in a local
        # variable; the original assigned to slice_obj.step, which raises
        # AttributeError on a real slice object.
        step = 1 if slice_obj.step is None else slice_obj.step
        # Use a recursive call without allow_step to find the slice
        # bounds.  If step is negative, then the roles of start and
        # stop (in terms of default values, etc), are swapped.
        if step < 0:
            start, stop, _ = slice_bounds(seq, slice(stop, start))
        else:
            start, stop, _ = slice_bounds(seq, slice(start, stop))
        return start, stop, step
    elif slice_obj.step not in (None, 1):
        raise ValueError('slices with steps are not supported by %s' %
                         seq.__class__.__name__)
    start = 0 if start is None else start
    stop = len(seq) if stop is None else stop
    start = max(0, len(seq) + start) if start < 0 else start
    stop = max(0, len(seq) + stop) if stop < 0 else stop
    if stop > 0:
        # Make sure stop doesn't go past the end of the list.
        try:
            # Avoid calculating len(seq), may be expensive for lazy sequences
            seq[stop - 1]
        except IndexError:
            stop = len(seq)
    start = min(start, stop)
    return start, stop, 1
1005ce9eccaa078d057dfa1a07be5fbdba3611a7
693,918
def get_parameters():
    """Return the pipeline parameter dictionary.

    Collects the BRENDA data location, output CSV definitions, the pI
    cutoff for ZIF growth and the residue-modification definitions.
    """
    # Modification types and their plotting colours.  Only succinylation
    # is implemented for now (LYS swapped with GLU); acetylation is
    # considered redundant when succinylation is available.
    modifications = {
        '0': {
            'colour': 'k',
            'name': 'unmodified',
        },
        '1': {
            'colour': 'firebrick',
            'name': 'succinylated',
            'target_res': 'LYS',
            'replace_res': 'GLU',
        },
    }
    param_dict = {
        # where all the brenda data files are
        'BRENDA_DB_loc': '/home/atarzia/psp/molecule_DBs/brenda_details/',
        'out_CSV_pi': "output_data_pi.csv",
        'out_columns_pi': ['fasta_file', 'acc.code', 'organism', 'EC.code',
                           'species', 'note', 'pi', 'modification',
                           'category'],
        'out_CSV_br': "output_data_br.csv",
        'out_columns_br': ['fasta_file'],
        # cut off for ZIF growth from pI
        'cutoff_pi': 6,
        'modifications': modifications,
        'diffuse_threshold': 4.2,
    }
    return param_dict
87dd61d6b8b4bdb8925b45d37760d24f1f0c808d
693,919
def get_quota(trans, id):
    """Fetch a Quota model object from the database by its encoded id.

    :param trans: transaction context providing ``security``,
        ``sa_session`` and ``model``
    :param id: encoded quota id
    :return: the Quota instance
    """
    decoded_id = trans.security.decode_id(id)
    return trans.sa_session.query(trans.model.Quota).get(decoded_id)
3c989923e77a837541f414a3e54a7be8911e1faf
693,920
import re


def REGEXMATCH(text, regular_expression):
    """
    Returns whether a piece of text matches a regular expression.

    >>> REGEXMATCH("Google Doc 101", "[0-9]+")
    True
    >>> REGEXMATCH("Google Doc", "[0-9]+")
    False
    >>> REGEXMATCH("Foo", "Bar")
    False
    """
    return re.search(regular_expression, text) is not None
d5625e9cd06bf3f05569892d09fe22c781f59c00
693,921
import json


def is_jsonable(obj):
    """Tell whether *obj* can be serialized to JSON.

    Parameters
    ----------
    obj : any
        The object/data-type to check.

    Returns
    -------
    boolean
        True when ``json.dumps(obj)`` succeeds, False on ``TypeError``.
    """
    try:
        json.dumps(obj)
    except TypeError:
        return False
    return True
50a568b898c9609206993372983d40add0be4603
693,922
def get_el_config(charge):
    """Return the electronic shell occupation for a given nuclear charge.

    Shells are filled in the listed order:
    1s, 2s, 2p, 3s, 3p, 4s, 3d, 4p, 5s, 4d, 5p, 6s, 4f, 5d, 6p, 7s, 5f, 6d, 7p

    :param charge: nuclear charge (number of electrons to place), 0..118
    :return: list of 19 electron counts, one per shell in the order above
    :raises ValueError: if charge is negative or exceeds total capacity
    """
    # Maximum number of electrons for each shell (sums to 118)
    max_el = [2, 2, 6, 2, 6, 2, 10, 6, 2, 10, 6, 2, 14, 10, 6, 2, 14, 10, 6]
    capacity_total = sum(max_el)
    if not 0 <= charge <= capacity_total:
        # The original looped forever for negative charges and crashed
        # with IndexError above 118 electrons.
        raise ValueError('charge must be between 0 and %d' % capacity_total)
    el_shell = []
    remaining = charge
    for capacity in max_el:
        filled = min(capacity, remaining)
        el_shell.append(filled)
        remaining -= filled
    # Return value(s)
    return el_shell
db8a06ab5e277ce5468b0e6d43670d92d9a57580
693,923
from datetime import datetime


def _elapsed_sec(start_date, end_date):
    """Return the whole number of seconds between two ISO-8601 timestamps.

    Only the first 19 characters (``YYYY-MM-DDTHH:MM:SS``) of each
    argument are parsed; fractional seconds and timezone suffixes are
    ignored.
    """
    fmt = "%Y-%m-%dT%H:%M:%S"
    begin = datetime.strptime(start_date[:19], fmt)
    end = datetime.strptime(end_date[:19], fmt)
    return int((end - begin).total_seconds())
ea61c4de0fd74b9e2d36b339a1ccabb25b1cc414
693,925
def parse_range(rng):
    """Convert a dash-separated range string (e.g. "1.5-3") into floats."""
    return list(map(float, rng.split("-")))
ea38ad66345d013a5343d62ed0d10e03434bfab8
693,926
from typing import List
import collections


def preprocess(tokens: List[str], fs=False):
    """Return text length, vocabulary size and optionally the frequency spectrum.

    :param fs: additionally calculate and return the frequency spectrum
    """
    text_length = len(tokens)
    vocabulary_size = len(set(tokens))
    if not fs:
        return text_length, vocabulary_size
    # Frequency spectrum: how many token types occur with each frequency.
    token_counts = collections.Counter(tokens)
    spectrum = dict(collections.Counter(token_counts.values()))
    return text_length, vocabulary_size, spectrum
03a20358116b3a44791fff32641abe9508e7c233
693,927
from pathlib import Path


def get_run_number(run_path: Path) -> int:
    """Get the "run number" of a specific run.

    The directory name's fourth underscore-separated field is expected
    to look like ``<run>of<total>``; the leading ``<run>`` is returned.

    Parameters
    ----------
    run_path : Path
        Path to the run directory.

    Returns
    -------
    int
        "run number"
    """
    run_field = run_path.name.split("_")[3]
    return int(run_field.split("of")[0])
08946b69bd2b4be818b2fbce2136c04f004bcf05
693,928
def find_winner_line(line):
    """Return the winner of the line if one exists, otherwise ``None``.

    A line is a row, a column or a diagonal; it has a winner when all
    three cells hold the same symbol (which may itself be ``None``).
    """
    if len(line) != 3:
        raise ValueError('invalid line')
    first = line[0]
    if line[1] == first and line[2] == first:
        return first  # May be None when the line is empty.
    return None
c83bd89a601eacf0ef1d9cfc40ec895b39f18f40
693,929
def build_simc_file(talent_string, covenant_string, profile_name):
    """Return the simc profile path for the given talent/covenant strings.

    Empty/falsy talent or covenant strings are omitted from the path.
    """
    segments = ["profiles"]
    if talent_string:
        segments.append(talent_string)
    if covenant_string:
        segments.append(covenant_string)
    segments.append("{0}.simc".format(profile_name))
    return "/".join(segments)
eeccdd7d5d82a7268e860eb139c52a50ce983577
693,930
def tokenize(text):
    """
    Pretend this uses the EXACT same tokenization algorithm used by
    CoreNLP (or some other NER tagger being used).  It returns a list
    of tokens in @text.
    """
    return text.split()
62797fcfc0b3b63ecd83a39176edb80c21131efa
693,931
def create_svg_mistral_tasks(task_list, radius=45):
    """Create an SVG UI diagram of Mistral task flow.

    This takes the output of get_mistral_tasks() and generates an SVG-based
    graphical diagram of the ordered Mistral tasks. The code automatically
    scales the SVG based on the circle radius value.

    Note that SVG circles don't scale radius by percentages very well
    which is why this is still pixel math. The circle radius is the
    diagonal length of the viewport which is not very useful in this case.

    :param task_list: sequence whose items' first element is the task name
        (only index 0 of each item is read)
    :param radius: circle radius in pixels; all other dimensions scale
        off it (default 45)
    :return: SVG markup as a string, or "[No Tasks Found]" for an empty list
    """
    # Layout constants derived from the radius; circle centres are spaced
    # 1.2 * diameter apart along the horizontal axis.
    indent = radius * 1.1
    diameter = radius * 2
    num_tasks = len(task_list)
    if num_tasks < 1:
        return "[No Tasks Found]"
    # Outer <svg> element sized to fit all circles plus margins.
    svg_output = ("<svg height=\"%d\" width=\"%d\">\n" %
                  ((diameter * 1.10),
                   ((num_tasks-1) * diameter * 1.3) + indent * 2))
    # Horizontal connector line running through every circle centre.
    svg_output += (" <line x1=\"%d\" y1=\"50%%\" x2=\"%d\" y2=\"50%%\" style="
                   "\"stroke:rgb(0,0,0);stroke-width:3\"/>\n" %
                   (indent, ((num_tasks-1) * diameter * 1.2) + indent))
    # One outlined circle per task.
    svg_output += (" <g stroke=\"black\" stroke-width=\"3\" fill="
                   "\"lightgrey\">\n")
    for counter in range(num_tasks):
        svg_output += (" <circle cx=\"%d\" cy=\"50%%\" r=\"%d\"/>\n" %
                       ((counter * diameter * 1.2 + indent), radius))
    svg_output += " </g>\n"
    # Task-name labels, horizontally centred on each circle.
    svg_output += " <g style=\"text-anchor: middle; font-size: 13px\">\n"
    for counter in range(num_tasks):
        svg_output += (" <text x=\"%d\" y=\"55%%\">%s</text>\n" %
                       ((counter * diameter * 1.2 + indent),
                        task_list[counter][0]))
    svg_output += " </g>\n"
    svg_output += "</svg>\n"
    return svg_output
efd9d6695a1a6e1b924a3f5ddf7bbd41fee16454
693,932
def unique(o, idfun=repr):
    """Reduce a list down to its unique elements.

    Uniqueness is judged by ``idfun(element)`` (default: ``repr``); the
    first occurrence of each key is kept, in original order.
    """
    seen = {}
    result = []
    for element in o:
        key = idfun(element)
        if key not in seen:
            seen[key] = element
            result.append(element)
    return result
9cc1d629e7b084acc838b4672dafd0af4df25298
693,933
import json


def get_json_input():
    """Load the pre-formed JSON fixture so testing input is uniform.

    :return: parsed contents of ``json_for_testing.json`` (read from the
        current working directory)
    """
    # The 'with' block already closes the file; the original additionally
    # called close() inside the block, which was redundant.
    with open('json_for_testing.json', 'r') as input_source:
        return json.load(input_source)
5b52d7437dabb4b3bb01403d2ce80797681f327d
693,934
import random


def random_text(size):
    """Return a random string of lowercase letters and digits.

    :param size: number of characters to generate
    :return: random alphanumeric string of length *size*
    """
    # The original constant read '...uvwqyz...': it duplicated 'q' and
    # omitted 'x' entirely, skewing the distribution.
    alphabet_and_numbers = 'abcdefghijklmnopqrstuvwxyz0123456789'
    return ''.join(random.choice(alphabet_and_numbers) for _ in range(size))
fe72e51479756da32456a57c671d51ccee067d72
693,935
import torch


def get_device(is_gpu=True):
    """Return the torch device to use: CUDA when requested and available,
    otherwise CPU."""
    use_cuda = is_gpu and torch.cuda.is_available()
    return torch.device('cuda' if use_cuda else 'cpu')
30a3389fb165d4dfb6719a50432d4576885461ba
693,937
def isnumeric(x):
    """Test whether the value can be represented as a number.

    :param x: any value
    :return: True if ``float(x)`` succeeds, False otherwise
    """
    try:
        float(x)
    except (TypeError, ValueError, OverflowError):
        # Narrowed from a bare 'except', which also swallowed
        # KeyboardInterrupt/SystemExit.
        return False
    return True
d98817a855e31ea90a9c26e71f60327217c3323d
693,938
def kill_win(process):
    """Kill a process via its Windows handle.

    Has no effect (returns None) on platforms where pywin32 is not
    available.
    """
    try:
        # Import inside the guard: the original imported win32process at
        # module level, so the ImportError fired at import time on
        # non-Windows platforms and this except clause could never run.
        # Unable to import 'module'
        # pylint: disable=F0401
        import win32process
        # Access to a protected member _handle of a client class
        # pylint: disable=W0212
        return win32process.TerminateProcess(process._handle, -1)
    except ImportError:
        pass
f5ca526e84c1a13dcb554adb7811f3cf752606ae
693,939
def get_output_layers(net):
    """
    Gets layers that make detections.

    Handles both older OpenCV, where getUnconnectedOutLayers() returns
    each index wrapped in a length-1 sequence, and OpenCV >= 4.5.4,
    where it returns a flat array of indices (the original 'i[0]'
    crashed there).
    """
    layer_names = net.getLayerNames()
    output_layers = []
    for raw_index in net.getUnconnectedOutLayers():
        # Length-1 sequence (old API) vs bare scalar (new API).
        index = raw_index[0] if hasattr(raw_index, '__len__') else raw_index
        output_layers.append(layer_names[int(index) - 1])
    return output_layers
30e50aa1183e46c717c909a8cf2190c661d18eae
693,940
import uuid


def generateCMSJobID():
    """
    _generateCMSJobID_

    Generate a global job ID at the UI (the repr of a random UUID).
    """
    job_uuid = uuid.uuid4()
    return repr(job_uuid)
a24df46301fe010f193ac21c2f88cb9d6abbe780
693,941
import codecs


def get_data(fileopen):
    """Read every line of a UTF-8 file and return them as a list of strings."""
    with codecs.open(fileopen, 'r', encoding='utf8') as handle:
        return handle.read().splitlines()
a3899bc619005740b343bfef2e396abe6bc49008
693,942
import re


def get_jinja_comments(text):
    """Return the bodies of all Jinja ``{# ... #}`` comments in *text*.

    Args:
        text (string): text to scan for jinja comments

    Returns:
        [list]: the comment bodies (delimiters stripped)
    """
    pattern = re.compile("(\\{#)((.|\n)*?)(\\#})", re.MULTILINE)
    return [match.group(2) for match in pattern.finditer(text)]
fd9f25a2a5695085616aeacdb1bf40c7d3adf9cf
693,943
def dict_equal(first, second):
    """Recursively determine whether two dictionaries are equal.

    :param dict first: The first dictionary to compare.
    :param dict second: The second dictionary to compare.
    :return: Whether or not the dictionaries are (recursively) equal.
    :rtype: bool
    """
    if set(first) != set(second):
        return False
    for key, left in first.items():
        right = second[key]
        left_is_dict = isinstance(left, dict)
        right_is_dict = isinstance(right, dict)
        if left_is_dict != right_is_dict:
            # A dict on only one side can never match.
            return False
        if left_is_dict:
            if not dict_equal(left, right):
                return False
        elif left != right:
            return False
    return True
269ae3059462155e812ff1a5337f59845cfa86d2
693,944
import math


def is_prime(n):
    """Primality test via trial division by 6k±1 candidates.

    :param n: integer to test
    :return: True if *n* is prime, False otherwise
    """
    if n <= 1:
        return False
    if n <= 3:
        return True
    if n % 2 == 0 or n % 3 == 0:
        return False
    # The original range stopped at int(sqrt(n)) EXCLUSIVE, so squares of
    # primes (25, 49, 121, ...) were wrongly reported as prime; the bound
    # must be inclusive.
    for k in range(5, math.isqrt(n) + 1, 6):
        if n % k == 0 or n % (k + 2) == 0:
            return False
    return True
4238510a36e63e7e54f887f828c4c8b5d5714f07
693,945
def PFAM_ID(input_file):
    """Read a file of PFAM accessions and return them without version suffixes.

    Each line is stripped and truncated at the first '.', so e.g.
    'PF00001.21' becomes 'PF00001'.

    :param input_file: path to a file with one PFAM accession per line
    :return: list of version-less accession strings
    """
    # 'with' guarantees the handle is closed even if reading raises,
    # replacing the original manual open/close pair.
    with open(input_file) as pfam_ids:
        return [line.strip().split('.')[0] for line in pfam_ids]
dfbdaaf0d254a13396b48d9f933a14cf242bd533
693,946
import math


def ln(input):
    """Natural logarithm.

    Returns the base-e logarithm of a number.

    Args:
        input(Number): a real value

    Returns:
        Number
    """
    natural_log = math.log(input, math.e)
    return natural_log
aca994104608bca957cd0ab4edfc9e7d42c0800b
693,947
def _normalize_sequence(input, rank):
    """If input is a scalar, create a sequence of length equal to the
    rank by duplicating the input.  If input is a sequence, check if its
    length is equal to the length of array.
    """
    # Strings are iterable but are treated as scalars here.
    treat_as_sequence = hasattr(input, '__iter__') and not isinstance(input, str)
    if not treat_as_sequence:
        return [input] * rank
    normalized = list(input)
    if len(normalized) != rank:
        raise RuntimeError(
            "sequence argument must have length equal to input rank")
    return normalized
00f2b650ed3507f4ea79d52d825b188770097412
693,948
def max_sub(a_list):
    """ Maximum index distance (i, j) with a_list[j] > a_list[i], O(n**2)
    >>> max_sub([1,2,3,4,5,6])
    (0, 5)
    >>> max_sub([1,2,3,4,5,6,0])
    (0, 5)
    >>> max_sub([15, 48, 1, 1, 1, 24, 2, 12, 14, 1, 47, 1])
    (0, 10)
    """
    best_span = 0
    best_start = 0
    best_end = 0
    size = len(a_list)
    for start in range(size - 1):
        for end in range(start + 1, size):
            # Only strictly larger values and strictly larger spans count.
            if a_list[end] > a_list[start] and end - start > best_span:
                best_span = end - start
                best_start = start
                best_end = end
    return best_start, best_end
25606d7d4b3e77828c5dad0ce51e722b12132c32
693,949
import csv


def get_data_from_file(file):
    """Read the first two rows of a CSV file and zip them into a dict.

    The first row supplies the keys (headings) and the second row the
    values; any further rows are ignored.

    :rtype: object
    """
    with open(file) as handle:
        reader = csv.reader(handle)
        headings = next(reader)
        values = next(reader)
    return dict(zip(headings, values))
2e8f2d077499d4d714bddc724dd347c8e0e91003
693,950