max_stars_repo_path stringlengths 4 237 | max_stars_repo_name stringlengths 6 117 | max_stars_count int64 0 95.2k | id stringlengths 1 7 | content stringlengths 12 593k | input_ids listlengths 7 549k |
|---|---|---|---|---|---|
office365/sharepoint/portal/group_site_manager.py | rikeshtailor/Office365-REST-Python-Client | 0 | 9312 | <reponame>rikeshtailor/Office365-REST-Python-Client
from office365.runtime.client_object import ClientObject
from office365.runtime.client_result import ClientResult
from office365.runtime.http.http_method import HttpMethod
from office365.runtime.queries.service_operation_query import ServiceOperationQuery
from office365.runtime.resource_path import ResourcePath
from office365.sharepoint.portal.group_creation_params import GroupCreationInformation
from office365.sharepoint.portal.group_site_info import GroupSiteInfo
class GroupSiteManager(ClientObject):
def __init__(self, context):
super(GroupSiteManager, self).__init__(context, ResourcePath("GroupSiteManager"), None)
def create_group_ex(self, display_name, alias, is_public, optional_params=None):
"""
Create a modern site
:param str display_name:
:param str alias:
:param bool is_public:
:param office365.sharepoint.portal.group_creation_params.GroupCreationParams or None optional_params:
"""
payload = GroupCreationInformation(display_name, alias, is_public, optional_params)
result = ClientResult(self.context, GroupSiteInfo())
qry = ServiceOperationQuery(self, "CreateGroupEx", None, payload, None, result)
self.context.add_query(qry)
return result
def delete(self, site_url):
"""
Deletes a SharePoint Team site
:type site_url: str
"""
payload = {
"siteUrl": site_url
}
qry = ServiceOperationQuery(self, "Delete", None, payload)
self.context.add_query(qry)
return self
def get_status(self, group_id):
"""Get the status of a SharePoint site
:type group_id: str
"""
result = ClientResult(self.context, GroupSiteInfo())
qry = ServiceOperationQuery(self, "GetSiteStatus", None, {'groupId': group_id}, None, result)
self.context.add_query(qry)
def _construct_status_request(request):
request.method = HttpMethod.Get
request.url += "?groupId='{0}'".format(group_id)
self.context.before_execute(_construct_status_request)
return result
| [
1,
529,
276,
1112,
420,
29958,
5357,
267,
400,
737,
272,
29914,
27247,
29941,
29953,
29945,
29899,
1525,
1254,
29899,
11980,
29899,
4032,
13,
3166,
8034,
29941,
29953,
29945,
29889,
15634,
29889,
4645,
29918,
3318,
1053,
12477,
2061,
13,
... |
python/cracking_codes_with_python/k_columnar_transposition_cipher_hack.py | MerrybyPractice/book-challanges-and-tutorials | 0 | 48975 | # Columnar Transposition Hack per Cracking Codes with Python
# https://www.nostarch.com/crackingcodes/ (BSD Licensed)
import pyperclip
from j_detect_english import is_english
from g_decrypt_columnar_transposition_cipher import decrypt_message as decrypt
def hack_transposition(text):
print('Press Ctrl-C to quit at any time.')
print('Hacking...')
for key in range(1, len(text)):
print('Trying key #%s...' % (key))
print()
print('...')
decrypted_text = decrypt(key, text)
print()
print('...')
if is_english(decrypted_text):
print()
print('Possible encryption hack:')
print('Key %s: %s' % (key, decrypted_text[:100]))
print()
print('Enter D if done, anything else to continue the hack:')
response = input('>')
if response.strip().upper().startswith('D'):
return decrypted_text
return None
def main(text):
hacked_text = hack_transposition(text)
if hacked_text == None:
print('Failed to hack the Columnar Transposition Encryption')
else:
print('Copying hacked string to clipboard:')
print(hacked_text)
pyperclip.copy(hacked_text)
if __name__ == '__main__':
text = input('What would you like to decrypt? ')
main(text)
| [
1,
396,
12481,
279,
4103,
3283,
379,
547,
639,
14279,
384,
292,
315,
2631,
411,
5132,
13,
29937,
2045,
597,
1636,
29889,
6582,
1279,
29889,
510,
29914,
29883,
22282,
292,
18137,
29914,
313,
29933,
7230,
10413,
21144,
29897,
13,
13,
5215... |
btree.py | chrisconley/python-data-structures | 0 | 1607164 | from collections import deque
class BinarySearchTree:
def __init__(self):
self.root = None
def get(self, key):
return self._get(self.root, key)
def _get(self, node, key):
if node is None:
return None
if key < node.key:
return self._get(node.left, key)
elif key > node.key:
return self._get(node.right, key)
else:
return node.value
def put(self, key, value):
self.root = self._put(self.root, key, value)
def _put(self, node, key, value):
if node is None:
return _Node(key, value, 1)
if key < node.key:
node.left = self._put(node.left, key, value)
elif key > node.key:
node.right = self._put(node.right, key, value)
else:
node.value = value
node.num_nodes = self._size(node.left) + self._size(node.right) + 1
return node
def delete(self, key):
self.root = self._delete(self.root, key)
def _delete(self, node, key):
if node is None:
return None
if key < node.key:
node.left = self._delete(node.left, key)
elif key > node.key:
node.right = self._delete(node.right, key)
else:
if node.right is None:
return node.left
if node.left is None:
return node.right
tmp = node
node = self._min(tmp.right)
node.right = self._delete_min(tmp.right)
node.left = tmp.left
node.num_nodes = self._size(node.left) + self._size(node.right) + 1
return node
def _delete_min(self, node):
if node.left is None:
return node.right
node.left = self._delete_min(node.left)
node.num_nodes = self._size(node.left) + self._size(node.right) + 1
return node
def _min(self, node):
if node.left is None:
return node
return self._min(node.left)
@property
def size(self):
return self._size(self.root)
def _size(self, node):
return node.size if node else 0
def serialize(self):
height = self.height
queue = deque([(self.root, 0)])
result = []
while len(queue) > 0:
parent, level = queue.popleft()
ret = parent and parent.key
if level == -1:
result.append(parent)
continue
result.append(ret)
if level != height-1:
if parent.left is None:
queue.append((None, -1))
else:
queue.append((parent.left, level+1))
if parent.right is None:
queue.append((None, -1))
else:
queue.append((parent.right, level+1))
return result
@property
def height(self):
return self._height(self.root)
def _height(self, node):
if node is not None:
if node.left is None and node.right is None:
return 1
else:
return 1 + max(self._height(node.left), self._height(node.right))
else:
return 0
class _Node:
def __init__(self, key, value, num_nodes):
self.key = key
self.value = value
self.num_nodes = num_nodes
self.left, self.right = None, None
@property
def size(self):
return self.num_nodes | [
1,
515,
16250,
1053,
316,
802,
13,
13,
13,
1990,
29479,
7974,
9643,
29901,
13,
1678,
822,
4770,
2344,
12035,
1311,
1125,
13,
4706,
1583,
29889,
4632,
353,
6213,
13,
13,
1678,
822,
679,
29898,
1311,
29892,
1820,
1125,
13,
4706,
736,
... |
pastebin/migrations/0006_auto_20170129_1502.py | johannessarpola/django-pastebin | 0 | 47244 | <reponame>johannessarpola/django-pastebin<filename>pastebin/migrations/0006_auto_20170129_1502.py<gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11a1 on 2017-01-29 15:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pastebin', '0005_auto_20170129_1333'),
]
operations = [
migrations.AlterField(
model_name='paste',
name='creation_date',
field=models.DateTimeField(verbose_name='creation date'),
),
migrations.AlterField(
model_name='paste',
name='expiry_date',
field=models.DateTimeField(verbose_name='expiration date'),
),
]
| [
1,
529,
276,
1112,
420,
29958,
29926,
1148,
812,
404,
6834,
2963,
29914,
14095,
29899,
16179,
2109,
29966,
9507,
29958,
16179,
2109,
29914,
26983,
800,
29914,
29900,
29900,
29900,
29953,
29918,
6921,
29918,
29906,
29900,
29896,
29955,
29900,
... |
3_2_HelloWorld.py | Asurada2015/TensorFlowactual-combat_code | 4 | 159118 | <reponame>Asurada2015/TensorFlowactual-combat_code
#%%
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
print(mnist.train.images.shape, mnist.train.labels.shape)
print(mnist.test.images.shape, mnist.test.labels.shape)
print(mnist.validation.images.shape, mnist.validation.labels.shape)
"""(55000, 784) (55000, 10)
(10000, 784) (10000, 10)
(5000, 784) (5000, 10)"""
import tensorflow as tf
sess = tf.InteractiveSession()
x = tf.placeholder(tf.float32, [None, 784])
# 数据类型,数据的格式,none表示不限制输入数据的条目,784表示每条输入是一个784维的数据
#
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b) # 关于这里二维数组和一维数组相加的事情,我还纠结了半天
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
tf.global_variables_initializer().run()
for i in range(1000):
batch_xs, batch_ys = mnist.train.next_batch(100)
train_step.run({x: batch_xs, y_: batch_ys})
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(accuracy.eval({x: mnist.test.images, y_: mnist.test.labels}))
| [
1,
529,
276,
1112,
420,
29958,
2887,
332,
1114,
29906,
29900,
29896,
29945,
29914,
29911,
6073,
17907,
19304,
29899,
510,
10222,
29918,
401,
13,
29937,
7686,
13,
29937,
14187,
1266,
29871,
29906,
29900,
29896,
29945,
450,
323,
6073,
17907,
... |
DQN DDQN Dueling/network.py | eayvali/DeepRL | 2 | 21356 | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 19 23:19:43 2020
@author: elif.ayvali
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
class deep_Q_net(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fc1_units (int): Number of nodes in first hidden layer
fc2_units (int): Number of nodes in second hidden layer
"""
super(deep_Q_net, self).__init__()
self.seed = torch.manual_seed(seed)
self.dqn_net = nn.Sequential(OrderedDict([
('fc1', nn.Linear(state_size, 256)),
('relu1', nn.ReLU()),
('fc2', nn.Linear(256, 128)),
('relu2', nn.ReLU()),
('fc3', nn.Linear(128, 64)),
('relu3', nn.ReLU()),
('fc4', nn.Linear(64, action_size))
]))
def forward(self, state):
"""Build a network that maps state -> action values."""
return self.dqn_net(state)
class dueling_Q_net(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed):
"""Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
"""
super(dueling_Q_net, self).__init__()
self.feature_modules = nn.Sequential(OrderedDict([
('fc1', nn.Linear(state_size, 256)),
('relu1', nn.ReLU()),
('fc2', nn.Linear(256, 128)),
('relu2', nn.ReLU()),
('fc3', nn.Linear(128, 64)),
]))
self.value_modules = nn.Sequential(OrderedDict([
('fc_v1', nn.Linear(64, 32)),
('relu)v1', nn.ReLU()),
('fc_v2', nn.Linear(32, 1)),
]))
self.advantage_modules = nn.Sequential(OrderedDict([
('fc_a1', nn.Linear(64, 32)),
('relu_a1', nn.ReLU()),
('fc_a2', nn.Linear(32, action_size)),
]))
def forward(self, state):
#Get common features
common_layers=self.feature_modules(state)
advantage=self.advantage_modules(common_layers)# batch_size x action_size
value=self.value_modules(common_layers) #batch_size x 1
return value + advantage - advantage.mean(dim=1).unsqueeze(1)
| [
1,
396,
448,
29930,
29899,
14137,
29901,
23616,
29899,
29947,
448,
29930,
29899,
13,
15945,
29908,
13,
20399,
373,
8991,
2627,
29871,
29896,
29929,
29871,
29906,
29941,
29901,
29896,
29929,
29901,
29946,
29941,
29871,
29906,
29900,
29906,
299... |
javascript/forms.py | uadson/studies | 0 | 99561 | <reponame>uadson/studies<gh_stars>0
from django import forms
class CalcImcForm(forms.Form):
peso = forms.CharField(
required=False)
altura = forms.CharField(
required=False) | [
1,
529,
276,
1112,
420,
29958,
29884,
328,
1100,
29914,
18082,
583,
29966,
12443,
29918,
303,
1503,
29958,
29900,
13,
3166,
9557,
1053,
7190,
13,
13,
1990,
3037,
29883,
1888,
29883,
2500,
29898,
9514,
29889,
2500,
1125,
13,
1678,
8928,
... |
src/bot/handlers/essence_part_handler.py | nchursin/claimant | 3 | 34211 | from typing import Optional, List
from aiogram import types, Dispatcher, filters
from aiogram.dispatcher import FSMContext
from aiogram.dispatcher.filters.state import StatesGroup, State
from aiogram.types import ReplyKeyboardMarkup
from handlers.common_actions_handlers import process_manual_enter, process_option_selection, \
process_complete_part_editing, claim_tmp_option_chosen, show_claim_tmp_example
from keyboards import emojis, get_common_start_kb, get_next_actions_kb, get_claim_parts_kb
from repository import Repository
from statistics import collect_statistic
CLAIM_PART: str = "essence"
class EssencePart(StatesGroup):
waiting_for_user_action = State()
waiting_for_option_chosen = State()
@collect_statistic(event_name="essence:start")
async def essence_start(message: types.Message, state: FSMContext):
repository: Repository = Repository()
claim_data: dict = repository.get_claim_data(message.from_user.id)
required_parts: List[str] = ["story"]
if claim_data.get("claim_data") is None or \
not any([part_name in claim_data["claim_data"].keys() for part_name in required_parts]):
claim_parts_kb: ReplyKeyboardMarkup = get_claim_parts_kb(message.from_user.id)
await message.reply("Пожалуйста, сперва заполните раздел 'фабула'.",
reply_markup=claim_parts_kb)
return
await EssencePart.waiting_for_user_action.set()
start_kb: ReplyKeyboardMarkup = get_common_start_kb()
await message.reply("Опишите суть нарушения. "
"Введите, почему вы считаете, что ваши права нарушают. "
"Или выберите одну из следующий опций.",
reply_markup=start_kb)
@collect_statistic(event_name="essence:show_example")
async def show_example(message: types.Message, state: FSMContext):
await show_claim_tmp_example(message, CLAIM_PART)
async def action_selected(message: types.Message, state: FSMContext):
option: Optional[str] = message.text
if option.endswith("выбрать из списка") or option.endswith("добавить еще из списка"):
await process_option_selection(message, CLAIM_PART, EssencePart)
return
if option.endswith("закончить заполнение"):
await process_complete_part_editing(message, state, CLAIM_PART)
return
await process_manual_enter(message, state, EssencePart)
async def option_chosen(callback_query: types.CallbackQuery, state: FSMContext):
await claim_tmp_option_chosen(callback_query, state, CLAIM_PART)
async def finish_option_choosing(callback_query: types.CallbackQuery):
await callback_query.answer()
await EssencePart.waiting_for_user_action.set()
next_actions_kb: ReplyKeyboardMarkup = get_next_actions_kb()
await callback_query.message.answer("Введите свой вариант самостоятельно. "
"Или выберите дальнейшее действие с помощью клавиатуры",
reply_markup=next_actions_kb)
def register_handlers(dp: Dispatcher):
dp.register_message_handler(essence_start, filters.Regexp(f"^{emojis.key} суть нарушения"))
dp.register_message_handler(show_example,
filters.Regexp(f"^{emojis.red_question_mark} показать пример"),
state=EssencePart.states)
dp.register_message_handler(action_selected, state=EssencePart.waiting_for_user_action)
dp.register_callback_query_handler(
option_chosen,
filters.Text(startswith="option"),
state=EssencePart.waiting_for_option_chosen
)
dp.register_callback_query_handler(finish_option_choosing,
filters.Text(equals="complete options"),
state=EssencePart.waiting_for_option_chosen)
| [
1,
515,
19229,
1053,
28379,
29892,
2391,
13,
13,
3166,
7468,
13342,
1053,
4072,
29892,
3295,
5041,
261,
29892,
18094,
13,
3166,
7468,
13342,
29889,
13369,
261,
1053,
383,
17061,
2677,
13,
3166,
7468,
13342,
29889,
13369,
261,
29889,
26705... |
piccel/ui/__init__.py | lesca-research/piccel | 2 | 188313 | from .generated import access_ui
from .generated import data_sheet_ui
from .generated import form_item_ui
from .generated import form_ui
from .generated import item_boolean_checkboxes_ui
from .generated import item_choice_radio_ui
from .generated import item_datetime_ui
from .generated import item_single_line_ui
from .generated import item_text_multi_line_ui
from .generated import login_ui
from .generated import progress_bar_ui
from .generated import resources
from .generated import section_ui
from .generated import selector_ui
from .generated import text_editor_ui
from .generated import workbook_ui
from .generated import workbook_creation_ui
from .generated import sheet_creation_ui
# from .generated import dynamic_vlist_ui
# from .generated import dynamic_vlist_item_ui
from .generated import form_editor_widget_ui
from .generated import form_editor_file_ui
from .generated import form_editor_sheet_ui
from .generated import form_edit_ui
from .generated import section_edit_ui
from .generated import item_edit_ui
from .generated import choice_edit_ui
from .generated import variable_edit_ui
from .generated import section_transition_edit_ui
from . import widgets
from . import main_qss
| [
1,
515,
869,
13525,
1053,
2130,
29918,
1481,
13,
3166,
869,
13525,
1053,
848,
29918,
9855,
29918,
1481,
13,
3166,
869,
13525,
1053,
883,
29918,
667,
29918,
1481,
13,
3166,
869,
13525,
1053,
883,
29918,
1481,
13,
3166,
869,
13525,
1053,
... |
locan/data/hulls/__init__.py | super-resolution/Locan | 8 | 9558 | <filename>locan/data/hulls/__init__.py
"""
Hull objects of localization data.
Submodules:
-----------
.. autosummary::
:toctree: ./
hull
alpha_shape
"""
from locan.data.hulls.alpha_shape import *
from locan.data.hulls.hull import *
__all__ = []
__all__.extend(hull.__all__)
__all__.extend(alpha_shape.__all__)
| [
1,
529,
9507,
29958,
2029,
273,
29914,
1272,
29914,
29882,
913,
29879,
29914,
1649,
2344,
26914,
2272,
13,
15945,
29908,
13,
29950,
913,
3618,
310,
1887,
2133,
848,
29889,
13,
13,
4035,
7576,
29901,
13,
1378,
5634,
13,
13,
636,
1120,
... |
09-PiDay2022/IBM_quantum_randomness.py | StrangeGirlMurph/CodingProjects | 0 | 175546 | from qiskit import *
from qiskit import IBMQ
from qiskit.tools.monitor import job_monitor
from qiskit.providers.ibmq import least_busy
def random_qubit():
IBMQ.load_account()
provider = IBMQ.get_provider("ibm-q")
small_devices = provider.backends(
filters=lambda x: x.configuration().n_qubits == 5
and not x.configuration().simulator
)
qcomp = least_busy(small_devices)
qr = QuantumRegister(1)
cr = ClassicalRegister(1)
circuit = QuantumCircuit(qr, cr)
circuit.h(0)
circuit.measure(0, 0)
job = execute(circuit, backend=qcomp, shots=1)
job_monitor(job)
return str(list(job.result().get_counts().keys())[0])
print(random_qubit())
| [
1,
515,
3855,
3873,
277,
1053,
334,
13,
3166,
3855,
3873,
277,
1053,
15731,
25566,
13,
3166,
3855,
3873,
277,
29889,
8504,
29889,
3712,
2105,
1053,
4982,
29918,
3712,
2105,
13,
3166,
3855,
3873,
277,
29889,
771,
29454,
29889,
747,
28466... |
moredata/enricher/elasticsearch_connector/__init__.py | thomassonobe/more-data | 0 | 57654 | from .elasticsearch_connector import *
from .index_handler import *
from .policy_handler import *
| [
1,
515,
869,
295,
20291,
29918,
11958,
2801,
1053,
334,
13,
3166,
869,
2248,
29918,
13789,
1053,
334,
13,
3166,
869,
22197,
29918,
13789,
1053,
334,
13,
2
] |
scripts/03_glove_build_counts.py | svlandeg/sense2vec | 1,140 | 124120 | <reponame>svlandeg/sense2vec<gh_stars>1000+
#!/usr/bin/env python
import os
from pathlib import Path
from wasabi import msg
import typer
def main(
# fmt: off
glove_dir: str = typer.Argument(..., help="Directory containing the GloVe build"),
in_dir: str = typer.Argument(..., help="Directory with preprocessed .s2v files"),
out_dir: str = typer.Argument(..., help="Path to output directory"),
min_count: int = typer.Option(5, "--min-count", "-c", help="Minimum count for inclusion in vocab"),
memory: float = typer.Option(4.0, "--memory", "-m", help="Soft limit for memory consumption, in GB"),
window_size: int = typer.Option(15, "--window-size", "-w", help="Number of context words on either side"),
verbose: int = typer.Option(2, "--verbose", "-v", help="Set verbosity: 0, 1, or 2"),
# fmt: on
):
"""
Step 3: Build vocabulary and frequency counts
Expects a directory of preprocessed .s2v input files and will use GloVe to
collect unigram counts and construct and shuffle cooccurrence data. See here
for installation instructions: https://github.com/stanfordnlp/GloVe
Note that this script will call into GloVe and expects you to pass in the
GloVe build directory (/build if you run the Makefile). The commands will
also be printed if you want to run them separately.
"""
input_path = Path(in_dir)
output_path = Path(out_dir)
if not Path(glove_dir).exists():
msg.fail("Can't find GloVe build directory", glove_dir, exits=1)
if not input_path.exists() or not input_path.is_dir():
msg.fail("Not a valid input directory", in_dir, exits=1)
input_files = [str(fp) for fp in input_path.iterdir() if fp.suffix == ".s2v"]
if not input_files:
msg.fail("No .s2v files found in input directory", in_dir, exits=1)
msg.info(f"Using {len(input_files)} input files")
if not output_path.exists():
output_path.mkdir(parents=True)
msg.good(f"Created output directory {out_dir}")
vocab_file = output_path / f"vocab.txt"
cooc_file = output_path / f"cooccurrence.bin"
cooc_shuffle_file = output_path / f"cooccurrence.shuf.bin"
msg.info("Creating vocabulary counts")
cmd = (
f"cat {' '.join(input_files)} | {glove_dir}/vocab_count "
f"-min-count {min_count} -verbose {verbose} > {vocab_file}"
)
print(cmd)
vocab_cmd = os.system(cmd)
if vocab_cmd != 0 or not Path(vocab_file).exists():
msg.fail("Failed creating vocab counts", exits=1)
msg.good("Created vocab counts", vocab_file)
msg.info("Creating cooccurrence statistics")
cmd = (
f"cat {' '.join(input_files)} | {glove_dir}/cooccur -memory {memory} "
f"-vocab-file {vocab_file} -verbose {verbose} "
f"-window-size {window_size} > {cooc_file}"
)
print(cmd)
cooccur_cmd = os.system(cmd)
if cooccur_cmd != 0 or not Path(cooc_file).exists():
msg.fail("Failed creating cooccurrence statistics", exits=1)
msg.good("Created cooccurrence statistics", cooc_file)
msg.info("Shuffling cooccurrence file")
cmd = (
f"{glove_dir}/shuffle -memory {memory} -verbose {verbose} "
f"< {cooc_file} > {cooc_shuffle_file}"
)
print(cmd)
shuffle_cmd = os.system(cmd)
if shuffle_cmd != 0 or not Path(cooc_shuffle_file).exists():
msg.fail("Failed to shuffle cooccurrence file", exits=1)
msg.good("Shuffled cooccurrence file", cooc_shuffle_file)
if __name__ == "__main__":
typer.run(main)
| [
1,
529,
276,
1112,
420,
29958,
4501,
28328,
29887,
29914,
29879,
1947,
29906,
2003,
29966,
12443,
29918,
303,
1503,
29958,
29896,
29900,
29900,
29900,
29974,
13,
29937,
14708,
4855,
29914,
2109,
29914,
6272,
3017,
13,
5215,
2897,
13,
3166,
... |
Lib/vanilla/vanillaGradientButton.py | miguelsousa/vanilla | 21 | 97761 | from AppKit import *
from vanillaButton import ImageButton
class GradientButton(ImageButton):
nsBezelStyle = NSSmallSquareBezelStyle
| [
1,
515,
2401,
13117,
1053,
334,
13,
3166,
1109,
2911,
3125,
1053,
7084,
3125,
13,
13,
13,
1990,
19295,
993,
3125,
29898,
2940,
3125,
1125,
13,
13,
1678,
17534,
3629,
10533,
5568,
353,
405,
1799,
29885,
497,
29903,
4718,
3629,
10533,
5... |
ui_test/user_flows.py | uktrade/dit-contact-forms | 2 | 156652 | from ui_test.selectors.questionnaire import QUESTIONNAIRE
from ui_test.selectors.form import FORM
def select_questionnaire(browser, options):
for key, value in options.items():
browser.find_by_css(QUESTIONNAIRE[key][value]).click()
browser.find_by_css(QUESTIONNAIRE["continue"]).click()
def submit_form(browser, options):
browser.find_by_css(FORM["message"]).first.type(options["message"])
browser.find_by_css(FORM["name"]).first.type(options["name"])
browser.find_by_css(FORM["email"]).first.type(options["email"])
browser.find_by_css(FORM["accept_terms"]).click()
browser.find_by_css(QUESTIONNAIRE["continue"]).click()
| [
1,
515,
14313,
29918,
1688,
29889,
2622,
943,
29889,
12470,
15421,
1053,
660,
4462,
1254,
2725,
3521,
29902,
1525,
13,
3166,
14313,
29918,
1688,
29889,
2622,
943,
29889,
689,
1053,
383,
12054,
13,
13,
13,
1753,
1831,
29918,
12470,
15421,
... |
pybomberman/__init__.py | pybomberman/pybomberman | 2 | 32978 | from .map import Map
print("Soon... https://github.com/pybomberman/pybomberman")
| [
1,
515,
869,
1958,
1053,
7315,
13,
13,
2158,
703,
29903,
6150,
856,
2045,
597,
3292,
29889,
510,
29914,
2272,
29890,
290,
495,
1171,
29914,
2272,
29890,
290,
495,
1171,
1159,
13,
2
] |
base_python/tests/test_flatten.py | cogment/cogment-verse | 23 | 106746 | <reponame>cogment/cogment-verse
# Copyright 2021 AI Redefined Inc. <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from data_pb2 import Space
from cogment_verse.spaces import flattened_dimensions
def test_flattened_dimensions_discrete():
assert flattened_dimensions(Space(properties=[Space.Property(discrete=Space.Discrete(num=2))])) == 2
assert (
flattened_dimensions(
Space(
properties=[
Space.Property(
discrete=Space.Discrete(
labels=["brake", "accelerate", "do nothing"],
num=2, # Will be ignored as there are more labels
)
)
]
)
)
== 3
)
assert (
flattened_dimensions(
Space(
properties=[
Space.Property(discrete=Space.Discrete(labels=["brake", "accelerate", "do nothing"], num=12))
]
)
)
== 12
)
assert (
flattened_dimensions(
Space(
properties=[
Space.Property(key="a", discrete=Space.Discrete(labels=["brake", "accelerate", "do nothing"])),
Space.Property(key="b", discrete=Space.Discrete(num=5)),
]
)
)
== 8
)
def test_flattened_dimensions_box():
assert flattened_dimensions(Space(properties=[Space.Property(box=Space.Box(shape=[2]))])) == 2
assert flattened_dimensions(Space(properties=[Space.Property(box=Space.Box(shape=[4]))])) == 4
assert flattened_dimensions(Space(properties=[Space.Property(box=Space.Box(shape=[2, 3, 4]))])) == 24
assert (
flattened_dimensions(
Space(
properties=[
Space.Property(key="a", box=Space.Box(shape=[10])),
Space.Property(key="b", box=Space.Box(shape=[2, 3, 4])),
]
)
)
== 34
)
def test_flattened_dimensions_mixed():
assert (
flattened_dimensions(
Space(
properties=[
Space.Property(key="a", box=Space.Box(shape=[10])),
Space.Property(key="b", discrete=Space.Discrete(labels=["brake", "accelerate", "do nothing"])),
Space.Property(key="c", box=Space.Box(shape=[2, 3, 4])),
]
)
)
== 37
)
| [
1,
529,
276,
1112,
420,
29958,
29883,
468,
358,
29914,
29883,
468,
358,
29899,
3901,
13,
29937,
14187,
1266,
29871,
29906,
29900,
29906,
29896,
319,
29902,
4367,
5598,
9266,
29889,
3532,
26862,
6227,
6778,
13,
29937,
13,
29937,
10413,
211... |
cbh.py | jensengroup/fragreact | 2 | 33656 | <reponame>jensengroup/fragreact<gh_stars>1-10
#!/usr/bin/env python
import numpy as np
import re
from rdkit import Chem
from rdkit.Chem import rdMolDescriptors
from itertools import combinations
import copy
def print_smiles(smiles_list, human=False):
smiles_dict = count_smiles(smiles_list)
keys = smiles_dict.keys()
keys.sort()
out = []
for key in keys:
out += [str(smiles_dict[key]) + " " + key]
return " ".join(out)
def print_reaction(reactants, products, human=False):
if not human:
reaction = ">>".join([".".join(reactants), ".".join(products)])
else:
reactants = print_smiles(reactants)
products = print_smiles(products)
reaction = reactants+ ">>"+ products
return reaction
def canonical(smiles):
"""
SMILES provided is canonical, so the output should be the same no matter
how a particular molecule is input
"""
m = Chem.MolFromSmiles(smiles)
smiles = Chem.MolToSmiles(m)
return smiles
def kekulize(smiles):
m = Chem.MolFromSmiles(smiles)
Chem.Kekulize(m)
smiles = Chem.MolToSmiles(m, kekuleSmiles=True)
return smiles
def count_hydrogens(smiles):
""" """
m = Chem.MolFromSmiles(smiles)
n_hydrogen = 0
for a in m.GetAtoms():
n_hydrogen += a.GetTotalNumHs()
# print a.GetAtomicNum()
# print a.GetTotalNumHs()
# print a.GetNumExplicitHs()
# print a.GetNumImplicitHs()
return n_hydrogen
def count_smiles(smiles_list):
"""
Count SMILES by creating a dictionary with SMILES as keys, point to the
number of that particular SMILES.
e.i. dict[smiles] = # of smiles
"""
smiles_dict = {}
components, components_count = np.unique(smiles_list, return_counts=True)
for comp, count in zip(components, components_count):
smiles_dict[comp] = count
return smiles_dict
def substract_smiles(A, B):
"""
A - B = Cp + Cn
where Cp has positive results
and Cn has negative results
"""
if isinstance(A, str): A = A.split(".")
if isinstance(B, str): B = B.split(".")
Cp = []
Cn = []
A = count_smiles(A)
B = count_smiles(B)
for key in np.unique(list(A.keys()) + list(B.keys())):
if key not in A:
Cn += [key] * B[key]
continue
if key not in B:
Cp += [key] * A[key]
continue
diff = A[key] - B[key]
if diff == 0:
continue
elif diff > 0:
Cp += [key]*diff
elif diff < 0:
Cn += [key]*abs(diff)
return Cp, Cn
def tuning(left_side, right_side):
corrected_left = []
corrected_right = []
left_side = count_smiles(left_side)
right_side = count_smiles(right_side)
for key in np.unique(list(left_side.keys()) + list(right_side.keys())):
if key not in left_side:
print("hello")
quit()
if key not in right_side:
print("hello2")
quit()
diff = right_side[key] - left_side[key]
if diff == 0:
continue
elif diff > 0:
corrected_left += [key] * diff
elif diff < 0:
corrected_right += [key] * diff
return corrected_left, corrected_right
def get_bond_type(m, a, b):
# NOTE
# If m is not kekulized then bonds can be AROMATIC
# which is a problem for the component schemes
try:
bond_type = str(m.GetBondBetweenAtoms(a, b).GetBondType())
except AttributeError:
return False
if bond_type == "SINGLE":
bond = ""
elif bond_type == "DOUBLE":
bond = "="
elif bond_type == "TRIPLE":
bond = "#"
else:
bond = False
return bond
def get_atoms(smiles, ignore_hydrogen=True):
smiles = kekulize(smiles)
p = re.compile(r"[A-Z][a-z]?")
atoms = p.findall(smiles)
if ignore_hydrogen:
atoms = [atom for atom in atoms if atom != "H"]
return atoms
def add_neighbours(mol, substructures):
substructures = list(substructures)
for j, idx in enumerate(substructures):
for i in idx:
A = mol.GetAtomWithIdx(i)
for B in A.GetNeighbors():
k = B.GetIdx()
substructures[j] += (k,)
return substructures
def get_components_neighbors(mol, atoms):
atoms = list(atoms)
for idx in atoms:
idx, = idx
A = mol.GetAtomWithIdx(idx)
for B in A.GetNeighbors():
idx_b = B.GetIdx()
atom = B.GetAtomicNum()
charge = B.GetFormalCharge()
bond = Chem.GetBondBetweenAtoms(mol, idx, idx_b)
return
def get_components(smiles, smart, kekulize=True, add=False):
m = Chem.MolFromSmiles(smiles)
smart = Chem.MolFromSmarts(smart)
if kekulize:
Chem.Kekulize(m)
substructures = m.GetSubstructMatches(smart)
components = []
if add:
substructures = add_neighbours(m, substructures)
for sub in substructures:
if add:
m_new = copy.copy(m)
m_new = Chem.RWMol(m_new)
for B, C in combinations(sub[1:], 2):
m_new.RemoveBond(B, C)
else:
m_new = m
component = Chem.MolFragmentToSmiles(m_new,
atomsToUse=sub,
isomericSmiles=True,
kekuleSmiles=True,
canonical=True)
A = m.GetAtomWithIdx(sub[0])
mc = Chem.MolFromSmiles(component)
n_atoms = mc.GetNumAtoms()
n_bonds = len(mc.GetBonds())
component = Chem.MolToSmiles(mc)
if "+" in component or "-" in component or "H" in component:
# Very awful hack to fix the charged molecules and their explicit
# hydrogens
charges = np.zeros(n_atoms, dtype=int)
for idx in range(n_atoms):
atom = mc.GetAtomWithIdx(idx)
atom.SetNumExplicitHs(0)
charge = atom.GetFormalCharge()
charges[idx] = charge
atom.SetFormalCharge(0)
component = Chem.MolToSmiles(mc, canonical=False)
component = component.replace("[", "").replace("]","")
mc = Chem.MolFromSmiles(component)
for idx, charge in zip(range(n_atoms), charges):
atom = mc.GetAtomWithIdx(idx)
charge = int(charge)
atom.SetFormalCharge(charge)
component = Chem.MolToSmiles(mc)
if n_atoms <= n_bonds:
mw = Chem.RWMol(m)
if len(sub) == 3:
mw.RemoveBond(sub[0], sub[-1])
elif len(sub) == 4 or len(sub) == 5:
for i in range(0, n_atoms):
for j in range(i+1, n_atoms):
if i == 1 or j == 1: continue
mw.RemoveBond(sub[i], sub[j])
component = Chem.MolFragmentToSmiles(mw,
atomsToUse=sub,
isomericSmiles=True,
kekuleSmiles=True,
canonical=True)
if "1" in component:
quit("Error connectivity")
else:
component = Chem.MolToSmiles(mc)
# charge = Chem.GetFormalCharge(mc)
#
# if not charge == 0:
# # NOTE
# # Lots of lots of if case down this road
#
# n_atoms = mc.GetNumAtoms()
#
# for i in range(n_atoms):
#
# atom = mc.GetAtomWithIdx(i)
# charge = atom.GetFormalCharge()
#
# if not charge == 0:
# atom.SetFormalCharge(0)
component = canonical(component)
components += [component]
return components
def get_components_scheme1(smiles, kekulize=True):
c1 = "[*]~[*]"
if "+" in smiles or "-" in smiles:
pass
else:
return get_components(smiles, c1)
# The code below doesn't get charges
return get_components(smiles, c1)
c1 = Chem.MolFromSmarts(c1)
m = Chem.MolFromSmiles(smiles)
if kekulize:
Chem.Kekulize(m)
substructures = m.GetSubstructMatches(c1)
components = []
for sub in substructures:
a, b = sub
ab = get_bond_type(m, a, b)
a = m.GetAtomWithIdx(a).GetSymbol()
b = m.GetAtomWithIdx(b).GetSymbol()
component = a + ab + b
components.append(component)
components = [canonical(component) for component in components]
return components
def get_components_scheme2(smiles, kekulize=True):
c1 = "[D2]"
c2 = "[*]~[D2]~[*]"
c3 = "[*]~[D3](~[*])~[*]"
c4 = "[*]~[*](~[*])(~[*])~[*]"
# if "+" in smiles or "-" in smiles:
# pass
# else:
components = []
components += get_components(smiles, c1, add=True)
# components += get_components(smiles, c2)
components += get_components(smiles, c3)
components += get_components(smiles, c4)
return components
c2 = Chem.MolFromSmarts(c2)
c3 = Chem.MolFromSmarts(c3)
c4 = Chem.MolFromSmarts(c4)
m = Chem.MolFromSmiles(smiles)
if kekulize:
Chem.Kekulize(m)
substructures = m.GetSubstructMatches(c2)
components = []
for sub in substructures:
a, b, c = sub
ab = get_bond_type(m, a, b)
bc = get_bond_type(m, b, c)
a = m.GetAtomWithIdx(a).GetSymbol()
b = m.GetAtomWithIdx(b).GetSymbol()
c = m.GetAtomWithIdx(c).GetSymbol()
component = a + ab + b + bc + c
components.append(component)
substructures = m.GetSubstructMatches(c3)
for sub in substructures:
a, b, c, d = sub
ab = get_bond_type(m, a, b)
bc = get_bond_type(m, b, c)
bd = get_bond_type(m, b, d)
a = m.GetAtomWithIdx(a).GetSymbol()
b = m.GetAtomWithIdx(b).GetSymbol()
c = m.GetAtomWithIdx(c).GetSymbol()
d = m.GetAtomWithIdx(d).GetSymbol()
component = a + ab + b + "(" + bc + c + ")" + bd + d
components.append(component)
substructures = m.GetSubstructMatches(c4)
for sub in substructures:
a, b, c, d, e = sub
ab = get_bond_type(m, a, b)
bc = get_bond_type(m, b, c)
bd = get_bond_type(m, b, d)
be = get_bond_type(m, b, e)
a = m.GetAtomWithIdx(a).GetSymbol()
b = m.GetAtomWithIdx(b).GetSymbol()
c = m.GetAtomWithIdx(c).GetSymbol()
d = m.GetAtomWithIdx(d).GetSymbol()
e = m.GetAtomWithIdx(e).GetSymbol()
component = a + ab + b
component += "(" + bc + c + ")"
component += "(" + bd + d + ")"
component += be + e
components.append(component)
components = [canonical(component) for component in components]
return components
def decompontent(smiles, scheme=1):
if scheme == 1: decompontent_scheme = decompontent_scheme1
elif scheme == 2: decompontent_scheme = decompontent_scheme2
left, right = decompontent_scheme(smiles)
return left, right
def decompontent_scheme1(smiles):
"""
Tune the equation
A (bb) => aa
where
A (target) is big smiles
aa (scheme1 components) is scheme2 components
bb (atoms) is additional bonds required, to have equald bonds on each side
this is done for each A which consists of len(aa) > 0
"""
components = get_components_scheme1(smiles)
if len(components) == 0:
return [], []
bonds_leftside = get_atoms(smiles)
bonds_rightside = []
for component in components:
bonds_rightside += get_atoms(component)
left, right = tuning(bonds_leftside, bonds_rightside)
right += components
return left, right
def decompontent_scheme2(smiles):
"""
Tune the equation
A (bb) => aa
where
A (target) is big smiles
aa (scheme2 components) is scheme2 components
bb (single bonds) is additional bonds required, to have equald bonds on each side
this is done for each A which consists of len(aa) > 0
"""
components = get_components_scheme2(smiles)
if len(components) == 0:
return [], []
bonds_leftside = get_components_scheme1(smiles)
bonds_rightside = []
for component in components:
bonds_rightside += get_components_scheme1(component)
left, right = tuning(bonds_leftside, bonds_rightside)
right += components
if not check_atoms([smiles] + left, right):
print("Error in fragreact tuneing:", smiles)
print([smiles], left, right)
quit()
return left, right
def resultant(reactants, products, scheme=1):
"""
assummed that smiles lists are both split(".") and canonical at this point
"""
reactants_leftside = []
reactants_rightside = []
products_leftside = []
products_rightside = []
reactants_missing = []
products_missing = []
if scheme == 1:
decompontent_scheme = decompontent_scheme1
elif scheme == 2:
decompontent_scheme = decompontent_scheme2
for reactant in reactants:
left, right = decompontent_scheme(reactant)
if len(left) == 0 and len(right) == 0:
reactants_missing += [reactant]
reactants_leftside += left
reactants_rightside += right
for product in products:
left, right = decompontent_scheme(product)
if len(left) == 0 and len(right) == 0:
products_missing += [product]
products_leftside += left
products_rightside += right
left_positive, left_negative = substract_smiles(products_leftside, reactants_leftside)
right_positive, right_negative = substract_smiles(products_rightside, reactants_rightside)
left = left_positive + right_negative + reactants_missing
right = right_positive + left_negative + products_missing
left, right = substract_smiles(left, right)
hydrogens_left = 0
hydrogens_right = 0
for each in left:
hydrogens_left += count_hydrogens(each)
for each in right:
hydrogens_right += count_hydrogens(each)
tune_hydrogens = hydrogens_left - hydrogens_right
if tune_hydrogens < 0:
left += ['[H+]']*abs(tune_hydrogens)
if tune_hydrogens > 0:
right += ['[H+]']*tune_hydrogens
return left, right
def split_smiles(smiles, num_sep=None):
"""
number seperator num_sep (e.g. 3xCC, num_spe="x")
"""
if type(smiles) == type(""):
smiles_list = smiles.split(".")
else:
smiles_list = smiles
for i, smiles in enumerate(smiles_list):
smiles = smiles.split(".")
if len(smiles) > 1:
smiles_list[i] = smiles[0]
smiles_list += smiles[1:]
if num_sep:
for i, smiles in enumerate(smiles_list):
if num_sep in smiles:
num, smiles = smiles.split(num_sep)
num = int(num)
smiles_list[i] = smiles
smiles_list += [smiles]*(num-1)
return smiles_list
def cbh_n(reactants, products, scheme, do_canonical=True):
"""
Use connectivity-based hieracy for reaction (reactants -> products)
in:
reactants -- list of SMILES
products -- list of SMILES
scheme -- int level of connecitivty
out:
left -- list of smiles for the reactant part of the CBHn reaction
right -- list of smiles for the product part of the CBHn reaction
"""
if do_canonical:
reactants = [canonical(smiles) for smiles in reactants]
products = [canonical(smiles) for smiles in products]
left, right = resultant(reactants, products, scheme=scheme)
return left, right
def check_atoms(reactants, products):
"""
Check the validity of the reaction.
Reaction should have eq. no. of atoms for both reactants and products.
"""
ratoms = [get_atoms(smiles) for smiles in reactants]
patoms = [get_atoms(smiles) for smiles in products]
# flatten
ratoms = sum(ratoms, [])
patoms = sum(patoms, [])
ratoms.sort()
patoms.sort()
return ratoms == patoms
def check_reaction(reactants, products):
"""
"""
if isinstance(reactants, list): reactants = ".".join(reactants)
if isinstance(products, list): products = ".".join(products)
reactants = Chem.MolFromSmiles(reactants)
products = Chem.MolFromSmiles(products)
return rdMolDescriptors.CalcMolFormula(reactants) == rdMolDescriptors.CalcMolFormula(products)
| [
1,
529,
276,
1112,
420,
29958,
29926,
14762,
2972,
29914,
29888,
1431,
8423,
29966,
12443,
29918,
303,
1503,
29958,
29896,
29899,
29896,
29900,
13,
29937,
14708,
4855,
29914,
2109,
29914,
6272,
3017,
13,
13,
5215,
12655,
408,
7442,
13,
52... |
mlcomp/parallelm/pipeline/component_dir_helper.py | lisapm/mlpiper | 7 | 198337 | <filename>mlcomp/parallelm/pipeline/component_dir_helper.py<gh_stars>1-10
import pkg_resources
import logging
import os
from parallelm.common.base import Base
class ComponentDirHelper(Base):
def __init__(self, pkg, main_program):
"""
Extract component directory outside of egg, so an external command can run
:param main_program: The main program to run. E.g. main.py
:param pkg: The package the main_program is in, this is required in order to extract the files of the componenbt
outisde the egg
"""
super(ComponentDirHelper, self).__init__(logging.getLogger(self.logger_name()))
self._logger.debug("pkg: {}, main_program: {}".format(pkg, main_program))
self._pkg = pkg
self._main_program = main_program
def extract_component_out_of_egg(self):
"""
The artifact dir will contain all the files needed to run the R code.
This method will copy the files outside of the egg into the artifact dir
:return:
"""
# TODO: check what happens if we have a directory inside the component dir
ll = pkg_resources.resource_listdir(self._pkg, "")
for file_name in ll:
real_file_name = pkg_resources.resource_filename(self._pkg, file_name)
self._logger.debug("file: {}".format(file_name))
self._logger.debug("real_file: {}".format(real_file_name))
# Finding the directory we need to CD into
base_file = os.path.basename(self._main_program)
self._logger.debug("base_file: ".format(base_file))
real_file_name = pkg_resources.resource_filename(self._pkg, base_file)
component_extracted_dir = os.path.dirname(real_file_name)
self._logger.debug("Extraction dir: {}".format(component_extracted_dir))
self._logger.debug("Done building artifact dir:")
self._logger.debug("======================")
return component_extracted_dir
| [
1,
529,
9507,
29958,
828,
2388,
29914,
23482,
29885,
29914,
13096,
5570,
29914,
9700,
29918,
3972,
29918,
20907,
29889,
2272,
29966,
12443,
29918,
303,
1503,
29958,
29896,
29899,
29896,
29900,
13,
5215,
282,
9415,
29918,
13237,
13,
5215,
12... |
test.py | nerdingitout/STT-- | 0 | 28057 | <gh_stars>0
import pandas as pd
import json
import csv
# importing the module
import json
# Opening JSON file
with open('response.json') as json_file:
data = json.load(json_file)
# for reading nested data [0] represents
# the index value of the list
print(data['results'][0]['alternatives']['transcript'])
# for printing the key-value pair of
# nested dictionary for looop can be used
print("\nPrinting nested dicitonary as a key-value pair\n")
for i in data['people1']:
print("Name:", i['name'])
print("Website:", i['website'])
print("From:", i['from'])
print()
#def json_csv(filename):
# with open(filename) as data_file: #opening json file
# data = json.load(data_file) #loading json data
# normalized_df = pd.json_normalize(data)
# print(normalized_df['results'][0])
# normalized_df.to_csv('my_csv_file.csv',index=False)
# return pd.DataFrame(data['results'])
json_csv('response.json') #calling the json_csv function, paramter is the source json file
#file = open('response.json')
#obj = json.load(file)
#for element in obj['results']:
# for alternative in element['alternatives']:
# for stamp in alternative['timestamps']:
# name, value1, value2 = stamp
# print(stamp)
| [
1,
529,
12443,
29918,
303,
1503,
29958,
29900,
13,
5215,
11701,
408,
10518,
13,
5215,
4390,
13,
5215,
11799,
13,
13,
13,
13,
29937,
28348,
278,
3883,
29871,
13,
5215,
4390,
29871,
13,
259,
13,
29937,
4673,
292,
4663,
934,
29871,
13,
... |
vulnerable_people_form/integrations/google_analytics.py | uk-gov-mirror/alphagov.govuk-shielded-vulnerable-people-service | 3 | 125960 | import requests
import sentry_sdk
from flask import current_app
def track_event(category, action, label=None, value=0):
data = {
"v": "1", # API Version.
"tid": current_app.config["GA_TRACKING_ID"], # Tracking ID / Property ID.
# Anonymous Client Identifier. Ideally, this should be a UUID that
# is associated with particular user, device, or browser instance.
"cid": "555",
"t": "event", # Event hit type.
"ec": category, # Event category.
"ea": action, # Event action.
"el": label, # Event label.
"ev": value, # Event value, must be an integer
"ua": "Opera/9.80 (Windows NT 6.0) Presto/2.12.388 Version/12.14",
}
response = requests.post("https://www.google-analytics.com/collect", data=data)
# If the request fails, this will raise a RequestException. Depending
# on your application's needs, this may be a non-error and can be caught
# by the caller.
response.raise_for_status()
def track_nhs_userinfo_and_form_answers_differs():
try:
track_event("NHS info retrieved via oidc and form differs", "registration")
except Exception as e:
sentry_sdk.capture_exception(e)
def track_nhs_number_and_form_value_differs():
try:
track_event("NHS number retrieved via oidc and that in form differs", "registration")
except Exception as e:
sentry_sdk.capture_exception(e)
| [
1,
1053,
7274,
13,
5215,
2665,
719,
29918,
15348,
13,
3166,
29784,
1053,
1857,
29918,
932,
13,
13,
13,
1753,
5702,
29918,
3696,
29898,
7320,
29892,
3158,
29892,
3858,
29922,
8516,
29892,
995,
29922,
29900,
1125,
13,
1678,
848,
353,
426,... |
assignment3/code/q4.1.py | liusida/ds2 | 0 | 121156 | # requirements.txt:
# pyro 1.6.0
# torch 1.8.0
import pyro
from pyro.distributions import Normal,Gamma,InverseGamma,Bernoulli,Poisson
import matplotlib.pyplot as plt
# import pyro.poutine as poutine
pyro.set_rng_seed(101)
def normal_density_estimation(obs, N):
assert obs is None or N==obs.shape[0]
loc = pyro.sample("loc", Normal(0,1))
inverse_scale = pyro.sample("inverse_scale", Gamma(3,2))
with pyro.plate("n", N):
data = pyro.sample(f"data", Normal(loc, 1/inverse_scale), obs=obs)
return data
if __name__ == "__main__":
# simple test
data = normal_density_estimation(None, 100000)
plt.hist(data.detach().numpy(), bins="auto")
plt.show() | [
1,
396,
11780,
29889,
3945,
29901,
13,
29937,
11451,
307,
29871,
29896,
29889,
29953,
29889,
29900,
13,
29937,
4842,
305,
29871,
29896,
29889,
29947,
29889,
29900,
13,
13,
5215,
11451,
307,
13,
3166,
11451,
307,
29889,
27691,
29879,
1053,
... |
openrave/docs/source/tutorials/openravepy_examples/simple_environment_loading.py | jdsika/TUM_HOly | 2 | 88903 | <gh_stars>1-10
"""Loads up an environment, attaches a viewer, loads a scene, and requests information about the robot.
"""
from openravepy import *
env = Environment() # create openrave environment
env.SetViewer('qtcoin') # attach viewer (optional)
env.Load('data/lab1.env.xml') # load a simple scene
robot = env.GetRobots()[0] # get the first robot
with env: # lock the environment since robot will be used
raveLogInfo("Robot "+robot.GetName()+" has "+repr(robot.GetDOF())+" joints with values:\n"+repr(robot.GetDOFValues()))
robot.SetDOFValues([0.5],[0]) # set joint 0 to value 0.5
T = robot.GetLinks()[1].GetTransform() # get the transform of link 1
raveLogInfo("The transformation of link 1 is:\n"+repr(T))
| [
1,
529,
12443,
29918,
303,
1503,
29958,
29896,
29899,
29896,
29900,
13,
15945,
29908,
5896,
29879,
701,
385,
5177,
29892,
10641,
267,
263,
6316,
556,
29892,
15376,
263,
9088,
29892,
322,
7274,
2472,
1048,
278,
19964,
29889,
13,
15945,
299... |
TestMatrices.py | Tsyrema/Computing-the-Distance-Matrix-and-the-Covariance-Matrix-of-Data | 0 | 108405 | # script to test your computation code
# do not change this file
from ComputeMatrices import compute_distance_naive, \
compute_distance_smart, compute_correlation_naive, \
compute_correlation_smart
import numpy as np
from sklearn.datasets import load_iris
# my computation
def my_comp_distance(X):
N = X.shape[0]
D = X[0].shape[0]
M = np.zeros([N,N])
return M
# an example code for testing
def main():
iris = load_iris()
X = iris.data
distance_true = my_comp_distance(X)
distance_loop = compute_distance_naive(X)
distance_cool = compute_distance_smart(X)
print np.allclose(distance_true, distance_loop)
print np.allclose(distance_true, distance_cool)
if __name__ == "__main__": main()
| [
1,
396,
2471,
304,
1243,
596,
16287,
775,
13,
29937,
437,
451,
1735,
445,
934,
13,
13,
3166,
11796,
29872,
29924,
8141,
1575,
1053,
10272,
29918,
19244,
29918,
1056,
573,
29892,
320,
13,
1678,
10272,
29918,
19244,
29918,
3844,
442,
2989... |
sailfish/solvers/scdg_1d.py | Javk5pakfa/sailfish | 1 | 94987 | <reponame>Javk5pakfa/sailfish
"""
An n-th order discontinuous Galerkin solver for 1D scalar advection.
"""
from typing import NamedTuple
from numpy.polynomial.legendre import leggauss, Legendre
from sailfish.mesh import PlanarCartesianMesh
from sailfish.solver import SolverBase
class CellData:
"""
Gauss weights, quadrature points, and tabulated Legendre polonomials.
This class works for n-th order Gaussian quadrature in 1D.
"""
def __init__(self, order=1):
import numpy as np
if order <= 0:
raise ValueError("cell order must be at least 1")
def leg(x, n, m=0):
c = [(2 * n + 1) ** 0.5 if i is n else 0.0 for i in range(n + 1)]
return Legendre(c).deriv(m)(x)
f = [-1.0, 1.0] # xsi-coordinate of faces
g, w = leggauss(order)
self.gauss_points = g
self.weights = w
self.phi_faces = np.array([[leg(x, n, m=0) for n in range(order)] for x in f])
self.phi_value = np.array([[leg(x, n, m=0) for n in range(order)] for x in g])
self.phi_deriv = np.array([[leg(x, n, m=1) for n in range(order)] for x in g])
self.order = order
def to_weights(self, ux):
w = self.weights
p = self.phi_value
o = self.order
return [sum(ux[j] * p[j][n] * w[j] for j in range(o)) * 0.5 for n in range(o)]
def sample(self, uw, j):
return dot(uw, self.phi_value[j])
@property
def num_points(self):
return self.order
def dot(u, p):
return sum(u[i] * p[i] for i in range(u.shape[0]))
def rhs(physics, uw, cell, dx, uwdot):
import numpy as np
if physics.equation == "advection":
wavespeed = physics.wavespeed
def flux(ux):
return wavespeed * ux
def upwind(ul, ur):
if wavespeed > 0.0:
return flux(ul)
else:
return flux(ur)
elif physics.equation == "burgers":
def flux(ux):
return 0.5 * ux * ux
def upwind(ul, ur):
al = ul
ar = ur
if al > 0.0 and ar > 0.0:
return flux(ul)
elif al < 0.0 and ar < 0.0:
return flux(ur)
else:
return 0.0
nz = uw.shape[0]
pv = cell.phi_value
pf = cell.phi_faces
pd = cell.phi_deriv
w = cell.weights
h = [-1.0, 1.0]
for i in range(nz):
im1 = (i - 1 + nz) % nz
ip1 = (i + 1 + nz) % nz
uimh_l = dot(uw[im1], pf[1])
uimh_r = dot(uw[i], pf[0])
uiph_l = dot(uw[i], pf[1])
uiph_r = dot(uw[ip1], pf[0])
fimh = upwind(uimh_l, uimh_r)
fiph = upwind(uiph_l, uiph_r)
fs = [fimh, fiph]
ux = [cell.sample(uw[i], j) for j in range(cell.order)]
fx = [flux(u) for u in ux]
for n in range(cell.order):
udot_s = -sum(fs[j] * pf[j][n] * h[j] for j in range(2)) / dx
udot_v = +sum(fx[j] * pd[j][n] * w[j] for j in range(cell.num_points)) / dx
uwdot[i, n] = udot_s + udot_v
class Options(NamedTuple):
order: int = 1
integrator: str = "rk2"
class Physics(NamedTuple):
wavespeed: float = 1.0
equation: str = "advection" # or burgers
class Solver(SolverBase):
"""
An n-th order, discontinuous Galerkin solver for 1D scalar advection.
Time-advance integrator options:
- :code:`rk1`: Forward Euler
- :code:`rk2`: SSP-RK2 of Shu & Osher (1988; Eq. 2.15)
- :code:`rk3`: SSP-RK3 of Shu & Osher (1988; Eq. 2.18)
- :code:`rk3-sr02`: four-stage 3rd Order SSP-4RK3 of Spiteri & Ruuth (2002)
"""
def __init__(
self,
setup=None,
mesh=None,
time=0.0,
solution=None,
num_patches=1,
mode="cpu",
physics=dict(),
options=dict(),
):
import numpy as np
options = Options(**options)
physics = Physics(**physics)
cell = CellData(order=options.order)
if num_patches != 1:
raise ValueError("only works on one patch")
if type(mesh) != PlanarCartesianMesh:
raise ValueError("only the planar cartesian mesh is supported")
if mode != "cpu":
raise ValueError("only cpu mode is supported")
if setup.boundary_condition != "periodic":
raise ValueError("only periodic boundaries are supported")
if physics.equation not in ["advection", "burgers"]:
raise ValueError("physics.equation must be advection or burgers")
if options.integrator not in ["rk1", "rk2", "rk3", "rk3-sr02"]:
raise ValueError("options.integrator must be rk1|rk2|rk3|rk3-sr02")
if options.order <= 0:
raise ValueError("option.order must be greater than 0")
if solution is None:
num_zones = mesh.shape[0]
xf = mesh.faces(0, num_zones) # face coordinates
px = np.zeros([num_zones, cell.num_points, 1])
ux = np.zeros([num_zones, cell.num_points, 1])
uw = np.zeros([num_zones, cell.order, 1])
dx = mesh.dx
for i in range(num_zones):
for j in range(cell.num_points):
xsi = cell.gauss_points[j]
xj = xf[i] + (xsi + 1.0) * 0.5 * dx
setup.primitive(time, xj, px[i, j])
ux[...] = px[...] # the conserved variable is also the primitive
for i in range(num_zones):
uw[i] = cell.to_weights(ux[i])
self.conserved_w = uw
else:
self.conserved_w = solution
self.t = time
self.mesh = mesh
self.cell = cell
self._options = options
self._physics = physics
@property
def solution(self):
return self.conserved_w
@property
def primitive(self):
return self.conserved_w[:, 0]
@property
def time(self):
return self.t
@property
def maximum_cfl(self):
return 1.0
@property
def options(self):
return self._options._asdict()
@property
def physics(self):
return self._physics._asdict()
@property
def maximum_cfl(self):
k = self.cell.order - 1
if self._options.integrator == "rk1":
return 1.0 / (2 * k + 1)
if self._options.integrator == "rk2":
return 1.0 / (2 * k + 1)
if self._options.integrator == "rk3":
return 1.0 / (2 * k + 1)
if self._options.integrator == "rk3-sr02":
return 2.0 / (2 * k + 1)
def maximum_wavespeed(self):
if self._physics.equation == "advection":
return abs(self._physics.wavespeed)
elif self._physics.equation == "burgers":
return abs(self.conserved_w[:, 0]).max()
def advance(self, dt):
import numpy as np
def udot(u):
udot = np.zeros_like(u)
rhs(self._physics, u, self.cell, self.mesh.dx, udot)
return udot
if self._options.integrator == "rk1":
u = self.conserved_w
u += dt * udot(u)
if self._options.integrator == "rk2":
b1 = 0.0
b2 = 0.5
u = u0 = self.conserved_w.copy()
u = u0 * b1 + (1.0 - b1) * (u + dt * udot(u))
u = u0 * b2 + (1.0 - b2) * (u + dt * udot(u))
if self._options.integrator == "rk3":
b1 = 0.0
b2 = 3.0 / 4.0
b3 = 1.0 / 3.0
u = u0 = self.conserved_w.copy()
u = u0 * b1 + (1.0 - b1) * (u + dt * udot(u))
u = u0 * b2 + (1.0 - b2) * (u + dt * udot(u))
u = u0 * b3 + (1.0 - b3) * (u + dt * udot(u))
if self._options.integrator == "rk3-sr02":
u = u0 = self.conserved_w.copy()
u = u0 + 0.5 * dt * udot(u)
u = u + 0.5 * dt * udot(u)
u = 2.0 / 3.0 * u0 + 1.0 / 3.0 * (u + 0.5 * dt * udot(u))
u = u + 0.5 * dt * udot(u)
self.conserved_w = u
self.t += dt
| [
1,
529,
276,
1112,
420,
29958,
29967,
485,
29895,
29945,
29886,
557,
5444,
29914,
29879,
737,
15161,
13,
15945,
29908,
13,
2744,
302,
29899,
386,
1797,
766,
20621,
681,
5208,
5968,
262,
899,
369,
363,
29871,
29896,
29928,
17336,
594,
34... |
pychron/lasers/power/composite_calibration_manager.py | ASUPychron/pychron | 31 | 2675 | <reponame>ASUPychron/pychron
# ===============================================================================
# Copyright 2012 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traits.api import HasTraits, Instance, DelegatesTo, Button, List, Any, Float
from traitsui.api import View, Item, VGroup, HGroup, Group, spring, TabularEditor
# ============= standard library imports ========================
import pickle
import os
from numpy import polyval
# ============= local library imports ==========================
from pychron.managers.manager import Manager
from pychron.database.selectors.power_calibration_selector import (
PowerCalibrationSelector,
)
from pychron.database.adapters.power_calibration_adapter import PowerCalibrationAdapter
from pychron.paths import paths
from pychron.graph.graph import Graph
from pychron.hardware.meter_calibration import MeterCalibration
"""
use a dbselector to select data
"""
class BoundsSelector(HasTraits):
graph = Instance(Graph)
def traits_view(self):
v = View(
Item("graph", show_label=False, style="custom"),
buttons=["OK", "Cancel"],
kind="livemodal",
)
return v
class CompositeCalibrationManager(Manager):
db = Instance(PowerCalibrationAdapter)
selector = Instance(PowerCalibrationSelector)
append = Button
replace = Button
load_graph = Button
save = Button
selected_calibrations = List
selected = Any
results = DelegatesTo("selector")
graph = Instance(Graph)
dclicked = Any
parent_name = "FusionsDiode"
power = Float
input = Float
def _power_changed(self):
pc = self._load_calibration()
pc
if pc is not None:
self.input, _ = pc.get_input(self.power)
def _load_calibration(self):
try:
p = self._get_calibration_path()
with open(p, "rb") as f:
pc = pickle.load(f)
except:
return
return pc
def _dclicked_changed(self):
s = self.selected
if s is not None:
s.bounds = None
s.load_graph()
s.graph.add_range_selector()
bc = BoundsSelector(graph=s.graph)
info = bc.edit_traits()
if info.result:
bounds = s.graph.plots[0].default_index.metadata["selections"]
s.bounds = bounds
s.calibration_bounds = (
polyval(s.coefficients, bounds[0]),
polyval(s.coefficients, bounds[1]),
)
def _append_fired(self):
s = self.selector.selected
if s is not None:
for si in s:
trs = list(si.traits().keys()).remove("graph")
self.selected_calibrations.append(si.clone_traits(traits=trs))
def _replace_fired(self):
s = self.selector.selected
trs = list(s.traits().keys()).remove("graph")
self.selected_calibrations = s.clone_traits(traits=trs)
def _save_fired(self):
self._dump_calibration()
def _dump_calibration(self):
pc = MeterCalibration()
coeffs = []
bounds = []
for s in self.selected_calibrations:
coeffs.append(s.coefficients)
bounds.append(s.calibration_bounds)
pc.coefficients = coeffs
pc.bounds = bounds
p = self._get_calibration_path()
self.info("saving calibration to {}".format(p))
with open(p, "wb") as f:
pickle.dump(pc, f)
def _get_calibration_path(self):
p = os.path.join(
paths.hidden_dir, "{}_power_calibration".format(self.parent_name)
)
return p
def _load_graph_fired(self):
g = self.graph
g.clear()
# g.new_plot(zoom=True, pan=True,
# padding=[40, 10, 10, 40]
# )
has_bounds = False
for i, s in enumerate(self.selected_calibrations):
if s.bounds:
has_bounds = True
elif has_bounds:
g.clear()
self._plot_factory(g)
self.warning_dialog("{} does not have its bounds set".format(s.rid))
break
s.load_graph(graph=g, new_plot=i == 0)
g.redraw()
def traits_view(self):
selector_grp = Group(Item("selector", style="custom", show_label=False))
transfer_grp = VGroup(
spring,
VGroup(Item("append", show_label=False), Item("replace", show_label=False)),
spring,
)
editor = TabularEditor(
adapter=self.selector.tabular_adapter(),
editable=False,
dclicked="object.dclicked",
selected="object.selected",
)
selected_grp = Item("selected_calibrations", editor=editor, show_label=False)
data_tab = Group(
HGroup(selector_grp, transfer_grp, selected_grp),
show_border=True,
label="Data",
)
process_tab = Group(
HGroup(
Item("power"),
Item("input", format_str=" %0.3f ", style="readonly"),
spring,
Item("save", show_label=False),
Item("load_graph", show_label=False),
),
Item("graph", style="custom", show_label=False),
show_border=True,
label="Process",
)
v = View(
VGroup(data_tab, process_tab),
resizable=True,
title="Composite {} Power Calibration".format(self.parent_name),
)
return v
def _graph_default(self):
g = Graph(
container_dict={
# 'fill_padding':True,
# 'bgcolor':'red',
"padding": 5
}
)
self._plot_factory(g)
return g
def _plot_factory(self, graph):
graph.new_plot(
zoom=True,
pan=True,
padding=[50, 10, 10, 40],
xtitle="Setpoint (%)",
ytitle="Measured Power (W)",
)
def _db_default(self):
if self.parent_name == "FusionsDiode":
name = paths.diodelaser_db
else:
name = paths.co2laser_db
db = PowerCalibrationAdapter(name=name, kind="sqlite")
db.connect()
return db
def _selector_default(self):
return self.db._selector_factory()
if __name__ == "__main__":
ccm = CompositeCalibrationManager()
ccm.configure_traits()
# ============= EOF =============================================
| [
1,
529,
276,
1112,
420,
29958,
3289,
4897,
3376,
1617,
29914,
2272,
5904,
13,
29937,
1275,
9166,
9166,
9166,
9166,
4936,
2751,
29922,
13,
29937,
14187,
1266,
29871,
29906,
29900,
29896,
29906,
529,
5813,
29958,
13,
29937,
13,
29937,
10413... |
library/modulemanager.py | l29ah/vk4xmpp | 77 | 94195 | <reponame>l29ah/vk4xmpp
# coding: utf-8
# This file is a part of VK4XMPP transport
# © simpleApps, 2015.
"""
Manages python modules as xmpppy handlers
"""
__author__ = "mrDoctorWho <<EMAIL>>"
__version__ = "1.1"
import os
from writer import *
from __main__ import Component, TransportFeatures, UserFeatures
def proxy(func):
def wrapper(type, *args):
if type:
for (handler, typ, ns, makefirst) in args:
if isinstance(ns, list):
while ns:
func(type, handler, typ, ns.pop(), makefirst=makefirst)
else:
func(type, handler, typ, ns, makefirst=makefirst)
return wrapper
@proxy
def register(*args, **kwargs):
Component.RegisterHandler(*args, **kwargs)
@proxy
def unregister(*args, **kwargs):
Component.UnregisterHandler(*args)
def addFeatures(features, list=TransportFeatures):
for feature in features:
list.add(feature)
def removeFeatures(features, list=TransportFeatures):
for feature in features:
if feature in list:
list.remove(feature)
class ModuleManager(object):
"""
A complete module manager.
You can easy load, reload and unload any module using it.
Modules are different from extensions:
While extensions works in main globals() and have their callbacks,
modules works in their own globals() and they're not affect to the core.
Unfortunately, most of modules are not protected from harm
so they may have affect on the connection
"""
loaded = set([])
@staticmethod
def getFeatures(module):
return getattr(module, "MOD_FEATURES_USER", [])
@classmethod
def __register(cls, module):
register(module.MOD_TYPE, *module.MOD_HANDLERS)
addFeatures(module.MOD_FEATURES)
addFeatures(cls.getFeatures(module), UserFeatures)
cls.loaded.add(module.__name__)
@classmethod
def __unregister(cls, module):
unregister(module.MOD_TYPE, *module.MOD_HANDLERS)
removeFeatures(module.MOD_FEATURES)
removeFeatures(cls.getFeatures(module), UserFeatures)
cls.loaded.remove(module.__name__)
@classmethod
def list(cls):
modules = []
for file in os.listdir("modules"):
name, ext = os.path.splitext(file)
if ext == ".py":
modules.append(name)
return modules
@classmethod
def __load(cls, name, reload_=False):
try:
if reload_:
module = sys.modules[name]
cls.__unregister(module)
module = reload(module)
else:
module = __import__(name, globals(), locals())
except Exception:
crashLog("modulemanager.load")
module = None
return module
@classmethod
def load(cls, list=[]):
result = []
errors = []
for name in list:
loaded = name in cls.loaded
module = cls.__load(name, loaded)
if not module:
errors.append(name)
continue
result.append(name)
cls.__register(module)
return (result, errors)
@classmethod
def unload(cls, list=[]):
result = []
for name in list:
if name in sys.modules:
cls.__unregister(sys.modules[name])
del sys.modules[name]
result.append(name)
return result
| [
1,
529,
276,
1112,
420,
29958,
29880,
29906,
29929,
801,
29914,
29894,
29895,
29946,
29916,
29885,
407,
13,
29937,
14137,
29901,
23616,
29899,
29947,
13,
29937,
910,
934,
338,
263,
760,
310,
478,
29968,
29946,
29990,
3580,
29925,
8608,
13... |
appengine-compat/exported_appengine_sdk/google/storage/speckle/proto/jdbc_type.py | speedplane/python-compat-runtime | 26 | 6849 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Python equivalent of jdbc_type.h.
Python definition of the JDBC type constant values defined in Java class
java.sql.Types. Since the values don't fall into the range allowed by
a protocol buffer enum, we use Python constants instead.
If you update this, update jdbc_type.py also.
"""
BIT = -7
TINYINT = -6
SMALLINT = 5
INTEGER = 4
BIGINT = -5
FLOAT = 6
REAL = 7
DOUBLE = 8
NUMERIC = 2
DECIMAL = 3
CHAR = 1
VARCHAR = 12
LONGVARCHAR = -1
DATE = 91
TIME = 92
TIMESTAMP = 93
BINARY = -2
VARBINARY = -3
LONGVARBINARY = -4
NULL = 0
OTHER = 1111
JAVA_OBJECT = 2000
DISTINCT = 2001
STRUCT = 2002
ARRAY = 2003
BLOB = 2004
CLOB = 2005
REF = 2006
DATALINK = 70
BOOLEAN = 16
ROWID = -8
NCHAR = -15
NVARCHAR = -9
LONGNVARCHAR = -16
NCLOB = 2011
SQLXML = 2009
| [
1,
18787,
4855,
29914,
2109,
29914,
6272,
3017,
13,
29937,
13,
29937,
14187,
1266,
29871,
29906,
29900,
29900,
29955,
5087,
9266,
29889,
13,
29937,
13,
29937,
10413,
21144,
1090,
278,
13380,
19245,
29892,
10079,
29871,
29906,
29889,
29900,
... |
vertexLite/views.py | FelixTheC/onlineOrderForm | 0 | 182004 | <reponame>FelixTheC/onlineOrderForm
from django.forms import formset_factory
from django.shortcuts import redirect
from django.urls import reverse
from django.views.generic import UpdateView
from extras.views import PDFView
from extras.views import ThanksForOderView
from orderForm.own_decorator import hash_is_allowed
from orderForm.own_mixins import HashRequiredMixin
from extras.views import customer_deliver_address_function
from survey.forms import BaseArticleForm
from orderForm.own_mixins import ProveHashMixin
from .forms import VertexLiteCreateForm
from .forms import VLArticleForm
from .forms import VLPaymentOptionForm
from .models import VertexLiteOrderModel
@hash_is_allowed
def vertexLiteForm(request, hash):
return redirect(reverse('vertex_lite:vertex_lite_customer_form',
kwargs={'hash': hash,
'pk': hash[14:]}
))
class CustomerVertexLiteUpdate(HashRequiredMixin, ProveHashMixin, UpdateView):
model = VertexLiteOrderModel
form_class = VertexLiteCreateForm
template_name = 'vertexLiteForm.html'
formset_error = ''
def get_context_data(self, **kwargs):
context = super(CustomerVertexLiteUpdate, self).get_context_data(**kwargs)
ArticleFormSet = formset_factory(VLArticleForm, extra=4)
initial = []
if self.object.battery_size:
for i in range(0, len(self.object.battery_size.split('$')) - 1):
initial.append({'battery_size': self.object.battery_size.split('$')[i],
'number_of_collars': self.object.number_of_collars.split('$')[i],
'nom_collar_circumference': self.object.nom_collar_circumference.split('$')[i], })
context['com_type_gl'] = self.object.globalstar
context['com_type_ir'] = self.object.iridium
context['com_type_gsm'] = self.object.gsm
context['com_type_none'] = self.object.store_on_board
context['telno'] = self.object.gsm_customer_sim_telephone_no
context['pin'] = self.object.gsm_customer_sim_pin
context['puk'] = self.object.gsm_customer_sim_puk
context['formset'] = ArticleFormSet(initial=initial)
context['formset_error'] = self.formset_error
return context
def get_initial(self):
initial = super(CustomerVertexLiteUpdate, self).get_initial()
return initial
def get_success_url(self, **kwargs):
return reverse('vertex_lite:vertex_lite_delivery_form', kwargs={'hash': self.kwargs['hash'],
'pk': self.kwargs['pk']})
def form_valid(self, form):
article_form_set = formset_factory(VLArticleForm, min_num=1, validate_min=True, formset=BaseArticleForm)
data = {
'form-TOTAL_FORMS': '4',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '0',
'form-0-battery_size': self.request.POST['form-0-battery_size'],
'form-0-number_of_collars': self.request.POST['form-0-number_of_collars'],
'form-0-nom_collar_circumference': self.request.POST['form-0-nom_collar_circumference'],
'form-1-battery_size': self.request.POST['form-1-battery_size'],
'form-1-number_of_collars': self.request.POST['form-1-number_of_collars'],
'form-1-nom_collar_circumference': self.request.POST['form-1-nom_collar_circumference'],
'form-2-battery_size': self.request.POST['form-2-battery_size'],
'form-2-number_of_collars': self.request.POST['form-2-number_of_collars'],
'form-2-nom_collar_circumference': self.request.POST['form-2-nom_collar_circumference'],
'form-3-battery_size': self.request.POST['form-3-battery_size'],
'form-3-number_of_collars': self.request.POST['form-3-number_of_collars'],
'form-3-nom_collar_circumference': self.request.POST['form-3-nom_collar_circumference'],
}
formset = article_form_set(data)
batterie_sizes_string = ''
num_collars_string = ''
circumference_string = ''
if formset.is_valid():
for f in formset.cleaned_data:
if len(f) > 1 is not None:
batterie_sizes_string += f['battery_size'] + '$'
num_collars_string += str(f['number_of_collars']) + '$'
circumference_string += f['nom_collar_circumference'] + '$'
else:
self.formset_error = 'error'
return super(CustomerVertexLiteUpdate, self).form_invalid(form)
instance = form.save(commit=False)
instance.number_of_collars = num_collars_string
instance.battery_size = batterie_sizes_string
instance.nom_collar_circumference = circumference_string
instance.save()
return super(CustomerVertexLiteUpdate, self).form_valid(form)
def form_invalid(self, form):
return super(CustomerVertexLiteUpdate, self).form_invalid(form)
def customer_deliver_address(request, hash, pk):
return customer_deliver_address_function(request, hash, pk, VertexLiteOrderModel, 'vertex_lite:thanks', VLPaymentOptionForm)
class ThanksView(ThanksForOderView):
def __init__(self):
super(ThanksView, self).__init__(VertexLiteOrderModel, 'vertexlite_thanks.html')
class VertexLitePDFView(PDFView):
def __init__(self):
super(VertexLitePDFView, self).__init__(VertexLiteOrderModel, 'vertexLiteHtml4Pdf.html', 'order_from') | [
1,
529,
276,
1112,
420,
29958,
29943,
295,
861,
1576,
29907,
29914,
14627,
7514,
2500,
13,
3166,
9557,
29889,
9514,
1053,
883,
842,
29918,
14399,
13,
3166,
9557,
29889,
12759,
7582,
29879,
1053,
6684,
13,
3166,
9557,
29889,
26045,
1053,
... |
DFS/BinaryTreeMaxPathSum.py | karan2808/Python-Data-Structures-and-Algorithms | 2 | 53631 | class Solution:
def __init__(self):
self.result = None
def findMax(self, root):
if root == None:
return 0
# find max for left and right node
left = self.findMax(root.left)
right = self.findMax(root.right)
# can either go straight down i.e. from root to one of the children and downwards
maxStraight = max(max(left, right) + root.val, root.val)
# or can come to root from either of the child nodes and go to other child node
maxCurved = max(left + right + root.val, maxStraight)
# update the result
self.result = max(self.result, maxCurved)
# can only return max straight, since we're going upwards
return maxStraight
def maxPathSum(self, root):
if root == None:
return 0
self.result = float('-inf')
self.findMax(root)
return self.result
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
def main():
root = TreeNode(5)
root.left = TreeNode(2)
root.right = TreeNode(7)
root.left.left = TreeNode(1)
root.left.right = TreeNode(3)
root.right.left = TreeNode(5)
root.right.right = TreeNode(9)
mySol = Solution()
print("The max path sum in the binary tree is " + str(mySol.maxPathSum(root)))
if __name__ == "__main__":
main()
| [
1,
770,
24380,
29901,
13,
1678,
822,
4770,
2344,
12035,
1311,
1125,
13,
4706,
1583,
29889,
2914,
353,
6213,
13,
13,
1678,
822,
1284,
7976,
29898,
1311,
29892,
3876,
1125,
13,
4706,
565,
3876,
1275,
6213,
29901,
13,
9651,
736,
29871,
2... |
Processing.py | Ruframapi/reconoSERTest | 0 | 145596 | <filename>Processing.py
"""
File Processing without Spark. Just to take a look.
"""
import sys
import os
import json
import re
import random
import nltk
import datetime
from nltk.tokenize import TweetTokenizer
from nltk.corpus import stopwords
from nltk.util import skipgrams
def main():
filepath = sys.argv[1]
processing_type = sys.argv[2] # "user_count, user_filter, user_filter_reviews, user_filter_with_reviews_2"
filter_number = int(sys.argv[3])
if not os.path.isfile(filepath):
print("File path {} does not exist. Exiting...".format(filepath))
sys.exit()
if processing_type == "user_count":
user_count(filepath)
elif processing_type == "user_filter":
user_filter(filepath,filter_number)
elif processing_type == "user_filter_reviews":
user_filter_with_reviews(filepath,filter_number)
elif processing_type == "user_filter_reviews_2":
user_filter_with_reviews_2(filepath,filter_number)
else:
print("Wrong second argument (Processing type)")
sys.exit()
def user_filter_with_reviews(filepath,filter_number):
"""For those user with more than -filter_number- reviews the comments are extracted"""
filename = "UserFilter"+ str(filter_number) + ".json"
with open(filename, 'r', encoding="utf8", errors='ignore') as fp:
data=fp.read()
dict_users = json.loads(data)
filename = "UserFilter"+ str(filter_number) + "-Reviews.json"
with open(filepath, encoding="utf8", errors='ignore') as fp:
cnt = 0
excnt = 0
text_to_write = ""
#for i in xrange(6):
# f.next()
for line in fp:
excnt += 1
if excnt % 100000 == 0:
print(excnt)
if "review/userId" in line:
actualuser = line.replace('review/userId:', '').strip()
if "review/text:" in line:
if actualuser in dict_users:
review = cleanhtml(line.replace('review/text:', '').strip())
review = tokenize(review)
text_to_write = text_to_write + actualuser + " || " + json.dumps(review) + "\n"
cnt += 1
if cnt == 10000:
print("cnt {} excnt {}".format(cnt,excnt))
with open(filename, 'a') as fw:
fw.write(text_to_write)
cnt = 1
text_to_write = ""
def user_filter_with_reviews_2(filepath,filter_number):
""" Found filter with reviews 2 """
filename = "UserFilter"+ str(filter_number) + "-Random3.json"
with open(filename, 'r', encoding="utf8", errors='ignore') as fp:
data=fp.read()
dict_users = json.loads(data)
dict_total_training = {}
dict_total_testing = {}
cnt = 1
print(datetime.datetime.now().time())
filename = "UserFilter"+ str(filter_number) + "-Reviews.json"
with open(filename, encoding="utf8", errors='ignore') as fp:
for line in fp:
list_line=line.split("||")
user = list_line[0].strip()
if user in dict_users:
if user not in dict_total_training:
dict_total_training[user] = {"reviews": []}
if user not in dict_total_testing:
dict_total_testing[user] = {"reviews": []}
if random.random() < 0.5:
word_list_training = json.loads(list_line[1].strip())
dict_total_training[user]["reviews"].extend(word_list_training)
else:
word_list_testing = json.loads(list_line[1].strip())
dict_total_testing[user]["reviews"].append(word_list_testing)
#dict_total[user]["pos"].extend(pos_tagger(word_list))
cnt += 1
if cnt % 100000 == 0:
print(datetime.datetime.now().time())
list_total_training = []
for key in dict_total_training:
dictdemo = {}
dictdemo["user"] = key
dictdemo["reviews"] = dict_total_training[key]["reviews"]
list_total_training.append(dictdemo)
dict_total_training = {}
filename = "UserFilter"+ str(filter_number) + "-Training-Random3.json"
with open(filename, 'w') as fp:
json.dump(list_total_training, fp, indent=4)
list_total_training = []
list_total_testing = []
for key in dict_total_testing:
dictdemo = {}
dictdemo["user"] = key
dictdemo["reviews"] = dict_total_testing[key]["reviews"]
list_total_testing.append(dictdemo)
dict_total_testing = {}
filename = "UserFilter"+ str(filter_number) + "-Testing-Random3.json"
with open(filename, 'w') as fp:
json.dump(list_total_testing, fp, indent=4)
def user_filter(filepath,filter_number):
""" Found users with more than -filter_number- reviews """
with open(filepath, 'r', encoding="utf8", errors='ignore') as fp:
data=fp.read()
dict_users = json.loads(data)
dict_users_filter = dict(filter(lambda elem: elem[1] >= filter_number ,dict_users.items()))
len10p = round(len(dict_users_filter)*0.03)
dict_users_filter_rand = dict(random.sample(dict_users_filter.items(), len10p))
filename = "UserFilter"+ str(filter_number) + "-Random3.json"
with open(filename, 'w') as fp:
json.dump(dict_users_filter_rand, fp, indent=4)
def user_count(filepath):
""" Count the number of reviews per user"""
bag_of_users = {}
with open(filepath, encoding="utf8", errors='ignore') as fp:
cnt = 0
for line in fp:
if "review/userId" in line:
readuser = line.replace('review/userId:', '').strip()
record_user_cnt(readuser, bag_of_users)
cnt += 1
if cnt == 100000:
print("line {}".format(line))
cnt = 1
#break
sorted_users = order_bag_of_users(bag_of_users, desc=True)
with open('userReviewCount.json', 'w') as fp:
json.dump(sorted_users, fp, indent=4)
def order_bag_of_users(bag_of_users, desc=False):
"""Order by number of reviews"""
users = [(user, cnt) for user, cnt in bag_of_users.items()]
users_sort = sorted(users, key=lambda x: x[1], reverse=desc)
print("User with more reviews {}".format(users_sort[:10]))
return dict(users_sort)
def record_user_cnt(user, bag_of_users):
"""Record the reviews count """
if user != '':
if user in bag_of_users:
bag_of_users[user] += 1
else:
bag_of_users[user] = 1
def cleanhtml(raw_html):
"""Delete HTML Tags"""
cleanr = re.compile('<.*?>')
cleantext = re.sub(cleanr, '', raw_html)
return cleantext
def pos_tagger(s):
"""POS"""
return [i[1] for i in nltk.pos_tag(s)]
def tokenize(s):
"""Tokenizer"""
s = s.lower()
token = TweetTokenizer()
return token.tokenize(s)
if __name__ == '__main__':
main() | [
1,
529,
9507,
29958,
7032,
292,
29889,
2272,
13,
15945,
29908,
13,
2283,
10554,
292,
1728,
20814,
29889,
3387,
304,
2125,
263,
1106,
29889,
13,
13,
15945,
29908,
13,
13,
5215,
10876,
13,
5215,
2897,
13,
5215,
4390,
13,
5215,
337,
13,
... |
twenty47/__init__.py | gary-dalton/Twenty47 | 0 | 77220 | <reponame>gary-dalton/Twenty47
from flask import Flask, flash
from flask.ext.mongoengine import MongoEngine
from flask_wtf.csrf import CsrfProtect
from flask_mail import Mail
import blinker
app = Flask(__name__)
#app.config.from_object(__name__)
# Load the config
app.config.from_pyfile('local.config.py')
#app.config.from_envvar('config', silent=True)
db = MongoEngine(app)
mail = Mail(app)
csrf = CsrfProtect()
csrf.init_app(app)
"""
Register my signals
"""
twenty47_signals = blinker.Namespace()
subscription_updated = twenty47_signals.signal("subscription-updated")
subscription_pending = twenty47_signals.signal("subscription-pending")
sns_error = twenty47_signals.signal("sns-error")
dispatch_created = twenty47_signals.signal("dispatch-created")
if app.config['DEBUG']:
def debug(*args):
pass
#debug = flash
else:
def debug(*args):
pass
def register_blueprints(app):
# Prevents circular imports
from twenty47.views import dispatch
app.register_blueprint(dispatch)
from twenty47.admin import admin
app.register_blueprint(admin)
from twenty47.admin_dispatch import admin_dispatch
app.register_blueprint(admin_dispatch)
from twenty47.subscriber import subscriber
app.register_blueprint(subscriber)
def subscribe_to_signals(app):
import signals
register_blueprints(app)
subscribe_to_signals(app)
if __name__ == '__main__':
app.run()
| [
1,
529,
276,
1112,
420,
29958,
29887,
653,
29899,
12293,
880,
29914,
27418,
6478,
29946,
29955,
13,
3166,
29784,
1053,
2379,
1278,
29892,
11013,
13,
3166,
29784,
29889,
1062,
29889,
29885,
7443,
10599,
1053,
18294,
12412,
13,
3166,
29784,
... |
similarityworkbench/bin/mcs/Cmcs.py | gitanna/chemminetools | 2 | 98957 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 1.3.35
#
# Don't modify this file, modify the SWIG interface instead.
# This file is compatible with both classic and new-style classes.
import _Cmcs
import new
new_instancemethod = new.instancemethod
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(
self,
class_type,
name,
value,
static=1,
):
if name == 'thisown':
return self.this.own(value)
if name == 'this':
if type(value).__name__ == 'PySwigObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if not static or hasattr(self, name):
self.__dict__[name] = value
else:
raise AttributeError('You cannot add attributes to %s' % self)
def _swig_setattr(
self,
class_type,
name,
value,
):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
if name == 'thisown':
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
raise AttributeError, name
def _swig_repr(self):
try:
strthis = 'proxy of ' + self.this.__repr__()
except:
strthis = ''
return '<%s.%s; %s >' % (self.__class__.__module__,
self.__class__.__name__, strthis)
import types
try:
_object = types.ObjectType
_newclass = 1
except AttributeError:
class _object:
pass
_newclass = 0
del types
max = _Cmcs.max
read_graph = _Cmcs.read_graph
parse_sdf = _Cmcs.parse_sdf
get_best = _Cmcs.get_best
is_null = _Cmcs.is_null
set_timeout = _Cmcs.set_timeout
| [
1,
18787,
4855,
29914,
2109,
29914,
4691,
13,
29937,
448,
29930,
29899,
14137,
29901,
23616,
29899,
29947,
448,
29930,
29899,
13,
13,
29937,
910,
934,
471,
6336,
5759,
491,
25289,
6259,
313,
1124,
597,
1636,
29889,
2774,
335,
29889,
990,
... |
alpa/timer.py | TarzanZhao/alpa | 0 | 1609155 | """Global timer for profiling."""
import time
from typing import Callable
do_sync = True
class _Timer:
"""An internal timer."""
def __init__(self, name: str):
self.name = name
self.started = False
self.start_time = None
self.costs = []
# Loop timer
self.ever_suspended = False
self.accum_cost = 0.0
def start(self, sync_func: Callable = None):
"""Start the timer."""
assert not self.started, "timer has already been started"
if sync_func and do_sync:
sync_func()
self.start_time = time.time()
self.started = True
def suspend(self, sync_func: Callable = None):
"""Suspend the timer in a loop."""
assert self.started
self.ever_suspended = True
# we accumulate on the accum_cost
if sync_func and do_sync:
sync_func()
self.accum_cost += time.time() - self.start_time
self.started = False
def stop(self, sync_func: Callable = None):
"""Stop the timer."""
if self.ever_suspended:
assert not self.started, (
f"Stop the timer {self.name} before suspending it.")
else:
assert self.started, (
f"timer {self.name} is not started nor ever suspended.")
if sync_func and do_sync:
sync_func()
if self.ever_suspended:
self.costs.append(self.accum_cost)
self.accum_cost = 0.0
else:
cost = time.time() - self.start_time
self.costs.append(cost)
self.started = False
def reset(self):
"""Reset timer."""
self.costs = []
self.accum_cost = 0.0
self.started = False
self.ever_suspended = False
def elapsed(self, mode: str = "average"):
"""Calculate the elapsed time."""
if not self.costs:
return 0.0
if mode == "average":
return sum(self.costs) / len(self.costs)
elif mode == "sum":
return sum(self.costs)
else:
raise RuntimeError("Supported mode is: average | sum")
def log(self, mode: str = "average", normalizer: float = 1.0):
"""Log a timer's cost in different modes."""
assert normalizer > 0.0
string = "time (ms)"
elapsed = self.elapsed(mode) * 1000.0 / normalizer
string += f" | {self.name}: {elapsed:.2f}"
print(string, flush=True)
class Timers:
"""A group of timers."""
def __init__(self):
self.timers = {}
def __call__(self, name: str):
if name not in self.timers:
self.timers[name] = _Timer(name)
return self.timers[name]
def __contains__(self, name: str):
return name in self.timers
def log(self, names: str, normalizer: float = 1.0):
"""Log a group of timers."""
assert normalizer > 0.0
string = "time (ms)"
for name in names:
elapsed_time = self.timers[name].elapsed() * 1000.0 / normalizer
string += f" | {name}: {elapsed_time:.2f}"
print(string, flush=True)
timers = Timers()
| [
1,
9995,
12756,
12237,
363,
20077,
292,
1213,
15945,
13,
5215,
931,
13,
3166,
19229,
1053,
8251,
519,
13,
13,
1867,
29918,
16593,
353,
5852,
13,
13,
13,
1990,
903,
14745,
29901,
13,
1678,
9995,
2744,
7463,
12237,
1213,
15945,
13,
13,
... |
AcimdesTrainingServer.py | IvanDerdicDer/Acimdes | 0 | 97326 | import random
from typing import List
import NeuralNetwork as nn
import os
import multiprocessing as mp
import threading as thrd
import pickle
import time
class Cards:
def __init__(self):
self.cards = []
for _ in range(4):
temp = random.sample(range(8), 8)
for j in temp:
self.cards.append(j)
def lastcard(self):
last = self.cards[-1]
self.cards.pop()
return last
class Player:
def __init__(self, username, nn: nn.NeuralNetwork):
self.username = username
self.cardsInHand = []
self.takenCards = [0]*8
self.score = 0
self.isLast = False
self.isFirst = False
self.takesTheHand = False
self.playingNetwork = nn
def __str__(self):
return self.username
def __eq__(self, other):
if isinstance(other, str):
return self.username == other
if isinstance(other, Player):
return self.username == other.username
return False
def throwcard(self, n):
card = self.cardsInHand[n]
self.cardsInHand.pop(n)
return card
class Game:
def __init__(self, players: List[Player]):
self.cardsRoman = ['VII', 'VIII', 'IX', 'X', 'D', 'B', 'K', 'A']
self.allowedInput: List[str] = ['0', '1', '2', '3', 'end']
self.players: List[Player] = players
self.players[random.randint(0, 3)].isFirst = True
self.cards: Cards = Cards()
self.dealCards(self.cards, self.players)
@staticmethod
def generateInputList(cardsInHand: list[int], hand: list[int], takenCards: list[int], scoreUs: int, scoreThem: int):
inputList = cardsInHand.copy()
while len(inputList) < 4:
inputList.append(-1)
inputList += hand
while len(inputList) < 19:
inputList.append(-1)
inputList += takenCards + [scoreUs, scoreThem]
return inputList
@staticmethod
def draw(cards, players):
for i in players:
i.cardsInHand.append(cards.lastcard())
@staticmethod
def dealCards(cards, players):
for _ in range(2):
for j in range(4):
players[j].cardsInHand.append(cards.lastcard())
players[j].cardsInHand.append(cards.lastcard())
@staticmethod
def sortPlayers(players: List[Player]):
for _ in range(4):
if players[0].isFirst:
break
else:
temp_p = players[0]
players.pop(0)
players.append(temp_p)
def canPlayerContinue(self, cardToBeat, first, i):
if (cardToBeat not in self.players[0].cardsInHand and not first and i == self.players[0] and
0 not in self.players[0].cardsInHand):
return True
return False
def printHand(self, hand, first):
handOut = '| '
if not first:
print("Bačene karte: ")
for n in hand:
handOut += self.cardsRoman[n] + ' | '
print(handOut)
def printPlayer(self, i):
cardsInHandOut = '| '
print(i.__str__())
for n in i.cardsInHand:
cardsInHandOut += self.cardsRoman[n] + ' | '
print("Ruka: " + cardsInHandOut)
return i.playingNetwork.runNetwork()
def printOrder(self):
print("Redoslijed igre: ")
for i in self.players:
print(f"\t- {i}")
@staticmethod
def cardTakesTheHand(thrownCard, cardToBeat, i, players):
if thrownCard == cardToBeat or thrownCard == 0:
for j in players:
j.takesTheHand = False
j.isFirst = False
i.takesTheHand = True
i.isFirst = True
@staticmethod
def pointSum(hand, players):
sumPoints = 0
for i in hand:
if i == 3 or i == 7:
sumPoints += 10
for i in players:
if i.takesTheHand:
i.score += sumPoints
players[players.index(i)-2].score += sumPoints
for j in hand:
i.takenCards[j] += 1
players[players.index(i) - 2].takenCards[j] += 1
break
def pointReset(self):
for i in self.players:
i.score = 0
def contDeal(self, firstPlayer):
if len(self.cards.cards) != 0:
for i in range(min(4-len(firstPlayer.cardsInHand), int(len(self.cards.cards)/4))):
self.draw(self.cards, self.players)
def checkCardInput(self, cardToThrow, cardToBeat, first, a, i, firstPlayer):
if cardToThrow not in self.allowedInput:
#print(f"Nedozvoljeni ulaz.")
return False
if cardToThrow == 'end':
if i != firstPlayer or first:
#print("Trenutno nije moguće završiti rundu!")
return False
return True
if int(cardToThrow) > (3-a):
#print(f"Odabrana karta nije unutar raspona.")
return False
try:
if i.cardsInHand[int(cardToThrow)] != cardToBeat and i.cardsInHand[int(cardToThrow)] != 0 and not first and i == firstPlayer:
#print(f"Odabrana karta nije ispravna.")
return False
except:
return False
return True
@property
def handplay(self):
hand = []
killCommand = False
breakHand = False
first = True
cardToBeat = None
i: Player
for i in self.players:
i.cardsInHand.sort()
# Sortiranje igrača
self.sortPlayers(self.players)
firstPlayer = self.players[0]
# Početak ruke
if len(firstPlayer.cardsInHand) != 0:
# self.printOrder()
# Krugovi
for a in range(4):
if len(self.cards.cards)%2:
killCommand = True
break
# Igrači
for i in self.players:
# Provjera može li prvi igrač nastaviti ruku
breakHand = self.canPlayerContinue(cardToBeat, first, i)
if breakHand:
# self.printHand(hand, first)
break
# self.printHand(hand, first)
#print(self.generateInputList(i.cardsInHand, hand, i.takenCards, i.score, self.players[self.players.index(i)-1].score))
cardToThrowList = i.playingNetwork.runNetwork(self.generateInputList(i.cardsInHand, hand, i.takenCards, i.score, self.players[self.players.index(i)-1].score))
cardToThrow = cardToThrowList.index(max(cardToThrowList))
if cardToThrow == 4:
cardToThrow = "end"
# print(f"{os.getpid()} {cardToThrow}")
# Provjera da li je ulaz dobar
if not self.checkCardInput(str(cardToThrow), cardToBeat, first, a, i, firstPlayer):
breakHand = True
killCommand = True
break
if cardToThrow == 'end':
breakHand = True
break
# Postavlja kartu za uzimanje
thrownCard = i.throwcard(int(cardToThrow))
if first:
cardToBeat = thrownCard
first = False
print(f"{os.getpid()} {i.username} {thrownCard}")
# Provjerava da li bačena karta uzima ruku
self.cardTakesTheHand(thrownCard, cardToBeat, i, self.players)
# Bačene karte
hand.append(thrownCard)
if breakHand:
print("Runda je završila.")
break
# Zbrajanje bodova
self.pointSum(hand, self.players)
# Dijeljenje karata
self.contDeal(firstPlayer)
if killCommand:
print(f"Remainig cards: {self.cards.cards}")
return True
if not breakHand:
print("Runda je završila.")
pass
return False
else:
print(f"Remainig cards: {self.cards.cards}")
return True
def playgame(self):
self.pointReset()
# print("[STARTING]Starting game.")
# print(f"Timovi: \n\t-{self.players[0]} i {self.players[2]}\n\t-{self.players[1]} i {self.players[3]}")
timeStart = time.time()
while not self.handplay:
pass
f = open("generationResults.txt", "ab")
save = []
if self.players[0].score > self.players[1].score:
save.append(self.players[0].playingNetwork.neuralNetwork)
save.append(self.players[2].playingNetwork.neuralNetwork)
save.append(self.players[0].score + 1 + time.time() - timeStart - len(self.cards.cards))
elif self.players[0].score < self.players[1].score:
save.append(self.players[1].playingNetwork.neuralNetwork)
save.append(self.players[3].playingNetwork.neuralNetwork)
save.append(self.players[1].score + 1 + time.time() - timeStart - len(self.cards.cards))
else:
if self.players[0].takesTheHand + self.players[2].takesTheHand:
save.append(self.players[0].playingNetwork.neuralNetwork)
save.append(self.players[2].playingNetwork.neuralNetwork)
save.append(self.players[0].score + 1 + time.time() - timeStart - len(self.cards.cards))
else:
save.append(self.players[1].playingNetwork.neuralNetwork)
save.append(self.players[3].playingNetwork.neuralNetwork)
save.append(self.players[1].score + 1 + time.time() - timeStart - len(self.cards.cards))
pickle.dump(save, f)
f.close()
def runGame(x: Game):
x.playgame()
if __name__ == "__main__":
# random.seed(2)
trainingTimeStart = time.time()
print(f"Generation: 0")
genTimeStart = time.time()
numberOfGames = 25
numberOfPlayers = numberOfGames * 4
botPlayers = [Player("bot" + str(i), nn.NeuralNetwork()) for i in range(numberOfPlayers)]
for i in range(numberOfPlayers):
botPlayers[i].playingNetwork.addInputLayer(29)
botPlayers[i].playingNetwork.addLayer(15)
botPlayers[i].playingNetwork.addLayer(15)
botPlayers[i].playingNetwork.addLayer(5)
numberOfGeneration = 1000
games = [Game([botPlayers.pop() for _ in range(4)]) for _ in range(numberOfGames)]
pool = mp.Pool()
results = pool.map(runGame, games)
print(f"Time of generation 0: {time.time() - genTimeStart}")
"""processes = []
for i in games:
processes.append(mp.Process(target=i.playgame))
processes[-1].start()
processes[-1].join()"""
"""threads = []
for i in games:
threads.append(thrd.Thread(target=i.playgame))
threads[-1].start()
threads[-1].join()"""
for i in range(numberOfGeneration):
print(f"Generation: {i + 1}")
genTimeStart = time.time()
generationResults = []
f = open("generationResults.txt", "rb")
for _ in range(numberOfGames):
try:
generationResults.append(pickle.load(f))
except:
pass
f.close()
f = open("generationResults.txt", "w")
f.close()
bestInGeneration = generationResults[0]
for j in generationResults:
if j[2] > bestInGeneration[2]:
bestInGeneration = j
botPlayers = [Player("bot" + str(j) + "_" + str(i), nn.NeuralNetwork()) for j in range(numberOfPlayers)]
for j in range(numberOfPlayers):
if j < numberOfPlayers/2:
botPlayers[j].playingNetwork.neuralNetwork = bestInGeneration[0]
else:
botPlayers[j].playingNetwork.neuralNetwork = bestInGeneration[1]
random.shuffle(botPlayers)
games = [Game([botPlayers.pop() for _ in range(4)]) for _ in range(numberOfGames)]
pool = mp.Pool()
results = pool.map(runGame, games)
print(f"Time of generation {i+1}: {time.time() - genTimeStart}")
"""threads = []
for j in games:
threads.append(thrd.Thread(target=j.playgame))
threads[-1].start()
threads[-1].join()"""
"""processes = []
for j in games:
processes.append(mp.Process(target=j.playgame))
processes[-1].start()
processes[-1].join()"""
print(f"Training time: {time.time() - trainingTimeStart}")
generationResults = []
f = open("generationResults.txt", "rb")
for _ in range(numberOfGames):
generationResults.append(pickle.load(f))
f.close()
bestInGeneration = generationResults[0]
for j in generationResults:
if j[2] > bestInGeneration[2]:
bestInGeneration = j
f.close()
f = open("generationResults.txt", "wb")
pickle.dump(bestInGeneration, f)
f.close() | [
1,
1053,
4036,
30004,
13,
3166,
19229,
1053,
2391,
30004,
13,
5215,
2448,
3631,
13724,
408,
302,
29876,
30004,
13,
5215,
2897,
30004,
13,
5215,
6674,
307,
985,
292,
408,
22326,
30004,
13,
5215,
3244,
292,
408,
1468,
29881,
30004,
13,
... |
solutions/807/807-kir3i.py | iknoom/LeetCode-Solutions | 4 | 101366 | <reponame>iknoom/LeetCode-Solutions<filename>solutions/807/807-kir3i.py
class Solution:
def maxIncreaseKeepingSkyline(self, grid):
skyline = []
for v_line in grid:
skyline.append([max(v_line)] * len(grid))
for x, h_line in enumerate(list(zip(*grid))):
max_h = max(h_line)
for y in range(len(skyline)):
skyline[y][x] = min(skyline[y][x], max_h)
ans = sum([sum(l) for l in skyline]) - sum([sum(l) for l in grid])
return ans | [
1,
529,
276,
1112,
420,
29958,
638,
1217,
290,
29914,
3226,
300,
3399,
29899,
13296,
17925,
29966,
9507,
29958,
2929,
17925,
29914,
29947,
29900,
29955,
29914,
29947,
29900,
29955,
29899,
14166,
29941,
29875,
29889,
2272,
13,
1990,
24380,
2... |
cds_migrator_kit/circulation/__init__.py | kprzerwa/cds-migrator-kit | 0 | 81310 | <reponame>kprzerwa/cds-migrator-kit
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# cds-migrator-kit is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""CDS Migrator Circulation module."""
import click
from flask.cli import with_appcontext
from .items.cli import libraries, items
from .users.cli import users
@click.group()
def circulation():
"""CDS Migrator Circulation commands."""
@circulation.command()
@click.argument('users_json', type=click.Path(exists=True))
@with_appcontext
def borrowers(users_json):
"""Load users from JSON files and output ILS Records."""
users(users_json)
@circulation.command()
@click.argument('libraries_json', type=click.Path(exists=True))
@with_appcontext
def libraries(libraries_json):
"""Load libraries from JSON files and output ILS Records."""
libraries(libraries_json)
@circulation.command()
@click.argument('items_json_folder', type=click.Path(exists=True))
@click.argument('locations_json', type=click.Path(exists=True))
@with_appcontext
def items(items_json_folder, locations_json):
"""Load items from JSON files.
:param str items_json_folder: The path to the JSON dump of the legacy items
:param str locations_json: The path to the JSON records of the new ILS
libraries (already migrated)
"""
items(items_json_folder, locations_json)
| [
1,
529,
276,
1112,
420,
29958,
29895,
558,
3298,
2766,
29914,
2252,
29879,
29899,
26983,
1061,
29899,
7354,
13,
29937,
448,
29930,
29899,
14137,
29901,
23616,
29899,
29947,
448,
29930,
29899,
13,
29937,
13,
29937,
910,
934,
338,
760,
310,... |
onadata/apps/logger/management/commands/set_xform_surveys_with_geopoints.py | childhelpline/myhelpline | 1 | 135766 | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4 fileencoding=utf-8
from django.core.management.base import BaseCommand
from django.utils.translation import ugettext_lazy
from onadata.apps.logger.models.xform import XForm
from onadata.libs.utils.model_tools import queryset_iterator
class Command(BaseCommand):
help = ugettext_lazy("Import a folder of XForms for ODK.")
def handle(self, *args, **kwargs):
xforms = XForm.objects.all()
total = xforms.count()
count = 0
for xform in queryset_iterator(XForm.objects.all()):
has_geo = xform.geocoded_submission_count() > 0
try:
xform.instances_with_geopoints = has_geo
xform.save()
except Exception as e:
self.stderr.write(e)
else:
count += 1
self.stdout.write("%d of %d forms processed." % (count, total))
| [
1,
18787,
4855,
29914,
2109,
29914,
6272,
3017,
13,
29937,
325,
326,
29901,
7468,
18696,
29922,
29946,
380,
29879,
29922,
29946,
634,
2381,
29922,
29946,
934,
22331,
29922,
9420,
29899,
29947,
13,
13,
3166,
9557,
29889,
3221,
29889,
21895,
... |
bookings/views.py | nintran1995/python-example | 0 | 180096 | from django.shortcuts import render
from django.http import HttpResponse
from django.template import loader
from .models import Boarding
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.views import generic
class IndexView(generic.ListView):
model = Boarding
template_name = 'bookings/index.html'
def search(request, boarding_zip_code, boarding_start, boarding_end, boarding_size):
# selected_choice = question.choice_set.get(pk=request.POST['choice'])
# return HttpResponse("You're looking at question %s." % boarding_zip_code)
boardings = Boarding.objects.filter(
size=boarding_size).order_by('-start_date')[:5]
return render(request, 'bookings/search.html', {
'boardings': boardings,
})
| [
1,
515,
9557,
29889,
12759,
7582,
29879,
1053,
4050,
13,
3166,
9557,
29889,
1124,
1053,
9056,
5103,
13,
3166,
9557,
29889,
6886,
1053,
23466,
13,
3166,
869,
9794,
1053,
12590,
292,
13,
3166,
9557,
29889,
1124,
1053,
9056,
5103,
24735,
1... |
trialbot/data/datasets/tabular_dataset.py | zxteloiv/TrialBot | 3 | 1616063 | from .file_dataset import FileDataset
class TabSepFileDataset(FileDataset):
def get_example(self, i):
line = super(TabSepFileDataset, self).get_example(i)
parts = line.rstrip('\r\n').split('\t')
return tuple(parts)
| [
1,
515,
869,
1445,
29918,
24713,
1053,
3497,
16390,
24541,
13,
13,
1990,
11090,
29903,
1022,
2283,
16390,
24541,
29898,
2283,
16390,
24541,
1125,
13,
1678,
822,
679,
29918,
4773,
29898,
1311,
29892,
474,
1125,
13,
4706,
1196,
353,
2428,
... |
custom_components/tastyigniter/binary_sensor.py | djtimca/hatastyigniter | 0 | 111306 | """Definition and setup of the TastyIgniter Binary Sensors for Home Assistant."""
import logging
import time
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from homeassistant.components.binary_sensor import BinarySensorEntity
from homeassistant.const import ATTR_NAME
from homeassistant.components.sensor import ENTITY_ID_FORMAT
from . import TastyIgniterCoordinator
from .const import ATTR_IDENTIFIERS, ATTR_MANUFACTURER, ATTR_MODEL, DOMAIN, COORDINATOR
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up the binary sensor platforms."""
coordinator = hass.data[DOMAIN][entry.entry_id][COORDINATOR]
sensors = []
for location in coordinator.data["locations"]:
sensors.append(
TastyIgniterSensor(
coordinator,
location,
"mdi:food",
"ti_location",
)
)
async_add_entities(sensors)
class TastyIgniterSensor(BinarySensorEntity):
"""Defines a TastyIgniter Binary sensor."""
def __init__(
self,
coordinator: TastyIgniterCoordinator,
location: dict,
icon: str,
device_identifier: str,
):
"""Initialize Entities."""
self._name = f"TI - {location['location_name']}"
self._location_id = location["location_id"]
self._unique_id = f"ti_{self._location_id}"
self._state = None
self._icon = icon
self._device_identifier = device_identifier
self.coordinator = coordinator
self._location = location
self.attrs = {}
@property
def should_poll(self) -> bool:
"""No need to poll. Coordinator notifies entity of updates."""
return False
@property
def available(self) -> bool:
"""Return if entity is available."""
return self.coordinator.last_update_success
@property
def unique_id(self):
"""Return the unique Home Assistant friendly identifier for this entity."""
return self._unique_id
@property
def name(self):
"""Return the friendly name of this entity."""
return self._name
@property
def icon(self):
"""Return the icon for this entity."""
return self._icon
@property
def device_state_attributes(self):
"""Return the attributes."""
telephone = self._location["location_telephone"].replace("-","")
telephone = telephone.replace(" ","")
telephone = telephone.replace("(","")
telephone = telephone.replace(")","")
if len(telephone) == 10:
telephone = f"+1{telephone}"
else:
telephone = ""
self.attrs["phone"] = telephone
return self.attrs
@property
def device_info(self):
"""Define the device based on device_identifier."""
device_name = "TastyIgniter"
device_model = "Order Alerts"
return {
ATTR_IDENTIFIERS: {(DOMAIN, self._device_identifier)},
ATTR_NAME: device_name,
ATTR_MANUFACTURER: "TastyIgniter",
ATTR_MODEL: device_model,
}
@property
def is_on(self) -> bool:
"""Return the state."""
order_data = self.coordinator.data["orders"]
if order_data.get(self._location_id):
return True
else:
return False
async def async_update(self):
"""Update TastyIgniter Binary Sensor Entity."""
await self.coordinator.async_request_refresh()
async def async_added_to_hass(self):
"""Subscribe to updates."""
self.async_on_remove(
self.coordinator.async_add_listener(self.async_write_ha_state)
)
| [
1,
9995,
14683,
322,
6230,
310,
278,
323,
579,
29891,
17273,
1524,
29479,
317,
575,
943,
363,
8778,
4007,
22137,
1213,
15945,
13,
13,
5215,
12183,
13,
5215,
931,
13,
13,
3166,
3271,
465,
22137,
29889,
3952,
6774,
29889,
5504,
29918,
1... |
poetry/console/commands/self/update.py | mgasner/poetry | 0 | 7786 | <reponame>mgasner/poetry<gh_stars>0
import hashlib
import os
import shutil
import subprocess
import sys
import tarfile
from functools import cmp_to_key
from gzip import GzipFile
try:
from urllib.error import HTTPError
from urllib.request import urlopen
except ImportError:
from urllib2 import HTTPError
from urllib2 import urlopen
from cleo import argument
from cleo import option
from ..command import Command
class SelfUpdateCommand(Command):
name = "update"
description = "Updates poetry to the latest version."
arguments = [argument("version", "The version to update to.", optional=True)]
options = [option("preview", None, "Install prereleases.")]
BASE_URL = "https://github.com/sdispater/poetry/releases/download"
@property
def home(self):
from poetry.utils._compat import Path
from poetry.utils.appdirs import expanduser
home = Path(expanduser("~"))
return home / ".poetry"
@property
def lib(self):
return self.home / "lib"
@property
def lib_backup(self):
return self.home / "lib-backup"
def handle(self):
from poetry.__version__ import __version__
from poetry.repositories.pypi_repository import PyPiRepository
from poetry.semver import Version
from poetry.utils._compat import Path
current = Path(__file__)
try:
current.relative_to(self.home)
except ValueError:
raise RuntimeError(
"Poetry was not installed with the recommended installer. "
"Cannot update automatically."
)
version = self.argument("version")
if not version:
version = ">=" + __version__
repo = PyPiRepository(fallback=False)
packages = repo.find_packages(
"poetry", version, allow_prereleases=self.option("preview")
)
if not packages:
self.line("No release found for the specified version")
return
packages.sort(
key=cmp_to_key(
lambda x, y: 0
if x.version == y.version
else int(x.version < y.version or -1)
)
)
release = None
for package in packages:
if package.is_prerelease():
if self.option("preview"):
release = package
break
continue
release = package
break
if release is None:
self.line("No new release found")
return
if release.version == Version.parse(__version__):
self.line("You are using the latest version")
return
self.update(release)
def update(self, release):
version = release.version
self.line("Updating to <info>{}</info>".format(version))
if self.lib_backup.exists():
shutil.rmtree(str(self.lib_backup))
# Backup the current installation
if self.lib.exists():
shutil.copytree(str(self.lib), str(self.lib_backup))
shutil.rmtree(str(self.lib))
try:
self._update(version)
except Exception:
if not self.lib_backup.exists():
raise
shutil.copytree(str(self.lib_backup), str(self.lib))
shutil.rmtree(str(self.lib_backup))
raise
finally:
if self.lib_backup.exists():
shutil.rmtree(str(self.lib_backup))
self.line("")
self.line("")
self.line(
"<info>Poetry</info> (<comment>{}</comment>) is installed now. Great!".format(
version
)
)
def _update(self, version):
from poetry.utils.helpers import temporary_directory
platform = sys.platform
if platform == "linux2":
platform = "linux"
checksum = "poetry-{}-{}.sha256sum".format(version, platform)
try:
r = urlopen(self.BASE_URL + "/{}/{}".format(version, checksum))
except HTTPError as e:
if e.code == 404:
raise RuntimeError("Could not find {} file".format(checksum))
raise
checksum = r.read().decode()
# We get the payload from the remote host
name = "poetry-{}-{}.tar.gz".format(version, platform)
try:
r = urlopen(self.BASE_URL + "/{}/{}".format(version, name))
except HTTPError as e:
if e.code == 404:
raise RuntimeError("Could not find {} file".format(name))
raise
meta = r.info()
size = int(meta["Content-Length"])
current = 0
block_size = 8192
bar = self.progress_bar(max=size)
bar.set_format(" - Downloading <info>{}</> <comment>%percent%%</>".format(name))
bar.start()
sha = hashlib.sha256()
with temporary_directory(prefix="poetry-updater-") as dir_:
tar = os.path.join(dir_, name)
with open(tar, "wb") as f:
while True:
buffer = r.read(block_size)
if not buffer:
break
current += len(buffer)
f.write(buffer)
sha.update(buffer)
bar.set_progress(current)
bar.finish()
# Checking hashes
if checksum != sha.hexdigest():
raise RuntimeError(
"Hashes for {} do not match: {} != {}".format(
name, checksum, sha.hexdigest()
)
)
gz = GzipFile(tar, mode="rb")
try:
with tarfile.TarFile(tar, fileobj=gz, format=tarfile.PAX_FORMAT) as f:
f.extractall(str(self.lib))
finally:
gz.close()
def process(self, *args):
return subprocess.check_output(list(args), stderr=subprocess.STDOUT)
def _bin_path(self, base_path, bin):
if sys.platform == "win32":
return (base_path / "Scripts" / bin).with_suffix(".exe")
return base_path / "bin" / bin
| [
1,
529,
276,
1112,
420,
29958,
29885,
25496,
1089,
29914,
1129,
27184,
29966,
12443,
29918,
303,
1503,
29958,
29900,
13,
5215,
6608,
1982,
13,
5215,
2897,
13,
5215,
528,
4422,
13,
5215,
1014,
5014,
13,
5215,
10876,
13,
5215,
9913,
1445,... |
Backend/models/risklayerPrognosis.py | dbvis-ukon/coronavis | 15 | 6542 | from db import db
class RisklayerPrognosis(db.Model):
__tablename__ = 'risklayer_prognosis'
datenbestand = db.Column(db.TIMESTAMP, primary_key=True, nullable=False)
prognosis = db.Column(db.Float, nullable=False)
# class RisklayerPrognosisSchema(SQLAlchemyAutoSchema):
# class Meta:
# strict = True
# model = RisklayerPrognosis
#
# timestamp = fields.Timestamp(data_key="datenbestand")
# prognosis = fields.Number(data_key="prognosis")
| [
1,
515,
4833,
1053,
4833,
13,
13,
13,
1990,
390,
3873,
13148,
1184,
5138,
19263,
29898,
2585,
29889,
3195,
1125,
13,
1678,
4770,
3891,
2435,
420,
1649,
353,
525,
3780,
6321,
2747,
29918,
771,
5138,
19263,
29915,
13,
13,
1678,
1418,
26... |
falkon/kernels/distance_kernel.py | vishalbelsare/falkon | 130 | 91717 | from typing import Union, Optional, Dict
import torch
from falkon import sparse
from falkon.kernels.diff_kernel import DiffKernel
from falkon.la_helpers.square_norm_fn import square_norm_diff
from falkon.options import FalkonOptions
from falkon.sparse import SparseTensor
SQRT3 = 1.7320508075688772
SQRT5 = 2.23606797749979
def validate_sigma(sigma: Union[float, torch.Tensor]) -> torch.Tensor:
if isinstance(sigma, torch.Tensor):
# Sigma is a 1-item tensor ('single')
try:
sigma.item()
return sigma
except ValueError:
pass
# Sigma is a vector ('diag')
if sigma.dim() == 1 or sigma.shape[1] == 1:
return sigma.reshape(-1)
else:
# TODO: Better error
raise ValueError("sigma must be a scalar or a vector.")
else:
try:
return torch.tensor([float(sigma)], dtype=torch.float64)
except TypeError:
raise TypeError("Sigma must be a scalar or a tensor.")
def _sq_dist(mat1, mat2, norm_mat1, norm_mat2, out: Optional[torch.Tensor]) -> torch.Tensor:
if mat1.dim() == 3:
if out is None:
out = torch.baddbmm(norm_mat1, mat1, mat2.transpose(-2, -1), alpha=-2, beta=1) # b*n*m
else:
out = torch.baddbmm(norm_mat1, mat1, mat2.transpose(-2, -1), alpha=-2, beta=1,
out=out) # b*n*m
else:
if out is None:
out = torch.addmm(norm_mat1, mat1, mat2.transpose(-2, -1), alpha=-2, beta=1) # n*m
else:
out = torch.addmm(norm_mat1, mat1, mat2.transpose(-2, -1), alpha=-2, beta=1,
out=out) # n*m
out.add_(norm_mat2.transpose(-2, -1))
out.clamp_min_(1e-20)
return out
def _sparse_sq_dist(X1_csr: SparseTensor, X2_csr: SparseTensor,
X1: SparseTensor, X2: SparseTensor,
out: torch.Tensor) -> torch.Tensor:
sq1 = torch.empty(X1_csr.size(0), dtype=X1_csr.dtype, device=X1_csr.device)
sparse.sparse_square_norm(X1_csr, sq1) # TODO: This must be implemented for CUDA tensors
sq1 = sq1.reshape(-1, 1)
sq2 = torch.empty(X2_csr.size(0), dtype=X2_csr.dtype, device=X2_csr.device)
sparse.sparse_square_norm(X2_csr, sq2)
sq2 = sq2.reshape(-1, 1)
sparse.sparse_matmul(X1, X2, out)
out.mul_(-2.0)
out.add_(sq1.to(device=X1.device))
out.add_(sq2.to(device=X2.device).t())
out.clamp_min_(1e-20)
return out
def rbf_core(mat1, mat2, out: Optional[torch.Tensor], sigma):
"""
Note 1: if out is None, then this function will be differentiable wrt all three remaining inputs.
Note 2: this function can deal with batched inputs
Parameters
----------
sigma
mat1
mat2
out
Returns
-------
"""
mat1_div_sig = mat1 / sigma
mat2_div_sig = mat2 / sigma
norm_sq_mat1 = square_norm_diff(mat1_div_sig, -1, True) # b*n*1 or n*1
norm_sq_mat2 = square_norm_diff(mat2_div_sig, -1, True) # b*m*1 or m*1
out = _sq_dist(mat1_div_sig, mat2_div_sig, norm_sq_mat1, norm_sq_mat2, out)
out.mul_(-0.5)
out.exp_()
return out
def rbf_core_sparse(mat1: SparseTensor, mat2: SparseTensor,
mat1_csr: SparseTensor, mat2_csr: SparseTensor,
out: torch.Tensor, sigma) -> torch.Tensor:
gamma = 0.5 / (sigma ** 2)
out = _sparse_sq_dist(X1_csr=mat1_csr, X2_csr=mat2_csr, X1=mat1, X2=mat2, out=out)
out.mul_(-gamma)
out.exp_()
return out
def laplacian_core(mat1, mat2, out: Optional[torch.Tensor], sigma):
mat1_div_sig = mat1 / sigma
mat2_div_sig = mat2 / sigma
norm_sq_mat1 = square_norm_diff(mat1_div_sig, -1, True) # b*n*1
norm_sq_mat2 = square_norm_diff(mat2_div_sig, -1, True) # b*m*1
orig_out = out
out = _sq_dist(mat1_div_sig, mat2_div_sig, norm_sq_mat1, norm_sq_mat2, out)
out.sqrt_() # Laplacian: sqrt of squared-difference
# The gradient calculation needs the output of sqrt_
if orig_out is None: # TODO: We could be more explicit in the parameters about whether the gradient is or isn't needed
out = out.neg()
else:
out.neg_()
out.exp_()
return out
def laplacian_core_sparse(mat1: SparseTensor, mat2: SparseTensor,
mat1_csr: SparseTensor, mat2_csr: SparseTensor,
out: torch.Tensor, sigma) -> torch.Tensor:
gamma = 1 / sigma
out = _sparse_sq_dist(X1_csr=mat1_csr, X2_csr=mat2_csr, X1=mat1, X2=mat2, out=out)
out.sqrt_()
out.mul_(-gamma)
out.exp_()
return out
def matern_core(mat1, mat2, out: Optional[torch.Tensor], sigma, nu):
if nu == 0.5:
return laplacian_core(mat1, mat2, out, sigma)
elif nu == float('inf'):
return rbf_core(mat1, mat2, out, sigma)
orig_out = out
mat1_div_sig = mat1 / sigma
mat2_div_sig = mat2 / sigma
norm_sq_mat1 = square_norm_diff(mat1_div_sig, -1, True) # b*n*1
norm_sq_mat2 = square_norm_diff(mat2_div_sig, -1, True) # b*m*1
out = _sq_dist(mat1_div_sig, mat2_div_sig, norm_sq_mat1, norm_sq_mat2, out)
if nu == 1.5:
# (1 + sqrt(3)*D) * exp(-sqrt(3)*D))
out.sqrt_()
if orig_out is None: # TODO: We could be more explicit in the parameters about whether the gradient is or isn't needed
out = out.mul(SQRT3)
else:
out.mul_(SQRT3)
out_neg = torch.neg(out) # extra n*m block
out_neg.exp_()
out.add_(1.0).mul_(out_neg)
elif nu == 2.5:
# (1 + sqrt(5)*D + (sqrt(5)*D)^2 / 3 ) * exp(-sqrt(5)*D)
out_sqrt = torch.sqrt(out)
if orig_out is None: # TODO: We could be more explicit in the parameters about whether the gradient is or isn't needed
out_sqrt = out_sqrt.mul(SQRT5)
else:
out_sqrt.mul_(SQRT5)
out.mul_(5.0 / 3.0).add_(out_sqrt).add_(1.0)
out_sqrt.neg_().exp_()
out.mul_(out_sqrt)
return out
def matern_core_sparse(mat1: SparseTensor, mat2: SparseTensor,
mat1_csr: SparseTensor, mat2_csr: SparseTensor,
out: torch.Tensor, sigma, nu) -> torch.Tensor:
if nu == 0.5:
return laplacian_core_sparse(mat1, mat2, mat1_csr, mat2_csr, out, sigma)
elif nu == float('inf'):
return rbf_core_sparse(mat1, mat2, mat1_csr, mat2_csr, out, sigma)
gamma = 1 / (sigma ** 2)
out = _sparse_sq_dist(X1_csr=mat1_csr, X2_csr=mat2_csr, X1=mat1, X2=mat2, out=out)
out.mul_(gamma)
# For certain nu = 1.5, 2.5 we will need an extra n*m block
if nu == 1.5:
# (1 + sqrt(3)*D) * exp(-sqrt(3)*D))
out.sqrt_()
out.mul_(SQRT3)
out_neg = torch.neg(out)
out_neg.exp_()
out.add_(1.0).mul_(out_neg)
elif nu == 2.5:
# (1 + sqrt(5)*D + (sqrt(5)*D)^2 / 3 ) * exp(-sqrt(5)*D)
out_sqrt = torch.sqrt(out)
out_sqrt.mul_(SQRT5)
out.mul_(5.0 / 3.0).add_(out_sqrt).add_(1.0)
out_sqrt.neg_().exp_()
out.mul_(out_sqrt)
return out
class GaussianKernel(DiffKernel):
r"""Class for computing the Gaussian kernel and related kernel-vector products
The Gaussian kernel is one of the most common and effective kernel embeddings
since it is infinite dimensional, and governed by a single parameter. The kernel length-scale
determines the width of the Gaussian distribution which is placed on top of each point.
A larger sigma corresponds to a wide Gaussian, so that the relative influence of far away
points will be high for computing the kernel at a given datum.
On the opposite side of the spectrum, a small sigma means that only nearby points will
influence the kernel.
Parameters
-----------
sigma
The length-scale of the kernel.
This can be a scalar, and then it corresponds to the standard deviation
of the Gaussian distribution from which the kernel is derived.
If `sigma` is a vector of size `d` (where `d` is the dimensionality of the data), it is
interpreted as the diagonal standard deviation of the Gaussian distribution.
It can also be a matrix of size `d*d` where `d`, in which case sigma will be the precision
matrix (inverse covariance).
opt
Additional options to be forwarded to the matrix-vector multiplication
routines.
Examples
--------
Creating a Gaussian kernel with a single length-scale. Operations on this kernel will not
use KeOps.
>>> K = GaussianKernel(sigma=3.0, opt=FalkonOptions(keops_active="no"))
Creating a Gaussian kernel with a different length-scale per dimension
>>> K = GaussianKernel(sigma=torch.tensor([1.0, 3.5, 7.0]))
Creating a Gaussian kernel object with full covariance matrix (randomly chosen)
>>> mat = torch.randn(3, 3, dtype=torch.float64)
>>> sym_mat = mat @ mat.T
>>> K = GaussianKernel(sigma=sym_mat)
>>> K
GaussianKernel(sigma=tensor([[ 2.0909, 0.0253, -0.2490],
[ 0.0253, 0.3399, -0.5158],
[-0.2490, -0.5158, 4.4922]], dtype=torch.float64)) #random
Notes
-----
The Gaussian kernel with a single length-scale follows
.. math::
k(x, x') = \exp{-\dfrac{\lVert x - x' \rVert^2}{2\sigma^2}}
When the length-scales are specified as a matrix, the RBF kernel is determined by
.. math::
k(x, x') = \exp{-\dfrac{1}{2}x\Sigma x'}
In both cases, the actual computation follows a different path, working on the expanded
norm.
"""
kernel_name = "gaussian"
core_fn = rbf_core
def __init__(self, sigma: Union[float, torch.Tensor], opt: Optional[FalkonOptions] = None):
self.sigma = validate_sigma(sigma)
super().__init__(self.kernel_name, opt, core_fn=GaussianKernel.core_fn, sigma=self.sigma)
def _keops_mmv_impl(self, X1, X2, v, kernel, out, opt: FalkonOptions):
formula = 'Exp(SqDist(x1 / g, x2 / g) * IntInv(-2)) * v'
aliases = [
'x1 = Vi(%d)' % (X1.shape[1]),
'x2 = Vj(%d)' % (X2.shape[1]),
'v = Vj(%d)' % (v.shape[1]),
'g = Pm(%d)' % (self.sigma.shape[0])
]
other_vars = [self.sigma.to(device=X1.device, dtype=X1.dtype)]
return self.keops_mmv(X1, X2, v, out, formula, aliases, other_vars, opt)
def extra_mem(self) -> Dict[str, float]:
return {
# Data-matrix / sigma in prepare + Data-matrix / sigma in apply
'nd': 2,
'md': 1,
# Norm results in prepare
'm': 1,
'n': 1,
}
def detach(self) -> 'GaussianKernel':
detached_params = self._detach_params()
return GaussianKernel(detached_params["sigma"], opt=self.params)
# noinspection PyMethodOverriding
def compute_sparse(self, X1: SparseTensor, X2: SparseTensor, out: torch.Tensor,
X1_csr: SparseTensor, X2_csr: SparseTensor) -> torch.Tensor:
if len(self.sigma) > 1:
raise NotImplementedError("Sparse kernel is only implemented for scalar sigmas.")
dev_kernel_tensor_params = self._move_kernel_params(X1)
return rbf_core_sparse(X1, X2, X1_csr, X2_csr, out, dev_kernel_tensor_params["sigma"])
def __repr__(self):
return f"GaussianKernel(sigma={self.sigma})"
def __str__(self):
return f"Gaussian kernel<{self.sigma}>"
class LaplacianKernel(DiffKernel):
r"""Class for computing the Laplacian kernel, and related kernel-vector products.
The Laplacian kernel is similar to the Gaussian kernel, but less sensitive to changes
in the parameter `sigma`.
Parameters
----------
sigma
The length-scale of the Laplacian kernel
Notes
-----
The Laplacian kernel is determined by the following formula
.. math::
k(x, x') = \exp{-\frac{\lVert x - x' \rVert}{\sigma}}
"""
kernel_name = "laplacian"
def __init__(self, sigma: Union[float, torch.Tensor], opt: Optional[FalkonOptions] = None):
self.sigma = validate_sigma(sigma)
super().__init__(self.kernel_name, opt, core_fn=laplacian_core, sigma=self.sigma)
def _keops_mmv_impl(self, X1, X2, v, kernel, out, opt: FalkonOptions):
formula = 'Exp(-Sqrt(SqDist(x1 / g, x2 / g))) * v'
aliases = [
'x1 = Vi(%d)' % (X1.shape[1]),
'x2 = Vj(%d)' % (X2.shape[1]),
'v = Vj(%d)' % (v.shape[1]),
'g = Pm(%d)' % (self.sigma.shape[0])
]
other_vars = [self.sigma.to(device=X1.device, dtype=X1.dtype)]
return self.keops_mmv(X1, X2, v, out, formula, aliases, other_vars, opt)
def extra_mem(self) -> Dict[str, float]:
return {
# Data-matrix / sigma in prepare + Data-matrix / sigma in apply
'nd': 2,
'md': 1,
# Norm results in prepare
'm': 1,
'n': 1,
}
def detach(self) -> 'LaplacianKernel':
detached_params = self._detach_params()
return LaplacianKernel(detached_params["sigma"], opt=self.params)
# noinspection PyMethodOverriding
def compute_sparse(self, X1: SparseTensor, X2: SparseTensor, out: torch.Tensor,
X1_csr: SparseTensor, X2_csr: SparseTensor) -> torch.Tensor:
if len(self.sigma) > 1:
raise NotImplementedError("Sparse kernel is only implemented for scalar sigmas.")
dev_kernel_tensor_params = self._move_kernel_params(X1)
return laplacian_core_sparse(X1, X2, X1_csr, X2_csr, out, dev_kernel_tensor_params["sigma"])
def __repr__(self):
return f"LaplacianKernel(sigma={self.sigma})"
def __str__(self):
return f"Laplaciankernel<{self.sigma}>"
class MaternKernel(DiffKernel):
r"""Class for computing the Matern kernel, and related kernel-vector products.
The Matern kernels define a generic class of kernel functions which includes the
Laplacian and Gaussian kernels. The class is parametrized by 'nu'. When `nu = 0.5`
this kernel is equivalent to the Laplacian kernel, when `nu = float('inf')`, the
Matern kernel is equivalent to the Gaussian kernel.
This class implements the Matern kernel only for the values of nu which have a closed
form solution, which are 0.5, 1.5, 2.5, and infinity.
Parameters
----------
sigma
The length-scale of the Matern kernel. The length-scale can be either a scalar
or a vector. Matrix-valued length-scales are not allowed for the Matern kernel.
nu
The parameter of the Matern kernel. It should be one of `0.5`, `1.5`, `2.5` or
`inf`.
Notes
-----
While for `nu = float('inf')` this kernel is equivalent to the :class:`GaussianKernel`,
the implementation is more general and using the :class:`GaussianKernel` directly
may be computationally more efficient.
"""
_valid_nu_values = frozenset({0.5, 1.5, 2.5, float('inf')})
def __init__(self,
sigma: Union[float, torch.Tensor],
nu: Union[float, torch.Tensor],
opt: Optional[FalkonOptions] = None):
self.sigma = validate_sigma(sigma)
self.nu = self.validate_nu(nu)
self.kernel_name = f"{self.nu:.1f}-matern"
super().__init__(self.kernel_name, opt, core_fn=matern_core, sigma=self.sigma, nu=self.nu)
def _keops_mmv_impl(self, X1, X2, v, kernel, out, opt: FalkonOptions):
if self.nu == 0.5:
formula = 'Exp(-Norm2(x1 / s - x2 / s)) * v'
elif self.nu == 1.5:
formula = ('(IntCst(1) + Sqrt(IntCst(3)) * Norm2(x1 / s - x2 / s)) * '
'(Exp(-Sqrt(IntCst(3)) * Norm2(x1 / s - x2 / s)) * v)')
elif self.nu == 2.5:
formula = ('(IntCst(1) + Sqrt(IntCst(5)) * Norm2(x1 / s - x2 / s) + '
'(IntInv(3) * IntCst(5)) * SqNorm2(x1 / s - x2 / s)) * '
'(Exp(-Sqrt(IntCst(5)) * Norm2(x1 / s - x2 / s)) * v)')
elif self.nu == float('inf'):
formula = 'Exp(IntInv(-2) * SqDist(x1 / s, x2 / s)) * v'
else:
raise RuntimeError(f"Unrecognized value of nu ({self.nu}). "
f"The onnly allowed values are 0.5, 1.5, 2.5, inf.")
aliases = [
'x1 = Vi(%d)' % (X1.shape[1]),
'x2 = Vj(%d)' % (X2.shape[1]),
'v = Vj(%d)' % (v.shape[1]),
's = Pm(%d)' % (self.sigma.shape[0])
]
other_vars = [self.sigma.to(device=X1.device, dtype=X1.dtype)]
return self.keops_mmv(X1, X2, v, out, formula, aliases, other_vars, opt)
def extra_mem(self) -> Dict[str, float]:
extra_mem = {
# Data-matrix / sigma
'nd': 1,
'md': 1,
# Norm results in prepare
'm': 1,
'n': 1,
}
if self.nu in {1.5, 2.5}:
# Extra kernel block in transform
extra_mem['nm'] = 1
return extra_mem
def detach(self) -> 'MaternKernel':
detached_params = self._detach_params()
return MaternKernel(detached_params["sigma"], detached_params["nu"], opt=self.params)
@staticmethod
def validate_nu(nu: Union[torch.Tensor, float]) -> float:
if isinstance(nu, torch.Tensor):
if nu.requires_grad:
raise ValueError("The nu parameter of the Matern kernel is not differentiable, "
"and must not require gradients.")
try:
out_nu = round(nu.item(), ndigits=2)
except ValueError:
raise ValueError("nu=%s is not convertible to a scalar." % (nu))
elif isinstance(nu, float):
out_nu = round(nu, ndigits=2)
else:
raise TypeError(f"nu must be a float or a tensor, not a {type(nu)}")
if out_nu not in MaternKernel._valid_nu_values:
raise ValueError(f"The given value of nu = {out_nu} can only take "
f"values {MaternKernel._valid_nu_values}.")
return out_nu
# noinspection PyMethodOverriding
def compute_sparse(self, X1: SparseTensor, X2: SparseTensor, out: torch.Tensor,
X1_csr: SparseTensor, X2_csr: SparseTensor) -> torch.Tensor:
if len(self.sigma) > 1:
raise NotImplementedError("Sparse kernel is only implemented for scalar sigmas.")
dev_kernel_tensor_params = self._move_kernel_params(X1)
return matern_core_sparse(X1, X2, X1_csr, X2_csr, out, dev_kernel_tensor_params["sigma"],
self.nu)
def __repr__(self):
return f"MaternKernel(sigma={self.sigma}, nu={self.nu:.1f})"
def __str__(self):
return f"Matern kernel<{self.sigma}, {self.nu:.1f}>"
| [
1,
515,
19229,
1053,
7761,
29892,
28379,
29892,
360,
919,
13,
13,
5215,
4842,
305,
13,
13,
3166,
285,
2235,
265,
1053,
29234,
13,
3166,
285,
2235,
265,
29889,
22178,
1379,
29889,
12765,
29918,
17460,
1053,
360,
2593,
29968,
5851,
13,
... |
scripts/graph.py | maurizioabba/rose | 488 | 56323 | #!/usr/bin/env python
#
###############################################################################
#
# Author: <NAME>
# Date: 8/24/2006
# File: graph.py
# Purpose: Plots ROSE performance data
#
###############################################################################
import sys
import os
import string
import getopt
import csv
import math
from gclass import *
from optparse import *
###############################################################################
def getHash(rawData):
"""
"""
hash = dict([ (rawData[i],rawData[i+1]) for i in range(0,len(rawData)-1,2) ])
return hash
##############################################################################
def generateGraph(reader,fout,xKey,yExclude,yInclude,separator):
"""
"""
if yInclude != []:
keys = yInclude
for row in reader:
hash = getHash(row)
data = ""
if yInclude == [] and yExclude != []:
keys = hash.keys()
keys.remove(xKey)
for y in yExclude:
keys.remove(y)
for key in keys:
data = data + separator + hash[key]
fout.write(hash[xKey] + data + '\n')
return keys
###############################################################################
def csv2gnuplot(inputs, output, xKey, yExclude, yInclude, xlabel, ylabel,
                Format, height, width, pointsize, with_,
                yMin, yMax, xMin, xMax):
    """Convert CSV key/value input files into a gnuplot data file
    (<output>.dat) plus a matching gnuplot script (<output>.plt).

    `with_` is the gnuplot plotting style (e.g. "lines", "points").  It is
    renamed with a trailing underscore because `with` is a Python keyword:
    the original parameter name was a SyntaxError on Python >= 2.6.  All
    callers pass this argument positionally, so the rename is
    backward-compatible.
    """
    fout = open(output + ".dat", 'a')
    try:
        for i in inputs:
            reader = csv.reader(open(i, 'r'), doublequote=True,
                                skipinitialspace=True)
            # keys from the last input file drive the plot commands below.
            keys = generateGraph(reader, fout, xKey, yExclude, yInclude, "\t")
    finally:
        fout.close()
    # gnuplot formatting: one (re)plot command per y-column, column 1 is x.
    index = 0
    fscriptout = open(output + ".plt", 'a')
    try:
        while index < len(keys):
            if index > 0:
                fscriptout.write("re")
            fscriptout.write("plot \"" + output + ".dat" + "\" using 1:" +
                             str(index + 2) + ' ')
            if with_ != "":
                fscriptout.write("smooth csplines ")
                fscriptout.write("title \"" + keys[index] + "\"" + ' ')
                fscriptout.write("with " + with_ + '\n')
            else:
                fscriptout.write("title \"" + keys[index] + "\"" + '\n')
            index += 1
        # while
        if Format != "":
            fscriptout.write("set terminal " + Format + '\n')
            fscriptout.write("set output \"" + output + '.' + Format + "\"\n")
        if xMin != "" or xMax != "":
            fscriptout.write("set xrange [" + xMin + ':' + xMax + "]\n")
        if yMin != "" or yMax != "":
            fscriptout.write("set yrange [" + yMin + ':' + yMax + "]\n")
        if xlabel != "":
            fscriptout.write("set xlabel \"" + xlabel + "\"\n")
        else:
            fscriptout.write("set xlabel \"" + xKey + "\"\n")
        if ylabel != "":
            fscriptout.write("set ylabel \"" + ylabel + "\"\n")
        # if
        fscriptout.write("set key below\nset key box\n")
        fscriptout.write("set size " + width + ',' + height + '\n')
        fscriptout.write("set pointsize " + pointsize + '\n')
        fscriptout.write("replot\n")
    finally:
        fscriptout.close()
    # end gnuplot formatting
    return
# cvs2gnuplot()
###############################################################################
def csv2excel(inputs, output, xKey, yExclude):
    """Convert CSV key/value input files into a single comma-separated data
    file (<output>.csv) suitable for importing into MS Excel.

    Bug fix: generateGraph() takes six arguments; the original call was
    missing yInclude and always raised TypeError.  Passing an empty include
    list restores the intended "all keys minus yExclude" behaviour with
    ',' as the separator.
    """
    fout = open(output + ".csv", 'a')
    try:
        for i in inputs:
            reader = csv.reader(open(i, 'r'), doublequote=True,
                                skipinitialspace=True)
            generateGraph(reader, fout, xKey, yExclude, [], ',')
    finally:
        fout.close()
    return
###############################################################################
def csv2matlab(inputs, output, xKey, yExclude, xlabel, ylabel, height, width, Format):
    """Convert CSV key/value input files into a Matlab data file
    (<output>.dat) and a Matlab plotting script (<output>.m) that draws one
    subplot per y-column in a ceil(sqrt(n)) x ceil(sqrt(n)) grid.

    Bug fix: generateGraph() takes six arguments; the original call was
    missing yInclude and raised TypeError.  An empty include list keeps the
    "all keys minus yExclude" behaviour with ' ' as the separator.
    """
    fout = open(output + ".dat", 'a')
    try:
        # Matlab data
        for i in inputs:
            reader = csv.reader(open(i, 'r'), doublequote=True,
                                skipinitialspace=True)
            keys = generateGraph(reader, fout, xKey, yExclude, [], " ")
    finally:
        fout.close()
    # Matlab script: columns are 1-based in Matlab; column 1 is x, so the
    # y-columns run from 2 to len(keys)+1 and keys[index-2] names them.
    fscriptout = open(output + ".m", 'a')
    try:
        index = 2
        ceilSqrt = int(math.ceil(math.sqrt(len(keys))))
        if xlabel == "":
            xlabel = xKey
        fscriptout.write("load " + output + ".dat" + '\n')
        fscriptout.write("set(gcf,'position',[0 0 " + str(width) + ' ' +
                         str(height) + "])\n")
        fscriptout.write("x = " + output + "(:,1)\n")
        while index < len(keys) + 2:
            fscriptout.write("y" + str(index) + " = " + output + "(:,"
                             + str(index) + ")\n")
            fscriptout.write("xlabel('" + xlabel + "')\n")
            fscriptout.write("ylabel('" + ylabel + "')\n")
            #fscriptout.write("ylabel('" + keys[index - 2] + "')\n")
            fscriptout.write("subplot(" + str(ceilSqrt) + ',' + str(ceilSqrt) +
                             ',' + str(index - 1) + ") ; ")
            fscriptout.write("plot(x,y" + str(index) + ",'o')\n")
            fscriptout.write("legend('" + keys[index - 2] + "')\n")
            index += 1
        if Format != "":
            fscriptout.write("set(gcf,'PaperPositionMode','auto')\n")
            fscriptout.write("print(gcf,'-d" + Format + "'," + '\'' +
                             output + '.' + Format + "')\n")
        fscriptout.write("quit\n")
    finally:
        fscriptout.close()
    # Matlab script
    return
###############################################################################
def cmdOptionParse(parser):
    """
    cmdOptionParse():
        Parses command-line arguments and redirects to appropriate functions.
    arguments:
        parser -- a optparse object that stores command-line arguments
    """
    # parse out options and input file arguments
    (options, inputs) = parser.parse_args()
    if inputs == []:
        sys.stderr.write("Error: No input file(s) specified\n")
        sys.exit(1)
    if options.output != "":
        output = options.output.split('.')[0]
    else:
        sys.stderr.write("Error: No output file name specified\n")
        sys.exit(1)
    if options.list:
        # Parenthesized single-argument print works on Python 2 and 3 alike
        # (the original bare print statements were Python-2-only).
        print("Supported formats:")
        print("1. Gnuplot (.dat .plt) -fgnuplot")
        print("2. MS Excel (.csv) -fexcel")
        print("3. Matlab (.dat) -fmatlab")
        sys.exit(0)
    if options.x == "":
        sys.stderr.write("Error: X-Axis data not specified, please specify with -x\n")
        sys.exit(1)
    # if, error checking
    if options.format == "gnuplot":
        if options.e != [] and options.y != []:
            sys.stderr.write("Error: Options -e and -y may not be used concurrently\n")
            sys.exit(1)
        # `with` is a Python keyword, so the option value must be fetched via
        # getattr; the original `options.with` was a SyntaxError.
        csv2gnuplot(inputs, output, options.x, options.e, options.y,
                    options.xlabel, options.ylabel, options.Format,
                    options.Height, options.Width, options.pointsize,
                    getattr(options, "with"), options.ymin, options.ymax,
                    options.xmin, options.xmax)
        if options.rehash != "" and options.granularity != "":
            # int() replaces the Python-2-only string.atoi().
            # NOTE(review): rehash() is not defined anywhere in this file;
            # this branch raises NameError if ever taken — confirm intent.
            rehash(output + ".dat", int(options.granularity), '\t')
        elif options.rehash != "" and options.granularity == "":
            sys.stderr.write("Error: You must specifiy -g or --granularity with --rehash\n")
            sys.exit(1)
        if options.run:
            args = []
            args.append("")
            args.append(output + ".plt")
            os.execvp("gnuplot", args)  # replaces the current process image
        # if
    # if
    elif options.format == "excel":
        csv2excel(inputs, options.output, options.x, options.e)
    elif options.format == "matlab":
        csv2matlab(inputs, options.output, options.x, options.e,
                   options.xlabel, options.ylabel,
                   options.Height, options.Width,
                   options.Format)
        if options.run:
            args = []
            args.append("")
            args.append("-nodesktop")
            args.append("-r")
            args.append(output)
            os.execvp("matlab", args)
    else:
        sys.stderr.write("Error: Unrecognized output format\n")
    return
###############################################################################
def cmdOptionInit(arguments):
    """
    cmdOptionInit():
        Builds the command-line parser for graph.py.
    arguments:
        arguments -- sys.argv list of command-line arguments (accepted for
                     symmetry; optparse reads sys.argv itself at parse time)
    variables:
        parser -- optparse, OptionParser()
    """
    parser = OptionParser()
    parser.set_usage("graph.py <input file> [options]")
    # One (flag tuple, add_option keyword arguments) row per option.
    option_table = [
        (("-f", "--format"), dict(help="Output file format",
                                  metavar="%FORMAT%")),
        (("-F", "--Format"), dict(help="Secondard output format",
                                  metavar="%FORMAT%", default="")),
        (("-l", "--list"), dict(help="List supported output formats",
                                action="store_true")),
        (("-o", "--output"), dict(help="Output file name", metavar="%FILE%",
                                  default="")),
        (("-r", "--run"), dict(help="Run plotting tool",
                               action="store_true")),
        (("-x",), dict(help="X Axis Key Data", metavar="<XKEY>", default="")),
        (("-y",), dict(help="Include Y Axis Data", metavar="<KEY>",
                       action="append", default=[])),
        (("-e",), dict(help="Exclude Y Axis Data", metavar="<KEY>",
                       action="append", default=[])),
        (("-g", "--granularity"),
         dict(help="granularity range for data manipulation",
              metavar="<#>", default="")),
        (("-w", "--with"), dict(help="With lines,points,etc.",
                                metavar="%WITH%", default="")),
        (("-H", "--Height"), dict(help="Output Height default=1",
                                  metavar="<H#>", default="1")),
        (("-W", "--Width"), dict(help="Output Width default=1",
                                 metavar="<W#>", default="1")),
        (("-P", "--pointsize"), dict(help="Set pointsize default=1",
                                     metavar="<P#>", default="1")),
        (("--rehash",), dict(help="Rehash Data", metavar="%MODE%",
                             default="")),
        (("--xlabel",), dict(help="X-Axis Label", metavar="%LABEL%",
                             default="")),
        (("--xmin",), dict(help="Minimum X range value", metavar="<#>",
                           default="")),
        (("--xmax",), dict(help="Maximum X range value", metavar="<#>",
                           default="")),
        (("--ylabel",), dict(help="Y-Axis Label", metavar="%LABEL%",
                             default="")),
        (("--ymin",), dict(help="Minimum Y range value", metavar="<#>",
                           default="")),
        (("--ymax",), dict(help="Maximum Y range value", metavar="<#>",
                           default="")),
    ]
    for flags, kwargs in option_table:
        parser.add_option(*flags, **kwargs)
    return parser
###############################################################################
###############################################################################
# Script entry point: build the option parser, then dispatch to the chosen
# converter.  Runs at import time — there is no __main__ guard by design.
parser = cmdOptionInit(sys.argv)
cmdOptionParse(parser)
# control flow:
# main->cmdOptionInit->main->cmdOptionParse->csv2{}->generateGraph<->getHash()
| [
1,
18787,
4855,
29914,
2109,
29914,
6272,
3017,
13,
29937,
13,
13383,
13383,
13383,
13383,
7346,
4136,
2277,
29937,
13,
29937,
13,
29937,
13361,
29901,
529,
5813,
29958,
13,
29937,
4712,
29901,
29871,
29947,
29914,
29906,
29946,
29914,
2990... |
scripts/migrate_to_rapidpro/fix_auto_baby_switch_data.py | praekeltfoundation/ndoh-hub | 0 | 176707 | import asyncio
import csv
import os
import time
from datetime import datetime, timedelta
import aiohttp
import psycopg2
from six.moves import urllib_parse
CONCURRENCY = 10  # maximum simultaneous RapidPro requests
HUB_OUTPUT_FILE = "hub_babyswitches.csv"  # audit CSV consumed by the hub/jembi
RAPIDPRO_OUTPUT_FILE = "rapidpro_babyswitch_updates.csv"  # contact updates to apply
LIMIT = 10_000_000  # safety cap on rows fetched per DB query
RAPIDPRO_URL = "https://rapidpro.prd.momconnect.co.za/"
RAPIDPRO_TOKEN = os.environ["RAPIDPRO_TOKEN"]  # required env var
HUB_DB_PASSWORD = os.environ["HUB_PASS"]  # required env var
# Progress counters shared (as module globals) by the async workers.
total = 0
excluded = 0
start, d_print = time.time(), time.time()
async def get_rapidpro_contact(session, contact_id):
    """Look up one RapidPro contact by UUID.

    Returns the first matching contact dict, or None when the UUID is
    unknown to RapidPro.
    """
    endpoint = urllib_parse.urljoin(
        RAPIDPRO_URL, f"/api/v2/contacts.json?uuid={contact_id}"
    )
    request_headers = {
        "Authorization": f"TOKEN {RAPIDPRO_TOKEN}",
        "Content-Type": "application/json",
        "Connection": "Keep-Alive",
    }
    async with session.get(endpoint, headers=request_headers) as response:
        payload = await response.json()
    matches = payload["results"]
    return matches[0] if matches else None
def in_postbirth_group(contact):
    """Return True when any of the contact's group names contains "post"
    (case-insensitive), i.e. the contact already looks postbirth."""
    return any("post" in group["name"].lower() for group in contact["groups"])
def get_contact_msisdn(contact):
    """Return the contact's WhatsApp number as "+<address>", or None when
    no URN mentions whatsapp.  The address is the second colon-separated
    field of the URN (e.g. "whatsapp:27820001111" -> "+27820001111")."""
    matching = (u for u in contact["urns"] if "whatsapp" in u)
    for urn in matching:
        return "+" + urn.split(":")[1]
    return None
def get_baby_dob_field(fields):
    """Return the name of the first unset baby_dob{1..3} slot in `fields`
    (empty string / None count as unset), or None when all three are
    already populated."""
    for slot in ("baby_dob1", "baby_dob2", "baby_dob3"):
        if not fields[slot]:
            return slot
    return None
def get_babyswitches(conn):
    """Return {contact_id: timestamp} for eventstore_babyswitch rows.

    Rows arrive ordered by timestamp ascending, so later rows overwrite
    earlier ones and the dict keeps each contact's most recent switch.
    """
    babyswitches = {}
    # Named cursor => psycopg2 server-side cursor, so the table streams
    # instead of being loaded into client memory at once.
    cursor = conn.cursor("baby_switches")
    print("Fetching Baby Switches...")
    cursor.execute(
        f"""
        select contact_id, timestamp
        from eventstore_babyswitch
        order by timestamp asc
        limit {LIMIT}
        """
    ) # 158680
    total = 0
    start, d_print = time.time(), time.time()
    for (contact_id, timestamp) in cursor:
        babyswitches[contact_id] = timestamp
        # Throttle progress output to roughly once per second.
        if time.time() - d_print > 1:
            print(
                f"\rFetched {total} babyswitches at "
                f"{total/(time.time() - start):.0f}/s",
                end="",
            )
            d_print = time.time()
        total += 1
    print(f"\nFetched {total} babyswitches in {time.time() - start:.0f}s")
    print("-------------------------------------------")
    return babyswitches
def get_optouts(conn):
    """Return {contact_id: timestamp} for eventstore_optout rows.

    Ascending timestamp order means the dict keeps each contact's most
    recent opt-out.
    """
    optouts = {}
    print("Fetching Optouts...")
    # Server-side (named) cursor: streams rows instead of loading them all.
    cursor = conn.cursor("optouts")
    cursor.execute(
        f"""
        select contact_id, timestamp
        from eventstore_optout
        order by timestamp asc
        limit {LIMIT}
        """
    ) # 255855
    total = 0
    start, d_print = time.time(), time.time()
    for (contact_id, timestamp) in cursor:
        optouts[contact_id] = timestamp
        # Progress output at most about once per second.
        if time.time() - d_print > 1:
            print(
                f"\rFetched {total} optouts at " f"{total/(time.time() - start):.0f}/s",
                end="",
            )
            d_print = time.time()
        total += 1
    print(f"\nFetched {total} optouts in {time.time() - start:.0f}s")
    print("-------------------------------------------")
    return optouts
def get_registrations(conn, babyswitches, optouts):
    """Return contact_ids of prebirth registrations (edd before 2021-04-20)
    that are still actionable.

    A registration is skipped when the contact has a babyswitch or optout
    whose timestamp is LATER than the registration — i.e. the event already
    superseded it.
    """
    registrations = []
    print("Fetching Prebirth Registrations...")
    # Server-side (named) cursor: streams rows instead of loading them all.
    cursor = conn.cursor("prebirth_registrations")
    cursor.execute(
        f"""
        select contact_id, timestamp
        from eventstore_prebirthregistration
        where edd < '2021-04-20'
        order by timestamp asc
        limit {LIMIT}
        """
    ) # 216808
    total = 0
    start, d_print = time.time(), time.time()
    for (contact_id, timestamp) in cursor:
        if contact_id in babyswitches and timestamp < babyswitches[contact_id]:
            continue
        if contact_id in optouts and timestamp < optouts[contact_id]:
            continue
        registrations.append(contact_id)
        # Progress output at most about once per second.
        if time.time() - d_print > 1:
            print(
                f"\rFetched {total} registrations at "
                f"{total/(time.time() - start):.0f}/s",
                end="",
            )
            d_print = time.time()
        total += 1
    print(f"\nFetched {total} registrations in {time.time() - start:.0f}s")
    print("-------------------------------------------")
    return registrations
async def process_registration(session, contact_id, hub_writer, rp_writer):
    """Process one registration: fetch the contact from RapidPro, decide
    whether it needs a baby switch, and emit CSV rows.

    Excluded contacts: already in a postbirth group, no WhatsApp msisdn,
    preferred_channel != "WhatsApp", or an unparseable edd field.
    Updates the module-level total/excluded progress counters.
    """
    global total
    global excluded
    global d_print
    global start
    total += 1
    contact = await get_rapidpro_contact(session, contact_id)
    if contact:
        msisdn = get_contact_msisdn(contact)
        in_group = in_postbirth_group(contact)
        if (
            in_group
            or not msisdn
            or contact["fields"].get("preferred_channel") != "WhatsApp"
        ):
            excluded += 1
        else:
            baby_dob_field = get_baby_dob_field(contact["fields"])
            # Strip a trailing "Z" so fromisoformat() can parse the edd.
            edd = str(contact["fields"]["edd"]).replace("Z", "")
            try:
                # Baby's dob is assumed to be edd + 14 days.
                dob = datetime.fromisoformat(edd) + timedelta(days=14)
            except (TypeError, ValueError):
                excluded += 1
                return
            rp_writer.writerow(
                {
                    "contact_id": contact_id,
                    "baby_dob_field": baby_dob_field,
                    "baby_dob": dob.isoformat(),
                }
            )
            # write to csv for jembi and hub
            hub_writer.writerow(
                {
                    "contact_id": contact_id,
                    "msisdn": msisdn,
                    "timestamp": datetime.now().isoformat(),
                }
            )
    # Progress output at most about once per second.
    if time.time() - d_print > 1:
        print(
            f"\rProcessed {total}({excluded}) registrations at "
            f"{total/(time.time() - start):.0f}/s",
            end="",
        )
        d_print = time.time()
async def bounded_process_registration(session, contact_id, hub_writer, rp_writer, sem):
    """Semaphore-bounded wrapper limiting concurrent RapidPro requests."""
    async with sem:
        await process_registration(session, contact_id, hub_writer, rp_writer)
async def process_registrations(registrations):
    """Fan out one bounded task per registration, writing results to the
    hub and RapidPro CSV files; concurrency is capped at CONCURRENCY."""
    global total
    global start
    sema = asyncio.Semaphore(CONCURRENCY)
    print("Processing Registrations...")
    with open(HUB_OUTPUT_FILE, "w", newline="") as hub_target, open(
        RAPIDPRO_OUTPUT_FILE, "w", newline=""
    ) as rp_target:
        hub_writer = csv.DictWriter(
            hub_target, fieldnames=["contact_id", "msisdn", "timestamp"]
        )
        hub_writer.writeheader()
        rp_writer = csv.DictWriter(
            rp_target, fieldnames=["contact_id", "baby_dob_field", "baby_dob"]
        )
        rp_writer.writeheader()
        # TCPConnector limit matches the semaphore bound.
        connector = aiohttp.TCPConnector(limit=CONCURRENCY)
        async with aiohttp.ClientSession(connector=connector) as session:
            tasks = []
            for contact_id in registrations:
                tasks.append(
                    bounded_process_registration(
                        session, contact_id, hub_writer, rp_writer, sema
                    )
                )
            await asyncio.gather(*tasks)
    print(f"\nProcessed {total} registrations in {time.time() - start:.0f}s")
if __name__ == "__main__":
    # Connect to the hub database.  The password comes from the HUB_PASS
    # environment variable via the module-level HUB_DB_PASSWORD constant;
    # the original line contained a redacted "<PASSWORD>" placeholder,
    # which is a syntax error.
    conn = psycopg2.connect(
        dbname="hub", user="hub", password=HUB_DB_PASSWORD, host="localhost", port=7000
    )
    babyswitches = get_babyswitches(conn)
    optouts = get_optouts(conn)
    registrations = get_registrations(conn, babyswitches, optouts)
    asyncio.run(process_registrations(registrations))
| [
1,
1053,
408,
948,
3934,
13,
5215,
11799,
13,
5215,
2897,
13,
5215,
931,
13,
3166,
12865,
1053,
12865,
29892,
5335,
287,
2554,
13,
13,
5215,
263,
601,
1124,
13,
5215,
6529,
29891,
9708,
29887,
29906,
13,
3166,
4832,
29889,
13529,
267,... |
packer/python/packerTest1.py | RalphBrynard/sophos_attack_range | 0 | 175864 | from packerlicious import builder, provisioner, Template
template = Template()
template.add_builder(
builder.vmware-iso(
)
)
{
"description": "Packer Windows Server 2016 build template file.",
"_comment": "Template file provides framework for subsequent packer builds.",
"variables": {
"os-iso-path": "[nfsdatastore01] os/microsoft/server/2016/windows2016_noprompt.iso"
},
"builders": [
{
"type": "vsphere-iso",
"vcenter_server": "{{user `vsphere-server`}}",
"username": "{{user `vsphere-user`}}",
"password": "{{user `vsphere-password`}}",
"datacenter": "{{user `vsphere-datacenter`}}",
"cluster": "{{user `vsphere-cluster`}}",
"datastore": "{{user `vsphere-datastore`}}",
"folder": "{{user `vsphere-folder`}}",
"insecure_connection": "{{user `insecure-connection`}}",
"communicator": "winrm",
"winrm_username": "Administrator",
"winrm_password": "<PASSWORD>",
"winrm_timeout": "15m",
"convert_to_template": "true",
"vm_name": "windows2016.gold",
"guest_os_type": "windows9Server64Guest",
"CPUs": 2,
"RAM": 4096,
"RAM_reserve_all": true,
"firmware": "efi",
"cdrom_type": "sata",
"storage": [
{
"disk_size": 61440,
"disk_thin_provisioned": true
}
],
"disk_controller_type": "pvscsi",
"network_adapters": [
{
"network": "{{user `vsphere-network`}}",
"network_card": "vmxnet3"
}
],
"notes": "{{user `vm-notes`}}",
"iso_paths": [
"{{user `os-iso-path`}}",
"[] /vmimages/tools-isoimages/windows.iso"
],
"floppy_files": [
"server_standard/autounattend.xml",
"../drivers/pvscsi-win8/pvscsi.cat",
"../drivers/pvscsi-win8/pvscsi.inf",
"../drivers/pvscsi-win8/pvscsi.sys",
"../drivers/pvscsi-win8/txtsetup.oem",
"../scripts/2016/00-vmtools.ps1",
"../scripts/2016/01-initialize.ps1",
"../scripts/2016/03-systemsettings.ps1",
"../scripts/2016/95-enablerdp.ps1"
],
"shutdown_command": "shutdown /f /s /t 10 /d p:4:1 /c \"Packer Complete\""
}
],
"provisioners": [
{
"type": "powershell",
"scripts": [
"../scripts/2016/03-systemsettings.ps1",
"../scripts/2016/95-enablerdp.ps1"
]
},
{
"type": "windows-restart",
"restart_timeout": "30m"
},
{
"type": "windows-update",
"search_criteria": "IsInstalled=0",
"filters": [
"exclude:$_.Title -like '*Preview*'",
"include:$true"
]
}
]
} | [
1,
515,
4870,
261,
506,
2738,
1053,
12856,
29892,
25161,
261,
29892,
25663,
13,
6886,
353,
25663,
580,
13,
6886,
29889,
1202,
29918,
16409,
29898,
13,
1678,
12856,
29889,
6925,
2519,
29899,
10718,
29898,
13,
308,
13,
1678,
1723,
13,
298... |
.travis/tests.py | frogunder/salt-winrepo-ng | 0 | 184897 | <gh_stars>0
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
import getopt
import git
import pycurl as curl
import sys
import traceback
import yaml, glob, os
from jinja2 import Template
from pprint import pprint
from StringIO import StringIO
from urlparse import urlparse
# Python-2-only hack: re-expose sys.setdefaultencoding and force UTF-8 as
# the implicit str/unicode conversion codec.
reload(sys)
sys.setdefaultencoding('utf8')
# Overall result flag; process_each() flips it to False on any 404 URL.
test_status = True
head = git.Repo(".").commit("HEAD")
# .sls files touched by the latest commit (checked in --travis mode).
changed = [i for i in head.stats.files.keys() if '.sls' in i]
def printd(message=None, extra_debug_data=None):
    """Write `message` and a pretty-printed payload to stderr when the
    module-level `debug` flag is truthy.

    Always returns None, and silently does nothing when `debug` is falsy,
    not yet defined, or writing to stderr raises.
    """
    global debug
    try:
        if not debug:
            return None
        sys.stderr.write(message)
        sys.stderr.write("\t--\t")
        pprint(extra_debug_data, stream=sys.stderr)
        return None
    except Exception:
        pass
def usage():
    # Print CLI usage help.  The long option is spelled "--help_" (trailing
    # underscore) and mirrors the flags handled in the getopt block below.
    print("""
Use either of these flags!
-h | --help_ Show this help_
-t | --travis Run in travis (ignores files that have not been changed in last commit)
-c | --cron Run in cron mode (Checks
-d | --debug Run in debug mode (Prints more info)
""")
# Parse short/long flags; an unknown flag prints usage and exits 2.
try:
    opts, args = getopt.getopt(
        sys.argv[1:], "tcdh", ["travis", "cron", "debug", "help_"])
    opts = dict(opts)
except getopt.GetoptError:
    usage()
    sys.exit(2)
travis, cron, debug, help_ = (False, False, False, False)
try:
    # NOTE(review): dict.has_key() is Python-2-only; on Python 3 these lines
    # raise AttributeError (not KeyError), which this handler does not catch.
    if opts.has_key('-t') or opts.has_key('--travis'):
        travis = True
    if opts.has_key('-c') or opts.has_key('--cron'):
        cron = True
    if opts.has_key('-d') or opts.has_key('--debug'):
        debug = True
        from pprint import pprint
        printd("ploop", {'extra':'debug', 'data': None})
    if opts.has_key('-h') or opts.has_key('--help_'):
        help_ = True
except KeyError:
    pass
printd("opts, args", (opts, args))
printd("travis, cron, debug, help_ ", (travis, cron, debug, help_))
# Operator precedence: help_ or (no opts and no args) shows usage and exits.
if help_ or len(opts) < 1 and len(args) < 1:
    usage()
    exit(0)
##################
def process_each(softwares):
    """Check every installer URL in a winrepo definition mapping.

    `softwares` maps software name -> {version -> definition dict}; entries
    with a truthy 'skip_urltest' at either level are skipped.  A 404
    response marks the whole build failed via the module-level test_status
    flag; a non-application/binary content type is reported but not fatal.
    """
    global test_status
    # pprint(softwares)
    for s, software in softwares.items():
        try:
            if software.get('skip_urltest', False):
                continue
        except KeyError:
            pass
        for v, version in software.items():
            try:
                if version.get('skip_urltest', False):
                    continue
            except KeyError:
                pass
            # Testing each non-salt URL for availability
            scheme = urlparse(version['installer']).scheme
            if scheme in ['http', 'https']:
                headers = StringIO()
                printd("version['installer']", version['installer'])
                # HEAD-style probe: NOBODY suppresses the body, redirects are
                # followed, and both connect and total timeouts are short.
                C = curl.Curl()
                C.setopt(curl.URL, version['installer'])
                C.setopt(curl.NOBODY, True)
                C.setopt(curl.FOLLOWLOCATION, True)
                C.setopt(curl.CONNECTTIMEOUT, 2)
                C.setopt(curl.TIMEOUT, 5)
                C.setopt(C.HEADERFUNCTION, headers.write)
                try:
                    C.perform()
                    # assert C.getinfo(curl.HTTP_CODE) != 404, "[ERROR]\tURL returned code 404. File Missing? "
                    http_code = C.getinfo(curl.HTTP_CODE)
                    # print(headers.getvalue().split('\r\n')[1:])
                    try:
                        # Parse the raw response headers (skipping the status
                        # line) into a dict to read Content-Type.
                        content_type = dict([tuple(l.split(': ', 1)) for l in headers.getvalue().split('\r\n')[1:] if ':' in l])['Content-Type']
                    except:
                        content_type = 'None/None'
                    printd("content_type:", content_type)
                    if http_code == 404:
                        # This build is failing !
                        print("PROBLEM HERE (404) : %s -- %s -- %s " % (s, v, version['installer']))
                        test_status = False
                    if 'application/' not in content_type and 'binary/' not in content_type:
                        print("PROBLEM HERE (Bad content type) : %s -- %s -- %s -- %s " % (s, v, version['installer'], content_type))
                        # print(headers.getvalue().split())
                    else:
                        print("VALID : %s" % version['installer'])
                except curl.error as e:
                    # NOTE(review): tuple-unpacking an exception instance is
                    # Python-2 style; Python 3 would need e.args.
                    errno, errstr = e
                    printd("errno, errstr", (errno, errstr))
                    if errno == 28:
                        print('[ERROR]\tConnection timeout or no server | '
                              'errno: ' + str(errno) + ' | ' + errstr)
                        pass
                C.close()
# In travis mode only files changed by the last commit are checked;
# otherwise every .sls file in the working directory is.
if travis:
    our_files = changed
else:
    our_files = glob.glob('*.sls')
for cpuarch in ['AMD64', 'x86']:
    try:
        print("--------(arch: %s)--------" % cpuarch)
        if len(our_files) == 0:
            print("No files to check. No problem.")
            continue
        for file in our_files:
            try:
                print("---( " + file + " )---")
                with open(file, 'r') as stream:
                    template = stream.read()
                # Render the Jinja template as salt would (only the cpuarch
                # grain is populated), then YAML-parse and URL-check it.
                t = Template(template)
                yml = t.render(grains={'cpuarch': cpuarch})
                data = yaml.load(yml, Loader=yaml.FullLoader)
                process_each(data)
            except Exception:
                exc = sys.exc_info()[0]
                print("[EXCEPTION] " + str(exc))
                traceback.print_exc()
                pass
        print("-" * 80)
    except Exception:
        exc = sys.exc_info()[0]
        print("[EXCEPTION] " + str(exc))
        traceback.print_exc()
        pass
# Fail the CI build if any URL came back 404.
assert test_status, "BUILD FAILING. You can grep for 'PROBLEM HERE' to find " \
                    "out how to fix this."
print("Everything went smoothly. No errors were found. Happy deployment!")
| [
1,
529,
12443,
29918,
303,
1503,
29958,
29900,
13,
29937,
14708,
4855,
29914,
2109,
29914,
6272,
3017,
29906,
29889,
29955,
13,
29937,
448,
29930,
29899,
14137,
29901,
23616,
29899,
29947,
448,
29930,
29899,
13,
13,
5215,
679,
3670,
13,
5... |
tools/pca_outcore.py | escorciav/deep-action-proposals | 28 | 16618 | <filename>tools/pca_outcore.py
#!/usr/bin/env python
"""
PCA done via matrix multiplication out-of-core.
"""
import argparse
import time
import h5py
import hickle as hkl
import numpy as np
def input_parse():
    """Build the CLI parser: input HDF5 dataset, output PCA file and an
    optional progress-logging frequency."""
    description = 'Compute PCA with A.T * A computation out of core'
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('dsfile', help='HDF5-file with features')
    parser.add_argument('pcafile', help='HDF5-file with PCA results')
    parser.add_argument('-ll', '--log_loop', default=500, type=int,
                        help='Verbose in terms of number of videos')
    return parser
def main(dsfile, pcafile, t_size=16, t_stride=8, source='c3d_features',
         log_loop=100):
    """Out-of-core PCA: accumulate the centered Gram matrix A.T*A one video
    at a time, SVD it in memory, and dump mean/U/S to `pcafile`.

    NOTE: Python 2 only (print statements, dict.iteritems).  t_size and
    t_stride are accepted but never used in this function body.
    """
    print time.ctime(), 'start: loading hdf5'
    fid = h5py.File(dsfile, 'r')
    video_names = fid.keys()
    # Feature dimensionality taken from the first video's feature matrix.
    feat_dim = fid[video_names[0]][source].shape[1]
    print time.ctime(), 'finish: loading hdf5'
    print time.ctime(), 'start: compute mean'
    # First pass over all videos: global feature mean.
    x_mean, n = np.zeros((1, feat_dim), dtype=np.float32), 0
    for i, v in fid.iteritems():
        feat = v[source][:]
        n += feat.shape[0]
        x_mean += feat.sum(axis=0)
    x_mean /= n
    print time.ctime(), 'finish: compute mean'
    # NOTE(review): compute_ATA is defined but never called — the loop below
    # re-implements the same accumulation inline.
    def compute_ATA(chunk, f=fid, source=source, mean=x_mean):
        feat_dim = f[chunk[0]][source].shape[1]
        ATA_c = np.zeros((feat_dim, feat_dim), dtype=np.float32)
        for i in chunk:
            feat_c = f[i][source][:]
            feat_c_ = feat_c - mean
            ATA_c += np.dot(feat_c_.T, feat_c_)
        return ATA_c
    print time.ctime(), 'start: out-of-core matrix multiplication'
    # Second pass: accumulate the centered Gram matrix video by video.
    j, n_videos = 0, len(video_names)
    ATA = np.zeros((feat_dim, feat_dim), dtype=np.float32)
    for i, v in fid.iteritems():
        feat = v[source][:]
        feat_ = feat - x_mean
        ATA += np.dot(feat_.T, feat_)
        j += 1
        if j % log_loop == 0:
            print time.ctime(), 'Iteration {}/{}'.format(j, n_videos)
    print time.ctime(), 'finish: out-of-core matrix multiplication'
    # SVD
    print time.ctime(), 'start: SVD in memory'
    U, S, _ = np.linalg.svd(ATA)
    print time.ctime(), 'finish: SVD in memory'
    print time.ctime(), 'serializing ...'
    hkl.dump({'x_mean': x_mean, 'U': U, 'S': S, 'n_samples': n}, pcafile)
if __name__ == '__main__':
    # Parse CLI arguments and forward them as keyword arguments to main().
    p = input_parse()
    args = p.parse_args()
    main(**vars(args))
| [
1,
529,
9507,
29958,
8504,
29914,
29886,
1113,
29918,
449,
3221,
29889,
2272,
13,
29937,
14708,
4855,
29914,
2109,
29914,
6272,
3017,
13,
15945,
29908,
13,
13,
29925,
5454,
2309,
3025,
4636,
21666,
714,
29899,
974,
29899,
3221,
29889,
13,... |
src/response.py | technomunk/bottica | 2 | 179123 | # Response messages.
import random
from discord.ext import commands
# Reply pools; one entry is chosen uniformly at random per reply.
SUCCESSES = (
    "Success",
    "Done",
    ":100:",
    ":ok:",
    ":smile::+1:",
)
FAILS = (
    "Fail",
    "Bump",
    "Poop",
    ":poop:",
    ":frowning::-1:",
)
# Replies for when the bot gets jealous of another mention.
JEALOUS = (
    "Who's this?",
    "Who is that?",
    "What are they doing here?",
    "Are you cheating on me?",
    "But what am I to you?",
)
# Emoji reactions keyed by event kind; list values are pools to pick from.
REACTIONS = {
    "command_seen": "👀",
    "command_failed": "❌",
    "command_succeeded": "✅",
    "mention": ["💋", "👧", "🤖", "🕵️♀️", "👩💻", "🤹♀️"],
    "jealousy": ["🌭", "🕵️♀️", "🤡", "💩", "💢"],
}
# Seconds before bot replies auto-delete.
DEFAULT_TIMEOUT = 10
async def random_fail(ctx: commands.Context):
    """Reply with a randomly chosen failure message; the reply self-deletes
    after DEFAULT_TIMEOUT seconds."""
    message = random.choice(FAILS)
    return await ctx.reply(message, delete_after=DEFAULT_TIMEOUT)
async def random_success(ctx: commands.Context):
    """Reply with a randomly chosen success message; the reply self-deletes
    after DEFAULT_TIMEOUT seconds."""
    message = random.choice(SUCCESSES)
    return await ctx.reply(message, delete_after=DEFAULT_TIMEOUT)
| [
1,
396,
13291,
7191,
29889,
13,
13,
5215,
4036,
13,
13,
3166,
2313,
536,
29889,
1062,
1053,
8260,
13,
13,
14605,
4174,
2890,
1660,
29903,
353,
313,
13,
1678,
376,
14191,
613,
13,
1678,
376,
25632,
613,
13,
1678,
29242,
29896,
29900,
... |
test/test.py | bciar/ppp-web | 2 | 1696 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Unit tests."""
import os
import unittest
from copy import copy
from webui.app import create_app
class TestRoutes(unittest.TestCase):
"""Test routes."""
ignore_routes = ('/static/<path:filename>',)
ignore_end_patterns = ('>',)
def setUp(self):
"""Set up: Put Flask app in test mode."""
app = create_app()
self.initial_app = copy(app)
app.testing = True
self.app = app.test_client()
@staticmethod
def valid_route(route):
"""Validate route.
Args:
route (str): Route url pattern.
Returns:
bool: True if valid, else False.
"""
if route in TestRoutes.ignore_routes \
or route.endswith(TestRoutes.ignore_end_patterns):
return False
return True
def test_routes(self):
"""Smoke test routes to ensure no runtime errors.."""
routes = [route.rule for route in self.initial_app.url_map.iter_rules()
if self.valid_route(route.rule)]
for route in routes:
self.app.get(route)
if __name__ == '__main__':
    # Run doctests and unit tests together, relative to this file's folder.
    from test.utils.doctest_unittest_runner import doctest_unittest_runner
    TEST_DIR = os.path.dirname(os.path.realpath(__file__)) + '/'
    doctest_unittest_runner(test_dir=TEST_DIR, relative_path_to_root='../',
                            package_names=['webui', 'test'])
| [
1,
18787,
4855,
29914,
2109,
29914,
6272,
3017,
29941,
13,
29937,
448,
29930,
29899,
14137,
29901,
23616,
29899,
29947,
448,
29930,
29899,
13,
15945,
29908,
8325,
6987,
1213,
15945,
13,
5215,
2897,
13,
5215,
443,
27958,
13,
3166,
3509,
10... |
dcgan/utils/train.py | Ontheway361/ak47GAN | 0 | 30840 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import time
import torch
import numpy as np
import torchvision
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from utils import *
from IPython import embed
class DCGAN(object):
def __init__(self, args):
self.args = args
self.model = dict()
self.data = dict()
self.rescache = dict()
self.device = args.use_gpu and torch.cuda.is_available()
def _report_settings(self):
''' Report the settings '''
str = '-' * 16
print('%sEnvironment Versions%s' % (str, str))
print("- Python : {}".format(sys.version.strip().split('|')[0]))
print("- PyTorch : {}".format(torch.__version__))
print("- TorchVison: {}".format(torchvision.__version__))
print("- USE_GPU : {}".format(self.device))
print('-' * 52)
def _model_loader(self):
self.model['generator'] = Generator(self.args.in_dim, self.args.gchannels)
self.model['discriminator'] = Discriminator(self.args.dchannels)
self.model['criterion'] = nn.BCELoss()
self.model['opti_gene'] = optim.Adam(self.model['generator'].parameters(), \
lr=self.args.base_lr, betas=(self.args.beta, 0.999))
self.model['opti_disc'] = optim.Adam(self.model['discriminator'].parameters(), \
lr=self.args.base_lr, betas=(self.args.beta, 0.999))
# self.model['scheduler'] = torch.optim.lr_scheduler.MultiStepLR(
# self.model['optimizer'], milestones=[12, 20, 30, 45], gamma=self.args.gamma)
if self.device:
self.model['generator'] = self.model['generator'].cuda()
self.model['discriminator'] = self.model['discriminator'].cuda()
if len(self.args.gpu_ids) > 1:
self.model['generator'] = torch.nn.DataParallel(self.model['generator'], device_ids=self.args.gpu_ids)
self.model['discriminator'] = torch.nn.DataParallel(self.model['discriminator'], device_ids=self.args.gpu_ids)
torch.backends.cudnn.benchmark = True
print('Parallel mode was going ...')
else:
print('Single-gpu mode was going ...')
else:
print('CPU mode was going ...')
if len(self.args.resume) > 2:
checkpoint = torch.load(self.args.resume, map_location=lambda storage, loc: storage)
self.args.start = checkpoint['epoch']
self.model['generator'].load_state_dict(checkpoint['generator'])
self.model['discriminator'].load_state_dict(checkpoint['discriminator'])
print('Resuming the train process at %3d epoches ...' % self.args.start)
print('Model loading was finished ...')
def _data_loader(self):
self.data['train_loader'] = DataLoader(
CelebA(args=self.args),
batch_size = self.args.batch_size, \
shuffle = True,\
num_workers= self.args.workers)
self.data['fixed_noise'] = torch.randn(64, self.args.in_dim ,1, 1)
if self.device:
self.data['fixed_noise'] = self.data['fixed_noise'].cuda()
self.rescache['gloss'] = []
self.rescache['dloss'] = []
self.rescache['fake'] = []
print('Data loading was finished ...')
def _model_train(self, epoch = 0):
total_dloss, total_gloss = 0, 0
for idx, imgs in enumerate(self.data['train_loader']):
# update discriminator
self.model['discriminator'].train()
self.model['generator'].eval()
imgs.requires_grad = False
if self.device:
imgs = imgs.cuda()
b_size = imgs.size(0)
self.model['discriminator'].zero_grad()
gty = torch.full((b_size,), 1)
if self.device:
gty = gty.cuda()
predy = self.model['discriminator'](imgs).view(-1)
dloss_real = self.model['criterion'](predy, gty)
dloss_real.backward()
noise = torch.randn(b_size, self.args.in_dim, 1, 1)
if self.device:
noise = noise.cuda()
fake = self.model['generator'](noise)
gty.fill_(0) # TODO
predy = self.model['discriminator'](fake.detach()).view(-1)
dloss_fake = self.model['criterion'](predy, gty)
dloss_fake.backward()
self.model['opti_disc'].step()
d_loss_real = dloss_real.mean().item()
d_loss_fake = dloss_fake.mean().item()
d_loss = d_loss_real + d_loss_fake
self.rescache['dloss'].append(d_loss)
total_dloss += d_loss
# update generator
self.model['generator'].train()
self.model['discriminator'].eval()
self.model['generator'].zero_grad()
gty.fill_(1) # TODO
predy = self.model['discriminator'](fake).view(-1)
gloss = self.model['criterion'](predy, gty)
gloss.backward()
self.model['opti_gene'].step()
g_loss = gloss.mean().item()
self.rescache['gloss'].append(g_loss)
total_gloss += g_loss
if (idx + 1) % self.args.print_freq == 0:
print('epoch : %2d|%2d, iter : %4d|%4d, dloss : %.4f, gloss : %.4f' % \
(epoch, self.args.epoches, idx+1, len(self.data['train_loader']), \
np.mean(self.rescache['dloss']), np.mean(self.rescache['gloss'])))
if (idx + 1) % self.args.monitor_freq == 0:
with torch.no_grad():
self.model['generator'].eval()
fake = self.model['generator'](self.data['fixed_noise']).detach().cpu()
self.rescache['fake'].append(fake)
return total_dloss, total_gloss
def _main_loop(self):
min_loss = 1e3
for epoch in range(self.args.start, self.args.epoches + 1):
start_time = time.time()
dloss, gloss = self._model_train(epoch)
train_loss = dloss + gloss
# self.model['scheduler'].step()
end_time = time.time()
print('Single epoch cost time : %.2f mins' % ((end_time - start_time)/60))
if not os.path.exists(self.args.save_to):
os.mkdir(self.args.save_to)
if (min_loss > train_loss) and (not self.args.is_debug):
print('%snew SOTA was found%s' % ('*'*16, '*'*16))
min_loss = train_loss
filename = os.path.join(self.args.save_to, 'sota.pth.tar')
torch.save({
'epoch' : epoch,
'generator' : self.model['generator'].state_dict(),
'discriminator' : self.model['discriminator'].state_dict(),
'loss' : min_loss,
}, filename)
if (epoch % self.args.save_freq == 0) and (not self.args.is_debug):
filename = os.path.join(self.args.save_to, 'epoch_'+str(epoch)+'.pth.tar')
torch.save({
'epoch' : epoch,
'generator' : self.model['generator'].state_dict(),
'discriminator' : self.model['discriminator'].state_dict(),
'loss' : train_loss,
}, filename)
if self.args.is_debug:
break
def _visual_res(self):
    ''' Visual the training process: save the loss curves to loss.jpg and
    the cached fixed-noise generator samples to fake.npy. '''
    # gloss and dloss accumulated in self.rescache during training
    plt.figure(figsize=(10,5))
    plt.title("Generator and Discriminator Loss During Training")
    plt.plot(self.rescache['gloss'], label="gloss")
    plt.plot(self.rescache['dloss'], label="dloss")
    plt.xlabel("iterations")
    plt.ylabel("loss")
    plt.legend()
    plt.savefig('loss.jpg', dpi=400)
    # save the fake-images generated from the fixed noise during training
    np.save('fake.npy', self.rescache['fake'])
def train_runner(self):
    ''' Entry point: report settings, build model and data, train, then plot results. '''
    self._report_settings()
    self._model_loader()
    self._data_loader()
    self._main_loop()
    self._visual_res()
if __name__ == "__main__":
    # Build the DCGAN trainer from CLI/config arguments and run the pipeline.
    faceu = DCGAN(training_args())
    faceu.train_runner()
| [
1,
18787,
4855,
29914,
2109,
29914,
6272,
3017,
29941,
13,
29937,
448,
29930,
29899,
14137,
29901,
23616,
29899,
29947,
448,
29930,
29899,
13,
13,
5215,
2897,
13,
5215,
10876,
13,
5215,
931,
13,
5215,
4842,
305,
13,
5215,
12655,
408,
74... |
projects/migrations/0003_projectport_container_port.py | kilinger/marathon-rocketchat-hubot | 1 | 184119 | <reponame>kilinger/marathon-rocketchat-hubot
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds ProjectPort.container_port (IntegerField).
    # preserve_default=False because the default (5000) is only used to
    # back-fill existing rows during the migration.

    dependencies = [
        ('projects', '0002_auto_20160122_0305'),
    ]

    operations = [
        migrations.AddField(
            model_name='projectport',
            name='container_port',
            field=models.IntegerField(default=5000),
            preserve_default=False,
        ),
    ]
| [
1,
529,
276,
1112,
420,
29958,
16757,
5621,
29914,
3034,
25206,
29899,
20821,
3486,
271,
29899,
29882,
431,
327,
13,
29937,
448,
29930,
29899,
14137,
29901,
23616,
29899,
29947,
448,
29930,
29899,
13,
3166,
4770,
29888,
9130,
1649,
1053,
... |
test.py | sbcshop/PiRelay-8 | 2 | 4654 | <reponame>sbcshop/PiRelay-8<filename>test.py<gh_stars>1-10
from PiRelay8 import Relay
import time
# One Relay object per channel on the PiRelay 8 board (RELAY1..RELAY8).
# Replaces the original eight copy-pasted r1..r8 blocks with loops.
relays = [Relay("RELAY%d" % channel) for channel in range(1, 9)]

# Start from a known state: all relays off.
for relay in relays:
    relay.off()

# Pulse each relay in sequence: on for 0.5 s, then off for 0.5 s.
for relay in relays:
    relay.on()
    time.sleep(0.5)
    relay.off()
    time.sleep(0.5)
| [
1,
529,
276,
1112,
420,
29958,
29879,
12328,
19032,
29914,
12197,
9662,
388,
29899,
29947,
29966,
9507,
29958,
1688,
29889,
2272,
29966,
12443,
29918,
303,
1503,
29958,
29896,
29899,
29896,
29900,
13,
3166,
7362,
9662,
388,
29947,
1053,
637... |
src/median_of_medians.py | SynerClust/SynerClust | 7 | 70112 | <reponame>SynerClust/SynerClust
import math
import numpy
def pivot(l, left, right):  # right should be len(l)-1
    """
    Returns the position of the median of medians.
    Left and right values should be the start and end positions of the part of the array to use.

    l is mutated in place: the median of each 5-element group is swapped
    into the prefix [left, ...], then the median of those medians is found
    recursively.  Elements are compared by their first field (l[i][0]).
    NOTE(review): Python 2 code (xrange, integer `/` division).
    """
    if (right - left < 5):
        return partition5(l, left, right)
    for i in xrange(left, right + 1, 5):  # maybe right+1
        subRight = i + 4
        if subRight > right:
            subRight = right
        median5 = partition5(l, i, subRight)
        # Swap this group's median into position left + group_index.
        tmp = numpy.copy(l[median5])
        l[median5] = l[left + int(math.floor((i - left) / 5))]
        l[left + int(math.floor((i - left)/5))] = tmp
    return pivot(l, left, left + ((right - left) / 5))  # no ceil((right-left)/5.0-1) because /5 already takes floor
    # return select(l, left, left + ((right - left) / 5), left + (right - left) / 10)  # no ceil((right-left)/5.0-1) because /5 already takes floor
def partition5(l, left, right):
    """
    Insertion Sort of list of at most 5 elements and return the position of the median.

    Sorts l[left..right] in place, ascending by l[i][0]; rows are
    numpy-copied before shifting.  NOTE(review): when j reaches `left` the
    loop briefly writes l[left] = l[left - 1] (out of the subrange), but
    that value is immediately overwritten by l[j] = t, so the result is
    still correct.
    """
    j = left
    for i in xrange(left, right + 1):
        t = numpy.copy(l[i])
        for j in xrange(i, left - 1, -1):
            if l[j - 1][0] < t[0]:
                break
            l[j] = l[j - 1]
        l[j] = t
    return int(math.floor((left + right) / 2))
def select(l, left, right, n):
    """Quickselect: return the final index of the n-th smallest element
    (compared by l[i][0]) after partially ordering l in place.

    Bug fix: the original executed `left = pivotIndex + 1` unconditionally
    (it was not in an `else`), so after narrowing `right` on the
    n < pivotIndex branch it immediately clobbered `left` as well,
    corrupting the search bounds.
    """
    while True:
        if left == right:
            return left
        pivotIndex = pivot(l, left, right)
        pivotIndex = partition(l, left, right - 1, pivotIndex)
        if n == pivotIndex:
            return n
        elif n < pivotIndex:
            right = pivotIndex - 1
        else:
            left = pivotIndex + 1
def partition(l, left, right, pivotIndex):  # right is included
    """Lomuto-style partition of l[left..right] in place around the value at
    pivotIndex, comparing by the first field (l[i][0]).  Returns the pivot's
    final index."""
    pivotValue = numpy.copy(l[pivotIndex])
    # Move the pivot out of the way to the right end.
    l[pivotIndex] = l[right]
    l[right] = pivotValue
    storeIndex = left
    tmp = 0
    for i in xrange(left, right):
        if l[i][0] < pivotValue[0]:
            # Swap smaller elements into the growing prefix.
            tmp = l[storeIndex]
            l[storeIndex] = l[i]
            l[i] = tmp
            storeIndex += 1
    # Swap the pivot into its final position (pivotValue is a copy, so this
    # two-step assignment is a correct swap).
    l[right] = l[storeIndex]
    l[storeIndex] = pivotValue
    return storeIndex
def for2DArray(l):
    # Convenience wrapper: median-of-medians pivot over an entire 2-D array.
    return pivot(l, 0, l.shape[0]-1)
1,
529,
276,
1112,
420,
29958,
29216,
261,
6821,
504,
29914,
29216,
261,
6821,
504,
13,
5215,
5844,
13,
5215,
12655,
13,
13,
1753,
24438,
29898,
29880,
29892,
2175,
29892,
1492,
1125,
29871,
396,
1492,
881,
367,
7431,
29898,
29880,
6817... |
soluciones/df_concatenar.py | carlosviveros/Soluciones | 1 | 120260 | """AyudaEnPython: https://www.facebook.com/groups/ayudapython
TODO: add docstring and tests.
"""
import pandas as pd
from prototools import retrieve_argname, tabulate, red
# Column order for the concatenated result (Source tag first).
HEADERS = ["Source", "A", "B", "C"]

# Two small example frames to demonstrate the concatenation.
source_1 = pd.DataFrame(
    {
        "A": [1, 2, 3],
        "B": [4, 5, 6],
        "C": [7, 8, 9]
    }
)

source_2 = pd.DataFrame(
    {
        "A": [10, 11, 12],
        "B": [13, 14, 15],
        "C": [16, 17, 18]
    }
)
def concatenar(a, b):
    """Concatenate two DataFrames, tagging each row with a 'Source' column
    and reordering columns to HEADERS.

    NOTE(review): prototools.retrieve_argname presumably inspects the call
    site to recover the caller's variable name — confirm before renaming
    variables at the call site; both inputs are mutated in place.
    """
    a["Source"] = retrieve_argname(a)
    b["Source"] = retrieve_argname(b)
    df = pd.concat([a, b], ignore_index=True)
    return df.reindex(columns=HEADERS)
# Build the combined frame and render it as a red, double-bordered table.
df = concatenar(source_1, source_2)
data = df.values.tolist()

print(tabulate(
    data,
    headless=True,
    headers=HEADERS,
    border_type="double",
    title="Dataset",
    color=red,
))
1,
9995,
29909,
29891,
6191,
2369,
11980,
29901,
2045,
597,
1636,
29889,
15445,
29889,
510,
29914,
13155,
29914,
388,
566,
481,
1656,
13,
13,
4986,
3970,
29901,
788,
1574,
1807,
322,
6987,
29889,
13,
15945,
29908,
13,
5215,
11701,
408,
... |
squares.py | SrujanAakurathi/Programming-Assignments | 0 | 77537 | """
Given an integer number n, define a function named printDict() which can print a dictionary where the keys are numbers between 1 and n (both included) and the values are square of keys.
The function printDict() doesn't take any argument.
Input Format:
The first line contains the number n.
Output Format:
Print the dictionary in one line.
Example:
Input:
5
Output:
{1: 1, 2: 4, 3: 9, 4: 16, 5: 25}
NOTE: You are supposed to write the code for the function printDict() only. The function has already been called in the main part of the code.
"""
def printDict():
    """Read an integer n from stdin and print the dictionary mapping each
    key k in 1..n to its square, followed by a trailing space."""
    n = int(input())
    squares = {k: k ** 2 for k in range(1, n + 1)}
    print(squares, end = " ")
printDict()
| [
1,
9995,
13,
13,
11221,
385,
6043,
1353,
302,
29892,
4529,
263,
740,
4257,
1596,
21533,
580,
607,
508,
1596,
263,
8600,
988,
278,
6611,
526,
3694,
1546,
29871,
29896,
322,
302,
313,
20313,
5134,
29897,
322,
278,
1819,
526,
6862,
310,
... |
src/triage/component/postmodeling/contrast/utils/aux_funcs.py | silvrwolfboy/triage-1 | 0 | 1614161 | <reponame>silvrwolfboy/triage-1<filename>src/triage/component/postmodeling/contrast/utils/aux_funcs.py
'''
A4uxiliary functions and helpers:
This set of functions are helper functions to format data
(i.e., prediction matrices, etc.) for plotting. This functions
are called in both Model class and ModelGroup class in
evaluation.py.
'''
from sqlalchemy import create_engine
from sqlalchemy.sql import text
from collections import namedtuple
import yaml
# Lightweight identifier for one model: (model_group_id, model_id).
ModelEvaluator = namedtuple('ModelEvaluator',
                            ('model_group_id', 'model_id'))
def create_pgconn(credentials_yaml):
    '''
    Create a SQLAlchemy engine from credentials stored in a YAML file,
    abiding to the dssg/dsapp db user configuration.

    Arguments:
        - credentials_yaml: path to a .yaml file with db credentials
          (must provide the keys: user, password, host, port, dbname)

    Returns a SQLAlchemy Engine.  Raises on failure — the original
    swallowed every exception with a bare `except:` and then crashed with
    an unrelated NameError because `conn` was never bound.
    '''
    with open(credentials_yaml) as f:
        # safe_load: never execute arbitrary YAML tags from the config file.
        configs = yaml.safe_load(f)
    try:
        conn = create_engine(
            "postgresql://{user}:{password}@{host}:{port}/{dbname}".format(**configs))
    except Exception:
        print("Error connecting to db.")
        raise
    return conn
def get_models_ids(audited_model_group_ids, conn):
    '''
    This helper function will retrieve the model_id's from a set
    of model_group_ids and will instantiate each row into the
    ModelEvaluator namedtuple.

    Arguments:
        - audited_model_group_ids: List of model_group_ids
          (ideally from Audition's output)
        - conn: sql engine

    This function will return a list of ModelEvaluator objects
    '''
    # Parameterized query (ANY(:ids)) — no string interpolation of IDs.
    query = conn.execute(text("""
    SELECT model_group_id,
           model_id
    FROM model_metadata.models
    WHERE model_group_id = ANY(:ids);
    """), ids=audited_model_group_ids)

    return [ModelEvaluator._make(row) for row in query]
| [
1,
529,
276,
1112,
420,
29958,
25590,
13416,
29893,
4369,
19415,
29914,
3626,
482,
29899,
29896,
29966,
9507,
29958,
4351,
29914,
3626,
482,
29914,
9700,
29914,
2490,
4299,
292,
29914,
9996,
579,
29914,
13239,
29914,
2993,
29918,
7692,
2395... |
scripts/create_targets.py | pjsier/horoscope-markov | 0 | 66936 | import sys
from datetime import date, timedelta
# Zodiac sign identifiers 1..12.
SIGN_IDS = list(range(1, 13))

if __name__ == "__main__":
    # Emit one "YYYYMMDD/sign_id" line per sign per day, from 2019-01-01
    # through today (both inclusive).
    first_day = date(2019, 1, 1)
    last_day = date.today()
    current = first_day
    while current <= last_day:
        stamp = current.strftime("%Y%m%d")
        for sign_id in SIGN_IDS:
            sys.stdout.write(f"{stamp}/{sign_id}\n")
        current += timedelta(days=1)
| [
1,
1053,
10876,
13,
3166,
12865,
1053,
2635,
29892,
5335,
287,
2554,
13,
13,
5425,
20728,
29918,
1367,
29903,
353,
1051,
29898,
3881,
29898,
29896,
29892,
29871,
29896,
29941,
876,
13,
13,
13,
361,
4770,
978,
1649,
1275,
376,
1649,
3396... |
lib/python2.7/site-packages/braintree/coinbase_account.py | ervinpepic/E-commerce | 2 | 95242 | <reponame>ervinpepic/E-commerce
import braintree
from braintree.resource import Resource
class CoinbaseAccount(Resource):
    """Braintree Coinbase account payment-method resource."""

    def __init__(self, gateway, attributes):
        Resource.__init__(self, gateway, attributes)
        # NOTE(review): relies on Resource.__init__ having copied the raw
        # attributes onto self, so self.subscriptions already holds the raw
        # data when the key is present — confirm against braintree.Resource.
        # Each raw entry is wrapped in a Subscription object.
        if "subscriptions" in attributes:
            self.subscriptions = [braintree.subscription.Subscription(gateway, subscription) for subscription in self.subscriptions]
| [
1,
529,
276,
1112,
420,
29958,
261,
3845,
412,
16447,
29914,
29923,
29899,
22529,
13,
5215,
4105,
524,
929,
13,
3166,
4105,
524,
929,
29889,
10314,
1053,
18981,
13,
13,
1990,
3189,
262,
3188,
10601,
29898,
6848,
1125,
13,
1678,
822,
4... |
tests/auth_test.py | moeyensj/adam_home | 9 | 1606589 | import json
import unittest
from adam import Auth
from adam.rest_proxy import _RestProxyForTest
# This test suite is testing authentication, but authentication is not really done through this
# API. Authentication (i.e. adding authentication credentials to HTTP requests) is handled in
# AuthenticatingRestProxy. Keeping this class here for now, but the test should be rewritten for
# real use cases (e.g. just testing that the auth.Auth() class is doing the right thing).
class AuthTest(unittest.TestCase):
    """Unit tests for auth module

    Exercises Auth.authenticate() against a fake REST proxy, covering
    success, rejection, empty-token and server-error responses.
    """

    def test_successful_authentication(self):
        """A 200 /me response should populate user and logged-in state."""
        # Use REST proxy for testing
        rest = _RestProxyForTest()
        auth = Auth(rest)

        # Before authenticating, auth should reflect not logged in.
        self.assertEqual(auth.get_user(), '')
        self.assertEqual(auth.get_logged_in(), False)

        # A successful authentication should store token and set user to returned value.
        rest.expect_get('/me', 200,
                        {'email': '<EMAIL>', 'loggedIn': True})
        auth.authenticate()
        self.assertEqual(auth.get_user(), '<EMAIL>')
        self.assertEqual(auth.get_logged_in(), True)

    def test_unsuccessful_authentication(self):
        """A 503 rejection should clear any previously stored identity."""
        # Use REST proxy for testing
        rest = _RestProxyForTest()
        auth = Auth(rest)

        # Authenticate in order to fill in email/logged_in so that next test
        # can verify that these are cleared.
        rest.expect_get('/me', 200,
                        {'email': '<EMAIL>', 'loggedIn': True})
        auth.authenticate()

        # An unsuccessful authentication should clear token and other values.

        # An example of the few ways that the server might reject a user. Others look
        # like this with different messages.
        server_error_on_bad_token = """
            {
                "error": {
                    "errors": [
                        {
                            "domain": "global",
                            "reason": "backendError",
                            "message": "org.apache.shiro.authc.IncorrectCredentialsException"
                        }
                    ],
                    "code": 503,
                    "message": "org.apache.shiro.authc.IncorrectCredentialsException"
                }
            }
        """
        rest.expect_get('/me', 503,
                        json.loads(server_error_on_bad_token))
        auth.authenticate()
        self.assertEqual(auth.get_user(), '')
        self.assertEqual(auth.get_logged_in(), False)

    def test_authentication_empty_token(self):
        """A 200 response with loggedIn=False should yield an empty auth object."""
        # Use REST proxy for testing
        rest = _RestProxyForTest()
        auth = Auth(rest)

        # Authenticate in order to fill in email/logged_in so that next test
        # can verify that these are cleared.
        rest.expect_get('/me', 200,
                        {'email': '<EMAIL>', 'loggedIn': True})
        auth.authenticate()

        # Authentication with an empty token should be no problem and result in an empty
        # auth object.
        rest.expect_get('/me', 200, {"loggedIn": False})
        auth.authenticate()
        self.assertEqual(auth.get_user(), '')
        self.assertEqual(auth.get_logged_in(), False)

    def test_authentication_server_error(self):
        """A non-200 response should raise and leave auth state unchanged."""
        # Use REST proxy for testing
        rest = _RestProxyForTest()
        auth = Auth(rest)

        # Authenticate in order to fill in email/logged_in/token so that next test
        # can verify that these are not cleared.
        rest.expect_get('/me', 200,
                        {'email': '<EMAIL>', 'loggedIn': True})
        auth.authenticate()

        # Authentication should throw on a non-200 response and leave auth contents
        # unchanged.
        rest.expect_get('/me', 404, {})
        with self.assertRaises(RuntimeError):
            auth.authenticate()
        self.assertEqual(auth.get_user(), '<EMAIL>')
        self.assertEqual(auth.get_logged_in(), True)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| [
1,
1053,
4390,
13,
5215,
443,
27958,
13,
13,
3166,
594,
314,
1053,
13189,
13,
3166,
594,
314,
29889,
5060,
29918,
14701,
1053,
903,
15078,
14048,
2831,
3057,
13,
13,
13,
29937,
910,
1243,
9460,
338,
6724,
10760,
29892,
541,
10760,
338... |
examples/simple_email_pattern.py | Mattlk13/python-cybox | 40 | 161095 | #!/usr/bin/env python
# Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
"""Creates the CybOX content for CybOX_Simple_Email_Pattern.xml
"""
from cybox.core import Observables
from cybox.objects.email_message_object import EmailMessage
def main():
    """Build an EmailMessage observable pattern (sender equal to any of a
    fixed set of addresses, exact subject) and print it as CybOX XML."""
    m = EmailMessage()
    # The pattern matches if the sender equals any one of these addresses.
    m.from_ = ["<EMAIL>",
               "<EMAIL>",
               "<EMAIL>"]
    m.from_.condition = "Equals"
    m.subject = "New modifications to the specification"
    m.subject.condition = "Equals"

    print(Observables(m).to_xml(encoding=None))
if __name__ == "__main__":
    # Script entry point.
    main()
| [
1,
18787,
4855,
29914,
2109,
29914,
6272,
3017,
13,
13,
29937,
14187,
1266,
313,
29883,
29897,
29871,
29906,
29900,
29896,
29955,
29892,
450,
341,
1806,
1525,
15025,
29889,
2178,
10462,
21676,
29889,
13,
29937,
2823,
365,
2965,
1430,
1660,
... |
notebooks/_solutions/05-spatial-operations-overlays11.py | jorisvandenbossche/DS-python-geospatial | 58 | 36103 | # Calculate the intersection of the land use polygons with Muette
land_use_muette = land_use.geometry.intersection(muette) | [
1,
396,
20535,
403,
278,
17686,
310,
278,
2982,
671,
1248,
4790,
787,
411,
8229,
2353,
13,
1049,
29918,
1509,
29918,
2589,
2353,
353,
2982,
29918,
1509,
29889,
19156,
29889,
1639,
2042,
29898,
2589,
2353,
29897,
2
] |
scorecard/views.py | desafinadude/municipal-data | 0 | 1612110 | from django.shortcuts import redirect
from django.views.generic.base import TemplateView
from django.http import Http404, HttpResponse
from django.urls import reverse
from scorecard.profiles import get_profile
from scorecard.models import Geography, LocationNotFound
from infrastructure.models import Project
from household.models import HouseholdServiceTotal, HouseholdBillTotal
from household.chart import stack_chart, chart_data, percent_increase, yearly_percent
import json
from . import models
import municipal_finance
from . import serializers
from rest_framework import viewsets
import subprocess
from django.conf import settings
class GeographyViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only API endpoint exposing all Geography records."""
    queryset = models.Geography.objects.all()
    serializer_class = serializers.GeographySerializer
class MunicipalityProfileViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only API endpoint exposing all MunicipalityProfile records."""
    queryset = municipal_finance.models.MunicipalityProfile.objects.all()
    serializer_class = serializers.MunicipalityProfileSerializer
def infra_dict(project):
    """Summarise an infrastructure Project for the profile page.

    NOTE(review): uses expenditure.first() — assumes the caller has already
    filtered expenditure to one budget phase/financial year; otherwise the
    amount shown is whichever expenditure row happens to come first.
    """
    return {
        "description": project.project_description,
        "expenditure_amount": project.expenditure.first().amount,
        "url": reverse('project-detail-view', args=[project.id]),
    }
class LocateView(TemplateView):
    """Redirects ?lat=&lon= query params to the matching geography profile;
    otherwise renders the locate page with a "nope" (not found) flag."""
    template_name = "webflow/locate.html"

    def get(self, request, *args, **kwargs):
        self.lat = self.request.GET.get("lat", None)
        self.lon = self.request.GET.get("lon", None)
        self.nope = False

        if self.lat and self.lon:
            place = None
            places = Geography.get_locations_from_coords(
                latitude=self.lat, longitude=self.lon
            )

            if places:
                place = places[0]

                # if multiple, prefer the metro/local municipality if available
                if len(places) > 1:
                    places = [p for p in places if p.geo_level == "municipality"]
                    if places:
                        place = places[0]

                return redirect(
                    reverse("geography_detail", kwargs={"geography_id": place.geoid})
                )

            # Coordinates were given but no geography matched them.
            self.nope = True

        return super(LocateView, self).get(request, *args, **kwargs)

    def get_context_data(self, *args, **kwargs):
        # Only the "nope" flag is passed to the template as JSON.
        return {
            "page_data_json": json.dumps(
                {"nope": self.nope},
                cls=serializers.JSONEncoder,
                sort_keys=True,
                indent=4 if settings.DEBUG else None
            ),
        }
class GeographyDetailView(TemplateView):
    """Municipality profile page: assembles the finance profile,
    demarcation changes, top infrastructure projects and household-bill
    charts for one geography identified by "<level>-<code>[-<slug>]"."""
    template_name = "webflow/muni-profile.html"

    def dispatch(self, *args, **kwargs):
        """Resolve the geography from the URL and canonicalise the slug
        (permanent redirect when the URL slug is stale)."""
        self.geo_id = self.kwargs.get("geography_id", None)

        try:
            self.geo_level, self.geo_code = self.geo_id.split("-", 1)
            self.geo = Geography.find(self.geo_code, self.geo_level)
        except (ValueError, LocationNotFound):
            raise Http404

        # check slug
        # NOTE(review): if the URL pattern can omit "slug" entirely while
        # self.geo.slug is set, kwargs["slug"] raises KeyError — confirm
        # against the URLconf.
        if kwargs.get("slug") or self.geo.slug:
            if kwargs["slug"] != self.geo.slug:
                kwargs["slug"] = self.geo.slug
                url = "/profiles/%s-%s-%s/" % (
                    self.geo_level,
                    self.geo_code,
                    self.geo.slug,
                )
                return redirect(url, permanent=True)

        return super(GeographyDetailView, self).dispatch(*args, **kwargs)

    def pdf_url(self):
        """Canonical PDF download path for this profile."""
        return "/profiles/%s-%s-%s.pdf" % (
            self.geo_level,
            self.geo_code,
            self.geo.slug,
        )

    def get_context_data(self, *args, **kwargs):
        """Build the (large) JSON payload the profile template renders."""
        page_json = {}
        profile = get_profile(self.geo)
        page_json.update(profile)
        profile["geography"] = self.geo.as_dict()
        page_json["geography"] = self.geo
        page_json["pdf_url"] = self.pdf_url()

        # Expand demarcation-change geography codes into full geo dicts.
        profile["demarcation"]["disestablished_to_geos"] = [
            Geography.objects.filter(geo_code=code).first().as_dict()
            for code in profile["demarcation"].get("disestablished_to", [])
        ]
        profile["demarcation"]["established_from_geos"] = [
            Geography.objects.filter(geo_code=code).first().as_dict()
            for code in profile["demarcation"].get("established_from", [])
        ]
        for date in profile["demarcation"]["land_gained"]:
            for change in date["changes"]:
                change["geo"] = (
                    Geography.objects.filter(geo_code=change["demarcation_code"])
                    .first()
                    .as_dict()
                )
        for date in profile["demarcation"]["land_lost"]:
            for change in date["changes"]:
                change["geo"] = (
                    Geography.objects.filter(geo_code=change["demarcation_code"])
                    .first()
                    .as_dict()
                )

        # Top-5 infrastructure projects for a hard-coded financial year.
        infrastructure_financial_year = "2019/2020"
        infrastructure = (
            Project.objects.prefetch_related(
                "geography",
                "expenditure__budget_phase",
                "expenditure__financial_year",
                "expenditure",
            )
            .filter(
                geography__geo_code=self.geo_code,
                expenditure__budget_phase__name="Budget year",
                expenditure__financial_year__budget_year=infrastructure_financial_year,
            )
            .order_by("-expenditure__amount")
        )

        page_json["infrastructure_summary"] = {
            "projects": [infra_dict(p) for p in infrastructure[:5]],
            "project_count": infrastructure.count(),
            # "2019/2020"[5:9] -> "2020": display the end year only.
            "financial_year": infrastructure_financial_year[5:9]
        }

        # Household bill totals and derived chart data.
        households = HouseholdBillTotal.summary.bill_totals(self.geo_code)
        page_json["household_percent"] = percent_increase(households)
        page_json["yearly_percent"] = yearly_percent(households)
        chart = chart_data(households)
        page_json["household_chart_overall"] = chart

        # Per-income-band service totals (middle / affordable / indigent).
        service_middle = (
            HouseholdServiceTotal.summary.active(self.geo_code)
            .middle()
            .order_by("financial_year__budget_year")
        )
        service_affordable = (
            HouseholdServiceTotal.summary.active(self.geo_code)
            .affordable()
            .order_by("financial_year__budget_year")
        )
        service_indigent = (
            HouseholdServiceTotal.summary.active(self.geo_code)
            .indigent()
            .order_by("financial_year__budget_year")
        )

        chart_middle = stack_chart(service_middle, households)
        chart_affordable = stack_chart(service_affordable, households)
        chart_indigent = stack_chart(service_indigent, households)
        page_json["household_chart_middle"] = chart_middle
        page_json["household_chart_affordable"] = chart_affordable
        page_json["household_chart_indigent"] = chart_indigent

        page_context = {
            "page_data_json": json.dumps(
                page_json,
                cls=serializers.JSONEncoder,
                sort_keys=True,
                indent=4 if settings.DEBUG else None
            ),
            "page_title": f"{ self.geo.name} - Municipal Money",
            "page_description": f"Financial Performance for { self.geo.name }, and other information.",
        }

        return page_context
class GeographyPDFView(GeographyDetailView):
    """Same profile page rendered to PDF via an external node script."""

    def get(self, request, *args, **kwargs):
        # render as pdf
        path = "/profiles/%s-%s-%s?print=1" % (
            self.geo_level,
            self.geo_code,
            self.geo.slug,
        )
        url = request.build_absolute_uri(path)

        # !!! This relies on GeographyDetailView validating the user-provided
        # input to the path to avoid arbitraty command execution
        # (list-form subprocess invocation, no shell, limits the blast radius).
        command = ["node", "makepdf.js", url]
        try:
            completed_process = subprocess.run(
                command,
                check=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT
            )
        except subprocess.CalledProcessError as e:
            print(e.output)
            raise e
        filename = "%s-%s-%s.pdf" % (self.geo_level, self.geo_code, self.geo.slug)
        response = HttpResponse(completed_process.stdout, content_type='application/pdf')
        response['Content-Disposition'] = f'attachment; filename="{ filename }"'
        return response
class SitemapView(TemplateView):
    """Plain-text sitemap listing every geography profile."""
    template_name = "sitemap.txt"
    content_type = "text/plain"

    def get_context_data(self):
        return {"geos": Geography.objects.all()}
| [
1,
515,
9557,
29889,
12759,
7582,
29879,
1053,
6684,
13,
3166,
9557,
29889,
7406,
29889,
19206,
29889,
3188,
1053,
25663,
1043,
13,
3166,
9557,
29889,
1124,
1053,
9056,
29946,
29900,
29946,
29892,
9056,
5103,
13,
3166,
9557,
29889,
26045,
... |
h1theswan_utils/treefiles/treefile_utils.py | h1-the-swan/h1theswan_utils | 0 | 84308 | import pandas as pd
import numpy as np
class Treefile(object):
    """Tools for working with a treefile (.tree).

    A data row looks like:  <path> <flow> "<name>" [<node>]
    e.g.  1:2 0.0043 "some_node" 17
    """

    def __init__(self,
                 fname=None,
                 comment_char="#",
                 field_sep=" ",
                 cluster_sep=":"):
        """
        :fname: filename for the treefile
        :comment_char: lines starting with this character are skipped
        :field_sep: separator between the columns of a row
        :cluster_sep: separator between the levels of a cluster path
        """
        self.fname = fname
        self.comment_char = comment_char
        self.field_sep = field_sep
        self.cluster_sep = cluster_sep

        self.d = None                    # list of row dicts (see parse())
        self.df = None                   # pandas DataFrame of the rows
        self.top_cluster_counts = None   # Series: top-level cluster -> row count

    def parse(self, fname=None):
        """Parse the treefile into self.d.

        :fname: filename for the treefile (overrides the one given at init)
        :returns: None; self.d becomes a list of dicts, one per row
            (keys: 'path', 'flow', 'name', and 'node' when present)
        """
        if fname is not None:
            self.fname = fname
        else:
            fname = self.fname
        d = []
        with open(fname, 'r') as f:
            for line in f:
                if line[0] == self.comment_char:
                    continue
                line = line.strip().split(self.field_sep)
                this_row = {}
                this_row['path'] = line[0]
                this_row['flow'] = float(line[1])
                this_row['name'] = line[2].strip('"')
                if len(line) > 3:
                    this_row['node'] = int(line[3])
                d.append(this_row)
        self.d = d

    def load_df(self):
        """Load the treefile as a pandas dataframe (parsing first if needed).

        :returns: pandas dataframe (also stored on self.df)
        """
        if not self.d:
            self.parse()
        self.df = pd.DataFrame(self.d)
        return self.df

    def add_top_cluster_column_to_df(self, df=None):
        """Add a 'top_cluster' column (first path component) to df, in place."""
        if df is None:
            df = self.df
        if df is None:
            raise RuntimeError("df is not specified. call load_df() to load the dataframe")
        top_cluster = df['path'].apply(lambda x: x.split(self.cluster_sep)[0])
        top_cluster.name = 'top_cluster'
        df['top_cluster'] = top_cluster
        return df

    def get_top_cluster_counts(self, df=None):
        """Return a Series counting rows per top-level cluster."""
        if df is None:
            df = self.df
        if df is None:  # if it's still not there, load it (parsing the treefile if necessary)
            df = self.load_df()
        df = self.add_top_cluster_column_to_df(df=df)
        self.top_cluster_counts = df['top_cluster'].value_counts()
        return self.top_cluster_counts

    def get_nodes_for_cluster(self, cluster_name=None, df=None):
        """Get a list of the node names for one cluster.

        :cluster_name: a top-level cluster id (e.g. '1') or a path prefix
            (e.g. '1:2'); required
        :returns: list of node names
        """
        if cluster_name is None:
            raise RuntimeError("must specify cluster_name")
        # TODO: could reimplement this so it doesn't use pandas. might be more efficient
        if df is None:
            df = self.df
        if df is None:  # if it's still not there, load it (parsing the treefile if necessary)
            df = self.load_df()
        if self.cluster_sep not in str(cluster_name):
            # assume this is a top-level cluster
            if 'top_cluster' not in df.columns:
                # Bug fix: this was `add_top_cluster_column_to_df(df=df)`
                # without `self.`, which raised NameError at runtime.
                df = self.add_top_cluster_column_to_df(df=df)
            subset = df[df['top_cluster'] == cluster_name]
        else:
            # make sure the cluster separator is the last character in cluster_name
            if cluster_name[-1] != self.cluster_sep:
                cluster_name = cluster_name + self.cluster_sep
            subset = df[df['path'].str.startswith(cluster_name)]
        return subset['name'].tolist()
| [
1,
1053,
11701,
408,
10518,
13,
5215,
12655,
408,
7442,
13,
13,
1990,
15472,
1445,
29898,
3318,
1125,
13,
13,
1678,
9995,
24183,
363,
1985,
411,
263,
5447,
1445,
14544,
8336,
5513,
15945,
13,
13,
1678,
822,
4770,
2344,
12035,
1311,
29... |
tests/conftest.py | marqov/procrastinate | 0 | 44025 | <filename>tests/conftest.py<gh_stars>0
import os
import signal as stdlib_signal
from contextlib import closing
import aiopg
import psycopg2
import pytest
from psycopg2 import sql
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from procrastinate import aiopg_connector
from procrastinate import app as app_module
from procrastinate import jobs, migration, testing
# Just ensuring the tests are not polluted by environment.
# Bug fix: iterate over a snapshot — popping from os.environ while
# iterating it directly raises "RuntimeError: dictionary changed size
# during iteration" as soon as a PROCRASTINATE_* variable is present.
for key in list(os.environ):
    if key.startswith("PROCRASTINATE_"):
        os.environ.pop(key)
def _execute(cursor, query, *identifiers):
    """Run `query` after splicing in SQL identifiers via psycopg2.sql
    (safe identifier quoting instead of manual string formatting)."""
    cursor.execute(
        sql.SQL(query).format(
            *(sql.Identifier(identifier) for identifier in identifiers)
        )
    )
@pytest.fixture(scope="session")
def setup_db():
    """Session fixture: (re)create the procrastinate_test_template database,
    run migrations into it, and yield an autocommit connection to it.
    The template database is dropped again at session teardown."""
    with closing(psycopg2.connect("", dbname="postgres")) as connection:
        connection.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        with connection.cursor() as cursor:
            _execute(
                cursor, "DROP DATABASE IF EXISTS {}", "procrastinate_test_template"
            )
            _execute(cursor, "CREATE DATABASE {}", "procrastinate_test_template")

    job_store = aiopg_connector.PostgresJobStore(dbname="procrastinate_test_template")
    migrator = migration.Migrator(job_store=job_store)
    migrator.migrate()
    # We need to close the psycopg2 underlying connection synchronously
    job_store._connection._conn.close()

    with closing(
        psycopg2.connect("", dbname="procrastinate_test_template")
    ) as connection:
        connection.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        yield connection

    with closing(psycopg2.connect("", dbname="postgres")) as connection:
        connection.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        with connection.cursor() as cursor:
            _execute(
                cursor, "DROP DATABASE IF EXISTS {}", "procrastinate_test_template"
            )
@pytest.fixture
def connection_params(setup_db):
    """Per-test fixture: clone procrastinate_test from the migrated template
    and yield connection parameters for it; drop it afterwards."""
    with setup_db.cursor() as cursor:
        _execute(cursor, "DROP DATABASE IF EXISTS {}", "procrastinate_test")
        _execute(
            cursor,
            "CREATE DATABASE {} TEMPLATE {}",
            "procrastinate_test",
            "procrastinate_test_template",
        )

    yield {"dsn": "", "dbname": "procrastinate_test"}

    with setup_db.cursor() as cursor:
        _execute(cursor, "DROP DATABASE IF EXISTS {}", "procrastinate_test")
@pytest.fixture
async def connection(connection_params):
    """Async fixture: an aiopg connection to the per-test database."""
    async with aiopg.connect(**connection_params) as connection:
        yield connection
@pytest.fixture
async def pg_job_store(connection_params):
    """Async fixture: a PostgresJobStore on the per-test database; its
    underlying connection is closed at teardown."""
    job_store = aiopg_connector.PostgresJobStore(**connection_params)
    yield job_store
    connection = await job_store.get_connection()
    await connection.close()
@pytest.fixture
def kill_own_pid():
    """Fixture returning a helper that sends a signal (default SIGTERM)
    to the current process — used to test signal handling."""
    def f(signal=stdlib_signal.SIGTERM):
        os.kill(os.getpid(), signal)

    return f
@pytest.fixture
def job_store():
    """Fixture: an in-memory job store (no database required)."""
    return testing.InMemoryJobStore()
@pytest.fixture
def get_all(connection):
    """Fixture returning an async helper that fetches the given fields from
    a table as a list of dict rows."""
    async def f(table, *fields):
        # NOTE(review): table/field names are interpolated directly into the
        # SQL string — acceptable only because tests supply trusted values.
        async with connection.cursor(
            cursor_factory=aiopg_connector.RealDictCursor
        ) as cursor:
            await cursor.execute(f"SELECT {', '.join(fields)} FROM {table}")
            return await cursor.fetchall()

    return f
@pytest.fixture
def app(job_store):
    """Fixture: an App backed by the in-memory job store."""
    return app_module.App(job_store=job_store)
@pytest.fixture
def pg_app(pg_job_store):
    """Fixture: an App backed by the Postgres job store."""
    return app_module.App(job_store=pg_job_store)
@pytest.fixture
def job_factory():
    """Fixture returning a factory that builds jobs.Job instances with
    sensible defaults; any default can be overridden by keyword."""
    base = {
        "id": 42,
        "task_name": "bla",
        "task_kwargs": {},
        "lock": None,
        "queue": "queue",
    }

    def factory(**overrides):
        return jobs.Job(**{**base, **overrides})

    return factory
| [
1,
529,
9507,
29958,
21150,
29914,
535,
615,
342,
29889,
2272,
29966,
12443,
29918,
303,
1503,
29958,
29900,
13,
5215,
2897,
13,
5215,
7182,
408,
3659,
1982,
29918,
25436,
13,
3166,
3030,
1982,
1053,
14382,
13,
13,
5215,
7468,
459,
2988... |
porthole/management/commands/brocade.py | jsayles/Porthole | 0 | 9828 | <filename>porthole/management/commands/brocade.py
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from porthole import models, brocade
class Command(BaseCommand):
    """Management command for inspecting the Brocade switch stacks."""
    help = "Command the Brocade switch stacks"
    args = ""
    requires_system_checks = False

    def add_arguments(self, parser):
        """Register the --print_stacks flag."""
        parser.add_argument(
            '--print_stacks',
            action='store_true',
            dest='print_stacks',
            help='Show the VLAN data from all switch stacks',
        )

    def handle(self, *args, **options):
        """Dispatch on the selected flags."""
        if options['print_stacks']:
            self.print_stacks()

    def print_stacks(self):
        """Connect to every configured SwitchStack and print its data."""
        for s in models.SwitchStack.objects.all():
            stack = brocade.SwitchStack(s.name, s.ip_address, s.raw_username, s.raw_password, port=s.port)
            stack.print_stack()
            print()
| [
1,
529,
9507,
29958,
1971,
386,
1772,
29914,
21895,
29914,
26381,
29914,
6729,
6332,
29889,
2272,
13,
3166,
9557,
29889,
3221,
29889,
21895,
29889,
3188,
1053,
7399,
6255,
29892,
10516,
2392,
13,
3166,
9557,
29889,
5527,
1053,
6055,
13,
1... |
helpers/time.py | fswzb/autotrade | 1 | 181598 | <gh_stars>1-10
# coding:utf8
import datetime
def is_weekend(now_time):
    """Return True if now_time (date or datetime) falls on Saturday or Sunday."""
    return now_time.isoweekday() in (6, 7)
#to get the latest trade day
# Known non-trading weekdays (public holidays) that the weekday rule alone
# cannot detect.  Format: 'YYYY-MM-DD'.
except_trade_day_list=['2015-05-01','2015-06-22','2015-09-03','2015-10-01','2015-10-02','2015-10-06', \
                       '2015-10-07','2015-10-08', '2016-04-04','2016-05-02','2016-06-09','2016-06-10', \
                       '2016-09-15','2016-09-16','2016-10-03','2016-10-04','2016-10-05','2016-10-06', \
                       '2016-10-07','2017-01-02','2017-01-30','2017-01-31','2017-02-01','2017-02-02', \
                       '2017-04-03','2017-05-29','2017-10-02','2017-10-03','2017-10-04','2017-10-05','2017-10-06']


def is_trade_date(given_date_str=None):
    """Return True if the given date is an (expected) trading day.

    :param given_date_str: 'YYYY-MM-DD' string (longer strings are
        truncated to the date part), a datetime.date/datetime, or None
        meaning "today".
    :return: bool

    Bug fix: the original ignored `given_date_str` entirely and always
    tested *today's* date, which broke get_latest_trade_date() and
    get_next_trade_date(), both of which pass an explicit date.
    """
    date_format = '%Y-%m-%d'
    if given_date_str is None:
        given_day = datetime.datetime.now()
    elif isinstance(given_date_str, str):
        given_day = datetime.datetime.strptime(given_date_str[:10], date_format)
    else:
        # datetime.date / datetime.datetime as passed by get_next_trade_date
        given_day = given_date_str
    date_str = given_day.strftime(date_format)
    if date_str in except_trade_day_list:
        return False
    # Monday(1)..Friday(5) are trading days unless excluded above.
    return given_day.isoweekday() < 6
def get_latest_trade_date(this_date=None, date_format='%Y-%m-%d'):
    """
    Walk backwards from this_date (default: now) to the most recent
    trading day; before 09:15 the current day is not yet counted.

    :param this_date: datetime.datetime type, like datetime.datetime.now()
    :return: latest trade date as a str in date_format

    NOTE(review): `open_str` and `latest_day_str` are assigned but never
    used; the loop returns None if it ever walks past 1990-01-01.
    """
    this_day = datetime.datetime.now()
    if this_date != None:
        this_day = this_date
    open_str = ' 09:25:00'
    time_format = date_format + ' %X'
    this_str = this_day.strftime(time_format)
    # Before the 09:15 open, treat "latest" as starting from yesterday.
    if (this_day.hour >= 0 and this_day.hour < 9) or (this_day.hour == 9 and
                                                      this_day.minute < 15):
        this_day = datetime.datetime.strptime(
            this_str, time_format) + datetime.timedelta(days=-1)
        this_str = this_day.strftime(date_format)
    latest_day_str = ''
    this_str = this_str[:10]  # keep only the YYYY-MM-DD part
    while this_str >= '1990-01-01':
        if is_trade_date(this_str):
            return this_str
            #break
        else:
            # Step one day back and retry.
            this_day = this_day + datetime.timedelta(days=-1)
            this_str = this_day.strftime(date_format)
def get_next_trade_date(now_time):
    """
    Return the first trading day strictly after now_time.

    :param now_time: datetime.datetime or datetime.date
    :return: datetime.date

    >>> import datetime
    >>> get_next_trade_date(datetime.date(2016, 5, 5))
    datetime.date(2016, 5, 6)
    """
    now = now_time
    max_days = 365  # safety bound: give up after a year of non-trading days
    days = 0
    while 1:
        days += 1
        now += datetime.timedelta(days=1)
        # NOTE(review): passes a date object to is_trade_date — relies on
        # that function accepting non-string input.
        if is_trade_date(now):
            if isinstance(now, datetime.date):
                return now
            else:
                return now.date()
        if days > max_days:
            raise ValueError('无法确定 %s 下一个交易日' % now_time)
# Trading sessions: morning (09:15-11:30) and afternoon (13:00-15:00);
# end times are exclusive.
OPEN_TIME = ((datetime.time(9, 15, 0), datetime.time(11, 30, 0)),
             (datetime.time(13, 0, 0), datetime.time(15, 0, 0)), )


def is_tradetime(now_time):
    """Return True if now_time (datetime.datetime) falls inside any open
    trading session.

    Bug fix: the original returned False as soon as the *first* session did
    not match (`else: return False` inside the loop), so the afternoon
    session was never checked.

    :param now_time: datetime.datetime
    :return: bool
    """
    now = now_time.time()
    return any(begin <= now < end for begin, end in OPEN_TIME)
# Lunch-break window: 11:30:00 (inclusive) to 12:59:30 (exclusive).
PAUSE_TIME = ((datetime.time(11, 30, 0), datetime.time(12, 59, 30)), )


def is_pause(now_time):
    """Return True during the lunch break.

    Bug fix: the original fell off the end of the function and returned
    None outside the pause window; it now returns an explicit bool
    (backward compatible for truthiness checks).

    :param now_time: datetime.datetime
    :return: bool
    """
    now = now_time.time()
    return any(b <= now < e for b, e in PAUSE_TIME)
# 30-second window just before the afternoon session reopens.
CONTINUE_TIME = ((datetime.time(12, 59, 30), datetime.time(13, 0, 0)), )


def is_continue(now_time):
    """Return True if *now_time* is inside the pre-reopen window."""
    current = now_time.time()
    return any(start <= current < stop for start, stop in CONTINUE_TIME)
# Session close instants; combined with *start* they form the closing window.
CLOSE_TIME = (datetime.time(15, 0, 0), )


def is_closing(now_time, start=datetime.time(14, 54, 30)):
    """Return True if *now_time* lies in the closing window [start, close)."""
    current = now_time.time()
    return any(start <= current < close for close in CLOSE_TIME)
| [
1,
529,
12443,
29918,
303,
1503,
29958,
29896,
29899,
29896,
29900,
13,
29937,
14137,
29901,
9420,
29947,
13,
13,
5215,
12865,
13,
13,
13,
1753,
338,
29918,
18448,
355,
29898,
3707,
29918,
2230,
1125,
13,
1678,
736,
1286,
29918,
2230,
2... |
tet/i18n/__init__.py | koirikivi/tet | 1 | 105074 | import sys
from pyramid.config import Configurator
from pyramid.i18n import get_localizer, TranslationStringFactory
from pyramid.threadlocal import get_current_request
def add_renderer_globals(event):
    """Expose per-request i18n helpers as renderer globals.

    Adds ``_`` / ``gettext`` (translate), ``ngettext`` (pluralize) and
    ``localizer`` so templates can translate strings. Falls back to the
    thread-local request when the event does not carry one.
    """
    request = event.get('request')
    if request is None:
        # Renderer events fired outside a request cycle carry no request.
        request = get_current_request()
    helpers = {
        '_': request.translate,
        'gettext': request.translate,
        'ngettext': request.pluralize,
        'localizer': request.localizer,
    }
    for name, helper in helpers.items():
        event[name] = helper
def configure_i18n(config: Configurator, default_domain: str):
    """Wire translation helpers into a Pyramid application.

    Registers renderer-globals subscribers, stores a TranslationStringFactory
    for *default_domain* on the registry, and attaches ``translate``,
    ``pluralize`` and ``localize`` as reified request properties.
    """
    for event_iface in ('pyramid.events.BeforeRender',
                        'tet.viewlet.IBeforeViewletRender'):
        config.add_subscriber(add_renderer_globals, event_iface)

    tsf = TranslationStringFactory(default_domain)
    config.registry.tsf = tsf

    def translate(request):
        localizer = request.localizer

        def auto_translate(string, *, domain=default_domain, mapping=None,
                           context=None):
            # Promote plain strings to translation strings before localizing.
            if isinstance(string, str):
                string = tsf(string, context=context)
            return localizer.translate(string, domain=domain, mapping=mapping)

        return auto_translate

    def pluralize(request):
        localizer = request.localizer

        def auto_pluralize(singular, plural, n, *, domain=default_domain,
                           mapping=None, context=None):
            if isinstance(singular, str):
                singular = tsf(singular, context=context)
            return localizer.pluralize(singular, plural, n, domain=domain,
                                       mapping=mapping)

        return auto_pluralize

    config.add_request_method(translate, property=True, reify=True)
    config.add_request_method(pluralize, property=True, reify=True)
    config.add_request_method(get_localizer, name='localize', property=True,
                              reify=True)
def includeme(config: Configurator):
    """Pyramid inclusion hook: enable i18n for the including package.

    The translation domain is read from the ``default_i18n_domain`` setting
    and falls back to the including package's name.
    """
    settings = config.get_settings()
    domain = settings.get('default_i18n_domain', config.package.__name__)
    configure_i18n(config, domain)
| [
1,
1053,
10876,
13,
13,
3166,
11451,
2572,
333,
29889,
2917,
1053,
12782,
332,
1061,
13,
3166,
11451,
2572,
333,
29889,
29875,
29896,
29947,
29876,
1053,
679,
29918,
2997,
3950,
29892,
4103,
18411,
1231,
5126,
13,
3166,
11451,
2572,
333,
... |
Wrappers/Python/cil/processors/Masker.py | ClaireDelplancke/CCPi-Framework | 30 | 75693 | <reponame>ClaireDelplancke/CCPi-Framework
# -*- coding: utf-8 -*-
# This work is part of the Core Imaging Library (CIL) developed by CCPi
# (Collaborative Computational Project in Tomographic Imaging), with
# substantial contributions by UKRI-STFC and University of Manchester.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cil.framework import DataProcessor, AcquisitionData, ImageData, ImageGeometry, DataContainer
import warnings
import numpy
from scipy import interpolate
class Masker(DataProcessor):
    r'''
    Processor to fill missing values provided by mask. Please use the desired method to configure a processor for your needs.
    '''

    @staticmethod
    def value(mask=None, value=0):
        r'''This sets the masked values of the input data to the requested value.

        :param mask: A boolean array with the same dimensions as input, where 'False' represents masked values. Mask can be generated using 'MaskGenerator' processor to identify outliers.
        :type mask: DataContainer, ImageData, AcquisitionData, numpy.ndarray
        :param value: values to be assigned to missing elements
        :type value: float, default=0
        '''
        processor = Masker(mode='value', mask=mask, value=value)
        return processor

    @staticmethod
    def mean(mask=None, axis=None):
        r'''This sets the masked values of the input data to the mean of the unmasked values across the array or axis.

        :param mask: A boolean array with the same dimensions as input, where 'False' represents masked values. Mask can be generated using 'MaskGenerator' processor to identify outliers.
        :type mask: DataContainer, ImageData, AcquisitionData, numpy.ndarray
        :param axis: specify axis as int or from 'dimension_labels' to calculate mean.
        :type axis: str, int
        '''
        processor = Masker(mode='mean', mask=mask, axis=axis)
        return processor

    @staticmethod
    def median(mask=None, axis=None):
        r'''This sets the masked values of the input data to the median of the unmasked values across the array or axis.

        :param mask: A boolean array with the same dimensions as input, where 'False' represents masked values. Mask can be generated using 'MaskGenerator' processor to identify outliers.
        :type mask: DataContainer, ImageData, AcquisitionData, numpy.ndarray
        :param axis: specify axis as int or from 'dimension_labels' to calculate median.
        :type axis: str, int
        '''
        processor = Masker(mode='median', mask=mask, axis=axis)
        return processor

    @staticmethod
    def interpolate(mask=None, axis=None, method='linear'):
        r'''This operates over the specified axis and uses 1D interpolation over remaining flattened array to fill in missing values.

        :param mask: A boolean array with the same dimensions as input, where 'False' represents masked values. Mask can be generated using 'MaskGenerator' processor to identify outliers.
        :type mask: DataContainer, ImageData, AcquisitionData, numpy.ndarray
        :param axis: specify axis as int or from 'dimension_labels' to loop over and perform 1D interpolation.
        :type axis: str, int
        :param method: One of the following interpolation methods: linear, nearest, zeros, quadratic, cubic, previous, next
        :type method: str, default='linear'
        '''
        processor = Masker(mode='interpolate', mask=mask, axis=axis, method=method)
        return processor

    def __init__(self,
                 mask = None,
                 mode = 'value',
                 value = 0,
                 axis = None,
                 method = 'linear'):
        r'''Processor to fill missing values provided by mask.

        :param mask: A boolean array with the same dimensions as input, where 'False' represents masked values. Mask can be generated using 'MaskGenerator' processor to identify outliers.
        :type mask: DataContainer, ImageData, AcquisitionData, numpy.ndarray
        :param mode: a method to fill in missing values (value, mean, median, interpolate)
        :type mode: str, default=value
        :param value: substitute all outliers with a specific value
        :type value: float, default=0
        :param axis: specify axis as int or from 'dimension_labels' to calculate mean or median in respective modes
        :type axis: str or int
        :param method: One of the following interpolation methods: linear, nearest, zeros, quadratic, cubic, previous, next
        :type method: str, default='linear'
        :return: DataContainer or it's subclass with masked outliers
        :rtype: DataContainer or it's subclass
        '''

        kwargs = {'mask': mask,
                  'mode': mode,
                  'value': value,
                  'axis': axis,
                  'method': method}

        super(Masker, self).__init__(**kwargs)

    def check_input(self, data):
        """Validate that mask and mode are consistent with the input data."""
        if self.mask is None:
            raise ValueError('Please, provide a mask.')

        if not (data.shape == self.mask.shape):
            # Fix: the message previously formatted self.mask.mask, which is
            # not an attribute and raised AttributeError instead of reporting.
            raise Exception("Mask and Data must have the same shape." +
                            "{} != {}".format(self.mask.shape, data.shape))

        if hasattr(self.mask, 'dimension_labels') and data.dimension_labels != self.mask.dimension_labels:
            raise Exception("Mask and Data must have the same dimension labels." +
                            "{} != {}".format(self.mask.dimension_labels, data.dimension_labels))

        if self.mode not in ['value', 'mean', 'median', 'interpolate']:
            raise Exception("Wrong mode. One of the following is expected:\n" +
                            "value, mean, median, interpolate")

        return True

    def process(self, out=None):
        """Fill masked values of the input according to the configured mode."""
        data = self.get_input()

        return_arr = False
        if out is None:
            out = data.copy()
            arr = out.as_array()
            return_arr = True
        else:
            arr = out.as_array()

        # assumes mask has 'as_array' method, i.e. is a DataContainer or is a numpy array
        try:
            mask_arr = self.mask.as_array()
        except:
            mask_arr = self.mask

        try:
            # 'True' marks good values, so the inverse selects the outliers.
            mask_invert = ~mask_arr
        except TypeError:
            raise TypeError("Mask expected to be a boolean array got {}".format(mask_arr.dtype))

        try:
            axis_index = data.dimension_labels.index(self.axis)
        except:
            if type(self.axis) == int:
                axis_index = self.axis
            else:
                axis_index = None

        if self.mode == 'value':

            arr[mask_invert] = self.value

        elif self.mode == 'mean' or self.mode == 'median':

            if axis_index is not None:
                # Replace outliers slice-by-slice along the requested axis.
                ndim = data.number_of_dimensions

                slice_obj = [slice(None, None, 1)] * ndim

                for i in range(arr.shape[axis_index]):
                    current_slice_obj = slice_obj[:]
                    current_slice_obj[axis_index] = i
                    current_slice_obj = tuple(current_slice_obj)
                    slice_data = arr[current_slice_obj]
                    if self.mode == 'mean':
                        slice_data[mask_invert[current_slice_obj]] = numpy.mean(slice_data[mask_arr[current_slice_obj]])
                    else:
                        slice_data[mask_invert[current_slice_obj]] = numpy.median(slice_data[mask_arr[current_slice_obj]])
                    arr[current_slice_obj] = slice_data
            else:
                # No axis given: use the statistic over the whole array.
                if self.mode == 'mean':
                    arr[mask_invert] = numpy.mean(arr[mask_arr])
                else:
                    arr[mask_invert] = numpy.median(arr[mask_arr])

        elif self.mode == 'interpolate':
            # Fix: the method list previously contained 'linear' twice.
            if self.method not in ['linear', 'nearest', 'zeros',
                                   'quadratic', 'cubic', 'previous', 'next']:
                raise TypeError("Wrong interpolation method, one of the following is expected:\n" +
                                "linear, nearest, zeros, quadratic, cubic, previous, next")

            ndim = data.number_of_dimensions
            shape = arr.shape

            if axis_index is None:
                raise NotImplementedError('Currently Only 1D interpolation is available. Please specify an axis to interpolate over.')

            res_dim = 1
            for i in range(ndim):
                if i != axis_index:
                    res_dim *= shape[i]

            # get axis for 1D interpolation
            interp_axis = numpy.arange(shape[axis_index])

            # loop over every 1D line along the interpolation axis
            for i in range(res_dim):
                rest_shape = []
                for j in range(ndim):
                    if j != axis_index:
                        rest_shape.append(shape[j])
                rest_shape = tuple(rest_shape)

                rest_idx = numpy.unravel_index(i, rest_shape)

                k = 0
                idx = []
                for j in range(ndim):
                    if j == axis_index:
                        idx.append(slice(None,None,1))
                    else:
                        idx.append(rest_idx[k])
                        k += 1
                idx = tuple(idx)

                if numpy.any(mask_invert[idx]):
                    tmp = arr[idx]
                    # Fit only the unmasked samples, then evaluate at the
                    # masked positions ('extrapolate' covers edge gaps).
                    f = interpolate.interp1d(interp_axis[mask_arr[idx]], tmp[mask_arr[idx]],
                                             fill_value='extrapolate',
                                             assume_sorted=True,
                                             kind=self.method)
                    tmp[mask_invert[idx]] = f(numpy.where(mask_arr[idx] == False)[0])
                    arr[idx] = tmp

        else:
            raise ValueError('Mode is not recognised. One of the following is expected: ' +
                             'value, mean, median, interpolate')

        out.fill(arr)

        if return_arr is True:
            return out
| [
1,
529,
276,
1112,
420,
29958,
20216,
533,
13157,
9018,
10055,
29914,
4174,
12197,
29899,
16660,
13,
29937,
448,
29930,
29899,
14137,
29901,
23616,
29899,
29947,
448,
29930,
29899,
13,
29937,
259,
910,
664,
338,
760,
310,
278,
10239,
1954... |
mpf/devices/segment_display/transitions.py | haggispinball/mpf_fathom_fast | 163 | 64855 | """Text transitions used for segment displays."""
import abc
from typing import Optional, List
from mpf.core.placeholder_manager import TextTemplate
from mpf.core.rgb_color import RGBColor
from mpf.devices.segment_display.segment_display_text import SegmentDisplayText, UncoloredSegmentDisplayText
# Messages raised via AssertionError when a transition is driven incorrectly.
STEP_OUT_OF_RANGE_ERROR = "Step is out of range"
TRANSITION_DIRECTION_UNKNOWN_ERROR = "Transition uses an unknown direction value"
class TransitionBase(metaclass=abc.ABCMeta):
    """Abstract base class for text transitions in segment displays."""

    __slots__ = ["output_length", "config", "collapse_dots", "collapse_commas"]

    def __init__(self, output_length: int, collapse_dots: bool, collapse_commas: bool, config: dict) -> None:
        """Store the display parameters and apply config overrides.

        Any config key whose name matches an existing attribute (such as
        ``direction`` or ``text`` defined by subclasses) overrides that
        attribute's default value.
        """
        self.output_length = output_length
        self.config = config
        self.collapse_dots = collapse_dots
        self.collapse_commas = collapse_commas
        for name in config:
            if hasattr(self, name):
                setattr(self, name, config[name])

    @abc.abstractmethod
    def get_step_count(self):
        """Return the total number of steps required for the transition."""
        raise NotImplementedError

    # pylint: disable=too-many-arguments
    @abc.abstractmethod
    def get_transition_step(self, step: int, current_text: str, new_text: str,
                            current_colors: Optional[List[RGBColor]] = None,
                            new_colors: Optional[List[RGBColor]] = None) -> SegmentDisplayText:
        """Calculate one step of the transition."""
        raise NotImplementedError
class TransitionRunner:
    """Iterator that evaluates a transition one step at a time."""

    __slots__ = ["_transition", "_step", "_current_placeholder", "_new_placeholder", "_current_colors", "_new_colors"]

    # pylint: disable=too-many-arguments
    def __init__(self, machine, transition: TransitionBase, current_text: str, new_text: str,
                 current_colors: Optional[List[RGBColor]] = None,
                 new_colors: Optional[List[RGBColor]] = None) -> None:
        """Bind the transition to text templates evaluated on each step."""
        self._transition = transition
        self._step = 0
        self._current_placeholder = TextTemplate(machine, current_text)
        self._new_placeholder = TextTemplate(machine, new_text)
        self._current_colors = current_colors
        self._new_colors = new_colors

    def __iter__(self):
        """This object is its own iterator."""
        return self

    def __next__(self):
        """Evaluate and return the next transition step.

        Raises StopIteration once all steps have been produced.
        """
        step = self._step
        if step >= self._transition.get_step_count():
            raise StopIteration
        result = self._transition.get_transition_step(
            step,
            self._current_placeholder.evaluate({}),
            self._new_placeholder.evaluate({}),
            self._current_colors,
            self._new_colors)
        self._step = step + 1
        return result
class NoTransition(TransitionBase):
    """Transition that swaps directly to the new text in a single step."""

    def get_step_count(self):
        """Return the total number of steps required for the transition."""
        return 1

    # pylint: disable=too-many-arguments
    def get_transition_step(self, step: int, current_text: str, new_text: str,
                            current_colors: Optional[List[RGBColor]] = None,
                            new_colors: Optional[List[RGBColor]] = None) -> SegmentDisplayText:
        """Return the new text immediately (the only step there is)."""
        if not 0 <= step < self.get_step_count():
            raise AssertionError(STEP_OUT_OF_RANGE_ERROR)
        return SegmentDisplayText.from_str(new_text, self.output_length,
                                           self.collapse_dots,
                                           self.collapse_commas, new_colors)
class PushTransition(TransitionBase):
    """Segment display push transition effect.

    The new text pushes the old text off the display, optionally with a
    separator ``text`` between them. Implemented by concatenating
    [new | separator | old] and sliding an output-sized window across it.
    """
    def __init__(self, output_length: int, collapse_dots: bool, collapse_commas: bool, config: dict) -> None:
        """Class initializer; defaults below may be overridden via config."""
        self.direction = 'right'
        self.text = None
        self.text_color = None
        super().__init__(output_length, collapse_dots, collapse_commas, config)
        if self.text is None:
            self.text = ''
    def get_step_count(self):
        """Return the total number of steps required for the transition."""
        # One step per display column plus one per separator character.
        return self.output_length + len(self.text)
    # pylint: disable=too-many-arguments
    def get_transition_step(self, step: int, current_text: str, new_text: str,
                            current_colors: Optional[List[RGBColor]] = None,
                            new_colors: Optional[List[RGBColor]] = None) -> SegmentDisplayText:
        """Calculate one step in the transition (0-based *step*)."""
        if step < 0 or step >= self.get_step_count():
            raise AssertionError(STEP_OUT_OF_RANGE_ERROR)
        current_display_text = SegmentDisplayText.from_str(current_text, self.output_length, self.collapse_dots,
                                                           self.collapse_commas, current_colors)
        new_display_text = SegmentDisplayText.from_str(new_text, self.output_length, self.collapse_dots,
                                                       self.collapse_commas, new_colors)
        if self.text:
            # Separator inherits the first new-text color unless text_color set.
            if new_colors and not self.text_color:
                text_color = [new_colors[0]]
            else:
                text_color = self.text_color
            transition_text = SegmentDisplayText.from_str(self.text, len(self.text), self.collapse_dots,
                                                          self.collapse_commas, text_color)
        else:
            transition_text = UncoloredSegmentDisplayText([], self.collapse_dots, self.collapse_commas)
        # NOTE: relies on SegmentDisplayText supporting list-style slicing
        # and extend(), as used throughout this module.
        if self.direction == 'right':
            temp_list = new_display_text
            temp_list.extend(transition_text)
            temp_list.extend(current_display_text)
            return temp_list[
                self.output_length + len(self.text) - (step + 1):2 * self.output_length + len(
                    self.text) - (step + 1)]
        if self.direction == 'left':
            temp_list = current_display_text
            temp_list.extend(transition_text)
            temp_list.extend(new_display_text)
            return temp_list[step + 1:step + 1 + self.output_length]
        raise AssertionError(TRANSITION_DIRECTION_UNKNOWN_ERROR)
class CoverTransition(TransitionBase):
    """Segment display cover transition effect.

    The new text slides in over the old text, which stays in place until
    covered. An optional ``text`` string leads the incoming edge.
    """
    def __init__(self, output_length: int, collapse_dots: bool, collapse_commas: bool, config: dict) -> None:
        """Class initializer; defaults below may be overridden via config."""
        self.direction = 'right'
        self.text = None
        self.text_color = None
        super().__init__(output_length, collapse_dots, collapse_commas, config)
        if self.text is None:
            self.text = ''
    def get_step_count(self):
        """Return the total number of steps required for the transition."""
        return self.output_length + len(self.text)
    # pylint: disable=too-many-arguments
    def get_transition_step(self, step: int, current_text: str, new_text: str,
                            current_colors: Optional[List[RGBColor]] = None,
                            new_colors: Optional[List[RGBColor]] = None) -> SegmentDisplayText:
        """Calculate one step in the transition (0-based *step*)."""
        if step < 0 or step >= self.get_step_count():
            raise AssertionError(STEP_OUT_OF_RANGE_ERROR)
        current_display_text = SegmentDisplayText.from_str(current_text, self.output_length, self.collapse_dots,
                                                           self.collapse_commas, current_colors)
        new_display_text = SegmentDisplayText.from_str(new_text, self.output_length, self.collapse_dots,
                                                       self.collapse_commas, new_colors)
        if self.text:
            # Leading edge inherits the first new-text color unless set.
            if new_colors and not self.text_color:
                text_color = [new_colors[0]]
            else:
                text_color = self.text_color
            transition_text = SegmentDisplayText.from_str(self.text, len(self.text), self.collapse_dots,
                                                          self.collapse_commas, text_color)
        else:
            transition_text = UncoloredSegmentDisplayText([], self.collapse_dots, self.collapse_commas)
        if self.direction == 'right':
            # While covering: growing tail of [new|edge] over the remaining old
            # text; afterwards the edge slides off the far side.
            new_extended_display_text = new_display_text
            new_extended_display_text.extend(transition_text)
            if step < self.output_length:
                temp_text = new_extended_display_text[-(step + 1):]
                temp_text.extend(current_display_text[step + 1:])
            else:
                temp_text = new_display_text[-(step + 1):-(step + 1) + self.output_length]
            return temp_text
        if self.direction == 'left':
            new_extended_display_text = transition_text
            new_extended_display_text.extend(new_display_text)
            if step < self.output_length:
                temp_text = current_display_text[:self.output_length - (step + 1)]
                temp_text.extend(new_extended_display_text[:step + 1])
            else:
                temp_text = new_extended_display_text[step - self.output_length + 1:step + 1]
            return temp_text
        raise AssertionError(TRANSITION_DIRECTION_UNKNOWN_ERROR)
class UncoverTransition(TransitionBase):
    """Segment display uncover transition effect.

    The old text slides away, revealing the new text already in place
    beneath it. An optional ``text`` string trails the departing edge.
    """
    def __init__(self, output_length: int, collapse_dots: bool, collapse_commas: bool, config: dict) -> None:
        """Class initializer; defaults below may be overridden via config."""
        self.direction = 'right'
        self.text = None
        self.text_color = None
        super().__init__(output_length, collapse_dots, collapse_commas, config)
        if self.text is None:
            self.text = ''
    def get_step_count(self):
        """Return the total number of steps required for the transition."""
        return self.output_length + len(self.text)
    # pylint: disable=too-many-arguments
    def get_transition_step(self, step: int, current_text: str, new_text: str,
                            current_colors: Optional[List[RGBColor]] = None,
                            new_colors: Optional[List[RGBColor]] = None) -> SegmentDisplayText:
        """Calculate one step in the transition (0-based *step*)."""
        if step < 0 or step >= self.get_step_count():
            raise AssertionError(STEP_OUT_OF_RANGE_ERROR)
        current_display_text = SegmentDisplayText.from_str(current_text, self.output_length, self.collapse_dots,
                                                           self.collapse_commas, current_colors)
        new_display_text = SegmentDisplayText.from_str(new_text, self.output_length, self.collapse_dots,
                                                       self.collapse_commas, new_colors)
        if self.text:
            # Trailing edge inherits the first new-text color unless set.
            if new_colors and not self.text_color:
                text_color = [new_colors[0]]
            else:
                text_color = self.text_color
            transition_text = SegmentDisplayText.from_str(self.text, len(self.text), self.collapse_dots,
                                                          self.collapse_commas, text_color)
        else:
            transition_text = UncoloredSegmentDisplayText([], self.collapse_dots, self.collapse_commas)
        if self.direction == 'right':
            # First the edge text scrolls in behind the old text, then the
            # revealed new text grows from the opposite side.
            current_extended_display_text = transition_text
            current_extended_display_text.extend(current_display_text)
            if step < len(self.text):
                temp_text = current_extended_display_text[
                    len(self.text) - step - 1:len(self.text) - step - 1 + self.output_length]
            else:
                temp_text = new_display_text[:step - len(self.text) + 1]
                temp_text.extend(current_extended_display_text[:self.output_length - len(temp_text)])
            return temp_text
        if self.direction == 'left':
            current_extended_display_text = current_display_text
            current_extended_display_text.extend(transition_text)
            if step < len(self.text):
                temp_text = current_extended_display_text[step + 1:step + 1 + self.output_length]
            else:
                temp_text = current_display_text[step + 1:]
                temp_text.extend(new_display_text[-(self.output_length - len(temp_text)):])
            return temp_text
        raise AssertionError(TRANSITION_DIRECTION_UNKNOWN_ERROR)
class WipeTransition(TransitionBase):
    """Segment display wipe transition effect.

    A wipe front (optionally carrying ``text``) moves across the display;
    characters behind the front already show the new text, characters ahead
    of it still show the old text. Neither text itself moves.
    """
    def __init__(self, output_length: int, collapse_dots: bool, collapse_commas: bool, config: dict) -> None:
        """Class initializer; defaults below may be overridden via config."""
        self.direction = 'right'
        self.text = None
        self.text_color = None
        super().__init__(output_length, collapse_dots, collapse_commas, config)
        if self.text is None:
            self.text = ''
    def get_step_count(self):
        """Return the total number of steps required for the transition."""
        return self.output_length + len(self.text)
    # pylint: disable=too-many-arguments,too-many-branches,too-many-return-statements
    def get_transition_step(self, step: int, current_text: str, new_text: str,
                            current_colors: Optional[List[RGBColor]] = None,
                            new_colors: Optional[List[RGBColor]] = None) -> SegmentDisplayText:
        """Calculate one step in the transition (0-based *step*)."""
        if step < 0 or step >= self.get_step_count():
            raise AssertionError(STEP_OUT_OF_RANGE_ERROR)
        current_display_text = SegmentDisplayText.from_str(current_text, self.output_length, self.collapse_dots,
                                                           self.collapse_commas, current_colors)
        new_display_text = SegmentDisplayText.from_str(new_text, self.output_length, self.collapse_dots,
                                                       self.collapse_commas, new_colors)
        if self.text:
            # Wipe front inherits the first new-text color unless set.
            if new_colors and not self.text_color:
                text_color = [new_colors[0]]
            else:
                text_color = self.text_color
            transition_text = SegmentDisplayText.from_str(self.text, len(self.text), self.collapse_dots,
                                                          self.collapse_commas, text_color)
        else:
            transition_text = UncoloredSegmentDisplayText([], self.collapse_dots, self.collapse_commas)
        if self.direction == 'right':
            # Three phases: front entering, front fully visible, front leaving.
            if step < len(self.text):
                temp_text = transition_text[-(step + 1):]
                temp_text.extend(current_display_text[step + 1:])
            elif step < self.output_length:
                temp_text = new_display_text[:step - len(self.text) + 1]
                temp_text.extend(transition_text)
                temp_text.extend(current_display_text[len(temp_text):])
            else:
                temp_text = new_display_text[:step - len(self.text) + 1]
                temp_text.extend(transition_text[:self.output_length - len(temp_text)])
            return temp_text
        if self.direction == 'left':
            if step < len(self.text):
                temp_text = current_display_text[:self.output_length - (step + 1)]
                temp_text.extend(transition_text[:step + 1])
            elif step < self.output_length:
                temp_text = current_display_text[:self.output_length - (step + 1)]
                temp_text.extend(transition_text)
                temp_text.extend(new_display_text[len(temp_text):])
            elif step < self.output_length + len(self.text) - 1:
                temp_text = transition_text[step - (self.output_length + len(self.text)) + 1:]
                temp_text.extend(new_display_text[-(self.output_length - len(temp_text)):])
            else:
                temp_text = new_display_text
            return temp_text
        raise AssertionError(TRANSITION_DIRECTION_UNKNOWN_ERROR)
class SplitTransition(TransitionBase):
    """Segment display split transition effect.

    The change happens symmetrically from the display center outwards
    (``direction='out'``) or from both edges inwards (``'in'``), either
    pushing the old text (``mode='push'``) or wiping over it (``'wipe'``).
    """
    def __init__(self, output_length: int, collapse_dots: bool, collapse_commas: bool, config: dict) -> None:
        """Class initializer; defaults below may be overridden via config."""
        self.direction = 'out'
        self.mode = 'push'
        super().__init__(output_length, collapse_dots, collapse_commas, config)
    def get_step_count(self):
        """Return the total number of steps required for the transition."""
        # Half the display width, rounded up for odd lengths.
        return int((self.output_length + 1) / 2)
    # pylint: disable=too-many-arguments,too-many-branches,too-many-return-statements
    def get_transition_step(self, step: int, current_text: str, new_text: str,
                            current_colors: Optional[List[RGBColor]] = None,
                            new_colors: Optional[List[RGBColor]] = None) -> SegmentDisplayText:
        """Calculate one step in the transition (0-based *step*)."""
        if step < 0 or step >= self.get_step_count():
            raise AssertionError(STEP_OUT_OF_RANGE_ERROR)
        current_display_text = SegmentDisplayText.from_str(current_text, self.output_length, self.collapse_dots,
                                                           self.collapse_commas, current_colors)
        new_display_text = SegmentDisplayText.from_str(new_text, self.output_length, self.collapse_dots,
                                                       self.collapse_commas, new_colors)
        if self.mode == 'push':
            if self.direction == 'out':
                if step == self.get_step_count() - 1:
                    return new_display_text
                # 'characters' = old text still visible on each side of center.
                characters = int(self.output_length / 2)
                split_point = characters
                if characters * 2 == self.output_length:
                    characters -= 1
                else:
                    split_point += 1
                characters -= step
                temp_text = current_display_text[split_point - characters:split_point]
                temp_text.extend(new_display_text[characters:characters + (self.output_length - 2 * characters)])
                temp_text.extend(current_display_text[split_point:split_point + characters])
                return temp_text
            if self.direction == 'in':
                if step == self.get_step_count() - 1:
                    return new_display_text
                split_point = int(self.output_length / 2)
                characters = 1
                if split_point * 2 < self.output_length:
                    split_point += 1
                characters += step
                temp_text = new_display_text[split_point - characters:split_point]
                temp_text.extend(current_display_text[characters:characters + (self.output_length - 2 * characters)])
                temp_text.extend(new_display_text[split_point:split_point + characters])
                return temp_text
            raise AssertionError(TRANSITION_DIRECTION_UNKNOWN_ERROR)
        if self.mode == 'wipe':
            if self.direction == 'out':
                if step == self.get_step_count() - 1:
                    return new_display_text
                characters = int(self.output_length / 2)
                if characters * 2 == self.output_length:
                    characters -= 1
                characters -= step
                temp_text = current_display_text[:characters]
                temp_text.extend(new_display_text[characters:characters + (self.output_length - 2 * characters)])
                temp_text.extend(current_display_text[-characters:])
                return temp_text
            if self.direction == 'in':
                if step == self.get_step_count() - 1:
                    return new_display_text
                temp_text = new_display_text[:step + 1]
                temp_text.extend(current_display_text[step + 1:step + 1 + (self.output_length - 2 * len(temp_text))])
                temp_text.extend(new_display_text[-(step + 1):])
                return temp_text
            raise AssertionError(TRANSITION_DIRECTION_UNKNOWN_ERROR)
        raise AssertionError("Transition uses an unknown mode value")
| [
1,
9995,
1626,
1301,
2187,
1304,
363,
10768,
14423,
1213,
15945,
13,
5215,
25638,
13,
3166,
19229,
1053,
28379,
29892,
2391,
13,
13,
3166,
286,
7810,
29889,
3221,
29889,
27074,
29918,
12847,
1053,
3992,
6733,
13,
3166,
286,
7810,
29889,
... |
setup_cares.py | thedrow/pycares | 0 | 37396 |
import errno
import os
import subprocess
import sys
from distutils import log
from distutils.command.build_ext import build_ext
from distutils.errors import DistutilsError
def exec_process(cmdline, silent=True, catch_enoent=True, input=None, **kwargs):
    """Execute a subprocess and return its captured stdout as text.

    :param cmdline: list of program arguments (argv style).
    :param silent: when False, echo the child's stdout/stderr to ours.
    :param catch_enoent: translate a missing executable into DistutilsError.
    :param input: optional data piped to the child's stdin.
    :param kwargs: extra keyword arguments forwarded to subprocess.Popen.
    :raises DistutilsError: when the executable is missing (and catch_enoent
        is set) or the child exits with a non-zero return code.
    :return: decoded stdout of the child process.
    """
    try:
        sub = subprocess.Popen(args=cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
        stdout, stderr = sub.communicate(input=input)
        if not isinstance(stdout, str):
            # Python 3 pipes yield bytes; decode them. On Python 2 the pipes
            # already yield str, so nothing to do.
            stdout = stdout.decode(sys.getdefaultencoding(), "replace")
            stderr = stderr.decode(sys.getdefaultencoding(), "replace")
        returncode = sub.returncode
        if not silent:
            sys.stdout.write(stdout)
            sys.stderr.write(stderr)
    except OSError as e:
        if e.errno == errno.ENOENT and catch_enoent:
            raise DistutilsError('"%s" is not present on this system' % cmdline[0])
        else:
            raise
    if returncode != 0:
        raise DistutilsError('Got return value %d while executing "%s", stderr output was:\n%s' % (returncode, " ".join(cmdline), stderr.rstrip("\n")))
    return stdout
def exec_make(cmdline, *args, **kwargs):
    """Run make with *cmdline* arguments, preferring GNU gmake on BSD.

    Tries each candidate make binary in turn, skipping ones that are not
    installed, and raises DistutilsError when none is available.
    """
    assert isinstance(cmdline, list)
    on_bsd = "bsd" in sys.platform
    candidates = ["gmake", "make"] if on_bsd else ["make"]
    for make in candidates:
        if on_bsd and make == "make":
            log.warn("Running plain make on BSD-derived system. It will likely fail. Consider installing GNU make from the ports collection.")
        try:
            return exec_process([make] + cmdline, *args, catch_enoent=False, **kwargs)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
    raise DistutilsError('"make" is not present on this system')
class cares_build_ext(build_ext):
    """build_ext subclass that compiles the bundled c-ares library first
    and links the extension against the resulting static library."""
    # Location of the vendored c-ares source tree.
    cares_dir = os.path.join('deps', 'c-ares')
    # Extend the standard build_ext command-line options with our flag.
    user_options = build_ext.user_options
    user_options.extend([
        ("cares-clean-compile", None, "Clean c-ares tree before compilation"),
    ])
    boolean_options = build_ext.boolean_options
    boolean_options.extend(["cares-clean-compile"])
    def initialize_options(self):
        """Initialize distutils options; our clean flag defaults to off."""
        build_ext.initialize_options(self)
        self.cares_clean_compile = 0
    def build_extensions(self):
        """Build c-ares, then configure linker inputs and build the extension."""
        if self.compiler.compiler_type == 'mingw32':
            # Dirty hack to avoid linking with more than one C runtime when using MinGW
            self.compiler.dll_libraries = [lib for lib in self.compiler.dll_libraries if not lib.startswith('msvcr')]
        # Forcing a clean compile also forces a rebuild of the extension.
        self.force = self.cares_clean_compile
        if self.compiler.compiler_type == 'msvc':
            self.cares_lib = os.path.join(self.cares_dir, 'cares.lib')
        else:
            self.cares_lib = os.path.join(self.cares_dir, 'libcares.a')
        self.build_cares()
        # Set compiler options
        if self.compiler.compiler_type == 'mingw32':
            self.compiler.add_library_dir(self.cares_dir)
            self.compiler.add_library('cares')
        self.extensions[0].extra_objects = [self.cares_lib]
        self.compiler.add_include_dir(os.path.join(self.cares_dir, 'src'))
        if sys.platform.startswith('linux'):
            # librt for clock_gettime and friends.
            self.compiler.add_library('rt')
        elif sys.platform == 'win32':
            if self.compiler.compiler_type == 'msvc':
                self.extensions[0].extra_link_args = ['/NODEFAULTLIB:libcmt']
            # Windows system libraries required by c-ares.
            self.compiler.add_library('advapi32')
            self.compiler.add_library('iphlpapi')
            self.compiler.add_library('psapi')
            self.compiler.add_library('ws2_32')
        build_ext.build_extensions(self)
    def build_cares(self):
        """Compile the static c-ares library if it is not already built."""
        #self.debug_mode = bool(self.debug) or hasattr(sys, 'gettotalrefcount')
        win32_msvc = self.compiler.compiler_type == 'msvc'
        def build():
            # Compile position-independent so it can link into a shared object.
            cflags = '-fPIC'
            env = os.environ.copy()
            env['CFLAGS'] = ' '.join(x for x in (cflags, env.get('CFLAGS', None)) if x)
            log.info('Building c-ares...')
            if win32_msvc:
                exec_process('cmd.exe /C vcbuild.bat', cwd=self.cares_dir, env=env, shell=True)
            else:
                exec_make(['libcares.a'], cwd=self.cares_dir, env=env)
        def clean():
            if win32_msvc:
                exec_process('cmd.exe /C vcbuild.bat clean', cwd=self.cares_dir, shell=True)
            else:
                exec_make(['clean'], cwd=self.cares_dir)
        if self.cares_clean_compile:
            clean()
        if not os.path.exists(self.cares_lib):
            log.info('c-ares needs to be compiled.')
            build()
        else:
            log.info('No need to build c-ares.')
| [
1,
29871,
13,
5215,
4589,
1217,
13,
5215,
2897,
13,
5215,
1014,
5014,
13,
5215,
10876,
13,
13,
3166,
1320,
13239,
1053,
1480,
13,
3166,
1320,
13239,
29889,
6519,
29889,
4282,
29918,
1062,
1053,
2048,
29918,
1062,
13,
3166,
1320,
13239,
... |
user_service/resources/auth.py | lewisemm/vistagrid-backend-k8 | 0 | 67379 | <filename>user_service/resources/auth.py
from flask_jwt_extended import (
create_access_token,
jwt_required,
get_jwt_identity
)
from flask_restful import reqparse, Resource
from user_service.models import User as UserModel
class UserAuth(Resource):
    """Login (POST) and identity lookup (GET) endpoints backed by JWT access tokens."""

    def __init__(self):
        # Both credentials are mandatory request fields; the parser
        # rejects requests that omit either one.
        self.parser = reqparse.RequestParser()
        for field in ('username', 'password'):
            self.parser.add_argument(
                field,
                type=str,
                help=f'{field} field is required.',
                required=True
            )

    def user_exists(self, username):
        """Return the matching User row, or None when no such user exists."""
        return UserModel.query.filter_by(username=username).first()

    def post(self):
        """Authenticate the supplied credentials and mint an access token."""
        args = self.parser.parse_args()
        username, password = args['username'], args['password']
        user = self.user_exists(username)
        if user is None:
            return {'error': f'User {username} does not exist.'}, 404
        if not user.verify_password(password):
            return {'error': f'Invalid password for user {username}.'}, 401
        return {'access_token': create_access_token(identity=user.user_id)}, 200

    @jwt_required
    def get(self):
        """Return the identity encoded in the caller's JWT."""
        try:
            return {'username': get_jwt_identity()}, 200
        except Exception as e:
            # Surface the failure as a 500 rather than crashing the view.
            return {'error': f'{e}'}, 500
| [
1,
529,
9507,
29958,
1792,
29918,
5509,
29914,
13237,
29914,
5150,
29889,
2272,
13,
3166,
29784,
29918,
29926,
14554,
29918,
1062,
2760,
1053,
313,
13,
1678,
1653,
29918,
5943,
29918,
6979,
29892,
13,
1678,
432,
14554,
29918,
12403,
29892,
... |
mumbleroni/tests/test_settings_datastructure.py | Peter-Morawski/MumbleRoni | 1 | 141147 | # -*- coding: utf-8 -*-
import os
import unittest
from ..settings.datastructure import Settings
class SettingsDataStructureTests(unittest.TestCase):
    """Validation tests for ``Settings.from_dict``.

    Each test feeds a (possibly malformed) nested settings dict to the
    parser and checks either that a ValueError is raised or that the
    parsed fields round-trip correctly.
    """

    def setUp(self):
        # Fixture values shared by the parsing tests below.
        self._host = "example.domain.com"
        self._port = 1337
        self._user = "MumbleRoni"
        self._password = "<PASSWORD>"
        self._default_channel_str = "In - Game"
        self._default_channel_int = 1
        self._certificate_path = os.path.curdir

    def _server_settings(self, **server):
        # Build the nested {"server": {...}} dict the parser expects.
        return {"server": server}

    def _assert_invalid(self, settings):
        # Parsing must reject `settings` with a ValueError.
        with self.assertRaises(ValueError):
            Settings.from_dict(settings)

    def _assert_round_trip(self, settings, **expected_fields):
        # Parse `settings` and compare the given server fields.
        actual = Settings.from_dict(settings)
        for name, value in expected_fields.items():
            assert getattr(actual.server, name) == value

    def test_parse_from_none(self):
        self._assert_invalid(None)

    def test_parse_from_empty_dict(self):
        self._assert_invalid({})

    def test_parse_from_dict_missing_host(self):
        self._assert_invalid(self._server_settings(username=self._host))

    def test_parse_from_dict_empty_host(self):
        self._assert_invalid(self._server_settings(host=""))

    def test_parse_from_dict_host_wrong_type(self):
        self._assert_invalid(self._server_settings(host=123))

    def test_parse_from_dict_missing_username(self):
        self._assert_invalid(self._server_settings(host=self._host))

    def test_parse_from_dict_empty_username(self):
        self._assert_invalid(
            self._server_settings(host=self._host, username=""))

    def test_parse_from_dict_username_wrong_type(self):
        self._assert_invalid(
            self._server_settings(host=self._host, username=123))

    def test_parse_from_dict_empty_password(self):
        settings = self._server_settings(
            host=self._host, username=self._user, password="")
        self._assert_round_trip(
            settings, host=self._host, username=self._user, password="")

    def test_parse_from_dict_password_wrong_type(self):
        # Bug fix: the original source contained a bare `<PASSWORD>`
        # placeholder here (a syntax error). A non-string value is what
        # this test needs in order to exercise the type check.
        self._assert_invalid(
            self._server_settings(
                host=self._host, username=self._user, password=123))

    def test_parse_from_dict_default_channel_wrong_type(self):
        self._assert_invalid(
            self._server_settings(
                host=self._host, username=self._user, default_channel=False))

    def test_parse_from_dict_default_channel_str(self):
        settings = self._server_settings(
            host=self._host, username=self._user,
            default_channel=self._default_channel_str)
        self._assert_round_trip(
            settings, host=self._host, username=self._user,
            default_channel=self._default_channel_str)

    def test_parse_from_dict_default_channel_int(self):
        settings = self._server_settings(
            host=self._host, username=self._user,
            default_channel=self._default_channel_int)
        self._assert_round_trip(
            settings, host=self._host, username=self._user,
            default_channel=self._default_channel_int)

    def test_parse_from_dict_port_wrong_type(self):
        self._assert_invalid(
            self._server_settings(
                host=self._host, username=self._user, port="some_port"))

    def test_parse_from_dict_port_is_none(self):
        self._assert_invalid(
            self._server_settings(
                host=self._host, username=self._user, port=None))

    def test_parse_from_dict_port(self):
        settings = self._server_settings(
            host=self._host, username=self._user, port=self._port)
        self._assert_round_trip(
            settings, host=self._host, username=self._user, port=self._port)

    def test_parse_from_dict_certificate_path_wrong_type(self):
        self._assert_invalid(
            self._server_settings(
                host=self._host, username=self._user, certificate_path=123))

    def test_parse_from_dict_certificate_path_invalid_path(self):
        self._assert_invalid(
            self._server_settings(
                host=self._host, username=self._user,
                certificate_path="invalid"))

    def test_parse_from_dict_certificate_path_cur_dir(self):
        settings = self._server_settings(
            host=self._host, username=self._user,
            certificate_path=self._certificate_path)
        self._assert_round_trip(
            settings, host=self._host, username=self._user,
            certificate_path=self._certificate_path)

    def test_parse_from_valid_dict_1(self):
        settings = self._server_settings(host=self._host, username=self._user)
        self._assert_round_trip(
            settings, host=self._host, username=self._user)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| [
1,
396,
448,
29930,
29899,
14137,
29901,
23616,
29899,
29947,
448,
29930,
29899,
13,
13,
5215,
2897,
13,
5215,
443,
27958,
13,
3166,
6317,
11027,
29889,
4130,
7614,
12425,
1053,
19215,
13,
13,
13,
1990,
19215,
1469,
5015,
12425,
24376,
... |
3rdParty/Berkeley-DB/db-5.3.21.NC/build.py | rtobar/askapsoft | 1 | 73346 | import os
from askapdev.rbuild.builders import Autotools as Builder
import askapdev.rbuild.utils as utils
# Configure an Autotools build of Berkeley DB out of the build_unix subtree.
builder = Builder(buildsubdir="build_unix",
                  confcommand='../dist/configure')
builder.remote_archive = "db-5.3.21.NC.tar.gz"

# Position-independent code is required for shared objects on x86_64.
if os.uname()[4] == 'x86_64':
    builder.add_option('--with-pic')

#builder.add_option('--disable-shared') # Need shared libraries for Java.
builder.add_option('--enable-cxx')
builder.add_option('--enable-java')
builder.nowarnings = True

# On Mac OSX jni.h is in a location where BerkleyDB can't find it. Including
# $JAVA_HOME/include (and include/darwin) fixes this. The JAVA_HOME environment
# can be setup on OSX like so (for bash): export JAVA_HOME=$(/usr/libexec/java_home)
platform = utils.get_platform()
# Bug fix: environ.has_key() is Python-2-only (removed in Python 3);
# the `in` operator is equivalent and works on both versions.
if platform['system'] == 'Darwin' and "JAVA_HOME" in os.environ:
    javahome = os.environ.get('JAVA_HOME')
    # builder.add_option('JAVACFLAGS="-source 1.7 -target 1.7" CPPFLAGS="-I%s/include -I%s/include/darwin"' %(javahome,javahome))
    if '1.8' in javahome:
        builder.add_option('JAVACFLAGS="-source 1.7 -target 1.7"')
    builder.add_option('CPPFLAGS="-I%s/include -I%s/include/darwin"' % (javahome, javahome))

# The Cray cc and c++ compilers wrappers break here, so go directly to gcc and g++
if "CRAYOS_VERSION" in os.environ:
    builder.add_env("CC", "gcc")
    builder.add_env("CXX", "g++")
    builder.add_env("LINK", "g++")
    builder.add_env("SHLINK", "g++")

builder.build()
| [
1,
1053,
2897,
13,
13,
3166,
2244,
481,
3359,
29889,
29878,
4282,
29889,
4282,
414,
1053,
5202,
327,
8789,
408,
5373,
2700,
13,
5215,
2244,
481,
3359,
29889,
29878,
4282,
29889,
13239,
408,
3667,
29879,
13,
13,
16409,
353,
5373,
2700,
... |
server/form/mongo.py | SRM-IST-KTR/ossmosis | 6 | 3550 | import os
from pymongo import MongoClient
from dotenv import load_dotenv
def database_entry(data):
    """Insert `data` into the MongoDB 'users' collection.

    Connection details are read from the MONGODB_AUTH_URI and MONGODB_DB
    environment variables (loaded from a .env file if present).

    Returns True on success, False on any failure (best-effort: errors
    are printed, never raised to the caller).
    """
    try:
        load_dotenv()
        mongo_string = os.getenv('MONGODB_AUTH_URI')
        # Bug fix: the client was never closed, leaking the connection.
        # MongoClient is a context manager and closes itself on exit.
        with MongoClient(mongo_string) as client:
            database = client[os.getenv('MONGODB_DB')]
            database['users'].insert_one(data)
        return True
    except Exception as e:
        # Preserve the original best-effort contract: report and return False.
        print(e)
        return False
# Import-only module: nothing to execute when run as a script.
if __name__ == "__main__":
    pass
| [
1,
1053,
2897,
13,
3166,
282,
962,
7443,
1053,
18294,
4032,
13,
3166,
8329,
6272,
1053,
2254,
29918,
6333,
6272,
13,
13,
13,
1753,
2566,
29918,
8269,
29898,
1272,
1125,
13,
1678,
1018,
29901,
13,
4706,
2254,
29918,
6333,
6272,
580,
13... |
tests/test_asyncio.py | adityaoza1901/AioContext | 37 | 100568 | <filename>tests/test_asyncio.py<gh_stars>10-100
import asyncio
import pytest
@asyncio.coroutine
def _check_update_context(context):
assert context == {'key1': 'value1'}
context['key1'] = 'value2'
context['key2'] = 'value2'
assert context == {'key1': 'value2', 'key2': 'value2'}
@pytest.mark.asyncio
async def test_ensure_future(context, context_loop):
    """A task spawned via ensure_future must not leak its context changes back.

    Converted from ``@asyncio.coroutine``/``yield from`` (removed in
    Python 3.11) to native async/await; behavior is unchanged.
    """
    context['key1'] = 'value1'
    # The child coroutine mutates its own view of the context; ours
    # must remain unchanged afterwards.
    await asyncio.ensure_future(_check_update_context(context))
    assert context == {'key1': 'value1'}
@pytest.mark.asyncio
async def test_wait_for(context, context_loop):
    """Context changes made inside wait_for must not leak back to the caller.

    Converted from ``@asyncio.coroutine``/``yield from`` (removed in
    Python 3.11) to native async/await; behavior is unchanged.
    """
    context['key1'] = 'value1'
    await asyncio.wait_for(_check_update_context(context), 1)
    assert context == {'key1': 'value1'}
@pytest.mark.asyncio
async def test_gather(context, context_loop):
    """Context changes made inside gather must not leak back to the caller.

    Converted from ``@asyncio.coroutine``/``yield from`` (removed in
    Python 3.11) to native async/await; behavior is unchanged.
    """
    context['key1'] = 'value1'
    await asyncio.gather(_check_update_context(context))
    assert context == {'key1': 'value1'}
| [
1,
529,
9507,
29958,
21150,
29914,
1688,
29918,
294,
948,
3934,
29889,
2272,
29966,
12443,
29918,
303,
1503,
29958,
29896,
29900,
29899,
29896,
29900,
29900,
13,
5215,
408,
948,
3934,
13,
13,
5215,
11451,
1688,
13,
13,
13,
29992,
294,
9... |
rubin_sim/maf/mafContrib/GRBTransientMetric.py | RileyWClarke/flarubin | 0 | 1602206 | from builtins import zip
# Gamma-ray burst afterglow metric
# <EMAIL>
import rubin_sim.maf.metrics as metrics
import numpy as np
__all__ = ['GRBTransientMetric']
class GRBTransientMetric(metrics.BaseMetric):
    """Detections for on-axis GRB afterglows decaying as
    F(t) = F(1min)((t-t0)/1min)^-alpha. No jet break, for now.

    Derived from TransientMetric, but calculated with reduce functions to
    enable band-specific counts.
    Burst parameters taken from 2011PASP..123.1034J.

    Simplifications:
    no color variation or evolution encoded yet.
    no jet breaks.
    not treating off-axis events.

    Parameters
    ----------
    alpha : float,
        temporal decay index
        Default = 1.0
    apparent_mag_1min_mean : float,
        mean magnitude at 1 minute after burst
        Default = 15.35
    apparent_mag_1min_sigma : float,
        std of magnitudes at 1 minute after burst
        Default = 1.59
    transDuration : float, optional
        How long the transient lasts (days). Default 10.
    surveyDuration : float, optional
        Length of survey (years).
        Default 10.
    surveyStart : float, optional
        MJD for the survey start date.
        Default None (uses the time of the first observation).
    detectM5Plus : float, optional
        An observation will be used if the light curve magnitude is brighter than m5+detectM5Plus.
        Default 0.
    nPerFilter : int, optional
        Number of separate detections of the light curve above the
        detectM5Plus threshold (in a single filter) for the light curve
        to be counted.
        Default 1.
    nFilters : int, optional
        Number of filters that need to be observed nPerFilter times,
        with differences minDeltaMag,
        for an object to be counted as detected.
        Default 1.
    minDeltaMag : float, optional
        magnitude difference between detections in the same filter required
        for second+ detection to be counted.
        For example, if minDeltaMag = 0.1 mag and two consecutive observations
        differ only by 0.05 mag, those two detections will only count as one.
        (Better would be a SNR-based discrimination of lightcurve change.)
        Default 0.
    nPhaseCheck : int, optional
        Sets the number of phases that should be checked.
        One can imagine pathological cadences where many objects pass the detection criteria,
        but would not if the observations were offset by a phase-shift.
        Default 1.
    """

    def __init__(self, alpha=1, apparent_mag_1min_mean=15.35,
                 apparent_mag_1min_sigma=1.59, metricName='GRBTransientMetric',
                 mjdCol='expMJD', m5Col='fiveSigmaDepth', filterCol='filter',
                 transDuration=10.,
                 surveyDuration=10., surveyStart=None, detectM5Plus=0.,
                 nPerFilter=1, nFilters=1, minDeltaMag=0., nPhaseCheck=1,
                 **kwargs):
        self.mjdCol = mjdCol
        self.m5Col = m5Col
        self.filterCol = filterCol
        super(GRBTransientMetric, self).__init__(
            col=[self.mjdCol, self.m5Col, self.filterCol],
            units='Fraction Detected',
            metricName=metricName, **kwargs)
        self.alpha = alpha
        self.apparent_mag_1min_mean = apparent_mag_1min_mean
        self.apparent_mag_1min_sigma = apparent_mag_1min_sigma
        self.transDuration = transDuration
        self.surveyDuration = surveyDuration
        self.surveyStart = surveyStart
        self.detectM5Plus = detectM5Plus
        self.nPerFilter = nPerFilter
        self.nFilters = nFilters
        self.minDeltaMag = minDeltaMag
        self.nPhaseCheck = nPhaseCheck
        # Bursts start at t = 0 within each transient window.
        self.peakTime = 0.
        # Fixed ordering of the reduce outputs.
        self.reduceOrder = {'Bandu': 0, 'Bandg': 1, 'Bandr': 2, 'Bandi': 3,
                            'Bandz': 4, 'Bandy': 5, 'Band1FiltAvg': 6,
                            'BandanyNfilters': 7}

    def lightCurve(self, time, filters):
        """
        Given the times and filters of an observation, return the magnitudes.

        The burst brightness at t = 1 min is drawn from a Gaussian
        (apparent_mag_1min_mean/_sigma), then decays as a power law in
        flux, i.e. linearly in magnitude versus log10(time).
        """
        lcMags = np.zeros(time.size, dtype=float)
        decline = np.where(time > self.peakTime)
        apparent_mag_1min = np.random.randn() * self.apparent_mag_1min_sigma + self.apparent_mag_1min_mean
        # time is in days; convert to minutes so t = 1 min gives the anchor magnitude.
        lcMags[decline] += apparent_mag_1min + self.alpha * 2.5 * np.log10((time[decline] - self.peakTime) * 24. * 60.)
        return lcMags

    def run(self, dataSlice, slicePoint=None):
        """
        Calculate the detectability of a transient with the specified lightcurve.

        Parameters
        ----------
        dataSlice : numpy.array
            Numpy structured array containing the data related to the visits provided by the slicer.
        slicePoint : dict, optional
            Dictionary containing information about the slicepoint currently active in the slicer.

        Returns
        -------
        dict
            Per-band fraction of simulated transients passing the detection criteria.
        """
        tshifts = np.arange(self.nPhaseCheck) * self.transDuration / float(self.nPhaseCheck)
        nTransMax = 0
        # Bug fix: detection counts must accumulate across all phase shifts
        # (nTransMax does); previously the counter was re-zeroed inside the
        # loop, so only the last phase shift contributed.
        bandcounter = {'u': 0, 'g': 0, 'r': 0, 'i': 0, 'z': 0, 'y': 0, 'any': 0}
        # Bug fix: when self.surveyStart was supplied, the local surveyStart
        # was never assigned, raising NameError below.
        if self.surveyStart is None:
            surveyStart = dataSlice[self.mjdCol].min()
        else:
            surveyStart = self.surveyStart
        for tshift in tshifts:
            # Compute the total number of back-to-back transients that are
            # possible to detect given the survey and transient durations.
            nTransMax += np.floor(self.surveyDuration / (self.transDuration / 365.25))
            if tshift != 0:
                nTransMax -= 1
            time = (dataSlice[self.mjdCol] - surveyStart + tshift) % self.transDuration
            # Which lightcurve does each point belong to
            lcNumber = np.floor((dataSlice[self.mjdCol] - surveyStart) / self.transDuration)
            lcMags = self.lightCurve(time, dataSlice[self.filterCol])
            # Flag points that are above the SNR limit
            detected = np.zeros(dataSlice.size, dtype=int)
            detected[np.where(lcMags < dataSlice[self.m5Col] + self.detectM5Plus)] += 1
            # make sure things are sorted by time (renamed from `ord`, which
            # shadowed the builtin)
            time_order = np.argsort(dataSlice[self.mjdCol])
            dataSlice = dataSlice[time_order]
            detected = detected[time_order]
            lcNumber = lcNumber[time_order]
            lcMags = lcMags[time_order]
            ulcNumber = np.unique(lcNumber)
            left = np.searchsorted(lcNumber, ulcNumber)
            right = np.searchsorted(lcNumber, ulcNumber, side='right')
            # iterate over the lightcurves
            for le, ri in zip(left, right):
                wdet = np.where(detected[le:ri] > 0)
                ufilters = np.unique(dataSlice[self.filterCol][le:ri][wdet])
                nfilts_lci = 0
                for filtName in ufilters:
                    wdetfilt = np.where(
                        (dataSlice[self.filterCol][le:ri] == filtName) &
                        detected[le:ri])
                    lcPoints = lcMags[le:ri][wdetfilt]
                    dlc = np.abs(np.diff(lcPoints))
                    # number of detections in band, requiring (for
                    # nPerFilter > 1) that points change by more than
                    # minDeltaMag between consecutive detections
                    nbanddet = np.sum(dlc > self.minDeltaMag) + 1
                    if nbanddet >= self.nPerFilter:
                        bandcounter[filtName] += 1
                        nfilts_lci += 1
                if nfilts_lci >= self.nFilters:
                    bandcounter['any'] += 1
        bandfraction = {}
        for band in bandcounter.keys():
            bandfraction[band] = float(bandcounter[band]) / nTransMax
        return bandfraction

    def reduceBand1FiltAvg(self, bandfraction):
        "Average fraction detected in single filter"
        return np.mean(list(bandfraction.values()))

    def reduceBandanyNfilters(self, bandfraction):
        "Fraction of events detected in Nfilters or more"
        return bandfraction['any']

    def reduceBandu(self, bandfraction):
        return bandfraction['u']

    def reduceBandg(self, bandfraction):
        return bandfraction['g']

    def reduceBandr(self, bandfraction):
        return bandfraction['r']

    def reduceBandi(self, bandfraction):
        return bandfraction['i']

    def reduceBandz(self, bandfraction):
        return bandfraction['z']

    def reduceBandy(self, bandfraction):
        return bandfraction['y']
| [
1,
515,
4240,
1144,
1053,
14319,
13,
29937,
402,
2735,
29899,
764,
20887,
1156,
29887,
677,
12714,
13,
29937,
529,
26862,
6227,
29958,
13,
13,
5215,
14051,
262,
29918,
3601,
29889,
655,
29888,
29889,
2527,
10817,
408,
21556,
13,
5215,
1... |
graph_search/week2/assignment_dijkstra_shortest_paths.py | liaoaoyuan97/standford_algorithms_specialization | 0 | 5595 | import heapq
import time
from os import path
from math import floor
class Heap:
    """Indexed binary min-heap of (vertex_id, key) nodes.

    Keeps a vertex_id -> array-slot map so the key of any vertex can be
    read or updated in O(log n) (decrease-key support for Dijkstra).
    """

    def __init__(self):
        self.size = 0          # number of live nodes
        self.array = []        # heap-ordered (vertex_id, key) tuples
        self.v2index_map = {}  # vertex_id -> slot in self.array

    def __get_parent_index(self, idx):
        # Integer floor division matches floor((idx - 1) / 2) for idx >= 0.
        return (idx - 1) // 2

    def __get_left_child_index(self, idx):
        return 2 * idx + 1

    def __get_right_child_index(self, idx):
        return 2 * idx + 2

    def __swap_value(self, i, j):
        # Exchange slots i and j, keeping the index map consistent.
        node_i = self.array[i]
        self.v2index_map[node_i[0]] = j
        self.v2index_map[self.array[j][0]] = i
        self.array[i] = self.array[j]
        self.array[j] = node_i

    def __bubble_up(self, idx):
        # Move the node at idx toward the root until its parent is not larger.
        parent_idx = self.__get_parent_index(idx)
        while parent_idx >= 0 and self.array[parent_idx][1] > self.array[idx][1]:
            self.__swap_value(parent_idx, idx)
            idx = parent_idx
            parent_idx = self.__get_parent_index(idx)

    def __bubble_down(self, idx):
        # Move the node at idx toward the leaves, always swapping with the
        # smaller child, until the heap invariant holds.
        while True:
            left_idx = self.__get_left_child_index(idx)
            right_idx = self.__get_right_child_index(idx)
            if left_idx >= self.size and right_idx >= self.size:
                break
            child_idx = left_idx
            if left_idx >= self.size or (right_idx < self.size and
                                         self.array[right_idx][1] < self.array[left_idx][1]):
                child_idx = right_idx
            if self.array[idx][1] < self.array[child_idx][1]:
                break
            self.__swap_value(idx, child_idx)
            idx = child_idx

    def get_vertex_key(self, v_id):
        """Return the key currently stored for vertex `v_id`."""
        return self.array[self.v2index_map[v_id]][1]

    def pop(self):
        """Remove and return the (vertex_id, key) node with the minimum key."""
        if self.size < 1:
            raise IndexError
        root = self.array[0]
        self.size = self.size - 1
        # Move the last node to the root slot, shrink, then restore order.
        self.__swap_value(0, self.size)
        self.array.pop()
        if self.size > 1:
            self.__bubble_down(0)
        del self.v2index_map[root[0]]
        return root

    def insert(self, node):
        """Add a (vertex_id, key) node to the heap."""
        self.array.append(node)
        self.v2index_map[node[0]] = self.size
        self.size = self.size + 1
        if self.size > 1:
            self.__bubble_up(self.size - 1)

    def modify_key(self, v_id, update_val):
        """Set vertex `v_id`'s key to `update_val` and re-heapify."""
        idx = self.v2index_map[v_id]
        self.array[idx] = (v_id, update_val)
        parent_idx = self.__get_parent_index(idx)
        if parent_idx >= 0 and self.array[idx][1] < self.array[parent_idx][1]:
            self.__bubble_up(idx)
        else:
            self.__bubble_down(idx)
def read_graph(filename):
    """Parse an adjacency-list file into {vertex: [(neighbor, weight), ...]}.

    Each line holds a source vertex followed by tab-separated
    "neighbor,weight" pairs.
    """
    adjacency = dict()
    with open(path.join('.', filename), 'r') as handle:
        for line in handle.readlines():
            fields = line.strip('\t\n').split('\t')
            source = int(fields[0])
            pairs = [field.split(',') for field in fields[1:]]
            adjacency[source] = [(int(p[0]), int(p[1])) for p in pairs]
    return adjacency
def get_shortest_paths_heapq(graph):
    """Dijkstra from vertex 1 using the stdlib heapq with lazy deletion.

    Returns (distances, settled) where `settled` lists vertices in the
    order their shortest distance was finalized.
    """
    INF = 1000000
    distances = {vertex: INF for vertex in graph}
    distances[1] = 0
    settled = []
    frontier = [(0, 1)]  # (distance-so-far, vertex); source is vertex 1
    heapq.heapify(frontier)
    while frontier:
        dist_u, u = heapq.heappop(frontier)
        # Skip stale entries: a shorter path to u was already recorded.
        if dist_u > distances[u]:
            continue
        settled.append(u)
        for v, weight in graph[u]:
            candidate = dist_u + weight
            if candidate < distances[v]:
                distances[v] = candidate
                heapq.heappush(frontier, (candidate, v))
    return distances, settled
def get_shortest_paths_self_defined_heap(graph):
    """Dijkstra from vertex 1 using the indexed Heap with decrease-key.

    Returns {vertex: shortest distance from vertex 1}.
    """
    INF = 1000000
    pending = Heap()
    pending.insert((1, 0))  # (vertex_id, dj_score); source first
    for vertex in graph:
        if vertex != 1:
            pending.insert((vertex, INF))
    shortest_paths = dict()
    n_v = len(graph)
    while len(shortest_paths) < n_v:
        # Invariant: every vertex is either settled or still in the heap.
        assert len(shortest_paths) + pending.size == n_v
        u, dist_u = pending.pop()
        shortest_paths[u] = dist_u
        for v, weight in graph[u]:
            candidate = dist_u + weight
            if v not in shortest_paths and candidate < pending.get_vertex_key(v):
                pending.modify_key(v, candidate)
    return shortest_paths
if __name__ == "__main__":
    # test case 1, output: {1: 0, 2: 1, 3: 2, 4: 2, 5: 3, 6: 4}
    # graph = {
    #     1: [(6, 7), (5, 3), (2, 1), (4, 2), (3, 3)],
    #     2: [(1, 1), (3, 1), (4, 1), (6, 6)],
    #     3: [(1, 3), (2, 1), (6, 2)],
    #     4: [(2, 1), (1, 2), (6, 5)],
    #     5: [(1, 3), (6, 3)],
    #     6: [(1, 7), (3, 2), (2, 6), (4, 5), (5, 3)]
    # }
    graph = read_graph("Dijkstra.txt")

    # Sanity check: the edge list is symmetric (undirected graph).
    dedup_edges = set()
    for k, _ in graph.items():
        for v in _:
            dedup_edges.add((k, v[0], v[1]))
            dedup_edges.add((v[0], k, v[1]))
    assert len(dedup_edges) == sum([len(e) for e in graph.values()])

    # Vertices whose shortest distances are reported (assignment answer).
    e = [7, 37, 59, 82, 99, 115, 133, 165, 188, 197]

    start_t = time.time()
    min_distances, X = get_shortest_paths_heapq(graph)
    print(time.time() - start_t)
    print(",".join([str(int(min_distances[i])) for i in e]))

    start_t = time.time()
    # Bug fix: this function takes only the graph; the stray second
    # argument (X) raised a TypeError at runtime.
    min_distances = get_shortest_paths_self_defined_heap(graph)
    print(time.time() - start_t)
    print(",".join([str(int(min_distances[i])) for i in e]))
| [
1,
1053,
16947,
29939,
13,
5215,
931,
13,
3166,
2897,
1053,
2224,
13,
3166,
5844,
1053,
11904,
13,
13,
13,
1990,
940,
481,
29901,
13,
1678,
822,
4770,
2344,
12035,
1311,
1125,
13,
4706,
1583,
29889,
2311,
353,
29871,
29900,
13,
4706,
... |
examples/ex_bit_flip_code.py | jasonelhaderi/pypsqueak | 0 | 110933 | import context # Remove this import if running with pip installed version.
from pypsqueak.api import qReg, qOp
from pypsqueak.gates import X, Z, I, ROT_Y, CNOT
from pypsqueak.noise import b_flip_map
import numpy as np
import sys
# Parse optional CLI arguments: [n_trials] [bit-flip probability].
n_trials = 2000
if len(sys.argv) > 1 and int(sys.argv[1]) > 0:
    n_trials = int(sys.argv[1])

prob = 0.1
if len(sys.argv) > 2 and 0 <= float(sys.argv[2]) <= 1:
    prob = float(sys.argv[2])

# Success = no flips, or exactly one flip (which the code corrects).
theory_success = (1 - prob)**3 + 3 * prob * (1 - prob)**2
theory_failure = 1 - theory_success

successes = 0
failures = 0

noisy_channel = qOp(np.eye(2), kraus_ops=b_flip_map(prob))

# Check that we are getting the correct statistics out of our noisy channel.
print("Initialized noisy channel with {:.1f}% chance of bit flip.".format(100*prob))
print("Probing channel with single qubit {} times...".format(n_trials))
flip_amount = 0
for _ in range(n_trials):
    probe = qReg()
    noisy_channel.on(probe)
    if not np.array_equal([1, 0], probe.dump_state()):
        flip_amount += 1
flip_percent = 100 * flip_amount / n_trials
print("Bit flip occured ({:.1f} +/- {:.1f})% of the time.\n".format(
    flip_percent, 0.5 * flip_percent / np.sqrt(n_trials)))

print("With bit flip probability of {:.1f}%:".format(100*prob))
print("Theoretical transmission success rate: {:.1f}%".format(100*theory_success))
print("Theoretical transmission failure rate: {:.1f}%\n".format(100*theory_failure))

# Now send an encoded state through the noisy channel n_trials times.
# Uncomment peek() calls to inspect the register mid-run (unphysical!).
print("Running {} trials of sending an encoded state...".format(n_trials))
for _ in range(n_trials):
    # Prepare the input state |psi>.
    encoded = qReg(1)
    ROT_Y(0.2).on(encoded)

    # Encode against a single bit flip with two CNOTs (repetition code).
    CNOT.on(encoded, 1, 0)
    CNOT.on(encoded, 2, 0)
    init_state = encoded.dump_state()

    # Send every physical qubit through the noisy channel.
    for qubit in range(len(encoded)):
        noisy_channel.on(encoded, qubit)

    # Measure the two parity observables to diagnose the error syndrome.
    Z_21 = Z.kron(Z, I)
    Z_10 = I.kron(Z, Z)
    product_21 = encoded.measure_observable(Z_21)
    product_10 = encoded.measure_observable(Z_10)

    # Apply the correction dictated by the syndrome.
    if product_10 == product_21:
        if product_10 != 1:
            # Middle qubit flipped.
            X.on(encoded, 1)
    elif product_21 == -1:
        # Qubit 2 flipped.
        X.on(encoded, 2)
    else:
        # Qubit 0 flipped.
        X.on(encoded, 0)

    # Did recovery reproduce the encoded input state?
    if np.allclose(init_state, encoded.dump_state()):
        successes += 1
    else:
        failures += 1

print("Successful {:.2f}% of the time".format(100*successes/n_trials))
print("Unsuccessful {:.2f}% of the time".format(100*failures/n_trials))
| [
1,
1053,
3030,
29871,
396,
15154,
445,
1053,
565,
2734,
411,
8450,
5130,
1873,
29889,
13,
13,
3166,
11451,
567,
802,
557,
29889,
2754,
1053,
3855,
4597,
29892,
3855,
11746,
13,
3166,
11451,
567,
802,
557,
29889,
29887,
1078,
1053,
1060,... |
devices/parser/serializers.py | City-of-Helsinki/hel-data-pipe | 1 | 19250 | <reponame>City-of-Helsinki/hel-data-pipe
from rest_framework import serializers
from .models import Device, SensorType
class SensorTypeSerializer(serializers.ModelSerializer):
    """Plain ModelSerializer exposing every SensorType field."""

    class Meta:
        model = SensorType
        fields = "__all__"
class DeviceSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked serializer exposing every Device field.

    Related objects are represented by hyperlinks rather than primary keys,
    per DRF's HyperlinkedModelSerializer behavior.
    """

    class Meta:
        model = Device
        fields = "__all__"
| [
1,
529,
276,
1112,
420,
29958,
16885,
29899,
974,
29899,
29950,
1379,
682,
29875,
29914,
3952,
29899,
1272,
29899,
17760,
13,
3166,
1791,
29918,
4468,
1053,
7797,
19427,
13,
13,
3166,
869,
9794,
1053,
21830,
29892,
317,
6073,
1542,
13,
... |
tests/unit/test_client.py | Omegaice/smartcontainers | 6 | 165916 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""Tests for Smart Containers Docker API Client.
Testing for Smart Containers Client.
This module extends the docker-py package to provide the ability to add
metadata to docker containers. It is meant to be a drop-in replacement for the
docker-py package Client module. Existing methods that change the state of a
container are implemented to also write the provenance associated with that
state change.
"""
import tarfile
import time
import os
def test_simple_tar(createClient):
    """Tarfile creation.

    Create tarfile from sample file and assert that resultant file
    is a tarfile.
    """
    # Create a test file to be turned into a tar
    with open('tempprov.txt', 'a') as provfile:
        provfile.write('This is the data for the tar file test.')
    try:
        # Call the simple tar function and test the result
        thisfile = createClient.simple_tar('tempprov.txt')
        assert tarfile.is_tarfile(thisfile.name)
    finally:
        # Bug fix: the fixture file was never removed, so repeated runs
        # kept appending to a stale tempprov.txt in the working directory.
        os.remove('tempprov.txt')
def test_fileCopyIn(createClient, pull_docker_image):
    """File Copy into container from image.

    Create a new test container and copy tarfile into container.
    """
    # Start a throwaway container running an interactive shell.
    newContainer = createClient.create_container(image=pull_docker_image,
                                                 command="/bin/sh", tty=True)
    ContainerID = str(newContainer['Id'])
    createClient.start(ContainerID)
    # Local provenance fixture file to be copied into the container.
    with open('SCProv.jsonld', 'a') as provfile:
        provfile.write('This is the data for the tar file test.')
    createClient.fileCopyIn(ContainerID, 'SCProv.jsonld', '/')
    # hasProv presumably verifies the file is present under the container's
    # /SmartContainer directory — confirm against the client implementation.
    assert createClient.hasProv(ContainerID, 'SCProv.jsonld', '/SmartContainer')
    # Give the docker daemon a moment before tearing the container down.
    time.sleep(1)
    createClient.stop(ContainerID)
    createClient.remove_container(ContainerID)
    os.remove('SCProv.jsonld')
def test_fileCopyOut(createClient, pull_docker_image):
    """File Copy out of container.

    Create a new test container and copy directory out of container
    as a tarfile.
    """
    # Throwaway interactive container to copy files in and out of.
    newContainer = createClient.create_container(image=pull_docker_image,
                                                 command="/bin/sh", tty=True)
    ContainerID = str(newContainer['Id'])
    createClient.start(ContainerID)
    # Local fixture file to round-trip through the container.
    with open('SCProv.jsonld', 'a') as provfile:
        provfile.write('This is the data for the tar file test.')
    # Copy in, then copy back out of /SmartContainer/ and check it landed
    # on the local filesystem.
    createClient.fileCopyIn(ContainerID, 'SCProv.jsonld', '/')
    createClient.fileCopyOut(ContainerID, 'SCProv.jsonld', '/SmartContainer/')
    assert os.path.isfile('SCProv.jsonld')
    # Give the docker daemon a moment before tearing the container down.
    time.sleep(1)
    createClient.stop(ContainerID)
    createClient.remove_container(ContainerID)
    os.remove('SCProv.jsonld')
def test_put_label_image(createClient, pull_docker_image):
    """Add label to docker image.

    Add a label to a test image file and assert the label exists.
    """
    # smartcontainer label payload attached to the new image.
    myLabel = {'smartcontainer': '{"author":"<NAME>"}'}
    createClient.put_label_image(image=pull_docker_image,
                                 repository="phusion/baseimage",
                                 tag="tester", label=myLabel)
    # The new image created should be image[0]'s id
    image_list = createClient.images()
    image_id = image_list[0]['Id']
    myInspect = createClient.inspect_image(image_id)
    # NOTE(review): this asserts on 'Szakonyi' while the label above holds
    # the anonymized placeholder '<NAME>' — these must agree for the test
    # to pass; restore the original author string.
    assert 'Szakonyi' in str(myInspect)
    createClient.remove_image(image_id)
def test_infect_image(createClient, pull_docker_image):
    """Create new Smart Container from docker image ID.

    First create a new smart container from fixture image. Next make
    sure that if the smart container already exists we don't overwrite the
    existing smart container.
    """
    imageID = str(createClient.inspect_image(pull_docker_image)['Id'])
    imageID = imageID.replace('sha256:', '')
    sc_image = createClient.infect_image(image=imageID)
    # Infecting an already-infected image must be a no-op returning None.
    existing_sc_image = createClient.infect_image(image=sc_image)
    # Idiom fix: compare with None by identity (PEP 8), not `== None`.
    assert existing_sc_image is None
    # Cleanup image after ourselves
    createClient.remove_image(sc_image)
def test_infect_container(createClient, pull_docker_image):
    """TODO: Create new Smartcontainer from container ID."""
    # Placeholder: not implemented yet.
    pass
def test_get_label_image(createClient, pull_docker_image):
    """Get JSON label from imageID."""
    imageID = str(createClient.inspect_image(pull_docker_image)['Id'])
    imageID = imageID.replace('sha256:', '')
    # Infect the fixture image, then read its label back.
    sc_image = createClient.infect_image(image=imageID)
    json_str = createClient.get_label_image(imageID=sc_image)
    # An infected image must carry the 'smartcontainer' label.
    assert 'smartcontainer' in str(json_str)
    # print json_str
    createClient.remove_image(sc_image)
| [
1,
529,
12443,
29918,
303,
1503,
29958,
29896,
29899,
29896,
29900,
13,
29937,
448,
29930,
29899,
14137,
29901,
23616,
29899,
29947,
448,
29930,
29899,
13,
15945,
29908,
24376,
363,
4116,
442,
2866,
475,
414,
20868,
3450,
12477,
29889,
13,
... |
neo/io/plexonio.py | michaelfsp/python-neo | 1 | 52859 | <filename>neo/io/plexonio.py
# encoding: utf-8
"""
Class for reading data from Plexion acquisition system (.plx)
Compatible with versions 100 to 106.
Other versions have not been tested.
This IO is developed thanks to the header file downloadable from:
http://www.plexon.com/downloads.html
Depend on:
Supported : Read
Author: sgarcia
"""
from .baseio import BaseIO
from ..core import *
from .tools import create_many_to_one_relationship, iteritems
import numpy as np
import quantities as pq
import struct
import datetime
import os
class PlexonIO(BaseIO):
"""
Class for reading plx file.
Usage:
>>> from neo import io
>>> r = io.PlexonIO(filename='File_plexon_1.plx')
>>> seg = r.read_segment(lazy=False, cascade=True)
>>> print seg.analogsignals
[]
>>> print seg.spiketrains # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[<SpikeTrain(array([ 2.75000000e-02, 5.68250000e-02, 8.52500000e-02, ...,
...
>>> print seg.eventarrays
[]
"""
# Capability flags consumed by the Neo BaseIO framework.
is_readable = True
is_writable = False
supported_objects = [Segment , AnalogSignal, SpikeTrain, EventArray, EpochArray]
readable_objects = [ Segment]
writeable_objects = []
has_header = False
is_streameable = False
# This is for GUI stuf : a definition for parameters when reading.
read_params = {
Segment : [
('load_spike_waveform' , { 'value' : False } ) ,
]
}
write_params = None
name = 'Plexon'
extensions = [ 'plx' ]
mode = 'file'
def __init__(self , filename = None) :
"""
This class read a plx file.
Arguments:
filename : the filename
"""
BaseIO.__init__(self)
self.filename = filename
def read_segment(self,
lazy = False,
cascade = True,
load_spike_waveform = False,
):
"""
Read the whole .plx file into a single Segment.

lazy : if True, no data arrays are loaded; only `lazy_shape` is set on
the resulting AnalogSignal / SpikeTrain / EventArray objects.
cascade : if False, return the bare Segment with metadata only.
load_spike_waveform : if True, also load spike waveforms.
"""
fid = open(self.filename, 'rb')
globalHeader = HeaderReader(fid , GlobalHeader ).read_f(offset = 0)
# metadatas
seg = Segment()
seg.rec_datetime = datetime.datetime( globalHeader['Year'] , globalHeader['Month'] , globalHeader['Day'] ,
globalHeader['Hour'] , globalHeader['Minute'] , globalHeader['Second'] )
seg.file_origin = os.path.basename(self.filename)
seg.annotate(plexon_version = globalHeader['Version'])
if not cascade:
return seg
## Step 1 : read headers
# dsp channels heade
dspChannelHeaders = { }
maxunit=0
maxchan = 0
for i in xrange(globalHeader['NumDSPChannels']):
# channel is 1 based
channelHeader = HeaderReader(fid , ChannelHeader ).read_f(offset = None)
channelHeader['Template'] = np.array(channelHeader['Template']).reshape((5,64))
channelHeader['Boxes'] = np.array(channelHeader['Boxes']).reshape((5,2,4))
dspChannelHeaders[channelHeader['Channel']]=channelHeader
maxunit = max(channelHeader['NUnits'],maxunit)
maxchan = max(channelHeader['Channel'],maxchan)
# event channel header
eventHeaders = { }
for i in xrange(globalHeader['NumEventChannels']):
eventHeader = HeaderReader(fid , EventHeader ).read_f(offset = None)
eventHeaders[eventHeader['Channel']] = eventHeader
# slow channel header
slowChannelHeaders = { }
for i in xrange(globalHeader['NumSlowChannels']):
slowChannelHeader = HeaderReader(fid , SlowChannelHeader ).read_f(offset = None)
slowChannelHeaders[slowChannelHeader['Channel']] = slowChannelHeader
## Step 2 : prepare allocating
# for allocating continuous signal
ncontinuoussamples = np.zeros(len(slowChannelHeaders))
sampleposition = np.zeros(len(slowChannelHeaders))
anaSigs = { }
# for allocating spiketimes and waveform
spiketrains = { }
nspikecounts = np.zeros((maxchan+1, maxunit+1) ,dtype='i')
for i,channelHeader in iteritems(dspChannelHeaders):
spiketrains[i] = { }
# for allocating EventArray
eventarrays = { }
neventsperchannel = { }
#maxstrsizeperchannel = { }
for chan, h in iteritems(eventHeaders):
neventsperchannel[chan] = 0
#maxstrsizeperchannel[chan] = 0
## Step 3 : a first loop for counting size
start = fid.tell()
while fid.tell() !=-1 :
# read block header
dataBlockHeader = HeaderReader(fid , DataBlockHeader ).read_f(offset = None)
if dataBlockHeader is None : break
chan = dataBlockHeader['Channel']
unit = dataBlockHeader['Unit']
n1,n2 = dataBlockHeader['NumberOfWaveforms'] , dataBlockHeader['NumberOfWordsInWaveform']
if dataBlockHeader['Type'] == 1:
#spike
if unit not in spiketrains[chan]:
sptr = SpikeTrain([ ], units='s', t_stop=0.0)
sptr.annotate(unit_name = dspChannelHeaders[chan]['Name'])
# NOTE(review): 'i' here is the leftover loop variable from the
# header loops above, not 'chan' -- confirm this is intended.
sptr.annotate(channel_index = i)
spiketrains[chan][unit] = sptr
spiketrains[chan][unit].sizeOfWaveform = n1,n2
nspikecounts[chan,unit] +=1
fid.seek(n1*n2*2,1)
elif dataBlockHeader['Type'] ==4:
#event
neventsperchannel[chan] += 1
if chan not in eventarrays:
ea = EventArray()
ea.annotate(channel_name= eventHeaders[chan]['Name'])
ea.annotate(channel_index = chan)
eventarrays[chan] = ea
elif dataBlockHeader['Type'] == 5:
#continuous signal
fid.seek(n2*2, 1)
if n2> 0:
ncontinuoussamples[chan] += n2
if chan not in anaSigs:
anasig = AnalogSignal(
[ ],
units = 'V',
sampling_rate = float(slowChannelHeaders[chan]['ADFreq'])*pq.Hz,
t_start = 0.*pq.s,
)
anasig.annotate(channel_index = slowChannelHeaders[chan]['Channel'])
anasig.annotate(channel_name = slowChannelHeaders[chan]['Name'])
anaSigs[chan] = anasig
if lazy:
# Lazy mode: expose only the counted sizes, load no data.
for chan, anaSig in iteritems(anaSigs):
anaSigs[chan].lazy_shape = ncontinuoussamples[chan]
for chan, sptrs in iteritems(spiketrains):
for unit, sptr in iteritems(sptrs):
spiketrains[chan][unit].lazy_shape = nspikecounts[chan][unit]
for chan, ea in iteritems(eventarrays):
ea.lazy_shape = neventsperchannel[chan]
else:
## Step 4: allocating memory if not lazy
# continuous signal
for chan, anaSig in iteritems(anaSigs):
anaSigs[chan] = anaSig.duplicate_with_new_array(np.zeros((ncontinuoussamples[chan]) , dtype = 'f4')*pq.V, )
# allocating mem for SpikeTrain
for chan, sptrs in iteritems(spiketrains):
for unit, sptr in iteritems(sptrs):
new = SpikeTrain(np.zeros((nspikecounts[chan][unit]), dtype='f')*pq.s, t_stop=1e99) # use an enormous value for t_stop for now, put in correct value later
new.annotations.update(sptr.annotations)
if load_spike_waveform:
n1, n2 = spiketrains[chan][unit].sizeOfWaveform
# NOTE(review): the shape tuple is multiplied by pq.V before
# np.zeros -- this looks wrong (compare the dtype/unit handling
# of the other allocations above); confirm.
new.waveforms = np.zeros( (nspikecounts[chan][unit], n1, n2 )*pq.V , dtype = 'f' ) * pq.V
spiketrains[chan][unit] = new
# reset counts: they are reused as write cursors in Step 5
nspikecounts[:] = 0
# event
eventpositions = { }
for chan, ea in iteritems(eventarrays):
ea.times = np.zeros( neventsperchannel[chan] )*pq.s
#ea.labels = zeros( neventsperchannel[chan] , dtype = 'S'+str(neventsperchannel[chan]) )
eventpositions[chan]=0
if not lazy:
## Step 5 : a second loop for reading if not lazy
fid.seek(start)
while fid.tell() !=-1 :
dataBlockHeader = HeaderReader(fid , DataBlockHeader ).read_f(offset = None)
if dataBlockHeader is None : break
chan = dataBlockHeader['Channel']
n1,n2 = dataBlockHeader['NumberOfWaveforms'] , dataBlockHeader['NumberOfWordsInWaveform']
# timestamps are 40-bit: upper byte recombined with the 32-bit stamp
time = dataBlockHeader['UpperByteOf5ByteTimestamp']*2.**32 + dataBlockHeader['TimeStamp']
time/= globalHeader['ADFrequency']
if n2 <0: break
if dataBlockHeader['Type'] == 1:
#spike
unit = dataBlockHeader['Unit']
sptr = spiketrains[chan][unit]
pos = nspikecounts[chan,unit]
sptr[pos] = time * pq.s
if load_spike_waveform and n1*n2 != 0 :
# NOTE(review): 'fromstring' is missing the 'np.' prefix
# (compare np.fromstring in the Type 5 branch below) --
# likely a NameError when load_spike_waveform is True; confirm.
waveform = fromstring( fid.read(n1*n2*2) , dtype = 'i2').reshape(n1,n2).astype('f')
#range
if globalHeader['Version'] <103:
waveform = waveform*3000./(2048*dspChannelHeaders[chan]['Gain']*1000.)
elif globalHeader['Version'] >=103 and globalHeader['Version'] <105:
waveform = waveform*globalHeader['SpikeMaxMagnitudeMV']/(.5*2.**(globalHeader['BitsPerSpikeSample'])*1000.)
elif globalHeader['Version'] >105:
waveform = waveform*globalHeader['SpikeMaxMagnitudeMV']/(.5*2.**(globalHeader['BitsPerSpikeSample'])*globalHeader['SpikePreAmpGain'])
# NOTE(review): writes to '_waveforms' but Step 4 allocated
# 'waveforms' -- confirm which attribute is intended.
sptr._waveforms[pos,:,:] = waveform
else:
fid.seek(n1*n2*2,1)
nspikecounts[chan,unit] +=1
elif dataBlockHeader['Type'] == 4:
# event
pos = eventpositions[chan]
eventarrays[chan].times[pos] = time * pq.s
eventpositions[chan]+= 1
elif dataBlockHeader['Type'] == 5:
#signal
data = np.fromstring( fid.read(n2*2) , dtype = 'i2').astype('f4')
#range
if globalHeader['Version'] ==100 or globalHeader['Version'] ==101 :
data = data*5000./(2048*slowChannelHeaders[chan]['Gain']*1000.)
elif globalHeader['Version'] ==102 :
data = data*5000./(2048*slowChannelHeaders[chan]['Gain']*slowChannelHeaders[chan]['PreampGain'])
elif globalHeader['Version'] >= 103:
data = data*globalHeader['SlowMaxMagnitudeMV']/(.5*(2**globalHeader['BitsPerSpikeSample'])*\
slowChannelHeaders[chan]['Gain']*slowChannelHeaders[chan]['PreampGain'])
anaSigs[chan][sampleposition[chan] : sampleposition[chan]+data.size] = data * pq.V
sampleposition[chan] += data.size
# NOTE(review): this check happens AFTER the increment above, so
# t_start is only assigned when data.size == 0 -- it looks like it
# was meant to run before the increment; confirm.
if sampleposition[chan] ==0:
anaSigs[chan].t_start = time* pq.s
#TODO if lazy
# add AnalogSignal to sgement
for k,anaSig in iteritems(anaSigs) :
if anaSig is not None:
seg.analogsignals.append(anaSig)
# add SpikeTrain to sgement
for chan, sptrs in iteritems(spiketrains):
for unit, sptr in iteritems(sptrs):
if len(sptr) > 0:
sptr.t_stop = sptr.max() # can probably get a better value for this, from the associated AnalogSignal
seg.spiketrains.append(sptr)
# add eventarray to segment
for chan,ea in iteritems(eventarrays):
seg.eventarrays.append(ea)
create_many_to_one_relationship(seg)
return seg
# Field tables for the .plx binary headers: (field name, struct format
# string). Consumed in order by HeaderReader.read_f below. Layouts follow
# the Plexon header file (versions 100-106); version-specific fields are
# marked inline.
GlobalHeader = [
('MagicNumber' , 'I'),
('Version','i'),
('Comment','128s'),
('ADFrequency','i'),
('NumDSPChannels','i'),
('NumEventChannels','i'),
('NumSlowChannels','i'),
('NumPointsWave','i'),
('NumPointsPreThr','i'),
('Year','i'),
('Month','i'),
('Day','i'),
('Hour','i'),
('Minute','i'),
('Second','i'),
('FastRead','i'),
('WaveformFreq','i'),
('LastTimestamp','d'),
#version >103
('Trodalness' , 'b'),
('DataTrodalness' , 'b'),
('BitsPerSpikeSample' , 'b'),
('BitsPerSlowSample' , 'b'),
('SpikeMaxMagnitudeMV' , 'H'),
('SlowMaxMagnitudeMV' , 'H'),
#version 105
('SpikePreAmpGain' , 'H'),
#version 106
('AcquiringSoftware','18s'),
('ProcessingSoftware','18s'),
('Padding','10s'),
# all version
('TSCounts','650i'),
('WFCounts','650i'),
('EVCounts','512i'),
]
# Per-DSP (spike) channel header.
ChannelHeader = [
('Name' , '32s'),
('SIGName','32s'),
('Channel','i'),
('WFRate','i'),
('SIG','i'),
('Ref','i'),
('Gain','i'),
('Filter','i'),
('Threshold','i'),
('Method','i'),
('NUnits','i'),
('Template','320h'),
('Fit','5i'),
('SortWidth','i'),
('Boxes','40h'),
('SortBeg','i'),
#version 105
('Comment','128s'),
#version 106
('SrcId','b'),
('reserved','b'),
('ChanId','H'),
('Padding','10i'),
]
# Per-event channel header.
EventHeader = [
('Name' , '32s'),
('Channel','i'),
#version 105
('Comment' , '128s'),
#version 106
('SrcId','b'),
('reserved','b'),
('ChanId','H'),
('Padding','32i'),
]
# Per-slow (continuous signal) channel header.
SlowChannelHeader = [
('Name' , '32s'),
('Channel','i'),
('ADFreq','i'),
('Gain','i'),
('Enabled','i'),
('PreampGain','i'),
#version 104
('SpikeChannel','i'),
#version 105
('Comment','128s'),
#version 106
('SrcId','b'),
('reserved','b'),
('ChanId','H'),
('Padding','27i'),
]
# Header preceding every data block in the file body.
DataBlockHeader = [
('Type','h'),
('UpperByteOf5ByteTimestamp','h'),
('TimeStamp','i'),
('Channel','h'),
('Unit','h'),
('NumberOfWaveforms','h'),
('NumberOfWordsInWaveform','h'),
]# 16 bytes
class HeaderReader():
    """Read a sequence of named binary fields from an open file.

    ``description`` is a list of ``(key, fmt)`` tuples where ``fmt`` is a
    ``struct`` format string; :meth:`read_f` unpacks the fields in order
    into a dict.
    """

    def __init__(self, fid, description):
        self.fid = fid                  # open binary file object
        self.description = description  # list of (key, struct format) pairs

    def read_f(self, offset=None):
        """Read every field of ``description``, optionally seeking first.

        Returns a dict mapping field name to the unpacked value, or None
        if the file ends before a whole field could be read (callers use
        this as the end-of-file signal).
        """
        if offset is not None:
            self.fid.seek(offset)
        d = {}
        for key, fmt in self.description:  # 'fmt', not 'format': avoid shadowing the builtin
            nbytes = struct.calcsize(fmt)  # compute once per field instead of twice
            buf = self.fid.read(nbytes)
            if len(buf) != nbytes:
                return None
            val = struct.unpack(fmt, buf)
            if len(val) == 1:
                val = val[0]
            else:
                val = list(val)
            if 's' in fmt:
                # fixed-width string fields are NUL padded: strip the padding
                val = val.replace('\x00', '')
            d[key] = val
        return d
| [
1,
529,
9507,
29958,
11496,
29914,
601,
29914,
10709,
14642,
29889,
2272,
13,
29937,
8025,
29901,
23616,
29899,
29947,
13,
15945,
29908,
13,
2385,
363,
5183,
848,
515,
349,
2506,
291,
1274,
23493,
1788,
14544,
572,
29916,
29897,
13,
13,
... |
beta_version.py | JeanneGasser/basic_cypting | 0 | 191086 | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 20 16:56:32 2018
@author: jeann
"""
# Third-party dependencies: nltk for tokenizing, unidecode for accent stripping.
from nltk import RegexpTokenizer
# Matches elided words (e.g. l'), plain words, or single non-word symbols.
toknizer = RegexpTokenizer(r'''\w'|\w+|[^\w\s]''')
from string import punctuation
import unidecode
#Class construction
class EncryptDecrypt:
    """Map letters to their 1-based alphabet positions and back.

    ``encrypt`` encodes each word as dash-separated numbers (a=1 ... z=26);
    ``decrypt`` reverses the operation.
    """

    def encrypt(self, text):
        """Return *text* with every word encoded as 'xx-xx-xx' numbers."""
        # Normalise: drop accents, lowercase, trim surrounding whitespace.
        cleaned = unidecode.unidecode(text.lower().strip())
        words = toknizer.tokenize(cleaned)
        encoded = []
        for word in words:
            if word in punctuation:
                continue
            # ord(letter) - 96 yields the 1-based position in the alphabet.
            encoded.append("-".join(str(ord(ch) - 96) for ch in word if ch.isalpha()))
        return " ".join(encoded)

    def decrypt(self, text):
        """Decode an 'xx-xx-xx yy-yy' encoded string back into words."""
        # chr(position + 96) maps an alphabet position back to its letter;
        # words are space separated, letters within a word dash separated.
        decoded_words = []
        for token in text.split(" "):
            decoded_words.append("".join(chr(int(part) + 96) for part in token.split("-")))
        return " ".join(decoded_words)
#User input and class output
print("Bienvenue, avec ce programme vous allez pouvoir chiffrer ou déchiffrer du texte. \n \
Chiffrement : lettres = position numérique dans l'alphabet")

textfile = input("Veuillez entrer votre texte ou un nom de fichier texte avec le chemin \n")
if ".txt" in textfile:
    # File mode: read the file, then write the (de)crypted result next to it.
    with open(textfile, "r") as infile:
        txt = infile.read()
    what_to_do = input("Voulez vous décrypter ou encrypter \n \n")
    # Normalise the command (accents, case, whitespace) once.
    choice = unidecode.unidecode(what_to_do.lower().strip())
    if choice == "encrypter":
        with open(textfile.split(".")[0] + "_crypted.txt", "w") as outfile:
            outfile.write(EncryptDecrypt().encrypt(txt))
        print("Fichier encrypté et enregistré")
    elif choice == "decrypter":
        with open(textfile.split(".")[0] + "_decrypted.txt", "w") as outfile:
            outfile.write(EncryptDecrypt().decrypt(txt))
        print("Fichier décrypté et enregistré")
    else:
        print("Veuillez entrer une commande valide: Encrypter ou Decrypter")
else:
    # Inline mode: the input itself is the text to process.
    what_to_do = input("Voulez vous décrypter ou encrypter \n \n")
    # Consistency fix: the 'encrypter' branch previously skipped the
    # unidecode normalisation applied everywhere else.
    choice = unidecode.unidecode(what_to_do.lower().strip())
    if choice == "encrypter":
        print(EncryptDecrypt().encrypt(textfile))
    elif choice == "decrypter":
        print(EncryptDecrypt().decrypt(textfile))
    else:
        print("Veuillez entrer une commande valide: Encrypter ou Decrypter")
| [
1,
396,
448,
29930,
29899,
14137,
29901,
23616,
29899,
29947,
448,
29930,
29899,
30004,
13,
15945,
19451,
13,
20399,
373,
323,
434,
2864,
29871,
29906,
29900,
29871,
29896,
29953,
29901,
29945,
29953,
29901,
29941,
29906,
29871,
29906,
29900,... |
tests/unit/test_openstack_group_service.py | venumurthy/ec2-driver | 0 | 163580 | <reponame>venumurthy/ec2-driver<gh_stars>0
import unittest
from mock import Mock
from novaclient.v1_1.security_groups import SecurityGroupManager
from nova.virt.ec2.openstack_group_service import OpenstackGroupService
class TestOpenstackGroupService(unittest.TestCase):
    """Unit tests for OpenstackGroupService security-group lookup."""

    def setUp(self):
        self.security_group_manager = Mock(spec=SecurityGroupManager)
        self.openstack_group_service = OpenstackGroupService(self.security_group_manager)

    def test_should_get_group_from_nova_security_group_manager(self):
        group = Mock()
        group.name = 'secGroup'
        self.security_group_manager.list.return_value = [group]
        self.assertEqual(self.openstack_group_service.get_group(group.name), group)

    def test_should_get_group_from_nova_security_group_manager_when_multiple_groups_present(self):
        first_group = Mock()
        first_group.name = 'secGroup'
        second_group = Mock()
        second_group.name = 'otherGroup'
        self.security_group_manager.list.return_value = [first_group, second_group]
        self.assertEqual(self.openstack_group_service.get_group(second_group.name), second_group)
1,
529,
276,
1112,
420,
29958,
854,
398,
332,
21155,
29914,
687,
29906,
29899,
9465,
29966,
12443,
29918,
303,
1503,
29958,
29900,
13,
5215,
443,
27958,
13,
13,
3166,
11187,
1053,
26297,
13,
3166,
2420,
562,
1593,
29889,
29894,
29896,
2... |
yuntu/dataset/base.py | CONABIO/yuntu | 0 | 68250 | <reponame>CONABIO/yuntu<filename>yuntu/dataset/base.py
from abc import abstractmethod, ABCMeta
import yuntu.dataset.methods as dsetMethods
class metaDataset(object):
# Abstract interface every dataset implementation must provide.
# NOTE(review): '__metaclass__' is Python-2 syntax; under Python 3 it is
# ignored and @abstractmethod is not enforced -- confirm the target version.
__metaclass__ = ABCMeta
@abstractmethod
def getType(self):
# Return a string identifying the dataset type.
pass
@abstractmethod
def getDefaultConfig(self):
# Return the default configuration dict for this dataset type.
pass
@abstractmethod
def setConfig(self):
# Apply a configuration to this dataset.
pass
class audioDataset(object):
# Dataset of audio examples backed by a collection; persisted on disk via
# the dsetMethods helpers (exists / load / build).
__metaclass__ = ABCMeta
def __init__(self,name,collection=None,dirPath="",config=None,metadata=None,client=None,overwrite=False):
# Identity and storage location of the dataset.
self.name = name
self.dirPath = dirPath
self.collection = collection
# presumably a dask/distributed client for multiThread mode -- TODO confirm
self.client = client
self.config = self.getDefaultConfig()
self.overwrite = overwrite
# Serialisable description persisted alongside the dataset.
self.info = {"name":self.name,"dirPath":dirPath,"collection":None,"creation":None,"modification":None,"type":"audioDataset","metadata":metadata}
self.graph = {}
doBuild = False
# Load an existing dataset unless 'overwrite' forces a rebuild.
# NOTE(review): the progress messages say "soundscape" although the class
# is audioDataset -- possibly copy/paste; confirm.
if dsetMethods.datasetExists(self):
if not self.overwrite:
print("Loading soundscape...")
if dsetMethods.datasetLoad(self):
self.setConfig(config)
else:
doBuild = True
else:
doBuild = True
if doBuild:
# Building requires a source collection to draw examples from.
if collection is None:
raise ValueError("Collection must be explicit in dataset creation (create a collection and pass as parameter)")
print("Building new soundscape...")
if dsetMethods.datasetBuild(self):
self.setConfig(config)
self.loadGraph()
def getType(self):
# Type tag stored in self.info and used by the persistence layer.
return "audioDataset"
def setConfig(self,config):
# Delegate config merging/validation to the shared helper.
return dsetMethods.datasetSetConfig(self,config)
def getDefaultConfig(self):
# Default configuration: global processing options, transformation
# fabrics, and the train/test/validation split proportions.
config = {
"globalParams" : {
"multiThread" : False,
"collectionFilter" : None,
"groupingFields" : None,
"groupingTypes" : None,
"npartitions" : 20,
"annotationFabric" : "default"
},
"transformationParams" : {
"annotationFabric" : None,
"exampleFabric" : None
},
"splittingParams": {
"folds" : 5,
"splits" : {
"train" : 70,
"test" : 20,
"validation" : 10
}
},
}
return config
| [
1,
529,
276,
1112,
420,
29958,
6007,
2882,
5971,
29914,
29891,
4159,
29966,
9507,
29958,
29891,
4159,
29914,
24713,
29914,
3188,
29889,
2272,
13,
3166,
25638,
1053,
9846,
5696,
29892,
16417,
19346,
13,
5215,
343,
4159,
29889,
24713,
29889,
... |
src/OTLMOW/OTLModel/Classes/WVConsole.py | davidvlaminck/OTLClassPython | 2 | 186458 | <reponame>davidvlaminck/OTLClassPython
# coding=utf-8
from OTLMOW.OTLModel.Classes.AIMNaamObject import AIMNaamObject
from OTLMOW.GeometrieArtefact.PuntGeometrie import PuntGeometrie
# Generated with OTLClassCreator. To modify: extend, do not edit
class WVConsole(AIMNaamObject, PuntGeometrie):
"""A supporting structure for mounting public road lighting in places where
there is no room for lighting masts in the ground. Typically, in such cases,
the supporting structure with the lighting fixture is mounted at height on a
building or another structure next to the road."""
typeURI = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#WVConsole'
"""The URI of the object according to https://www.w3.org/2001/XMLSchema#anyURI."""
def __init__(self):
AIMNaamObject.__init__(self)
PuntGeometrie.__init__(self)
| [
1,
529,
276,
1112,
420,
29958,
29881,
16093,
29894,
5288,
262,
384,
29914,
2891,
29931,
2385,
11980,
13,
29937,
14137,
29922,
9420,
29899,
29947,
13,
3166,
438,
14632,
6720,
29956,
29889,
2891,
29931,
3195,
29889,
27403,
29889,
29909,
7833,... |
python/remap.py | rmu75/rover-342-retrofit | 0 | 82201 | from stdglue import *
| [
1,
515,
3659,
3820,
434,
1053,
334,
13,
2
] |
train-cats.py | adamklein/keras-yolo3 | 0 | 55050 | """
Derived from keras-yolo3 train.py (https://github.com/qqwweee/keras-yolo3),
with additions from https://github.com/AntonMu/TrainYourOwnYOLO.
"""
import os
import sys
import argparse
import pickle
import numpy as np
import keras.backend as K
from keras.layers import Input, Lambda
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from PIL import Image
from time import time
from yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss
from yolo3.utils import get_random_data
def get_curr_dir():
    """Return the absolute path of the directory containing this file."""
    here = os.path.abspath(__file__)
    return os.path.dirname(here)
def get_parent_dir(n=1):
    """Return the n-th parent directory of the directory containing
    this file (n=1 is the immediate parent).
    """
    path = get_curr_dir()
    for _ in range(n):
        path = os.path.dirname(path)
    return path
# --- global constants
# Location of the VoTT export (images + YOLO-format annotation file).
EXPORT_DIR = os.path.join(get_parent_dir(), 'for_yolo', 'vott', 'vott-export')
ANNOT_FILE = os.path.join(EXPORT_DIR, 'yolo_annotations.txt')
# Pretrained weights and anchor definitions shipped next to this script.
WEIGHTS_DIR = os.path.join(get_curr_dir(), 'model_data')
YOLO_CLASSES = os.path.join(EXPORT_DIR, 'classes.names')
LOG_DIR = 'logs/000/'
ANCHORS_PATH = os.path.join(WEIGHTS_DIR, 'yolo_anchors.txt')
WEIGHTS_PATH = os.path.join(WEIGHTS_DIR, 'yolo_weights.h5')
VAL_SPLIT = 0.1 # 10% validation data
EPOCHS = 102 # number of epochs to train; 50% transfer, 50% fine-tuning
def _main():
    """Train YOLOv3 on the exported VoTT annotations in two stages.

    Stage 1 trains with most of the network frozen (transfer learning);
    stage 2 unfreezes all layers and fine-tunes at a lower learning rate.
    Checkpoints and loss histories are written under LOG_DIR.
    """
    class_names = get_classes(YOLO_CLASSES)
    num_classes = len(class_names)
    anchors = get_anchors(ANCHORS_PATH)

    input_shape = (416, 416)  # multiple of 32, height, width
    epoch1, epoch2 = EPOCHS // 2, EPOCHS // 2

    model = create_model(input_shape, anchors, num_classes,
                         freeze_body=2, weights_path=WEIGHTS_PATH)  # make sure you know what you freeze

    logging = TensorBoard(log_dir=LOG_DIR)
    checkpoint = ModelCheckpoint(LOG_DIR + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
                                 monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)

    with open(ANNOT_FILE) as f:
        lines = f.readlines()

    # Fixed seed so the train/validation split is reproducible between runs.
    np.random.seed(10101)
    np.random.shuffle(lines)
    num_val = int(len(lines) * VAL_SPLIT)
    num_train = len(lines) - num_val

    # Train with frozen layers first, to get a stable loss.
    # Adjust num epochs to your dataset. This step is enough to obtain a decent model.
    if True:
        model.compile(optimizer=Adam(lr=1e-3), loss={
            # use custom yolo_loss Lambda layer.
            'yolo_loss': lambda y_true, y_pred: y_pred})

        batch_size = 32
        print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
        history = model.fit_generator(
            data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
            steps_per_epoch=max(1, num_train // batch_size),
            validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
            validation_steps=max(1, num_val // batch_size),
            epochs=epoch1,
            initial_epoch=0,
            callbacks=[logging, checkpoint])
        model.save_weights(os.path.join(LOG_DIR, 'trained_weights_stage_1.h5'))

        # BUG FIX: the loss logs were written under 'log_dir_time', a name
        # that is never defined in this file (NameError at runtime); write
        # them into LOG_DIR alongside the weights instead.
        step1_train_loss = history.history['loss']
        with open(os.path.join(LOG_DIR, 'step1_loss.npy'), 'w') as f:
            for item in step1_train_loss:
                f.write("%s\n" % item)

        step1_val_loss = np.array(history.history['val_loss'])
        with open(os.path.join(LOG_DIR, 'step1_val_loss.npy'), 'w') as f:
            for item in step1_val_loss:
                f.write("%s\n" % item)

    # Unfreeze and continue training, to fine-tune.
    # Train longer if the result is not good.
    if True:
        for i in range(len(model.layers)):
            model.layers[i].trainable = True
        model.compile(optimizer=Adam(lr=1e-4), loss={'yolo_loss': lambda y_true, y_pred: y_pred})  # recompile to apply the change
        print('Unfreeze all layers.')

        batch_size = 4  # note that more GPU memory is required after unfreezing the body
        print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
        history = model.fit_generator(
            data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
            steps_per_epoch=max(1, num_train // batch_size),
            validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
            validation_steps=max(1, num_val // batch_size),
            epochs=epoch1 + epoch2,
            initial_epoch=epoch1,
            callbacks=[logging, checkpoint, reduce_lr, early_stopping])
        model.save_weights(os.path.join(LOG_DIR, 'trained_weights_final.h5'))

        step2_train_loss = history.history['loss']
        with open(os.path.join(LOG_DIR, 'step2_loss.npy'), 'w') as f:
            for item in step2_train_loss:
                f.write("%s\n" % item)

        step2_val_loss = np.array(history.history['val_loss'])
        with open(os.path.join(LOG_DIR, 'step2_val_loss.npy'), 'w') as f:
            for item in step2_val_loss:
                f.write("%s\n" % item)
# --- HELPER FUNCS
def get_classes(classes_path):
    """Load class names, one per line, from *classes_path*."""
    with open(classes_path) as handle:
        return [line.strip() for line in handle.readlines()]
def get_anchors(anchors_path):
    """Load the YOLO anchor boxes from a file as an (N, 2) float array."""
    with open(anchors_path) as handle:
        first_line = handle.readline()
    values = [float(v) for v in first_line.split(',')]
    return np.array(values).reshape(-1, 2)
def create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
weights_path='keras_yolo3/model_data/yolo_weights.h5'):
'''create the training model

Builds the YOLOv3 graph plus a Lambda layer that computes yolo_loss, so
the model can be compiled with an identity loss on y_pred.
freeze_body=1 freezes the first 185 layers (the backbone);
freeze_body=2 freezes everything except the last 3 output layers.
'''
K.clear_session() # get a new session
image_input = Input(shape=(None, None, 3))
h, w = input_shape
num_anchors = len(anchors)
# One y_true input per detection scale, downsampled by 32/16/8 respectively.
y_true = [Input(shape=(h//{0:32, 1:16, 2:8}[l], w//{0:32, 1:16, 2:8}[l], \
num_anchors//3, num_classes+5)) for l in range(3)]
model_body = yolo_body(image_input, num_anchors//3, num_classes)
print('Create YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
if load_pretrained:
model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
print('Load weights {}.'.format(weights_path))
if freeze_body in [1, 2]:
# Freeze darknet53 body or freeze all but 3 output layers.
num = (185, len(model_body.layers)-3)[freeze_body-1]
for i in range(num): model_body.layers[i].trainable = False
print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
# The loss is computed inside the graph so training can use an identity loss.
model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})(
[*model_body.output, *y_true])
model = Model([model_body.input, *y_true], model_loss)
return model
return model
def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes):
'''data generator for fit_generator

Yields ([image_data, *y_true], dummy_targets) batches forever,
reshuffling the annotation lines at the start of every pass.
'''
n = len(annotation_lines)
i = 0
while True:
image_data = []
box_data = []
for b in range(batch_size):
if i==0:
# start of a new pass over the data: reshuffle in place
np.random.shuffle(annotation_lines)
image, box = get_random_data(annotation_lines[i], input_shape, random=True)
image_data.append(image)
box_data.append(box)
i = (i+1) % n
image_data = np.array(image_data)
box_data = np.array(box_data)
y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes)
# Keras needs a target array, but the real loss is computed inside the
# model's yolo_loss layer, so zeros are yielded as dummy targets.
yield [image_data, *y_true], np.zeros(batch_size)
def data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes):
    """Guarded wrapper around data_generator: None for empty/invalid input."""
    if not annotation_lines or batch_size <= 0:
        return None
    return data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes)
# ----
# Script entry point: start training when executed directly.
if __name__ == '__main__':
    _main()
| [
1,
9995,
13,
15383,
2347,
515,
13023,
294,
29899,
29891,
3543,
29941,
7945,
29889,
2272,
313,
991,
597,
3292,
29889,
510,
29914,
24349,
29893,
705,
3905,
29914,
3946,
294,
29899,
29891,
3543,
29941,
511,
13,
2541,
788,
2187,
515,
2045,
... |
src/tokenizers.py | tomfran/caselaw-temporal-analysis | 0 | 97271 | import os
import spacy
import json
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
class BatchTokenizer():
    """Lemmatising tokenizer that processes documents in batches with spaCy."""

    def __init__(self):
        # Medium English model: provides POS tags and lemmas.
        self.nlp = spacy.load("en_core_web_md")

    def tokenize(self, documents):
        """Return one list of lemmas per document, keeping content words only."""
        kept_pos = ('NOUN', 'PROPN', 'VERB', 'ADJ', 'ADV')
        results = []
        for doc in self.nlp.pipe(documents, batch_size=30):
            lemmas = [tok.lemma_ for tok in doc if tok.pos_ in kept_pos]
            results.append(lemmas)
        return results
1,
1053,
2897,
13,
5215,
805,
4135,
13,
5215,
4390,
13,
13,
359,
29889,
21813,
1839,
8969,
29918,
6271,
29925,
29918,
16173,
29918,
14480,
29918,
1307,
29963,
6670,
2033,
353,
525,
29941,
29915,
29871,
13,
308,
13,
1990,
350,
905,
6066,... |
crits/exploits/urls.py | dutrow/crits | 738 | 49629 | from django.conf.urls import url
from . import views
# URL routes for Exploit TLO CRUD operations and listings.
urlpatterns = [
url(r'^add/$', views.add_exploit, name='crits-exploits-views-add_exploit'),
url(r'^edit/cve/$', views.edit_exploit_cve, name='crits-exploits-views-edit_exploit_cve'),
url(r'^edit/name/(?P<id_>\S+)/$', views.edit_exploit_name, name='crits-exploits-views-edit_exploit_name'),
url(r'^details/(?P<id_>\S+)/$', views.exploit_detail, name='crits-exploits-views-exploit_detail'),
url(r'^remove/(?P<id_>\S+)/$', views.remove_exploit, name='crits-exploits-views-remove_exploit'),
url(r'^list/$', views.exploits_listing, name='crits-exploits-views-exploits_listing'),
# optional trailing option segment (e.g. 'jtlist'/'csv') for the listing view
url(r'^list/(?P<option>\S+)/$', views.exploits_listing, name='crits-exploits-views-exploits_listing'),
]
| [
1,
515,
9557,
29889,
5527,
29889,
26045,
1053,
3142,
13,
13,
3166,
869,
1053,
8386,
13,
13,
2271,
11037,
29879,
353,
518,
13,
1678,
3142,
29898,
29878,
29915,
29985,
1202,
13346,
742,
8386,
29889,
1202,
29918,
4548,
417,
277,
29892,
102... |
python_modules/dagster-graphql/dagster_graphql_tests/graphql/snapshots/snap_test_solids.py | zzztimbo/dagster | 0 | 197932 | # -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
snapshots = Snapshot()
snapshots['test_query_all_solids 1'] = {
'usedSolids': [
{
'__typename': 'UsedSolid',
'definition': {
'name': 'a_solid_with_multilayered_config'
},
'invocations': [
{
'pipeline': {
'name': 'more_complicated_nested_config'
},
'solidHandle': {
'handleID': 'a_solid_with_multilayered_config'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'a_solid_with_three_field_config'
},
'invocations': [
{
'pipeline': {
'name': 'more_complicated_config'
},
'solidHandle': {
'handleID': 'a_solid_with_three_field_config'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'add_four'
},
'invocations': [
{
'pipeline': {
'name': 'composites_pipeline'
},
'solidHandle': {
'handleID': 'add_four'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'add_one'
},
'invocations': [
{
'pipeline': {
'name': 'composites_pipeline'
},
'solidHandle': {
'handleID': 'add_four.adder_1.adder_1'
}
},
{
'pipeline': {
'name': 'composites_pipeline'
},
'solidHandle': {
'handleID': 'add_four.adder_2.adder_1'
}
},
{
'pipeline': {
'name': 'composites_pipeline'
},
'solidHandle': {
'handleID': 'add_four.adder_1.adder_2'
}
},
{
'pipeline': {
'name': 'composites_pipeline'
},
'solidHandle': {
'handleID': 'add_four.adder_2.adder_2'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'add_two'
},
'invocations': [
{
'pipeline': {
'name': 'composites_pipeline'
},
'solidHandle': {
'handleID': 'add_four.adder_1'
}
},
{
'pipeline': {
'name': 'composites_pipeline'
},
'solidHandle': {
'handleID': 'add_four.adder_2'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'apply_to_three'
},
'invocations': [
{
'pipeline': {
'name': 'multi_mode_with_resources'
},
'solidHandle': {
'handleID': 'apply_to_three'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'can_fail'
},
'invocations': [
{
'pipeline': {
'name': 'retry_multi_output_pipeline'
},
'solidHandle': {
'handleID': 'can_fail'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'df_expectations_solid'
},
'invocations': [
{
'pipeline': {
'name': 'csv_hello_world_with_expectations'
},
'solidHandle': {
'handleID': 'df_expectations_solid'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'div_four'
},
'invocations': [
{
'pipeline': {
'name': 'composites_pipeline'
},
'solidHandle': {
'handleID': 'div_four'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'div_two'
},
'invocations': [
{
'pipeline': {
'name': 'composites_pipeline'
},
'solidHandle': {
'handleID': 'div_four.div_1'
}
},
{
'pipeline': {
'name': 'composites_pipeline'
},
'solidHandle': {
'handleID': 'div_four.div_2'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'emit_failed_expectation'
},
'invocations': [
{
'pipeline': {
'name': 'pipeline_with_expectations'
},
'solidHandle': {
'handleID': 'emit_failed_expectation'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'emit_successful_expectation'
},
'invocations': [
{
'pipeline': {
'name': 'pipeline_with_expectations'
},
'solidHandle': {
'handleID': 'emit_successful_expectation'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'emit_successful_expectation_no_metadata'
},
'invocations': [
{
'pipeline': {
'name': 'pipeline_with_expectations'
},
'solidHandle': {
'handleID': 'emit_successful_expectation_no_metadata'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'fail'
},
'invocations': [
{
'pipeline': {
'name': 'eventually_successful'
},
'solidHandle': {
'handleID': 'fail'
}
},
{
'pipeline': {
'name': 'eventually_successful'
},
'solidHandle': {
'handleID': 'fail_2'
}
},
{
'pipeline': {
'name': 'eventually_successful'
},
'solidHandle': {
'handleID': 'fail_3'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'fail_subset'
},
'invocations': [
{
'pipeline': {
'name': 'pipeline_with_invalid_definition_error'
},
'solidHandle': {
'handleID': 'fail_subset'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'loop'
},
'invocations': [
{
'pipeline': {
'name': 'infinite_loop_pipeline'
},
'solidHandle': {
'handleID': 'loop'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'materialize'
},
'invocations': [
{
'pipeline': {
'name': 'materialization_pipeline'
},
'solidHandle': {
'handleID': 'materialize'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'multi'
},
'invocations': [
{
'pipeline': {
'name': 'retry_multi_output_pipeline'
},
'solidHandle': {
'handleID': 'multi'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'no_output'
},
'invocations': [
{
'pipeline': {
'name': 'retry_multi_output_pipeline'
},
'solidHandle': {
'handleID': 'child_multi_skip'
}
},
{
'pipeline': {
'name': 'retry_multi_output_pipeline'
},
'solidHandle': {
'handleID': 'child_skip'
}
},
{
'pipeline': {
'name': 'retry_multi_output_pipeline'
},
'solidHandle': {
'handleID': 'grandchild_fail'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'noop_solid'
},
'invocations': [
{
'pipeline': {
'name': 'noop_pipeline'
},
'solidHandle': {
'handleID': 'noop_solid'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'one'
},
'invocations': [
{
'pipeline': {
'name': 'pipeline_with_invalid_definition_error'
},
'solidHandle': {
'handleID': 'one'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'passthrough'
},
'invocations': [
{
'pipeline': {
'name': 'retry_multi_output_pipeline'
},
'solidHandle': {
'handleID': 'child_fail'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'reset'
},
'invocations': [
{
'pipeline': {
'name': 'eventually_successful'
},
'solidHandle': {
'handleID': 'reset'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'return_any'
},
'invocations': [
{
'pipeline': {
'name': 'scalar_output_pipeline'
},
'solidHandle': {
'handleID': 'return_any'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'return_bool'
},
'invocations': [
{
'pipeline': {
'name': 'scalar_output_pipeline'
},
'solidHandle': {
'handleID': 'return_bool'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'return_foo'
},
'invocations': [
{
'pipeline': {
'name': 'no_config_chain_pipeline'
},
'solidHandle': {
'handleID': 'return_foo'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'return_hello'
},
'invocations': [
{
'pipeline': {
'name': 'no_config_pipeline'
},
'solidHandle': {
'handleID': 'return_hello'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'return_hello_world'
},
'invocations': [
{
'pipeline': {
'name': 'no_config_chain_pipeline'
},
'solidHandle': {
'handleID': 'return_hello_world'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'return_int'
},
'invocations': [
{
'pipeline': {
'name': 'scalar_output_pipeline'
},
'solidHandle': {
'handleID': 'return_int'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'return_six'
},
'invocations': [
{
'pipeline': {
'name': 'multi_mode_with_loggers'
},
'solidHandle': {
'handleID': 'return_six'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'return_str'
},
'invocations': [
{
'pipeline': {
'name': 'scalar_output_pipeline'
},
'solidHandle': {
'handleID': 'return_str'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'simple_solid'
},
'invocations': [
{
'pipeline': {
'name': 'tagged_pipeline'
},
'solidHandle': {
'handleID': 'simple_solid'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'solid_with_list'
},
'invocations': [
{
'pipeline': {
'name': 'pipeline_with_list'
},
'solidHandle': {
'handleID': 'solid_with_list'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'solid_with_required_resource'
},
'invocations': [
{
'pipeline': {
'name': 'required_resource_pipeline'
},
'solidHandle': {
'handleID': 'solid_with_required_resource'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'spawn'
},
'invocations': [
{
'pipeline': {
'name': 'eventually_successful'
},
'solidHandle': {
'handleID': 'spawn'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'spew'
},
'invocations': [
{
'pipeline': {
'name': 'spew_pipeline'
},
'solidHandle': {
'handleID': 'spew'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'start'
},
'invocations': [
{
'pipeline': {
'name': 'retry_resource_pipeline'
},
'solidHandle': {
'handleID': 'start'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'sum_solid'
},
'invocations': [
{
'pipeline': {
'name': 'csv_hello_world'
},
'solidHandle': {
'handleID': 'sum_solid'
}
},
{
'pipeline': {
'name': 'csv_hello_world_df_input'
},
'solidHandle': {
'handleID': 'sum_solid'
}
},
{
'pipeline': {
'name': 'csv_hello_world_two'
},
'solidHandle': {
'handleID': 'sum_solid'
}
},
{
'pipeline': {
'name': 'csv_hello_world_with_expectations'
},
'solidHandle': {
'handleID': 'sum_solid'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'sum_sq_solid'
},
'invocations': [
{
'pipeline': {
'name': 'csv_hello_world'
},
'solidHandle': {
'handleID': 'sum_sq_solid'
}
},
{
'pipeline': {
'name': 'csv_hello_world_df_input'
},
'solidHandle': {
'handleID': 'sum_sq_solid'
}
},
{
'pipeline': {
'name': 'csv_hello_world_with_expectations'
},
'solidHandle': {
'handleID': 'sum_sq_solid'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'takes_an_enum'
},
'invocations': [
{
'pipeline': {
'name': 'pipeline_with_enum_config'
},
'solidHandle': {
'handleID': 'takes_an_enum'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'throw_a_thing'
},
'invocations': [
{
'pipeline': {
'name': 'naughty_programmer_pipeline'
},
'solidHandle': {
'handleID': 'throw_a_thing'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'will_fail'
},
'invocations': [
{
'pipeline': {
'name': 'retry_resource_pipeline'
},
'solidHandle': {
'handleID': 'will_fail'
}
}
]
}
]
}
| [
1,
396,
448,
29930,
29899,
14137,
29901,
23616,
29899,
29947,
448,
29930,
29899,
13,
29937,
15101,
845,
1501,
342,
29901,
325,
29896,
448,
2045,
597,
1484,
29877,
29889,
3820,
29914,
29920,
29907,
29946,
29891,
29965,
29883,
13,
3166,
4770,... |
setup.py | yuvipanda/fakeokclient | 0 | 33917 | import setuptools
setuptools.setup(
name="fakeokpy",
version='0.1',
url="https://github.com/yuvipanda/fakeokpy",
author="<NAME>",
author_email="<EMAIL>",
license="BSD-3-Clause",
packages=setuptools.find_packages(),
)
| [
1,
1053,
731,
21245,
8789,
13,
13,
842,
21245,
8789,
29889,
14669,
29898,
13,
1678,
1024,
543,
29888,
1296,
554,
2272,
613,
13,
1678,
1873,
2433,
29900,
29889,
29896,
742,
13,
1678,
3142,
543,
991,
597,
3292,
29889,
510,
29914,
29891,
... |
modules/astrom_test_2.py | rsiverd/ultracool | 0 | 181741 | #!/usr/bin/env python
# vim: set fileencoding=utf-8 ts=4 sts=4 sw=4 et tw=80 :
#
# Second cut at astrometry fitting for UCD project.
#
# <NAME>
# Created: 2021-08-30
# Last modified: 2021-08-30
#--------------------------------------------------------------------------
#**************************************************************************
#--------------------------------------------------------------------------
## Logging setup:
import logging
#logging.basicConfig(level=logging.DEBUG)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
#logger.setLevel(logging.DEBUG)
logger.setLevel(logging.INFO)
## Current version:
__version__ = "0.1.0"
## Modules:
import os
import sys
import time
import numpy as np
from numpy.lib.recfunctions import append_fields
#import datetime as dt
#from dateutil import parser as dtp
#import scipy.linalg as sla
#import scipy.signal as ssig
#import scipy.ndimage as ndi
import scipy.optimize as opti
#import scipy.interpolate as stp
#import scipy.spatial.distance as ssd
#from functools import partial
#from collections import OrderedDict
#from collections.abc import Iterable
#import multiprocessing as mp
#np.set_printoptions(suppress=True, linewidth=160)
#import pandas as pd
#import statsmodels.api as sm
#import statsmodels.formula.api as smf
#from statsmodels.regression.quantile_regression import QuantReg
_have_np_vers = float('.'.join(np.__version__.split('.')[:2]))
import theil_sen as ts
## Useful stats routines:
def calc_ls_med_MAD(a, axis=None):
"""Return median and median absolute deviation of *a* (scaled to normal)."""
med_val = np.median(a, axis=axis)
sig_hat = (1.482602218 * np.median(np.abs(a - med_val), axis=axis))
return (med_val, sig_hat)
## Median absolute residual:
def calc_MAR(residuals, scalefactor=1.482602218):
"""Return median absolute residual (MAR) of input array. By default,
the result is scaled to the normal distribution."""
return scalefactor * np.median(np.abs(residuals))
##--------------------------------------------------------------------------##
##------------------ Astrometry Fitting (5-par) ----------------##
##--------------------------------------------------------------------------##
_ARCSEC_PER_RADIAN = 180. * 3600.0 / np.pi
_MAS_PER_RADIAN = _ARCSEC_PER_RADIAN * 1e3
class AstFit(object):
"""
This module provides astrometric fitting capability. Internally, a
5-parameter model is maintained in a numpy array. Its contents are:
* RA (radians) at reference epoch
* DE (radians) at reference epoch
* pmRA (radians / yr). [this is pmRA* / cos(dec)]
* pmDE (radians / yr)
* parallax (radians)
"""
_need_eph_keys = ['jdtdb', 'x', 'y', 'z']
_need_data_keys = ['jdtdb', 'dra', 'dde', 'obs_x', 'obs_y', 'obs_z']
_asec_per_rad = _ARCSEC_PER_RADIAN
_mas_per_rad = _MAS_PER_RADIAN
def __init__(self):
self._jd_tdb = None
self._dt_yrs = None
self.obs_eph = None
self.ref_tdb = None
self.inliers = None
self.rweight = None
self._is_set = False
self._chiexp = 2
self._can_iterate = False
return
def set_exponent(self, exponent=2):
"""
Choose exponent used in penalty function (N below). The solver seeks
to minimize the sum over data points of:
((obs - model) / err)**N
"""
#Setting N=2 behaves like Chi-squared. Setting N=1 minimizes total
#absolute deviation
self._chiexp = exponent
return
#def setup(self, jd_tdb_ref, RA_deg, DE_deg, obs_eph,
def setup(self, data, reject_outliers=True,
jd_tdb_ref=None, RA_err=None, DE_err=None):
self._is_rdy = False
if not all([isinstance(data[x], np.ndarray) \
for x in self._need_data_keys]):
sys.stderr.write("Incomplete data set!\n")
sys.stderr.write("Required columns include:\n")
sys.stderr.write("--> %s\n" % str(self._need_data_keys))
return False
self._outrej = reject_outliers
#if not all([isinstance(obs_eph[x], np.ndarray) \
# for x in self._need_eph_keys]):
# sys.stderr.write("Incomplete ephemeris data!\n")
# sys.stderr.write("Required columns include:\n")
# sys.stderr.write("--> %s\n" % str(self._need_eph_keys))
# return False
#self.inliers = np.ones_like(RA_deg, dtype='bool')
#self.rweight = np.ones_like(RA_deg)
self.inliers = np.ones(len(data), dtype='bool')
self.rweight = np.ones(len(data), dtype='float')
#self.obs_eph = self._augmented_eph(obs_eph)
self.dataset = np.copy(data)
if jd_tdb_ref:
self.ref_tdb = jd_tdb_ref
else:
self.ref_tdb = data['jdtdb'][0]
#self.ref_tdb = jd_tdb_ref
self._dt_yrs = (self.dataset['jdtdb'] - self.ref_tdb) / 365.25
#self._RA_rad = np.radians(RA_deg)
#self._DE_rad = np.radians(DE_deg)
self._RA_rad = np.radians(self.dataset['dra'])
self._DE_rad = np.radians(self.dataset['dde'])
#self._RA_med, self._RA_MAD = calc_ls_med_MAD(self._RA_rad)
#self._DE_med, self._DE_MAD = calc_ls_med_MAD(self._DE_rad)
#self._RA_MAD *= np.cos(self._DE_med)
self._RA_err = RA_err
self._DE_err = DE_err
self._need_resid_errors = False
if not isinstance(RA_err, np.ndarray):
sys.stderr.write("WARNING: RA_err not given, using estimated\n")
self._need_resid_errors = True
if not isinstance(DE_err, np.ndarray):
sys.stderr.write("WARNING: DE_err not given, using estimated\n")
self._need_resid_errors = True
#if isinstance(RA_err, np.ndarray):
# self._RA_err = np.radians(RA_err)
#else:
# self._RA_err = self._RA_MAD
#if isinstance(DE_err, np.ndarray):
# self._DE_err = np.radians(DE_err)
#else:
# self._DE_err = self._DE_MAD
#self._DE_err = np.radians(DE_err) if DE_err else self._DE_MAD
self._is_set = True
self._can_iterate = False
return True
#def set_ref_time(self, t_ref):
# self.ref_time = t_ref
# return
@staticmethod
def _calc_parallax_factors(RA_rad, DE_rad, X_au, Y_au, Z_au):
"""Compute parallax factors in arcseconds. The RA component has
been divided by cos(dec) so that it can be used directly for
residual minimization."""
sinRA, cosRA = np.sin(RA_rad), np.cos(RA_rad)
sinDE, cosDE = np.sin(DE_rad), np.cos(DE_rad)
ra_factor = (X_au * sinRA - Y_au * cosRA) / cosDE
de_factor = X_au * cosRA * sinDE \
+ Y_au * sinRA * sinDE \
- Z_au * cosDE
return ra_factor, de_factor
#def ts_fit_coord(self, time_vals, coo_vals):
@staticmethod
def ts_fit_radec_pm(t_yrs, RA_rad, DE_rad, plx_as=0, weighted=False):
ts_ra_model = ts.linefit(t_yrs, RA_rad, weighted=weighted)
ts_de_model = ts.linefit(t_yrs, DE_rad, weighted=weighted)
return np.array([ts_ra_model[0], ts_de_model[0],
ts_ra_model[1], ts_de_model[1], plx_as])
def apparent_radec(self, t_ref, astrom_pars, eph_obs):
"""
t_ref -- chosen reference epoch
astrom_pars -- five astrometric parameters specified at the
reference epoch: meanRA (rad), meanDE (rad),
pmRA*cos(DE), pmDE, and parallax
eph_obs -- dict with x,y,z,t elements describing the times
and places of observations (numpy arrays)
FOR NOW, assume
[t_ref] = JD (TDB)
[t] = JD (TDB)
[pars] = rad, rad, arcsec/yr, arcsec/yr, arcsec
*no cos(d)*
"""
rra, rde, pmra, pmde, prlx = astrom_pars
t_diff_yr = (eph_obs['t'] - t_ref) / 365.25 # units of years
pfra, pfde = self._calc_parallax_factors(rra, rde,
eph_obs['x'], eph_obs['y'], eph_obs['z'])
delta_ra = (t_diff_yr * pmra + prlx * pfra)
delta_de = (t_diff_yr * pmde + prlx * pfde)
return (rra + delta_ra, rde + delta_de)
def eval_model(self, params):
return self._solver_eval(params)
#def eval_model(self, params):
# rra, rde, pmra, pmde, prlx = params
# pfra, pfde = self._calc_parallax_factors(rra, rde,
# self.dataset['obs_x'], self.dataset['obs_y'],
# self.dataset['obs_z'])
# delta_ra = self._dt_yrs * pmra + prlx * pfra
# delta_de = self._dt_yrs * pmde + prlx * pfde
# return (rra + delta_ra, rde + delta_de)
def _solver_eval(self, params):
rra, rde, pmra, pmde, prlx = params
pfra, pfde = self._calc_parallax_factors(rra, rde,
self.dataset['obs_x'], self.dataset['obs_y'],
self.dataset['obs_z'])
delta_ra = self._dt_yrs * pmra + prlx * pfra
delta_de = self._dt_yrs * pmde + prlx * pfde
#delta_ra = self._dt_yrs * pmra - prlx * pfra
#delta_de = self._dt_yrs * pmde - prlx * pfde
return (rra + delta_ra, rde + delta_de)
def _calc_radec_residuals(self, params):
model_RA, model_DE = self._solver_eval(params)
return (self._RA_rad - model_RA, self._DE_rad - model_DE)
def _calc_radec_residuals_sigma(self, params):
model_RA, model_DE = self._solver_eval(params)
#rsigs_RA = (self._RA_rad - model_RA) / self._RA_err
#rsigs_DE = (self._DE_rad - model_DE) / self._DE_err
rsigs_RA = (self._RA_rad - model_RA) / self._use_RA_err
rsigs_DE = (self._DE_rad - model_DE) / self._use_DE_err
return rsigs_RA, rsigs_DE
def _calc_total_residuals_sigma(self, params):
return np.hypot(*self._calc_radec_residuals_sigma(params))
def _calc_chi_square(self, params, negplxhit=100.):
model_ra, model_de = self._solver_eval(params)
#resid_ra = (model_ra - self._RA_rad) #/ np.cos(model_de)
#resid_de = (model_de - self._DE_rad)
resid_ra = (self._RA_rad - model_ra) #/ np.cos(model_de)
resid_de = (self._DE_rad - model_de)
#resid_ra = (model_ra - self._RA_rad) / self._RA_err
#resid_de = (model_de - self._DE_rad) / self._DE_err
#if isinstance(self._RA_err, np.ndarray):
# resid_ra /= self._RA_err
#if isinstance(self._DE_err, np.ndarray):
# resid_de /= self._DE_err
if isinstance(self._use_RA_err, np.ndarray):
resid_ra /= self._use_RA_err
if isinstance(self._use_DE_err, np.ndarray):
resid_de /= self._use_DE_err
#return np.sum(np.hypot(resid_ra, resid_de))
#return np.sum(np.hypot(resid_ra, resid_de)**2)
resid_tot = np.hypot(resid_ra, resid_de)[self.inliers]
if (params[4] < 0.0):
resid_tot *= negplxhit
return np.sum(resid_tot**self._chiexp)
#return np.sum(np.hypot(resid_ra, resid_de)**self._chiexp)
#return np.sum(np.abs(resid_ra * resid_de)**self._chiexp)
def _calc_initial_parallax(self, params):
rra_resid, rde_resid = self._calc_radec_residuals(params)
mar_ra_rad = calc_MAR(rra_resid)
mar_ra_mas = _MAS_PER_RADIAN * mar_ra_rad
sys.stderr.write("mar_ra_rad: %f\n" % mar_ra_rad)
sys.stderr.write("mar_ra_mas: %f\n" % mar_ra_mas)
pfra, pfde = self._calc_parallax_factors(
self._RA_rad, self._DE_rad, self.dataset['obs_x'],
self.dataset['obs_y'], self.dataset['obs_z'])
#sys.stderr.write("pfra_arcsec: %s\n" % str(pfra_arcsec))
#pfra_rad = pfra_arcsec / _ARCSEC_PER_RADIAN
adjustment_arcsec = ts.linefit(pfra, _ARCSEC_PER_RADIAN * rra_resid)
sys.stderr.write("adjustment (arcsec): %s\n" % str(adjustment_arcsec))
return adjustment_arcsec
# Driver routine for 5-parameter astrometric fitting:
def fit_bestpars(self, sigcut=5):
if not self._is_set:
sys.stderr.write("Error: data not OK for fitting!\n")
sys.stderr.write("Run setup() first and retry ...\n")
return False
# robust initial guess with Theil-Sen:
uguess = self.ts_fit_radec_pm(self._dt_yrs, self._RA_rad, self._DE_rad)
wguess = self.ts_fit_radec_pm(self._dt_yrs, self._RA_rad, self._DE_rad,
weighted=True)
#sys.stderr.write("Initial guess: %s\n" % str(guess))
sys.stderr.write("Initial guess (unweighted):\n")
sys.stderr.write("==> %s\n" % str(self.nice_units(uguess)))
sys.stderr.write("\n")
sys.stderr.write("Initial guess (weighted):\n")
sys.stderr.write("==> %s\n" % str(self.nice_units(wguess)))
sys.stderr.write("\n")
guess = uguess # adopt unweighted for now
#guess[4] = 1000. / _MAS_PER_RADIAN
# initial crack at parallax and zero-point:
woohoo = self._calc_initial_parallax(guess)
sys.stderr.write("woohoo: %s\n" % str(woohoo))
self.woohoo = woohoo
ra_nudge_rad, plx_rad = woohoo / _ARCSEC_PER_RADIAN
guess[0] += ra_nudge_rad
guess[4] = plx_rad
# estimate RA,Dec uncertainty from residuals if not known a prior:
if self._need_resid_errors:
rra_resid, rde_resid = self._calc_radec_residuals(guess)
rra_scatter = calc_MAR(rra_resid)
rde_scatter = calc_MAR(rde_resid)
mra_scatter = _MAS_PER_RADIAN * rra_scatter
mde_scatter = _MAS_PER_RADIAN * rde_scatter
#sys.stderr.write("rra_resid: %s\n" % str(rra_resid))
#sys.stderr.write("rde_resid: %s\n" % str(rde_resid))
sys.stderr.write("rra_scatter: %e (rad)\n" % rra_scatter)
sys.stderr.write("rde_scatter: %e (rad)\n" % rde_scatter)
sys.stderr.write("mra_scatter: %10.5f (mas)\n" % mra_scatter)
sys.stderr.write("mde_scatter: %10.5f (mas)\n" % mde_scatter)
self._RA_err = np.ones_like(self._RA_rad) * rra_scatter
self._DE_err = np.ones_like(self._DE_rad) * rde_scatter
self._use_RA_err = np.copy(self._RA_err)
self._use_DE_err = np.copy(self._DE_err)
# check whether anything looks really bad:
self._par_guess = guess
#rsig_tot = np.hypot(*self._calc_radec_residuals_sigma(guess))
rsig_tot = self._calc_total_residuals_sigma(guess)
#sys.stderr.write("rsig_tot:\n")
#sys.stderr.write("%s\n" % str(rsig_tot))
sys.stderr.write("typical rsig_tot: %8.3f\n" % np.median(rsig_tot))
#sys.stderr.write("rsig_tot: %s\n" % str(rsig_tot))
self.inliers = (rsig_tot < sigcut)
ndropped = self.inliers.size - np.sum(self.inliers)
sys.stderr.write("Dropped %d point(s) beyond %.2f-sigma.\n"
% (ndropped, sigcut))
#sys.stderr.write("ra_res: %s\n" % str(ra_res))
#sys.stderr.write("de_res: %s\n" % str(de_res))
#sys.stderr.write("ra_sig: %s\n" % str(ra_sig))
#sys.stderr.write("de_sig: %s\n" % str(de_sig))
# find minimum:
self.full_result = opti.fmin(self._calc_chi_square, guess,
xtol=1e-7, ftol=1e-7, full_output=True)
#xtol=1e-9, ftol=1e-9, full_output=True)
self.result = self.full_result[0]
# brute-force minimum:
#ra_fudge = np.median(self._RA_err)
#de_fudge = np.median(self._DE_err)
#pm_fudge = 0.2
#px_fudge = 4.0
#ranges = [(guess[0] - ra_fudge, guess[0] + ra_fudge), # RA
# (guess[1] - de_fudge, guess[1] + de_fudge), # DE
# (guess[2] / pm_fudge, guess[2] * pm_fudge), # pmRA
# (guess[3] / pm_fudge, guess[3] * pm_fudge), # pmRA
# (guess[4] / px_fudge, guess[3] * px_fudge), # parallax
# ]
#npts = 10
#self.result = opti.brute(self._calc_chi_square, ranges, Ns=npts)
sys.stderr.write("Found minimum:\n")
sys.stderr.write("==> %s\n" % str(self.nice_units(self.result)))
self._can_iterate = True
return self.result
# -----------------------------------------------------------------------
def _calc_huber_rweights(self, residuals, sigma):
_k_sig = 1.34 * sigma
res_devs = np.abs(residuals / _k_sig)
rweights = np.ones_like(res_devs)
distants = (res_devs > 1.0)
rweights[distants] = 1.0 / res_devs[distants]
return rweights
def iter_update_bestpars(self, params):
"""Perform an IRLS iteration."""
# calculate residuals:
rra_resid, rde_resid = self._calc_radec_residuals(params)
#sys.stderr.write("rra_resid: %s\n" % str(rra_resid))
#sys.stderr.write("rde_resid: %s\n" % str(rde_resid))
rra_scatter = calc_MAR(rra_resid)
rde_scatter = calc_MAR(rde_resid)
#sys.stderr.write("rra_scatter: %e (rad)\n" % rra_scatter)
#sys.stderr.write("rde_scatter: %e (rad)\n" % rde_scatter)
ra_rweights = self._calc_huber_rweights(rra_resid, rra_scatter)
self._use_RA_err = ra_rweights * self._RA_err
de_rweights = self._calc_huber_rweights(rde_resid, rde_scatter)
self._use_DE_err = de_rweights * self._DE_err
# find minimum:
self.iresult = opti.fmin(self._calc_chi_square, params ,
xtol=1e-7, ftol=1e-7, full_output=True)
sys.stderr.write("Found IRLS minimum:\n")
sys.stderr.write("==> %s\n" % str(self.nice_units(self.iresult[0])))
self._can_iterate = True
return self.iresult[0]
# -----------------------------------------------------------------------
def nice_units(self, params):
result = np.degrees(params)
result[2:5] *= 3.6e6 # into milliarcsec
result[2] *= np.cos(params[1]) # cos(dec) for pmRA
return result
def list_resid_sigmas(self, params):
rsig_RA, rsig_DE = self._calc_radec_residuals_sigma(params)
rsig_tot = np.hypot(rsig_RA, rsig_DE)
#sys.stderr.write("%15s %15s\n")
for ii,point in enumerate(zip(rsig_RA, rsig_DE, rsig_tot), 0):
sys.stderr.write("> %10.5f %10.5f (%10.5f)\n" % point)
return
######################################################################
# CHANGELOG (astrom_test_2.py):
#---------------------------------------------------------------------
#
# 2020-02-07:
# -- Increased __version__ to 0.1.0.
# -- First created astrom_test_2.py.
#
| [
1,
18787,
4855,
29914,
2109,
29914,
6272,
3017,
13,
29937,
325,
326,
29901,
731,
934,
22331,
29922,
9420,
29899,
29947,
18696,
29922,
29946,
380,
29879,
29922,
29946,
2381,
29922,
29946,
634,
3252,
29922,
29947,
29900,
584,
13,
29937,
13,
... |
main.py | SaintBuddha/UCU_LinuxClub_GitHW | 0 | 140358 | <reponame>SaintBuddha/UCU_LinuxClub_GitHW<gh_stars>0
from os import system, name
import cowsay
import json
import random
CHARACTERS = ["beavis", "cheese", "daemon", "cow", "dragon", "ghostbusters", "kitty", "meow", "milk", "stegosaurus",
"stimpy", "turkey", "turtle", "tux"]
def get_character():
"""This function chooses the character for cowsay
Returns:
str - cowsay character
"""
character = random.choice(CHARACTERS)
return character
def get_quote():
"""
Responsible for getting a quote.
Args:
Returns:
str - a quote
"""
return "This is quote from latest master branch! Random quotes coming soon!"
if __name__ == "__main__":
character = get_character()
print(f"{character.capitalize()} says:\n\n\n")
getattr(cowsay,character)(get_quote())
| [
1,
529,
276,
1112,
420,
29958,
22211,
29933,
566,
29881,
2350,
29914,
23129,
29965,
29918,
24085,
6821,
431,
29918,
28712,
29950,
29956,
29966,
12443,
29918,
303,
1503,
29958,
29900,
13,
3166,
2897,
1053,
1788,
29892,
1024,
13,
5215,
274,
... |
metalgrafica/metalgrafica/doctype/plantilla_de_grupo_de_productos/plantilla_de_grupo_de_productos.py | Nirchains/metal | 0 | 132129 | # -*- coding: utf-8 -*-
# Copyright (c) 2017, <NAME> and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class Plantilladegrupodeproductos(Document):
def validate(self):
from erpnext.utilities.transaction_base import validate_uom_is_integer
validate_uom_is_integer(self, "stock_uom", "qty", "BOM Item")
| [
1,
396,
448,
29930,
29899,
14137,
29901,
23616,
29899,
29947,
448,
29930,
29899,
13,
29937,
14187,
1266,
313,
29883,
29897,
29871,
29906,
29900,
29896,
29955,
29892,
529,
5813,
29958,
322,
17737,
29560,
13,
29937,
1152,
19405,
2472,
29892,
... |
src/pyft4222/wrapper/i2c/slave.py | lavovaLampa/pyft4222 | 0 | 51407 | <filename>src/pyft4222/wrapper/i2c/slave.py<gh_stars>0
from ctypes import POINTER, byref, c_char_p
from ctypes import c_void_p, c_uint8, c_uint16, c_bool
from typing import NewType
from ..dll_loader import ftlib
from .. import FtHandle, Result, Ok, Err, Ft4222Exception, Ft4222Status
I2cSlaveHandle = NewType("I2cSlaveHandle", FtHandle)
# Function prototypes
_init = ftlib.FT4222_I2CSlave_Init
_init.argtypes = [c_void_p]
_init.restype = Ft4222Status
_reset = ftlib.FT4222_I2CSlave_Reset
_reset.argtypes = [c_void_p]
_reset.restype = Ft4222Status
_get_address = ftlib.FT4222_I2CSlave_GetAddress
_get_address.argtypes = [c_void_p, POINTER(c_uint8)]
_get_address.restype = Ft4222Status
_set_address = ftlib.FT4222_I2CSlave_SetAddress
_set_address.argtypes = [c_void_p, c_uint8]
_set_address.restype = Ft4222Status
_get_rx_status = ftlib.FT4222_I2CSlave_GetRxStatus
_get_rx_status.argtypes = [c_void_p, POINTER(c_uint16)]
_get_rx_status.restype = Ft4222Status
_read = ftlib.FT4222_I2CSlave_Read
_read.argtypes = [c_void_p, POINTER(c_uint8), c_uint16, POINTER(c_uint16)]
_read.restype = Ft4222Status
_write = ftlib.FT4222_I2CSlave_Write
_write.argtypes = [c_void_p, c_char_p, c_uint16, POINTER(c_uint16)]
_write.restype = Ft4222Status
_set_clock_stretch = ftlib.FT4222_I2CSlave_SetClockStretch
_set_clock_stretch.argtypes = [c_void_p, c_bool]
_set_clock_stretch.restype = Ft4222Status
_set_resp_word = ftlib.FT4222_I2CSlave_SetRespWord
_set_resp_word.argtypes = [c_void_p, c_uint8]
_set_resp_word.restype = Ft4222Status
def init(ft_handle: FtHandle) -> Result[I2cSlaveHandle, Ft4222Status]:
"""Initialized FT4222H as an I2C slave.
Note:
The I2C slave address is set to 0x40 after initialization.
Args:
ft_handle: Handle to an opened FT4222 device
Returns:
Result: Handle to initialized FT4222 device in I2C Slave mode
"""
result: Ft4222Status = _init(ft_handle)
if result == Ft4222Status.OK:
return Ok(I2cSlaveHandle(ft_handle))
else:
return Err(result)
def reset(ft_handle: I2cSlaveHandle) -> None:
"""Reset the I2C slave device.
This function will maintain the original I2C slave settings
and clear all caches in the device.
Args:
ft_handle: Handle to an initialized FT4222 device in I2C Slave mode
Raises:
Ft4222Exception: In case of unexpected error
"""
result: Ft4222Status = _reset(ft_handle)
if result != Ft4222Status.OK:
raise Ft4222Exception(result)
def get_address(ft_handle: I2cSlaveHandle) -> int:
"""Get the address of the I2C slave device.
Default address is 0x40.
Args:
ft_handle: Handle to an initialized FT4222 device in I2C Slave mode
Raises:
Ft4222Exception: In case of unexpected error
Returns:
int: Current I2C slave address
"""
addr = c_uint8()
result: Ft4222Status = _get_address(ft_handle, byref(addr))
if result != Ft4222Status.OK:
raise Ft4222Exception(result)
return addr.value
def set_address(ft_handle: I2cSlaveHandle, addr: int) -> None:
"""Set the address of the I2C slave device.
Args:
ft_handle: Handle to an initialized FT4222 device in I2C Slave mode
addr: Address to be set
Raises:
Ft4222Exception: In case of unexpected error
"""
assert (
0 <= addr < (2 ** 7)
), "Device address must be an unsigned 16b integer (range 0 - 65 535)"
result: Ft4222Status = _set_address(ft_handle, addr)
if result != Ft4222Status.OK:
raise Ft4222Exception(result)
def get_rx_status(ft_handle: I2cSlaveHandle) -> int:
"""Get number of bytes in the receive queue.
Args:
ft_handle: Handle to an initialized FT4222 device in I2C Slave mode
Raises:
Ft4222Exception: In case of unexpected error
Returns:
int: Number of bytes in Rx queue
"""
rx_size = c_uint16()
result: Ft4222Status = _get_rx_status(ft_handle, byref(rx_size))
if result != Ft4222Status.OK:
raise Ft4222Exception(result)
return rx_size.value
def read(ft_handle: I2cSlaveHandle, read_byte_count: int) -> bytes:
"""Read data from the buffer of the I2C slave device.
Args:
ft_handle: Handle to an initialized FT4222 device in I2C Slave mode
read_byte_count: Positive number of bytes to read
Raises:
Ft4222Exception: In case of unexpected error
Returns:
bytes: Read data
"""
assert (
0 < read_byte_count < (2 ** 16)
), "Number of bytes to read must be positive and less than 2^16"
read_buffer = (c_uint8 * read_byte_count)()
bytes_read = c_uint16()
result: Ft4222Status = _read(
ft_handle, read_buffer, len(read_buffer), byref(bytes_read)
)
if result != Ft4222Status.OK:
raise Ft4222Exception(result)
return bytes(read_buffer[: bytes_read.value])
def write(ft_handle: I2cSlaveHandle, write_data: bytes) -> int:
"""Write data to the buffer of I2C slave device.
Args:
ft_handle: Handle to an initialized FT4222 device in I2C Slave mode
write_data: Non-empty list of bytes to write into Tx queue
Raises:
Ft4222Exception: In case of unexpected error
Returns:
int: Number of bytes written
"""
assert (
0 < len(write_data) < (2 ** 16)
), "Data to be written must be non-empty and contain less than 2^16 bytes"
bytes_written = c_uint16()
result: Ft4222Status = _write(
ft_handle, write_data, len(write_data), byref(bytes_written)
)
if result != Ft4222Status.OK:
raise Ft4222Exception(result)
return bytes_written.value
def set_clock_stretch(ft_handle: I2cSlaveHandle, enable: bool) -> None:
"""Enable or disable Clock Stretch.
The default setting of clock stretching is disabled.
Clock stretch is as a flow-control mechanism for slaves.
An addressed slave device may hold the clock line (SCL) low after receiving (or sending) a byte,
indicating that it is not yet ready to process more data.
The master that is communicating with the slave may not finish the transmission of the current bit,
but must wait until the clock line actually goes high.
Args:
ft_handle: Handle to an initialized FT4222 device in I2C Slave mode
enable: Enable clock stretching?
Raises:
Ft4222Exception: In case of unexpected error
"""
result: Ft4222Status = _set_clock_stretch(ft_handle, enable)
if result != Ft4222Status.OK:
raise Ft4222Exception(result)
def set_resp_word(ft_handle: I2cSlaveHandle, response_word: int) -> None:
"""Set the response word in case of empty Tx queue.
Default value is 0xFF.
This function only takes effect when Clock Stretch is disabled.
When data is requested by an I2C master and the device is not ready to respond,
the device will respond with a default value.
Args:
ft_handle: Handle to an initialized FT4222 device in I2C Slave mode
response_word: Unsigned 8-bit response word to be set
Raises:
Ft4222Exception: In case of unexpected error
"""
assert (
0 <= response_word < (2 ** 8)
), "The response word must be an 8b unsigned integer (range 0 - 255)"
result: Ft4222Status = _set_resp_word(ft_handle, response_word)
if result != Ft4222Status.OK:
raise Ft4222Exception(result)
| [
1,
529,
9507,
29958,
4351,
29914,
2272,
615,
29946,
29906,
29906,
29906,
29914,
17699,
29914,
29875,
29906,
29883,
29914,
29879,
18398,
29889,
2272,
29966,
12443,
29918,
303,
1503,
29958,
29900,
13,
3166,
274,
8768,
1053,
349,
6992,
4945,
2... |
test/psyke/classification/real/test_real.py | psykei/psyke-python | 2 | 179137 | <reponame>psykei/psyke-python
import unittest
from parameterized import parameterized_class
from tuprolog.solve.prolog import prolog_solver
from psyke import logger
from test import get_in_rule
from test.psyke import initialize, data_to_struct
@parameterized_class(initialize('real'))
class TestReal(unittest.TestCase):
    """Checks extraction on the 'real' dataset.

    The parameterized_class decorator injects 'expected_theory',
    'extracted_theory', 'extractor' and 'test_set' attributes for each
    parameter set produced by initialize('real').
    """

    def test_extract(self):
        """The extracted theory must equal the expected one."""
        logger.info(self.expected_theory)
        logger.info(self.extracted_theory)
        self.assertTrue(self.expected_theory.equals(self.extracted_theory, False))

    def test_predict(self):
        """Extractor predictions must agree with the Prolog solver's answers."""
        predictions = self.extractor.predict(self.test_set.iloc[:, :-1])
        # Build a solver over the extracted theory plus the membership rule.
        solver = prolog_solver(static_kb=self.extracted_theory.assertZ(get_in_rule()))
        substitutions = [solver.solveOnce(data_to_struct(data)) for _, data in self.test_set.iterrows()]
        # The class label is the last column of the test set.
        index = self.test_set.shape[1] - 1
        expected = [str(query.solved_query.get_arg_at(index)) if query.is_yes else -1 for query in substitutions]
        logger.info(predictions)
        logger.info(expected)
        # assertEqual reports an element-wise diff on failure, unlike the
        # original assertTrue(predictions == expected) which only says False.
        self.assertEqual(predictions, expected)
# Allow running this test module directly (e.g. `python test_real.py`).
if __name__ == '__main__':
    unittest.main()
| [
1,
529,
276,
1112,
420,
29958,
567,
29891,
446,
29875,
29914,
567,
29891,
446,
29899,
4691,
13,
5215,
443,
27958,
13,
3166,
3443,
1891,
1053,
3443,
1891,
29918,
1990,
13,
3166,
5291,
771,
1188,
29889,
2929,
345,
29889,
771,
1188,
1053,
... |
Python-Intermetiate/04_regular_expressions.py | carlosmertens/Django-Full-Stack | 0 | 139408 | # REGULAR EXPRESSIONS
# Start by importing "re" for Reglar Expressions
import re
patterns = ["term1", "term2"]
text = "This is a string with term1, but not the other!"
# Search with re
print("\n** Regular Expression - Search")
for pattern in patterns:
print("*Serching for: " + pattern)
if re.search(pattern, text):
print("Found match!")
else:
print("No match found!")
match = re.search("term1", text)
print("** Locations start at:", match.start()) # Print location
# Split with re
print("\nRegular Expression - Split")
email = "<EMAIL>"
split_at = "@"
print(re.split(split_at, email))
# Find with re
finding = re.findall("t", text)
print(finding)
# Create a function to find patterns with re
def multi_re_find(patterns, phrase):
    """Search *phrase* with each regex in *patterns*, printing the matches."""
    for pat in patterns:
        print(f"*Searching for pattern {pat}")
        print(re.findall(pat, phrase))
        print("\n")
# Exercise the quantifier syntax against a sample phrase.
sample_phrase = "sdsd..sssddd...sdddsddd...dsds...dsssss...sdddd"
quantifier_patterns = [
    "sd*",      # 's' followed by zero or more 'd's
    "sd+",      # 's' followed by one or more 'd's
    "sd?",      # 's' followed by zero or one 'd'
    "sd{3}",    # 's' followed by exactly three 'd's
    "sd{2,3}",  # 's' followed by two to three 'd's
    "s[sd]+",   # 's' followed by one or more 's'/'d' characters
]
multi_re_find(quantifier_patterns, sample_phrase)

# Strip punctuations
print("** Strip Punctuation")
sentence = "This is a string! But it has punctuation. How can we remove it?"
multi_re_find(["[^!.?]+"], sentence)  # runs of non-punctuation characters
multi_re_find(["[a-z]+"], sentence)   # lowercase runs; change to A-Z for capitals
| [
1,
396,
5195,
29954,
13309,
1718,
8528,
15094,
13507,
29903,
13,
13,
29937,
7370,
491,
28348,
376,
276,
29908,
363,
2169,
4675,
14657,
1080,
13,
13,
5215,
337,
13,
13,
11037,
29879,
353,
6796,
8489,
29896,
613,
376,
8489,
29906,
3108,
... |
setup.py | mariocesar/boot.py | 2 | 30074 | <reponame>mariocesar/boot.py<filename>setup.py
#!/usr/bin/env python3
"""Packaging configuration for boot.py."""
import sys

from setuptools import find_packages, setup

if sys.version_info < (3, 6):
    sys.exit('Python 3.6 is the minimum required version')

# The first paragraph of the README is the short description; everything
# after the first blank line becomes the PyPI long description.
# Use 'with' so the file handle is closed (the original leaked it), and
# pin the encoding so the build does not depend on the system locale.
with open('README.rst', 'rt', encoding='utf-8') as readme:
    description, long_description = readme.read().split('\n\n', 1)

setup(
    name='boot.py',
    author='<NAME>',
    author_email='<EMAIL>',
    version='0.16',
    url='https://github.com/mariocesar/boot.py',
    description=description,
    long_description=f'\n{long_description}',
    package_dir={'': 'src'},
    packages=find_packages('src'),
    python_requires='>=3.6',
    setup_requires=['pytest-runner'],
    tests_require=['pytest', 'pytest-cov'],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'License :: OSI Approved :: MIT License',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
| [
1,
529,
276,
1112,
420,
29958,
29885,
1306,
542,
26892,
29914,
4777,
29889,
2272,
29966,
9507,
29958,
14669,
29889,
2272,
13,
29937,
14708,
4855,
29914,
2109,
29914,
6272,
3017,
29941,
13,
5215,
10876,
13,
13,
3166,
731,
21245,
8789,
1053... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.