from fastapi import FastAPI
from fastapi import responses
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
import uvicorn
from webService.fatigueWebWrapper.Payload import FatiguePayload
from webService.fatigueWebWrapper.wrapper import FatigueWebWrapper
from webService.stressCorrectionWrapper.Payload import NeuberPayload
from webService.stressCorrectionWrapper.wrapper import StressCorrectionWebWrapper
from webService.contactWrapper.wrapper import Contact
from webService.contactWrapper.Payload import ContactFormPayload
app = FastAPI()
origins = [
"http://localhost:3000",
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_methods=["*"],
allow_headers=["*"],
)
@app.get("/")
async def root():
return {"message": "Welcome to engineering toolbox"}
@app.get("/api/health")
async def health():
return {"health": "OK"}
@app.post("/api/calculations/fatigue/")
async def calculate_fatigue(payload: FatiguePayload, excel: bool = True):
try:
return FatigueWebWrapper(payload, excel).fatigue()
except Exception as e:
response = {
"detail": [
{
"loc": ["body", "resultsData"],
"msg": "Unable to find convergent solution. Check your input data",
"type": "value_error",
}
]
}
return JSONResponse(content=response, status_code=422)
@app.post("/api/calculations/stress-correction/")
async def calculate_neuber(payload: NeuberPayload):
import time
time.sleep(0.5)
try:
return StressCorrectionWebWrapper(payload).get_data()
except Exception as e:
response = {
"detail": [
{
"loc": ["body", "resultsData"],
"msg": "Unable to find convergent solution. Check your input data",
"type": "value_error",
}
]
}
return JSONResponse(content=response, status_code=422)
@app.post("/api/contact")
async def contact(payload: ContactFormPayload):
try:
Contact(payload).send()
return JSONResponse({"detail": "Thank you for submitting your message. I'll contact you shortly"})
except Exception as e:
return JSONResponse({"detail": str(e)}, status_code=502)
if __name__ == "__main__":
uvicorn.run("main:app", port=8000, reload=True)
|
import os
import argparse
import io
import struct
from typing import Union
from enum import Enum
class ZIPTag(Enum):
S_ZIPFILERECORD = 0x04034b50
S_ZIPDATADESCR = 0x08074b50
S_ZIPDIRENTRY = 0x02014b50
S_ZIPDIGITALSIG = 0x05054b50
S_ZIP64ENDLOCATORRECORD = 0x06064b50
S_ZIP64ENDLOCATOR = 0x07064b50
S_ZIPENDLOCATOR = 0x06054b50
class ZIPManipulation(object):
def __init__(self, reader: Union[io.BytesIO, bytes], prepend: bytes, append: bytes):
if isinstance(reader, bytes):
reader = io.BytesIO(reader)
self.reader: io.BytesIO = reader
self.prepend_table = []
self.append_table = []
self.prepend = prepend
self.prepend_size = len(prepend)
self.append = append
self.append_size = len(append)
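    # prepend_table collects (file position, current value) pairs for the 4-byte
    # "offset of local header" fields that must be shifted by len(prepend);
    # append_table collects (file position, current value) pairs for the 2-byte
    # end-of-central-directory comment-length field that must grow by len(append).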
def run(self):
while True:
tag = self.reader.read(4)
length = len(tag)
if length == 0:
break
            elif length < 4:
                raise Exception('unexpected trailing data; please do not append raw bytes to the original zip file')
n = struct.unpack('<I', tag)
self.next(n[0])
self.reader.seek(0)
data = self.reader.getvalue()
for (index, offset) in self.prepend_table:
data = data[:index] + struct.pack('<I', offset + self.prepend_size) + data[index + 4:]
for (index, size) in self.append_table:
data = data[:index] + struct.pack('<H', size + self.append_size) + data[index + 2:]
return self.prepend + data + self.append
def next(self, tag: int):
if tag == ZIPTag.S_ZIPFILERECORD.value:
self.zip_filerecord()
elif tag == ZIPTag.S_ZIPDATADESCR.value:
self.zip_data_descr()
elif tag == ZIPTag.S_ZIPDIRENTRY.value:
self.zip_direntry()
elif tag == ZIPTag.S_ZIPENDLOCATOR.value:
self.zip_end_locator()
else:
raise Exception('does not support this type of zip: %r', tag)
def zip_filerecord(self):
self.reader.read(14)
compressed_size = struct.unpack('<I', self.reader.read(4))[0]
self.reader.read(4)
filename_size, extra_size = struct.unpack('<HH', self.reader.read(4))
self.reader.read(compressed_size + filename_size + extra_size)
def zip_data_descr(self):
self.reader.read(12)
def zip_direntry(self):
self.reader.read(24)
filename_size = struct.unpack('<H', self.reader.read(2))[0]
self.reader.read(12)
index = self.reader.tell()
offset = struct.unpack('<I', self.reader.read(4))[0]
self.reader.read(filename_size)
self.prepend_table.append((index, offset))
def zip_end_locator(self):
self.reader.read(12)
index = self.reader.tell()
offset, comment_size = struct.unpack('<IH', self.reader.read(6))
self.prepend_table.append((index, offset))
self.append_table.append((index + 4, comment_size))
def main():
parser = argparse.ArgumentParser(
        description='Craft a zip file with padding bytes prepended and/or appended to the original archive content'
)
parser.add_argument('-i', '--input', required=True, metavar='INPUT_FILENAME')
parser.add_argument('-o', '--output', required=True, metavar='OUTPUT_FILENAME')
    parser.add_argument('-p',
                        '--prepend',
                        default='',
                        help='the characters that you want to prepend to the beginning of the file')
    parser.add_argument('-a',
                        '--append',
                        default='',
                        help='the characters that you want to append to the end of the file')
args = parser.parse_args()
with open(args.input, 'rb') as f:
data = f.read()
manipulation = ZIPManipulation(data, args.prepend.encode(), args.append.encode())
data = manipulation.run()
with open(args.output, 'wb') as f:
f.write(data)
print('file %r is generated' % args.output)
if __name__ == '__main__':
main()
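# Example invocation (the script name below is illustrative):
#   python zip_padding.py -i original.zip -o padded.zip -p 'HEADER' -a 'TRAILER'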
|
from os import mkdir
from os.path import exists
from typing import List
def create_dataset_directories(parent_dir: str, classes: List[str]) -> List[str]:
"""
Checks, whether directories exist, in which training samples can be stored
at. If not, new directories are getting generated.
@param parent_dir: Directory, at which a new class dir will be created.
@param classes: Name of a training class for which a dir gets created.
@return: Path to the class directory.
"""
    class_directories: List[str] = []
datasets_dir = f'{parent_dir}/datasets'
raw_data_dir = f'{parent_dir}/datasets/raw_data'
# Creates a dataset dir if not already present.
if not exists(datasets_dir):
mkdir(datasets_dir)
if not exists(raw_data_dir):
mkdir(raw_data_dir)
for class_name in classes:
class_directories.append(f'{raw_data_dir}/{class_name}')
for class_directory in class_directories:
if not exists(class_directory):
mkdir(class_directory)
return class_directories
def create_training_directories(parent_dir: str, classes: List[str]) -> List[str]:
"""
Takes a target directory, intended host datasets for training, validation
and test data. Each of the three datasets will contain a dedicated
directory for storing images of dogs and cats, respectively.
@param parent_dir: Directory in which the datasets directory are be placed.
@param classes:
@return: List of strings, containing the generated directories.
"""
datasets_dir = f'{parent_dir}/datasets'
destination_directories = []
if not exists(datasets_dir):
mkdir(datasets_dir)
dirs = [f'{datasets_dir}/train',
f'{datasets_dir}/val',
f'{datasets_dir}/test']
# Adds the paths of the envisioned child directories of the top-level
# training, validation and testing directories to the list of
# destination directories.
for class_name in classes:
for directory in dirs:
destination_directories.append(f'{directory}/{class_name}')
# Creates training, validation and test directories.
for directory in dirs:
if not exists(directory):
mkdir(directory)
# Creates all sub-directories of the top-level training, validation and
# test directories.
for directory in destination_directories:
if not exists(directory):
mkdir(directory)
return destination_directories
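# A minimal usage sketch (paths and class names below are illustrative, not part of the original module):
#
#   raw_dirs = create_dataset_directories('.', ['cats', 'dogs'])
#   # -> ['./datasets/raw_data/cats', './datasets/raw_data/dogs']
#   split_dirs = create_training_directories('.', ['cats', 'dogs'])
#   # -> ['./datasets/train/cats', './datasets/val/cats', './datasets/test/cats', ...]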
|
expected_output = {
"address_family":{
"ipv4":{
"bfd_sessions_down":0,
"bfd_sessions_inactive":1,
"bfd_sessions_up":0,
"intf_down":1,
"intf_up":1,
"num_bfd_sessions":1,
"num_intf":2,
"state":{
"all":{
"sessions":2,
"slaves":0,
"total":2
},
"backup":{
"sessions":1,
"slaves":0,
"total":1
},
"init":{
"sessions":1,
"slaves":0,
"total":1
},
"master":{
"sessions":0,
"slaves":0,
"total":0
},
"master(owner)":{
"sessions":0,
"slaves":0,
"total":0
}
},
"virtual_addresses_active":0,
"virtual_addresses_inactive":2,
"vritual_addresses_total":2
},
"ipv6":{
"bfd_sessions_down":0,
"bfd_sessions_inactive":0,
"bfd_sessions_up":0,
"intf_down":0,
"intf_up":1,
"num_bfd_sessions":0,
"num_intf":1,
"state":{
"all":{
"sessions":1,
"slaves":0,
"total":1
},
"backup":{
"sessions":0,
"slaves":0,
"total":0
},
"init":{
"sessions":0,
"slaves":0,
"total":0
},
"master":{
"sessions":1,
"slaves":0,
"total":1
},
"master(owner)":{
"sessions":0,
"slaves":0,
"total":0
}
},
"virtual_addresses_active":1,
"virtual_addresses_inactive":0,
"vritual_addresses_total":1
},
"num_tracked_objects":2,
"tracked_objects_down":2,
"tracked_objects_up":0
}
}
|
import torch
def covariance(x):
    x = x - x.mean(dim=0)  # note: an earlier in-place version (x -= x.mean(dim=0)) mutated the input tensor
return x.t().matmul(x) / (x.size(0) - 1)
def corrcoef(x=None, c=None):
    c = covariance(x) if c is None else c
    std = c.diagonal(0).sqrt()
    # divide out of place so a covariance matrix passed in by the caller is not mutated
    c = c / (std[:, None] * std[None, :])
    eps = 1e-5
    return c  # .clamp(-1+eps, 1-eps)
def mean_std_covariance(x):
x = (x - x.mean(dim=0)) / x.std(dim=0)
return covariance(x)
def bn_covariance(x):
from batch_norm import BatchNorm1d
bn = BatchNorm1d(x.shape[1], unbiased=False)
x = bn(x)
return covariance(x)
if __name__ == "__main__":
x = torch.randn(5,7)
print(covariance(x))
print(corrcoef(x))
print(mean_std_covariance(x))
# print(bn_covariance(x))
|
from pydantic import BaseModel
class Nmap_input(BaseModel):
url: str
class Config:
orm_mode = True |
import numpy as np
import pyqtgraph as pg
from qtpy.QtWidgets import QProgressBar, QVBoxLayout
class ImageSize:
height = 0
width = 0
gap_index = 0
def __init__(self, width=0, height=0):
        gap_index = int(height / 2)  # np.int is deprecated; use the builtin int
self.gap_index = gap_index
self.width = width
self.height = height
class Initialization:
"""initialization of all the widgets such as pyqtgraph, progressbar..."""
def __init__(self, parent=None):
self.parent = parent
def run_all(self):
self.data()
self.pyqtgraph()
self.parent.chips_index_changed()
self.splitter()
self.parent.profile_type_changed()
self.parent.profile_changed()
self.widgets()
self.statusbar()
self.parent.chips_alignment_clicked()
def pyqtgraph(self):
# setup
self.parent.setup_image_view = pg.ImageView()
self.parent.setup_image_view.ui.roiBtn.hide()
self.parent.setup_image_view.ui.menuBtn.hide()
setup_layout = QVBoxLayout()
setup_layout.addWidget(self.parent.setup_image_view)
self.parent.ui.setup_widget.setLayout(setup_layout)
# with correction
self.parent.corrected_image_view = pg.ImageView(view=pg.PlotItem())
self.parent.corrected_image_view.ui.roiBtn.hide()
self.parent.corrected_image_view.ui.menuBtn.hide()
correction_layout = QVBoxLayout()
correction_layout.addWidget(self.parent.corrected_image_view)
self.parent.ui.with_correction_widget.setLayout(correction_layout)
# profile
self.parent.profile_view = pg.PlotWidget(title="Profile")
profile_layout = QVBoxLayout()
profile_layout.addWidget(self.parent.profile_view)
self.parent.ui.profile_widget.setLayout(profile_layout)
# Alignment
self.parent.alignment_view = pg.ImageView(view=pg.PlotItem())
self.parent.alignment_view.ui.roiBtn.hide()
self.parent.alignment_view.ui.menuBtn.hide()
setup_layout = QVBoxLayout()
setup_layout.addWidget(self.parent.alignment_view)
self.parent.ui.alignment_widget.setLayout(setup_layout)
# result
self.parent.result_view = pg.ImageView(view=pg.PlotItem())
self.parent.result_view.ui.roiBtn.hide()
self.parent.result_view.ui.menuBtn.hide()
setup_layout = QVBoxLayout()
setup_layout.addWidget(self.parent.result_view)
self.parent.ui.result_widget.setLayout(setup_layout)
def data(self):
self.parent.integrated_data = self.parent.o_corrector.integrated_data
self.parent.working_data = self.parent.o_corrector.working_data
self.parent.working_list_files = self.parent.o_corrector.working_list_files
[height, width] = np.shape(self.parent.integrated_data)
self.parent.image_size = ImageSize(width=width, height=height)
def splitter(self):
self.parent.ui.splitter.setSizes([1, 1])
def widgets(self):
self.parent.ui.reset_pushButton.setVisible(False)
def statusbar(self):
self.parent.eventProgress = QProgressBar(self.parent.ui.statusbar)
self.parent.eventProgress.setMinimumSize(20, 14)
self.parent.eventProgress.setMaximumSize(540, 100)
self.parent.eventProgress.setVisible(False)
self.parent.ui.statusbar.addPermanentWidget(self.parent.eventProgress)
|
from .button import Button
from .background import Background
import pygame
class Menu:
def __init__(self):
self.background = None
self.images = {}
self.buttons = {}
def create_buttons(self, screen):
self.buttons = {}
start = (480, 700)
size = (200, 50)
self.buttons['resume'] = Button(start[0], start[1],
size[0], size[1], screen.engine.database.language.texts['gui']['menu']['resume'],
screen.font, self.exit, screen.screen,
screen.engine.settings.graphic['screen'], screen)
start = (480, 775)
self.buttons['settings'] = Button(start[0], start[1], size[0], size[1],
screen.engine.database.language.texts['gui']['menu']['settings'], screen.font,
self.go_to_settings, screen.screen, screen.engine.settings.graphic['screen'],
screen)
start = (480, 850)
self.buttons['main_menu'] = Button(start[0], start[1], size[0], size[1],
screen.engine.database.language.texts['gui']['menu']['main_menu'], screen.font,
self.go_to_main_menu, screen.screen,
screen.engine.settings.graphic['screen'], screen)
start = (480, 925)
self.buttons['exit'] = Button(start[0], start[1], size[0], size[1],
screen.engine.database.language.texts['gui']['menu']['exit'], screen.font, exit, screen.screen,
screen.engine.settings.graphic['screen'], 0)
self.images['logo'] = (pygame.image.load(screen.engine.path + '/data/graphic/logo/game_logo.png'),
(480 * screen.engine.settings.graphic['screen']['resolution_scale'][0],
400 * screen.engine.settings.graphic['screen']['resolution_scale'][1]))
def create_background(self, screen):
x1 = 200
y1 = 0
x2 = 1520
y2 = 1680
self.background = Background(x1, y1, x2, y2, (255, 255, 255), screen)
def create(self, screen):
self.create_buttons(screen)
self.create_background(screen)
@staticmethod
def go_to_main_menu(screen):
screen.game.gui['menu'] = False
screen.game.gui['main_menu'] = True
@staticmethod
def go_to_settings(screen):
screen.game.gui['menu'] = False
screen.game.gui['settings'] = True
@staticmethod
def exit(screen):
screen.game.gui['menu'] = False
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-30 14:46
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('proposals', '0014_auto_20151225_0501'),
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='AdditionalSpeaker',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('proposal_id', models.BigIntegerField(verbose_name='proposal ID')),
('status', models.CharField(choices=[('pending', 'Pending'), ('accepted', 'Accepted'), ('declined', 'Declined')], default='pending', max_length=8)),
('proposal_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType', verbose_name='proposal model type')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='user')),
('cancelled', models.BooleanField(db_index=True, default=False, verbose_name='cancelled')),
],
options={
'ordering': ['proposal_type', 'proposal_id', 'user__speaker_name'],
'verbose_name': 'additional speaker',
'verbose_name_plural': 'additional speakers',
},
),
migrations.AlterUniqueTogether(
name='additionalspeaker',
unique_together=set([('user', 'proposal_type', 'proposal_id')]),
),
migrations.AlterField(
model_name='talkproposal',
name='python_level',
field=models.CharField(choices=[('NOVICE', 'Novice'), ('INTERMEDIATE', 'Intermediate'), ('EXPERIENCED', 'Experienced')], help_text='The choice of talk level matters during the review process. More definition of talk level can be found at the <a href="/None/speaking/talk/" target="_blank">How to Propose a Talk</a> page. Note that a proposal won\'t be more likely to be accepted because of being "Novice" level. We may contact you to change the talk level when we find the content is too-hard or too-easy for the target audience.', max_length=12, verbose_name='Python level'),
),
migrations.AlterField(
model_name='tutorialproposal',
name='python_level',
field=models.CharField(choices=[('NOVICE', 'Novice'), ('INTERMEDIATE', 'Intermediate'), ('EXPERIENCED', 'Experienced')], help_text='The choice of talk level matters during the review process. More definition of talk level can be found at the <a href="/None/speaking/talk/" target="_blank">How to Propose a Talk</a> page. Note that a proposal won\'t be more likely to be accepted because of being "Novice" level. We may contact you to change the talk level when we find the content is too-hard or too-easy for the target audience.', max_length=12, verbose_name='Python level'),
),
]
|
# coding=utf-8
from typing import Optional
def clear(string: str, delchars: Optional[str] = "") -> str:
"""
Clears string by removing unwanted spaces, HTML-tags, special and specified characters
Args:
string (str): String you want to clear
delchars (Optional[str]): Characters you want to remove from string
Returns:
str: Cleared string
Raises:
TypeError: if 'string' argument type is not 'str'
"""
# Checking str type
if string is None:
return ""
elif not isinstance(string, str):
raise TypeError(f"'string' argument type must be 'str', not '{string.__class__.__name__}'")
# Deleting unwanted symbols
for delstring in ["\n", "\t", "\r", "<b>", "</b>"]:
string = string.replace(delstring, "")
string = string.translate(
str.maketrans(dict.fromkeys(delchars))
)
# Clearing extra spaces
if string:
index = 1
try:
# Clearing extra spaces at the beginning of string
while string[0] == " ":
string = string[1:]
# Clearing extra spaces between words
while index < len(string) - 2:
if string[index] == string[index + 1] == " ":
string = string[:index] + string[index + 1:]
else:
index += 1
# Clearing extra spaces at the end of string
if string:
while string[-1] == " ":
string = string[:-1]
except IndexError:
pass
return string
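# A minimal usage sketch (assumed behaviour based on the steps above, not part of the original module):
#
#   clear("  Hello   <b>world</b>!\n", delchars="!")
#   # -> "Hello world"   (tags and newlines removed, '!' deleted, extra spaces collapsed)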
|
import os.path
import itertools
import Tools
import random
import numpy as np
import scipy
import scipy.stats
import scipy.special
NBTESTS = 10
NBSAMPLES = 128  # assumed value; NBSAMPLES is used below but was never defined in the original
VECDIM = [12,14,20]
def entropyTest(config,nb):
inputs = []
outputs = []
vecDim = VECDIM[nb % len(VECDIM)]
dims=np.array([NBTESTS,vecDim])
for _ in range(0,NBTESTS):
v = np.random.rand(vecDim)
v = v / np.sum(v)
e = scipy.stats.entropy(v)
inputs += list(v)
outputs.append(e)
inputs = np.array(inputs)
outputs = np.array(outputs)
config.writeInput(nb, inputs,"Input")
config.writeInputS16(nb, dims,"Dims")
config.writeReference(nb, outputs,"RefEntropy")
def logsumexpTest(config,nb):
inputs = []
outputs = []
vecDim = VECDIM[nb % len(VECDIM)]
dims=np.array([NBTESTS,vecDim])
for _ in range(0,NBTESTS):
v = np.random.rand(vecDim)
v = v / np.sum(v)
e = scipy.special.logsumexp(v)
inputs += list(v)
outputs.append(e)
inputs = np.array(inputs)
outputs = np.array(outputs)
config.writeInput(nb, inputs,"Input")
config.writeInputS16(nb, dims,"Dims")
config.writeReference(nb, outputs,"RefLogSumExp")
def klTest(config,nb):
inputsA = []
inputsB = []
outputs = []
vecDim = VECDIM[nb % len(VECDIM)]
dims=np.array([NBTESTS,vecDim])
for _ in range(0,NBTESTS):
va = np.random.rand(vecDim)
va = va / np.sum(va)
vb = np.random.rand(vecDim)
vb = vb / np.sum(vb)
e = scipy.stats.entropy(va,vb)
inputsA += list(va)
inputsB += list(vb)
outputs.append(e)
inputsA = np.array(inputsA)
inputsB = np.array(inputsB)
outputs = np.array(outputs)
config.writeInput(nb, inputsA,"InputA")
config.writeInput(nb, inputsB,"InputB")
config.writeInputS16(nb, dims,"Dims")
config.writeReference(nb, outputs,"RefKL")
def logSumExpDotTest(config,nb):
inputsA = []
inputsB = []
outputs = []
vecDim = VECDIM[nb % len(VECDIM)]
dims=np.array([NBTESTS,vecDim])
for _ in range(0,NBTESTS):
va = np.random.rand(vecDim)
va = va / np.sum(va)
vb = np.random.rand(vecDim)
vb = vb / np.sum(vb)
d = 0.001
        # The values are probabilities, so they must lie in [0,1],
        # but we restrict them to ]d,1] so that the log is defined.
va = (1-d)*va + d
vb = (1-d)*vb + d
e = np.log(np.dot(va,vb))
va = np.log(va)
vb = np.log(vb)
inputsA += list(va)
inputsB += list(vb)
outputs.append(e)
inputsA = np.array(inputsA)
inputsB = np.array(inputsB)
outputs = np.array(outputs)
config.writeInput(nb, inputsA,"InputA")
config.writeInput(nb, inputsB,"InputB")
config.writeInputS16(nb, dims,"Dims")
config.writeReference(nb, outputs,"RefLogSumExpDot")
def writeF32OnlyTests(config):
entropyTest(config,1)
logsumexpTest(config,2)
klTest(config,3)
logSumExpDotTest(config,4)
return(4)
def generateMaxTests(config,nb,format,data):
nbiters = Tools.loopnb(format,Tools.TAILONLY)
index=np.argmax(data[0:nbiters])
maxvalue=data[index]
return(nb+1)
def writeTests(config,nb,format):
data1=np.random.randn(NBSAMPLES)
data2=np.random.randn(NBSAMPLES)
data1 = data1/max(data1)
    data2 = data2/max(data2)
nb=generateMaxTests(config,nb,format,data1)
PATTERNDIR = os.path.join("Patterns","DSP","Stats","Stats")
PARAMDIR = os.path.join("Parameters","DSP","Stats","Stats")
configf32=Tools.Config(PATTERNDIR,PARAMDIR,"f32")
configq31=Tools.Config(PATTERNDIR,PARAMDIR,"q31")
configq15=Tools.Config(PATTERNDIR,PARAMDIR,"q15")
configq7 =Tools.Config(PATTERNDIR,PARAMDIR,"q7")
nb=writeF32OnlyTests(configf32)
writeTests(configf32,nb+1,0)
writeTests(configq31,nb+1,31)
writeTests(configq15,nb+1,15)
writeTests(configq7,nb+1,7) |
# ==========================================================================
# Data is extracted/collected only for a given set of 10 companies to make
# predictions, as they are independent and likely to capture a lot of
# variation across engineering, beverages, medicine, investment banking,
# etc., and the corresponding ensembles are used.
# ==========================================================================
selected = [
'GS',
# 'NKE',
# 'MCD',
# 'PFE',
# 'DIS',
# 'INTC',
# 'WMT',
# 'JNJ',
# 'JPM',
# 'AAPL'
]
# ===================================
# Companies to be added in the future
# ===================================
all_companies_list = [
'TRV',
'DOW',
'WBA',
'CAT',
'GS',
'MMM',
'AXP',
'UTX',
'IBM',
'NKE',
'MCD',
'BA',
'CSCO',
'CVX',
'PFE',
'MRK',
'VZ',
'KO',
'DIS',
'HD',
'XOM',
'UNH',
'INTC',
'PG',
'WMT',
'JNJ',
'JPM',
'V',
'AAPL',
'MSFT'
]
# Companies List:
# 1.Travelers ==== TRV
# 2.Dow ==== DOW
# 3.Walgreens Boots Alliance ==== WBA
# 4.Caterpillar ==== CAT
# 5.Goldman Sachs ==== GS
# 6.3M ==== MMM
# 7.American Express ==== AXP
# 8.United Technologies ==== UTX
# 9.IBM ==== IBM
#10.Nike ==== NKE
#11.McDonald's ==== MCD
#12.Boeing ==== BA
#13.Cisco ==== CSCO
#14.Chevron ==== CVX
#15.Pfizer ==== PFE
#16.Merck & Co ==== MRK
#17.Verizon ==== VZ
#18.The Coca-Cola Company ==== KO
#19.Disney ==== DIS
#20.Home Depot ==== HD
#21.Exxon Mobil ==== XOM
#22.UnitedHealth Group ==== UNH
#23.Intel ==== INTC
#24.Procter & Gamble ==== PG
#25.Walmart ==== WMT
#26.Johnson & Johnson ==== JNJ
#27.JPMorgan ==== JPM
#28.Visa ==== V
#29.Apple ==== AAPL
#30.Microsoft ==== MSFT |
from week7.baseSort.cmp.insert.InsertionSort import insertSort
"""
桶排序
通过最大值最小值以及桶默认大小 计算需要的桶个数
遍历每个桶 对每个桶进行插入排序
遍历桶将数据按顺序保存到原始数组
"""
def bucketSort(arr):
_max = max(arr)
_min = min(arr)
_len = len(arr)
_pox = 2
    _size = ((_max - _min) >> _pox) + 1  # each bucket holds 2**_pox values by default; compute the number of buckets needed
buckets = [[] for _ in range(_size)]
for v in arr:
index = (v - _min)>>_pox
buckets[index].append(v)
arr = []
for i in range(_size):
insertSort(buckets[i])
for j in range(len(buckets[i])):
arr.append(buckets[i][j])
return arr
a = [2,3,9,4,1,2,3,4,5,6,7,6,6,5,4,8,4,1,4,2,3,3,9]
print(a)
print(bucketSort(a)) |
from rest_framework import serializers
from apps.contents.models import SPUSpecification, SPU
class SPUSpecSerializer(serializers.ModelSerializer):
"""
商品规格表的序列化器
"""
# 指定spu的名称 通过模型类的外键
spu = serializers.StringRelatedField(read_only=True)
# 指定spu的id值 通过表字段
spu_id = serializers.IntegerField()
class Meta:
model = SPUSpecification
fields = ('id', 'name', 'spu', 'spu_id')
class SPUSerializer(serializers.ModelSerializer):
"""
    Serializer for SPU products
"""
class Meta:
model = SPU
fields = ('id', 'name')
|
# -*- coding: utf-8 -*-
from .headers import (get_csq, get_header)
from .get_info import (get_most_severe_consequence, get_omim_number,
get_cytoband_coord, get_gene_info, get_gene_symbols)
from .ped import get_individuals, get_cases
from .phenomizer import hpo_genes
from .constants import IMPACT_SEVERITIES
from .get_file_info import (get_file_type, get_variant_type) |
from django.shortcuts import render
from django.views.generic import ListView, DetailView
from .models import Author, Book
# Create your views here.
class AuthorListView(ListView):
model = Author
class AuthorDetailView(DetailView):
model = Author
class BookListView(ListView):
model = Book
class BookDetailView(DetailView):
model = Book |
from distutils.core import setup
PACKAGE = "inmembrane"
DESCRIPTION = "A bioinformatic pipeline for proteome annotation \
to predict if a protein is exposed on the surface of a bacteria."
AUTHOR = "Andrew Perry & Bosco Ho"
AUTHOR_EMAIL = "ajperry@pansapiens.com"
URL = "http://github.com/boscoh/inmembrane"
# Must be a semantic version number. Also update inmembrane/__init__.py
VERSION = "0.95.0" # __import__(PACKAGE).__version__
extra_requires = []
try:
    from collections import OrderedDict
except ImportError:
    extra_requires.append("ordereddict")
setup(
name=PACKAGE,
version=VERSION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
url=URL,
description=DESCRIPTION,
packages=['inmembrane', 'inmembrane.plugins',
'inmembrane.protocols', 'inmembrane.tests'],
# NOTE: some packaging filters are also in MANIFEST.in
package_data={'inmembrane': ['protocols/*/*',
'tests/*/*',
'plugins/*/*'], },
scripts=['inmembrane_scan'],
# README, examples & docs are included via MANIFEST.in
license='BSD',
long_description=open('README.rst', 'rt').read(),
install_requires=["BeautifulSoup >= 3.2.1",
"bs4",
"cssselect",
"lxml",
"requests >= 2.0.0",
"semantic_version",
"suds >= 0.4",
"twill == 0.9.1",
] + extra_requires,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Scientific/Engineering :: Bio-Informatics",
],
zip_safe=False,
)
|
from functools import wraps
def flatten_list(method):
@wraps(method)
def inner(*args):
return list(method(*args))
return inner
def flatten_dict(method):
@wraps(method)
def inner(*args):
return dict(method(*args))
return inner
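# A minimal usage sketch (the decorated function below is illustrative, not part of the original module):
#
#   @flatten_list
#   def squares(n):
#       return (i * i for i in range(n))
#
#   squares(4)  # -> [0, 1, 4, 9]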
|
import platform
import time
from datetime import datetime as dt
win_hosts_path = r'C:\Windows\System32\drivers\etc\hosts'
osx_linux_hosts_path = r'/etc/hosts'
temp_path = r'hosts'
redirect = '127.0.0.1'
websites = ['www.example.com', 'example.com']
if (platform.system() == 'Windows'):
hosts_path = win_hosts_path
else:
hosts_path = osx_linux_hosts_path
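# NOTE: the real hosts path chosen above is overridden below with a local test file
# ('hosts' in the working directory), presumably a development/testing shortcut.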
hosts_path = temp_path
while True:
if 9 < dt.now().hour < 18:
print('Working hour')
with open(hosts_path, 'r+') as file:
content = file.read()
for website in websites:
if website in content:
pass
else:
file.write(redirect + ' ' + website + '\n')
else:
print('Off hour')
with open(hosts_path, 'r+') as file:
content = file.readlines()
file.seek(0)
for line in content:
if not any(website in line for website in websites):
file.write(line)
file.truncate()
time.sleep(10)
|
import distutils.core
from distutils.core import setup
version_number = "0.1"
setup(
name="qvalue",
version=version_number,
description="Converts p-values in q-values in order to account for multiple hypotheses testing, see (Storey and Tibshirani, 2003)",
long_description=open("README.md").read(),
author="Nicolo Fusi",
author_email="nicolo.fusi@sheffield.ac.uk",
packages=["qvalue"],
requires=["numpy (>=1.5)", "scipy (>=0.8)"],
license="3-clause BSD",
)
|
"""
Simple linear regression example in TensorFlow
This program tries to predict the number of thefts from
the number of fires in the city of Chicago
"""
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import xlrd
DATA_FILE = '../data/fire_theft.xls'
# Phase 1: Assemble the graph
# Step 1: read in data from the .xls file
book = xlrd.open_workbook(DATA_FILE, encoding_override='utf-8')
sheet = book.sheet_by_index(0)
data = np.asarray([sheet.row_values(i) for i in range(1, sheet.nrows)])
n_samples = sheet.nrows - 1
# Step 2: create placeholders for input X (number of fires) and label Y (number of thefts)
number_of_features = 1 # fire
X = tf.placeholder(dtype=tf.float32, shape=(None))
Y = tf.placeholder(dtype=tf.float32, shape=(None))
# Step 3: create weight and bias, initialized to 0
# name your variables w and b
w = tf.Variable(initial_value=0.0,dtype=tf.float32)
b = tf.Variable(initial_value=0.0,dtype=tf.float32)
# Step 4: predict Y (number of thefts) from the number of fires
# name your variable Y_predicted
Y_predicted = X * w + b
# Step 5: use the square error as the loss function
# name your variable loss
loss = tf.nn.l2_loss(Y-Y_predicted)
# Step 6: using gradient descent with a learning rate of 0.001 to minimize loss
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(loss)
# Phase 2: Train our model
with tf.Session() as sess:
# Step 7: initialize the necessary variables, in this case, w and b
init = tf.global_variables_initializer()
sess.run(init)
# Step 8: train the model
for i in range(100): # run 100 epochs
total_loss = 0
for x, y in data:
# Session runs optimizer to minimize loss and fetch the value of loss
_, l = sess.run([optimizer, loss],feed_dict={X:x, Y:y})
total_loss += l
print "Epoch {0}: {1}".format(i, total_loss/n_samples)
w = sess.run(w)
b = sess.run(b)
# plot the results
X, Y = data.T[0], data.T[1]
plt.plot(X, Y, 'bo', label='Real data')
plt.plot(X, X * w + b, 'r', label='Predicted data')
plt.legend()
plt.show() |
# pylint: disable=invalid-name
from Page import Page
from WebUtils.Funcs import htmlEncode as enc
class Inspector(Page):
def writeContent(self):
req = self.request()
self.write('Path:<br>\n')
self.write(f'<code>{enc(req.extraURLPath())}</code><p>\n')
self.write('Variables:<br>\n')
self.write('<table>')
for name in sorted(req.fields()):
self.write(
f'<tr><td style="text-align:right">{enc(name)}:</td>'
f'<td>{enc(req.field(name))}</td></tr>\n')
self.write('</table><p>\n')
self.write('Server-side path:<br>\n')
self.write(f'<code>{enc(req.serverSidePath())}</code><p>\n')
|
"""Functions for computing atomic structure of proteins."""
import logging
# %%
# Graphein
# Author: Arian Jamasb <arian@jamasb.io>
# License: MIT
# Project Website: https://github.com/a-r-j/graphein
# Code Repository: https://github.com/a-r-j/graphein
from typing import Any, Dict
import networkx as nx
import numpy as np
import pandas as pd
from graphein.protein.edges.distance import compute_distmat
from graphein.protein.resi_atoms import (
BOND_LENGTHS,
BOND_ORDERS,
COVALENT_RADII,
DEFAULT_BOND_STATE,
RESIDUE_ATOM_BOND_STATE,
)
log = logging.getLogger(__name__)
# Todo dealing with metals
# Todo There are other check and balances that can be implemented from here: https://www.daylight.com/meetings/mug01/Sayle/m4xbondage.html
def assign_bond_states_to_dataframe(df: pd.DataFrame) -> pd.DataFrame:
"""
Takes a ``PandasPDB`` atom dataframe and assigns bond states to each atom based on:
Atomic Structures of all the Twenty Essential Amino Acids and a Tripeptide, with Bond Lengths as Sums of Atomic Covalent Radii
Heyrovska, 2008
First, maps atoms to their standard bond states (:const:`~graphein.protein.resi_atoms.DEFAULT_BOND_STATE`).
Second, maps non-standard bonds states (:const:`~graphein.protein.resi_atoms.RESIDUE_ATOM_BOND_STATE`).
Fills NaNs with standard bond states.
:param df: Pandas PDB dataframe
:type df: pd.DataFrame
:return: Dataframe with added ``atom_bond_state`` column
:rtype: pd.DataFrame
"""
# Map atoms to their standard bond states
naive_bond_states = pd.Series(df["atom_name"].map(DEFAULT_BOND_STATE))
# Create series of bond states for the non-standard states
ss = (
pd.DataFrame(RESIDUE_ATOM_BOND_STATE)
.unstack()
.rename_axis(("residue_name", "atom_name"))
.rename("atom_bond_state")
)
# Map non-standard states to the dataframe based on the residue and atom name
df = df.join(ss, on=["residue_name", "atom_name"])
# Fill the NaNs with the standard states
df = df.fillna(value={"atom_bond_state": naive_bond_states})
return df
def assign_covalent_radii_to_dataframe(df: pd.DataFrame) -> pd.DataFrame:
"""
Assigns covalent radius (:const:`~graphein.protein.resi_atoms.COVALENT_RADII`) to each atom based on its bond state. Adds a ``covalent_radius`` column. Using values from:
Atomic Structures of all the Twenty Essential Amino Acids and a Tripeptide, with Bond Lengths as Sums of Atomic Covalent Radii
Heyrovska, 2008
:param df: Pandas PDB dataframe with a ``bond_states_column``
:type df: pd.DataFrame
:return: Pandas PDB dataframe with added ``covalent_radius`` column
:rtype: pd.DataFrame
"""
# Assign covalent radius to each atom
df["covalent_radius"] = df["atom_bond_state"].map(COVALENT_RADII)
return df
def add_atomic_edges(G: nx.Graph, tolerance: float = 0.56) -> nx.Graph:
"""
    Computes covalent edges based on atomic distances. Covalent radii are assigned to each atom based on its bond state
    (see :func:`assign_bond_states_to_dataframe`).
    The distance matrix is then thresholded to entries less than this distance plus some tolerance to create an adjacency matrix.
    This adjacency matrix is then parsed into an edge list and covalent edges are added to the graph.
:param G: Atomic graph (nodes correspond to atoms) to populate with atomic bonds as edges
:type G: nx.Graph
:param tolerance: Tolerance for atomic distance. Default is ``0.56`` Angstroms. Commonly used values are: ``0.4, 0.45, 0.56``
:type tolerance: float
:return: Atomic graph with edges between bonded atoms added
:rtype: nx.Graph
"""
dist_mat = compute_distmat(G.graph["pdb_df"])
# We assign bond states to the dataframe, and then map these to covalent radii
G.graph["pdb_df"] = assign_bond_states_to_dataframe(G.graph["pdb_df"])
G.graph["pdb_df"] = assign_covalent_radii_to_dataframe(G.graph["pdb_df"])
# Create a covalent 'distance' matrix by adding the radius arrays with its transpose
covalent_radius_distance_matrix = np.add(
np.array(G.graph["pdb_df"]["covalent_radius"]).reshape(-1, 1),
np.array(G.graph["pdb_df"]["covalent_radius"]).reshape(1, -1),
)
# Add the tolerance
covalent_radius_distance_matrix = (
covalent_radius_distance_matrix + tolerance
)
# Threshold Distance Matrix to entries where the eucl distance is less than the covalent radius plus tolerance and larger than 0.4
dist_mat = dist_mat[dist_mat > 0.4]
t_distmat = dist_mat[dist_mat < covalent_radius_distance_matrix]
# Store atomic adjacency matrix in graph
G.graph["atomic_adj_mat"] = np.nan_to_num(t_distmat)
# Get node IDs from non NaN entries in the thresholded distance matrix and add the edge to the graph
inds = zip(*np.where(~np.isnan(t_distmat)))
for i in inds:
length = t_distmat[i[0]][i[1]]
node_1 = G.graph["pdb_df"]["node_id"][i[0]]
node_2 = G.graph["pdb_df"]["node_id"][i[1]]
chain_1 = G.graph["pdb_df"]["chain_id"][i[0]]
chain_2 = G.graph["pdb_df"]["chain_id"][i[1]]
# Check nodes are in graph
if not (G.has_node(node_1) and G.has_node(node_2)):
continue
        # Check both atoms have a chain ID assigned (a stricter same-chain check would compare chain_1 != chain_2)
if not (chain_1 and chain_2):
continue
if G.has_edge(node_1, node_2):
G.edges[node_1, node_2]["kind"].add("covalent")
G.edges[node_1, node_2]["bond_length"] = length
else:
G.add_edge(node_1, node_2, kind={"covalent"}, bond_length=length)
# Todo checking degree against MAX_NEIGHBOURS
return G
def add_ring_status(G: nx.Graph) -> nx.Graph:
"""
Identifies rings in the atomic graph. Assigns the edge attribute ``"RING"`` to edges in the ring. We do not distinguish
between aromatic and non-aromatic rings. Functions by identifying all cycles in the graph.
:param G: Atom-level protein structure graph to add ring edge types to
:type G: nx.Graph
:return: Atom-level protein structure graph with added ``"RING"`` edge attribute
:rtype: nx.Graph
"""
cycles = nx.cycle_basis(
G
) # Produces a list of lists containing nodes in each cycle
# Iterate over cycles, check for an edge between the nodes - if there is one, add a "RING" attribute
for cycle in cycles:
[
G.edges[x, y]["kind"].add("RING")
for i, x in enumerate(cycle)
for j, y in enumerate(cycle)
if G.has_edge(x, y)
if i != j
]
return G
def add_bond_order(G: nx.Graph) -> nx.Graph:
"""
Assign bond orders to the covalent bond edges between atoms on the basis of bond length. Values are taken from:
Automatic Assignment of Chemical Connectivity to Organic Molecules in the Cambridge Structural Database.
Jon C. Baber and Edward E. Hodgkin*
:param G: Atomic-level protein graph with covalent edges.
:type G: nx.Graph
:return: Atomic-level protein graph with covalent edges annotated with putative bond order.
    :rtype: nx.Graph
"""
for u, v, a in G.edges(data=True):
atom_a = G.nodes[u]["element_symbol"]
atom_b = G.nodes[v]["element_symbol"]
# Assign bonds with hydrogens to 1
if atom_a == "H" or atom_b == "H":
G.edges[u, v]["kind"].add("SINGLE")
# If not, we need to identify the bond type from the bond length
else:
query = f"{atom_a}-{atom_b}"
# We need this try block as the dictionary keys may be X-Y, whereas the query we construct may be Y-X
try:
identify_bond_type_from_mapping(G, u, v, a, query)
except KeyError:
query = f"{atom_b}-{atom_a}"
try:
identify_bond_type_from_mapping(G, u, v, a, query)
except KeyError:
log.debug(
f"Could not identify bond type for {query}. Adding a single bond."
)
G.edges[u, v]["kind"].add("SINGLE")
return G
def identify_bond_type_from_mapping(
G: nx.Graph, u: str, v: str, a: Dict[str, Any], query: str
):
"""
Compares the bond length between two atoms in the graph, and the relevant experimental value by performing a lookup
against the watershed values in:
Automatic Assignment of Chemical Connectivity to Organic Molecules in the Cambridge
Structural Database. Jon C. Baber and Edward E. Hodgkin*
Bond orders are assigned in the order ``triple`` < ``double`` < ``single`` (e.g. if a bond is shorter than the triple bond
watershed (``w_dt``) then it is assigned as a triple bond. Similarly, if a bond is longer than this but shorter than the
double bond watershed (``w_sd``), it is assigned double bond status.
:param G: ``nx.Graph`` of atom-protein structure with atomic edges added
:type G: nx.Graph
:param u: node 1 in edge
:type u: str
:param v: node 2 in edge
:type v: str
:param a: edge data
:type a: Dict[str, Any]
:param query: ``"ELEMENTX-ELEMENTY"`` to perform lookup with (E.g. ``"C-O"``,``"N-N"``)
:type query: str
:return: Graph with atomic edge bond order assigned
:rtype: nx.Graph
"""
# Perform lookup of allowable bond orders for the given atom pair
allowable_order = BOND_ORDERS[query]
# If max double, compare the length to the double watershed distance, w_sd, else assign single
if len(allowable_order) == 2:
if a["bond_length"] < BOND_LENGTHS[query]["w_sd"]:
G.edges[u, v]["kind"].add("DOUBLE")
else:
G.edges[u, v]["kind"].add("SINGLE")
else:
# If max triple, compare the length to the triple watershed distance, w_dt, then double, else assign single
if a["bond_length"] < BOND_LENGTHS[query]["w_dt"]:
G.edges[u, v]["kind"].add("TRIPLE")
elif a["bond_length"] < BOND_LENGTHS[query]["w_sd"]:
G.edges[u, v]["kind"].add("DOUBLE")
else:
G.edges[u, v]["kind"].add("SINGLE")
return G
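# A worked sketch of the watershed logic above (the numbers are illustrative, not taken from
# BOND_LENGTHS): if BOND_ORDERS["C-C"] allowed single/double/triple and BOND_LENGTHS["C-C"]
# were {"w_dt": 1.26, "w_sd": 1.44}, then a C-C edge of length 1.20 would be tagged TRIPLE
# (< w_dt), 1.35 would be tagged DOUBLE (< w_sd), and 1.50 would fall through to SINGLE.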
# The code block below was used in an initial pass at solving the bond order assignment problem based on hybridisation state.
# We instead use a simpler method based on bond lengths, but I am loath to remove this code as it may prove useful later.
"""
def cosinus(x0, x1, x2):
e0 = x0 - x1
e1 = x2 - x1
e0 = e0 / np.linalg.norm(e0)
e1 = e1 / np.linalg.norm(e1)
cosinus = np.dot(e0, e1)
angle = np.arccos(cosinus)
return 180 - np.degrees(angle)
def dihedral(x0, x1, x2, x3):
b0 = -1.0 * (x1 - x0)
b1 = x2 - x1
b2 = x3 - x2
b0xb1 = np.cross(b0, b1)
b1xb2 = np.cross(b2, b1)
b0xb1_x_b1xb2 = np.cross(b0xb1, b1xb2)
y = np.dot(b0xb1_x_b1xb2, b1) * (1.0 / np.linalg.norm(b1))
x = np.dot(b0xb1, b1xb2)
grad = 180 - np.degrees(np.arctan2(y, x))
return grad
def assign_bond_orders(G: nx.Graph) -> nx.Graph:
bond_angles: Dict[str, float] = {}
for n, d in G.nodes(data=True):
neighbours = list(G.neighbors(n))
if len(neighbours) == 1:
G.edges[n, neighbours[0]]["kind"].add("SINGLE")
bond_angles[n] = 0.0
elif len(neighbours) == 2:
cos_angle = cosinus(
G.nodes[n]["coords"],
G.nodes[neighbours[0]]["coords"],
G.nodes[neighbours[1]]["coords"],
)
bond_angles[n] = cos_angle
elif len(neighbours) == 3:
dihed = dihedral(
G.nodes[n]["coords"],
G.nodes[neighbours[0]]["coords"],
G.nodes[neighbours[1]]["coords"],
G.nodes[neighbours[2]]["coords"],
)
bond_angles[n] = dihed
print(bond_angles)
# Assign Bond angles to dataframe
G.graph["pdb_df"]["bond_angles"] = G.graph["pdb_df"]["node_id"].map(
bond_angles
)
print(G.graph["pdb_df"].to_string())
# Assign Hybridisation state from Bond Angles
hybridisation_state = {
n: "sp"
if d > 155
else "sp2"
if d > 115
else "sp3"
if d <= 115
else "UNK"
for n, d in bond_angles.items()
}
G.graph["pdb_df"]["bond_angles"] = G.graph["pdb_df"]["node_id"].map(
hybridisation_state
)
return G
"""
|
from __future__ import annotations
from typing import Any
from rich.console import Group
from rich.progress import BarColumn, Progress
from rich.rule import Rule
from rich.table import Table
from rich.text import Text
from .. import styles
from .paginated_table import PaginatedTableRenderable
class ExecutorStatusTableRenderable(PaginatedTableRenderable):
"""A renderable that displays execution status."""
def __init__(
self,
builds: list[dict[str, Any]],
page_size: int = -1,
page: int = 1,
row: int = 0,
) -> None:
"""A renderable that displays execution status.
Args:
builds (list[dict[str, Any]]): A list of builds to display.
page_size (int): The size of the page before pagination happens. Defaults to -1.
page (int): The starting page. Defaults to 1.
row (int): The starting row. Defaults to 0.
"""
self.builds = builds
super().__init__(
len(builds), page_size=page_size, page=page, row=row, row_size=3
)
def renderables(self, start_index: int, end_index: int) -> list[dict[str, Any]]:
"""Generate a list of renderables.
Args:
start_index (int): The starting index.
end_index (int): The ending index.
Returns:
list[dict[str, Any]]: A list of renderables.
"""
return self.builds[start_index:end_index]
def render_rows(self, table: Table, renderables: list[dict[str, Any]]) -> None:
"""Renders rows for the table.
Args:
table (Table): The table to render rows for.
renderables (list[dict[str, Any]]): The renderables to render.
"""
for build in renderables:
name = build["name"]
progress = Progress(
"[progress.description]{task.description}",
BarColumn(),
"[progress.percentage]{task.percentage:>3.0f}%",
)
completed = 0 if build["progress"] == -1 else build["progress"]
progress.add_task(
"[green]➜ [/]",
completed=completed,
)
row_text = Text(name, style=styles.GREY)
            row_text.highlight_regex(re_highlight=r"\([^)]+\)", style=styles.ORANGE)
            row_text.highlight_regex(re_highlight=r"(#\d+)", style=styles.GREEN)
render_group = Group(
*[
row_text,
progress.get_renderable(),
Rule(style=styles.GREY),
]
)
table.add_row(render_group)
def render_columns(self, table: Table) -> None:
"""Renders columns for the table.
Args:
table (Table): The table to render columns for.
"""
table.show_header = False
table.add_column(no_wrap=True)
|
import pytest
pytestmark = pytest.mark.asyncio
async def test_indices(cms_requester):
async with cms_requester as requester:
resp, status = await requester(
'GET',
'/db/guillotina/@indices'
)
assert 'Item' in resp['types']
assert 'title' in resp['types']['Item']
assert 'text' in resp['types']['Item']['title']
assert 'guillotina.behaviors.dublincore.IDublinCore' in resp['behaviors'] # noqa
assert 'tags' in resp['behaviors']['guillotina.behaviors.dublincore.IDublinCore'] # noqa
assert 'keyword' in resp['behaviors']['guillotina.behaviors.dublincore.IDublinCore']['tags'] # noqa |
from distutils.core import setup
setup(name='ParrotD3MWrapper',
version='1.0.3',
description='A thin wrapper for interacting with New Knowledge time series prediction tool Parrot',
packages=['ParrotD3MWrapper'],
install_requires=["typing",
"Sloth==2.0.3"],
dependency_links=[
"git+https://github.com/NewKnowledge/sloth@82a1e08049531270256f38ca838e6cc7d1119223#egg=Sloth-2.0.3"
],
entry_points = {
'd3m.primitives': [
'time_series_forecasting.arima.Parrot = ParrotD3MWrapper:Parrot'
],
},
)
|
"""Functions to make predictions using the deep learning model"""
import numpy as np
from . import config as cf
from pathlib import Path
import gdown
from fastai.vision import load_learner
from skimage import measure
def fetch_learner(path=Path(__file__).parents[1], model=cf.MODEL):
"""
Returns learner if model file exists in path. If not, download
the model file into the root directory and return the learner
"""
filename = path / model
if filename.exists():
learn = load_learner(path, model)
else:
url = cf.MODEL_URL
        gdown.download(url, str(filename), quiet=False)  # download to the expected path so load_learner can find it
learn = load_learner(path, model)
return learn
def predict_segment(learner, img):
"""
Predicts a segmentation mask using a deep learning based model.
Parameters
-------------------------------------------------
learner : Learner object
The learner used to perform the prediction
img : Image object
The input image. Should be a fastai Image object.
Returns
------------------------------------------------
pred : PyTorch Tensor
Contains segmentation mask data
"""
pred = learner.predict(img)[0]
return 1 - pred.data.numpy()[0]
def get_size_distr(pred):
"""
Obtains the size distribution of particles in an image
using a deep learning based model.
Parameters
-------------------------------------------------
pred : ndarray
The predicted segmentation mask for the image.
Should only have 0s and 1s.
    Returns
    ------------------------------------------------
    pred_labeled : ndarray
        Label image in which each connected region has a unique value.
    unique : ndarray
        Unique labels of the segments, excluding the background.
    size_distr : ndarray
        Number of pixels in each segment of the image as determined
        by the model. Does not include the background.
    """
# labels each connected region with a unique value
pred_labeled = measure.label(pred, background=0, connectivity=1)
unique, size_distr = np.unique(pred_labeled, return_counts=True)
return pred_labeled, unique[1:], size_distr[1:]
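# A minimal end-to-end sketch (the file name is illustrative; `open_image` is fastai v1's image loader):
#
#   from fastai.vision import open_image
#   learn = fetch_learner()
#   mask = predict_segment(learn, open_image('sample.png'))
#   labeled, labels, sizes = get_size_distr(mask)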
|
#!/usr/bin/env python3
from unittest import TestCase
from kubernetes.client.models.v1_object_meta import V1ObjectMeta
from kubernetes.client.models.v1_node import V1Node
from kubernetes.client.models.v1_node_spec import V1NodeSpec
from kubernetes.client.models.v1_node_status import V1NodeStatus
from kubernetes.client.models.v1_node_address import V1NodeAddress
from kubernetes.client.models.v1_node_condition import V1NodeCondition
from kubernetes.client.models.v1_pod import V1Pod
from kubernetes.client.models.v1_pod_spec import V1PodSpec
from kubernetes.client.models.v1_pod_status import V1PodStatus
from kubernetes.client.models.v1_container import V1Container
from kubernetes.client.models.v1_resource_requirements import \
V1ResourceRequirements
from cluster_status import str2bool, ClusterStatus
Mi = 1024 * 1024
MilliCPU = 1000
class MockK8sConfig(object):
def is_initialized(self):
for _, v in self.__dict__.items():
if v is None:
return False
return True
class MockK8sNodeConfig(MockK8sConfig):
def __init__(self):
self.name = None
self.labels = None
self.capacity = None
self.allocatable = None
self.internal_ip = None
self.unschedulable = None
self.ready = None
class MockK8sPodConfig(MockK8sConfig):
    def __init__(self):
self.name = None
self.labels = None
self.namespace = None
self.phase = None
self.node_name = None
# A list of container resource requests
self.container_requests = None
def mock_k8s_node(config):
if not isinstance(config, MockK8sNodeConfig):
raise TypeError("Wrong config type")
if not config.is_initialized():
raise ValueError("Config uninitialized")
node = V1Node()
node.metadata = V1ObjectMeta(name=config.name,
labels=config.labels)
node.spec = V1NodeSpec(unschedulable=config.unschedulable)
address_ip = V1NodeAddress(config.internal_ip, "InternalIP")
conditions = [
V1NodeCondition(type="Ready", status=config.ready)
]
node.status = V1NodeStatus(addresses=[address_ip],
conditions=conditions,
capacity=config.capacity,
allocatable=config.allocatable)
return node
def mock_k8s_pod(config):
if not isinstance(config, MockK8sPodConfig):
raise TypeError("Wrong config type")
if not config.is_initialized():
raise ValueError("Config uninitialized")
pod = V1Pod()
pod.metadata = V1ObjectMeta(name=config.name,
labels=config.labels,
namespace=config.namespace)
containers = []
for i, requests in enumerate(config.container_requests):
r = V1ResourceRequirements(requests=requests)
c = V1Container(name=config.name + str(i), resources=r)
containers.append(c)
pod.spec = V1PodSpec(node_name=config.node_name,
containers=containers)
pod.status = V1PodStatus(phase=config.phase)
return pod
class TestClusterStatus(TestCase):
def test_str2bool(self):
self.assertTrue(str2bool("True"))
self.assertTrue(str2bool("1"))
self.assertTrue(str2bool("Y"))
self.assertTrue(str2bool("Yes"))
self.assertTrue(str2bool("T"))
self.assertFalse(str2bool("false"))
self.assertFalse(str2bool("0"))
def test_to_dict(self):
inclusion = [
"gpu_capacity",
"gpu_used",
"gpu_available",
"gpu_unschedulable",
"gpu_reserved",
"node_status",
"user_status",
"user_status_preemptable"
]
exclusion = [
"prometheus_node",
"nodes",
"pods",
"node_statuses",
"pod_statuses",
"user_info",
"user_info_preemptable",
"dict_exclusion"
]
cs = ClusterStatus({}, [], [])
d = cs.to_dict()
for inc in inclusion:
self.assertTrue(inc in d)
for exc in exclusion:
self.assertFalse(exc in d)
def test_compute_cluster_status(self):
"""
3 nodes
node1:
sku: m_type1
gpu:
P40: 4
cpu: 10
memory: 102400Mi
node2:
sku: m_type2
gpu:
P40: 0
cpu: 20
memory: 409600Mi
node3:
sku: m_type3
gpu:
P40: 4
unschedulable: True
cpu: 12
memory: 102400Mi
4 pods
pod1:
node:
node1
gpu:
P40: 1
cpu: 4
memory: 81920Mi
user: user1
pod2:
node:
node2
gpu:
P40: 0
cpu: 16
memory: 348160Mi
user: user2
pod3:
node:
node1
gpu:
P40: 2
cpu: 2
memory: 2048Mi
user: user3
pod4:
node:
node3
gpu:
P40: 2
cpu: 6
memory: 61440Mi
user: user1
"""
# Create node1
n1_config = MockK8sNodeConfig()
n1_config.name = "node1"
n1_config.labels = {"gpuType": "P40", "sku": "m_type1"}
n1_config.capacity = {
"nvidia.com/gpu": "4",
"cpu": "10",
"memory": "102400Mi"
}
n1_config.allocatable = {
"nvidia.com/gpu": "4",
"cpu": "10",
"memory": "102400Mi"
}
n1_config.internal_ip = "10.0.0.1"
n1_config.unschedulable = False
n1_config.ready = "True"
node1 = mock_k8s_node(n1_config)
# Create node2
n2_config = MockK8sNodeConfig()
n2_config.name = "node2"
n2_config.labels = {"sku": "m_type2"}
n2_config.capacity = {
"cpu": "20",
"memory": "409600Mi"
}
n2_config.allocatable = {
"cpu": "20",
"memory": "409600Mi"
}
n2_config.internal_ip = "10.0.0.2"
n2_config.unschedulable = False
n2_config.ready = "True"
node2 = mock_k8s_node(n2_config)
# Create node3
n3_config = MockK8sNodeConfig()
n3_config.name = "node3"
n3_config.labels = {"gpuType": "P40", "sku": "m_type3"}
n3_config.capacity = {
"nvidia.com/gpu": "4",
"cpu": "12",
"memory": "102400Mi"
}
n3_config.allocatable = {
"nvidia.com/gpu": "4",
"cpu": "12",
"memory": "102400Mi"
}
n3_config.internal_ip = "10.0.0.3"
n3_config.unschedulable = False
n3_config.ready = "Unknown"
node3 = mock_k8s_node(n3_config)
# Create nodes list
nodes = [node1, node2, node3]
# Create pod1
p1_config = MockK8sPodConfig()
p1_config.name = "pod1"
p1_config.labels = {
"gpuType": "P40",
"sku": "m_type1",
"userName": "user1"
}
p1_config.namespace = "default"
p1_config.phase = "Running"
p1_config.node_name = "node1"
p1_config.container_requests = [{
"nvidia.com/gpu": "1",
"cpu": "4",
"memory": "81920Mi"
}]
pod1 = mock_k8s_pod(p1_config)
# Create pod2
p2_config = MockK8sPodConfig()
p2_config.name = "pod2"
p2_config.labels = {
"userName": "user2",
"sku": "m_type2"
}
p2_config.namespace = "default"
p2_config.phase = "Running"
p2_config.node_name = "node2"
p2_config.container_requests = [{
"cpu": "16",
"memory": "348160Mi"
}]
pod2 = mock_k8s_pod(p2_config)
# Create pod3
p3_config = MockK8sPodConfig()
p3_config.name = "pod3"
p3_config.labels = {
"gpuType": "P40",
"sku": "m_type1",
"userName": "user3"
}
p3_config.namespace = "kube-system"
p3_config.phase = "Running"
p3_config.node_name = "node1"
p3_config.container_requests = [{
"nvidia.com/gpu": "2",
"cpu": "2",
"memory": "2048Mi"
}]
pod3 = mock_k8s_pod(p3_config)
# Create pod4
p4_config = MockK8sPodConfig()
p4_config.name = "pod4"
p4_config.labels = {
"gpuType": "P40",
"sku": "m_type3",
"userName": "user1"
}
p4_config.namespace = "default"
p4_config.phase = "Running"
p4_config.node_name = "node3"
p4_config.container_requests = [{
"nvidia.com/gpu": "2",
"cpu": "6",
"memory": "61440Mi"
}]
pod4 = mock_k8s_pod(p4_config)
# Create pods list
pods = [pod1, pod2, pod3, pod4]
cs = ClusterStatus({}, nodes, pods)
cs.compute()
# Cluster GPU status
self.assertEqual({"P40": 8}, cs.gpu_capacity)
self.assertEqual({"P40": 5}, cs.gpu_used)
self.assertEqual({"P40": 1}, cs.gpu_available)
self.assertEqual({"P40": 4}, cs.gpu_unschedulable)
self.assertEqual({"P40": 2}, cs.gpu_reserved)
# Cluster CPU status
self.assertEqual({
"m_type1": 10 * MilliCPU,
"m_type2": 20 * MilliCPU,
"m_type3": 12 * MilliCPU
}, cs.cpu_capacity)
self.assertEqual({
"m_type1": 6 * MilliCPU,
"m_type2": 16 * MilliCPU,
"m_type3": 6 * MilliCPU
}, cs.cpu_used)
self.assertEqual({
"m_type1": 4 * MilliCPU,
"m_type2": 4 * MilliCPU
}, cs.cpu_available)
self.assertEqual({
"m_type3": 12 * MilliCPU
}, cs.cpu_unschedulable)
self.assertEqual({
"m_type3": 6 * MilliCPU
}, cs.cpu_reserved)
# Cluster memory status
self.assertEqual({
"m_type1": 102400 * Mi,
"m_type2": 409600 * Mi,
"m_type3": 102400 * Mi
}, cs.memory_capacity)
self.assertEqual({
"m_type1": 83968 * Mi,
"m_type2": 348160 * Mi,
"m_type3": 61440 * Mi
}, cs.memory_used)
self.assertEqual({
"m_type1": 18432 * Mi,
"m_type2": 61440 * Mi
}, cs.memory_available)
self.assertEqual({
"m_type3": 102400 * Mi
}, cs.memory_unschedulable)
self.assertEqual({
"m_type3": 40960 * Mi
}, cs.memory_reserved)
# Cluster node status
t_node1_status = {
"name": "node1",
"labels": {"gpuType": "P40", "sku": "m_type1"},
"gpuType": "P40",
"scheduled_service": ["P40", "m_type1"],
"gpu_allocatable": {"P40": 4},
"gpu_capacity": {"P40": 4},
"gpu_used": {"P40": 3},
"gpu_preemptable_used": {},
"cpu_allocatable": {"m_type1": 10 * MilliCPU},
"cpu_capacity": {"m_type1": 10 * MilliCPU},
"cpu_used": {"m_type1": 6 * MilliCPU},
"cpu_preemptable_used": {},
"memory_allocatable": {"m_type1": 102400 * Mi},
"memory_capacity": {"m_type1": 102400 * Mi},
"memory_used": {"m_type1": 83968 * Mi},
"memory_preemptable_used": {},
"InternalIP": "10.0.0.1",
"pods": [
"pod1 : user1 (gpu #:1)"
],
"unschedulable": False
}
t_node2_status = {
"name": "node2",
"labels": {"sku": "m_type2"},
"gpuType": "",
"scheduled_service": ["m_type2"],
"gpu_allocatable": {},
"gpu_capacity": {},
"gpu_used": {},
"gpu_preemptable_used": {},
"cpu_allocatable": {"m_type2": 20 * MilliCPU},
"cpu_capacity": {"m_type2": 20 * MilliCPU},
"cpu_used": {"m_type2": 16 * MilliCPU},
"cpu_preemptable_used": {},
"memory_allocatable": {"m_type2": 409600 * Mi},
"memory_capacity": {"m_type2": 409600 * Mi},
"memory_used": {"m_type2": 348160 * Mi},
"memory_preemptable_used": {},
"InternalIP": "10.0.0.2",
"pods": [
"pod2 : user2 (gpu #:0)"
],
"unschedulable": False
}
t_node3_status = {
"name": "node3",
"labels": {"gpuType": "P40", "sku": "m_type3"},
"gpuType": "P40",
"scheduled_service": ["P40", "m_type3"],
"gpu_allocatable": {"P40": 4},
"gpu_capacity": {"P40": 4},
"gpu_used": {"P40": 2},
"gpu_preemptable_used": {},
"cpu_allocatable": {"m_type3": 12 * MilliCPU},
"cpu_capacity": {"m_type3": 12 * MilliCPU},
"cpu_used": {"m_type3": 6 * MilliCPU},
"cpu_preemptable_used": {},
"memory_allocatable": {"m_type3": 102400 * Mi},
"memory_capacity": {"m_type3": 102400 * Mi},
"memory_used": {"m_type3": 61440 * Mi},
"memory_preemptable_used": {},
"InternalIP": "10.0.0.3",
"pods": [
"pod4 : user1 (gpu #:2)"
],
"unschedulable": True
}
t_node_status = [
t_node1_status,
t_node2_status,
t_node3_status
]
self.assertEqual(t_node_status, cs.node_status)
# Cluster user status
t_user_status = [
{
"userName": "user1",
"userGPU": {"P40": 3},
"userCPU": {
"m_type1": 4 * MilliCPU,
"m_type3": 6 * MilliCPU
},
"userMemory": {
"m_type1": 81920 * Mi,
"m_type3": 61440 * Mi
}
},
{
"userName": "user2",
"userGPU": {},
"userCPU": {"m_type2": 16 * MilliCPU},
"userMemory": {"m_type2": 348160 * Mi}
},
{
"userName": "user3",
"userGPU": {"P40": 2},
"userCPU": {"m_type1": 2 * MilliCPU},
"userMemory": {"m_type1": 2048 * Mi}
}
]
self.assertEqual(t_user_status, cs.user_status)
t_user_status_preemptable = [
{
"userName": "user%s" % i,
"userGPU": {},
"userCPU": {},
"userMemory": {}
}
for i in range(1, 4)
]
self.assertEqual(t_user_status_preemptable,
cs.user_status_preemptable)
|
from .bulk_email import BulkEmail
from .logger import Logger
import sys
if __name__ == '__main__':
    # validate CLI arguments before doing anything else
    if len(sys.argv) != 4:
        print("Usage: python -m bulkemail bulkemail/recipients.txt bulkemail/subject.txt bulkemail/body.txt")
        sys.exit(1)
    # init logger
    logger = Logger.getLogger()
with open(sys.argv[1], 'r') as f:
recipients = [i.strip() for i in f.readlines()]
print(recipients)
with open(sys.argv[2], 'r') as f:
subject = f.read()
with open(sys.argv[3], 'r') as f:
body = f.read()
if len(recipients) == 0:
logger.warning("Recipient list is empty. Can't send any emails.")
        sys.exit(1)
launch_code = BulkEmail.generate_launch_code(length=4)
user_input = input(f"""You are about to email {len(recipients)} {"people" if len(recipients)!=1 else "person"}. Are you sure about this?
Enter the following: {launch_code}
-> """)
if user_input != launch_code:
print("Abort.")
        sys.exit(0)
BulkEmail.bulk_email(recipients, subject, body)
logger.info(f'{recipients}')
|
import pytest
from flowpipe import Graph, Node
@Node(outputs=["out"])
def DemoNode(in_):
return {"out": in_}
def _nested_graph():
"""Create this nested subgraph:
+---------------+ +---------------+ +---------------+ +---------------+
| DemoNode | | DemoNode | | DemoNode | | DemoNode |
|---------------| |---------------| |---------------| |---------------|
o in_<> | +--->o in_<> | +--->o in_<> | +--->o in_<> |
| out o-----+ | out o-----+ | out o-----+ | out o
+---------------+ +---------------+ +---------------+ +---------------+
+-------------+
| sub0-2 |
|-------------|
o in_<> |
| out o
+-------------+
+-------------+
| sub1-2 |
|-------------|
o in_<> |
| out o
+-------------+
+-------------+
| sub2-2 |
|-------------|
o in_<> |
| out o
+-------------+
"""
main = Graph("main")
DemoNode(graph=main)
parent = main
for i in range(3):
sub = Graph("sub" + str(i))
DemoNode(graph=sub)
DemoNode(graph=sub, name="sub" + str(i) + "-2")
parent["DemoNode"].outputs["out"] >> sub["DemoNode"].inputs["in_"]
parent = sub
return main
def test_nodes_only_contains_levels_of_graph():
graph = _nested_graph()
assert len(graph.nodes) == 1
def test_subgraph_names_need_to_be_unique():
"""
+--------------------+ +--------------------+
| node1 | | node1 |
|--------------------| |--------------------|
o in_<> | +--->o in_<{"a": null> |
| out %-----+ | out o
| out.a o | +--------------------+
+--------------------+ | +--------------------+
+------------+ | | node2 |
| node2 | | |--------------------|
|------------| +--->o in_<{"a": null> |
o in_<> | | out o
| out o +--------------------+
+------------+
"""
main = Graph("main")
DemoNode(name="node1", graph=main)
DemoNode(name="node2", graph=main)
sub1 = Graph("sub")
DemoNode(name="node1", graph=sub1)
DemoNode(name="node2", graph=sub1)
sub2 = Graph("sub")
DemoNode(name="node1", graph=sub2)
DemoNode(name="node2", graph=sub2)
main["node1"].outputs["out"] >> sub1["node1"].inputs["in_"]
with pytest.raises(ValueError):
main["node1"].outputs["out"] >> sub2["node1"].inputs["in_"]
with pytest.raises(ValueError):
main["node1"].outputs["out"]["a"] >> sub2["node1"].inputs["in_"]
with pytest.raises(ValueError):
main["node1"].outputs["out"]["a"] >> sub2["node1"].inputs["in_"]["a"]
with pytest.raises(ValueError):
main["node1"].outputs["out"] >> sub2["node1"].inputs["in_"]["a"]
# Connecting to the same graph does not throw an error
#
main["node1"].outputs["out"] >> sub1["node2"].inputs["in_"]
def test_subgraphs_can_be_accessed_by_name():
graph = _nested_graph()
assert len(graph.subgraphs) == 3
assert graph.subgraphs['sub0'].name == 'sub0'
assert graph.subgraphs['sub1'].name == 'sub1'
assert graph.subgraphs['sub2'].name == 'sub2'
def test_plugs_can_be_promoted_to_graph_level_under_new_name():
main = Graph("main")
DemoNode(name="node1", graph=main)
main["node1"].inputs["in_"].promote_to_graph()
main["node1"].outputs["out"].promote_to_graph(name="graph_out")
assert main.inputs["in_"] is main["node1"].inputs["in_"]
assert main.outputs["graph_out"] is main["node1"].outputs["out"]
def test_plugs_can_only_be_promoted_once_to_graph_level():
main = Graph("main")
DemoNode(name="node1", graph=main)
main["node1"].inputs["in_"].promote_to_graph()
main["node1"].outputs["out"].promote_to_graph()
with pytest.raises(ValueError):
main["node1"].inputs["in_"].promote_to_graph(name="different_name")
with pytest.raises(ValueError):
main["node1"].outputs["out"].promote_to_graph(name="different_name")
def test_subplugs_can_not_be_promoted_individually():
main = Graph("main")
DemoNode(name="node1", graph=main)
with pytest.raises(TypeError):
main["node1"].inputs["in_"]["sub"].promote_to_graph()
with pytest.raises(TypeError):
main["node1"].outputs["out"]["sub"].promote_to_graph()
# Promoting the main plug will of course give access to subplugs as well
main["node1"].inputs["in_"].promote_to_graph()
assert main.inputs["in_"]["sub"] == main["node1"].inputs["in_"]["sub"]
def test_serialize_nested_graph_to_json():
graph = _nested_graph()
serialized = graph.to_json()
deserialized = Graph.from_json(serialized).to_json()
assert serialized == deserialized
def test_access_node_of_subgraph_by_key():
main = Graph("main")
main_node = DemoNode(name="node", graph=main)
sub = Graph("sub")
sub_node = DemoNode(name="node", graph=sub)
main["node"].outputs["out"] >> sub["node"].inputs["in_"]
assert main["node"] == main_node
assert main["sub.node"] == sub_node
|
#!/usr/bin/env python
# coding: utf-8
from xumm.resource import XummResource
from typing import Callable, Any
from xumm.ws_client import WSClient
from ..xumm_api import (
XummGetPayloadResponse as XummPayload,
XummPostPayloadResponse as CreatedPayload,
)
class PayloadAndSubscription(XummResource):
"""
Attributes:
model_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
required = {
'created': True,
'payload': True,
'resolve': True,
'resolved': True,
'websocket': True,
}
model_types = {
'created': dict,
'payload': dict,
'resolve': Callable,
'resolved': Callable,
'websocket': WSClient,
}
attribute_map = {
'created': 'created',
'payload': 'payload',
'resolve': 'resolve',
'resolved': 'resolved',
'websocket': 'websocket',
}
def refresh_from(cls, **kwargs):
"""Returns the dict as a model
:param kwargs: A dict.
:type: dict
:return: The PayloadAndSubscription of this PayloadAndSubscription. # noqa: E501
:rtype: PayloadAndSubscription
"""
cls.sanity_check(kwargs)
cls._created = None
cls._payload = None
cls._resolve = None
cls._resolved = None
cls._websocket = None
cls.created = CreatedPayload(**kwargs['created'])
cls.payload = XummPayload(**kwargs['payload'])
cls.resolve = kwargs['resolve']
cls.resolved = kwargs['resolved']
        cls.websocket = kwargs['websocket']
        return cls
@property
def created(cls) -> CreatedPayload:
"""Gets the created of this PayloadAndSubscription.
:return: The created of this PayloadAndSubscription.
:rtype: PostPayloadResponse
"""
return cls._created
@created.setter
def created(cls, created: CreatedPayload):
"""Sets the created of this PayloadAndSubscription.
:param created: The created of this PayloadAndSubscription.
        :type created: CreatedPayload
"""
if created is None:
raise ValueError("Invalid value for `created`, must not be `None`") # noqa: E501
cls._created = created
@property
def payload(cls) -> XummPayload:
"""Gets the payload of this PayloadAndSubscription.
:return: The payload of this PayloadAndSubscription.
:rtype: GetPayloadResponse
"""
return cls._payload
@payload.setter
def payload(cls, payload: XummPayload):
"""Sets the payload of this PayloadAndSubscription.
:param payload: The payload of this PayloadAndSubscription.
:type payload: XummPayload
"""
if payload is None:
raise ValueError("Invalid value for `payload`, must not be `None`") # noqa: E501
cls._payload = payload
@property
def resolve(cls) -> Callable[[Any], Any]:
"""Gets the resolve of this PayloadAndSubscription.
:return: The resolve of this PayloadAndSubscription.
:rtype: Callable
"""
return cls._resolve
@resolve.setter
def resolve(cls, resolve: Callable[[Any], Any]):
"""Sets the resolve of this PayloadAndSubscription.
:param resolve: The resolve of this PayloadAndSubscription.
        :type resolve: Callable
"""
if resolve is None:
raise ValueError("Invalid value for `resolve`, must not be `None`") # noqa: E501
cls._resolve = resolve
@property
def resolved(cls) -> Callable[[Any], Any]:
"""Gets the resolved of this PayloadAndSubscription.
:return: The resolved of this PayloadAndSubscription.
:rtype: Callable
"""
return cls._resolved
@resolved.setter
def resolved(cls, resolved: Callable[[Any], Any]):
"""Sets the resolved of this PayloadAndSubscription.
:param resolved: The resolved of this PayloadAndSubscription.
        :type resolved: Callable
"""
if resolved is None:
raise ValueError("Invalid value for `resolve`, must not be `None`") # noqa: E501
cls._resolved = resolved
@property
def websocket(cls) -> WSClient:
"""Gets the websocket of this PayloadAndSubscription.
:return: The websocket of this PayloadAndSubscription.
:rtype: WSClient
"""
return cls._websocket
@websocket.setter
def websocket(cls, websocket: WSClient):
"""Sets the websocket of this PayloadAndSubscription.
:param websocket: The websocket of this PayloadAndSubscription.
        :type websocket: WSClient
"""
if websocket is None:
raise ValueError("Invalid value for `websocket`, must not be `None`") # noqa: E501
cls._websocket = websocket
|
# Fibonacci Numbers
# Find the nth fibonacci number.
# Fibonacci numbers, commonly denoted Fn, form a sequence,
# called the Fibonacci sequence, such that each number is the sum
# of the two preceding ones, starting from 0 and 1.
# There are a lot of ways to compute Fibonacci numbers, but we will
# look at the DP (memoization) approach here. It is not the most efficient approach available.
# cache for lookup, shared across recursive calls so each value is computed only once
fib = {}
def nth_fib(n):
    # if given number is negative
    if n < 0:
        return -1
    # base case
    if n <= 1:
        return n
    # if the value is not already computed
    if n not in fib:
        # compute it and store it in fib[n]
        fib[n] = nth_fib(n - 1) + nth_fib(n - 2)
    # return the value
    return fib[n]
# Driver code
print(nth_fib(10))
# Time Complexity : O(n)
# Space Complexity : O(n)
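
# A minimal bottom-up sketch of the same computation (illustrative only, not
# part of the original exercise): iterating avoids recursion entirely while
# keeping the O(n) running time and O(1) extra space.
def nth_fib_iterative(n):
    if n < 0:
        return -1
    prev, curr = 0, 1
    for _ in range(n):
        prev, curr = curr, prev + curr
    return prev
# nth_fib_iterative(10) == 55, matching the recursive version above.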
|
import struct
from mod_pywebsocket import stream
def web_socket_do_extra_handshake(request):
pass
def web_socket_transfer_data(request):
line = request.ws_stream.receive_message()
if line is None:
return
if line == '-':
data = ''
elif line == '--':
data = 'X'
else:
code, reason = line.split(' ', 1)
data = struct.pack('!H', int(code)) + reason.encode('utf-8')
request.connection.write(stream.create_close_frame(data))
request.server_terminated = True
# Wait for Close frame from client.
request.ws_stream.receive_message()
|
from tarpan.shared.compare import model_weights
def test_weights():
weights = model_weights(deviances=[[1, 2, 3], [2, 3, 4], [7, 8, 9]])
actual_weights = [
round(weight, 5)
for weight in weights
]
assert actual_weights == [0.81749, 0.18241, 0.0001]
|
from django.db import models
from django.utils import timezone
class Sector(models.Model):
name = models.CharField(max_length = 50, db_index = True)
updated_at = models.DateTimeField(auto_now = True)
class Meta:
unique_together = ('name',)
class Market(models.Model):
name = models.CharField(max_length = 50, db_index = True)
updated_at = models.DateTimeField(auto_now = True)
class Meta:
unique_together = ('name',)
class Stock(models.Model):
sector = models.ForeignKey(Sector, on_delete = models.CASCADE, related_name = 'stocks')
market = models.ForeignKey(Market, on_delete = models.CASCADE, related_name = 'stocks')
code = models.CharField(max_length = 10, db_index = True)
name = models.TextField(max_length = 100)
    industry = models.TextField(max_length = 500)
updated_at = models.DateTimeField(auto_now = True)
class DailyStock(models.Model):
stock = models.ForeignKey(Stock, on_delete = models.CASCADE, related_name = 'daily_stocks')
date = models.DateField(db_index = True)
open = models.FloatField()
close = models.FloatField()
low = models.FloatField()
high = models.FloatField()
    amount_of_change = models.FloatField(null = True) # previous day's close - current day's close
    rsi = models.FloatField(null = True) # RSI (Relative Strength Index)
    sma5 = models.FloatField(null = True) # 5-day simple moving average
    sma25 = models.FloatField(null = True) # 25-day simple moving average
    sma75 = models.FloatField(null = True) # 75-day simple moving average
updated_at = models.DateTimeField(auto_now = True)
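
# Illustrative sketch only (not part of the original models): one way the
# derived columns above could be filled for a single Stock, assuming its
# DailyStock rows are ordered by date. The helper name and approach are assumptions.
def fill_indicators_sketch(stock):
    daily = list(stock.daily_stocks.order_by('date'))
    closes = [d.close for d in daily]
    for i, d in enumerate(daily):
        if i > 0:
            # previous day's close - current day's close
            d.amount_of_change = closes[i - 1] - closes[i]
        if i >= 4:
            d.sma5 = sum(closes[i - 4:i + 1]) / 5
        if i >= 24:
            d.sma25 = sum(closes[i - 24:i + 1]) / 25
        if i >= 74:
            d.sma75 = sum(closes[i - 74:i + 1]) / 75
        d.save()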
|
# Program that prints the highest age entered.
m=0
i1=int(input("Digite uma idade: "))
if(i1>m):
m=i1
i2=int(input("Digite uma idade: "))
if(i2>m):
m=i2
i3=int(input("Digite uma idade: "))
if(i3>m):
m=i3
i4=int(input("Digite uma idade: "))
if(i4>m):
m=i4
i5=int(input("Digite uma idade: "))
if(i5>m):
m=i5
print(m)
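# A loop-based sketch of the same logic (illustrative only, kept commented out
# so it does not prompt for extra input when the script runs):
# maior = 0
# for _ in range(5):
#     idade = int(input("Digite uma idade: "))
#     if idade > maior:
#         maior = idade
# print(maior)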
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
from unittest.mock import patch
import pytest
from preggy import expect
from tornado.testing import gen_test
from tests.handlers.test_base_handler import BaseImagingTestCase
from thumbor.config import Config
from thumbor.context import Context, ServerParameters
from thumbor.engines.pil import Engine
from thumbor.importer import Importer
# pylint: disable=broad-except,abstract-method,attribute-defined-outside-init,line-too-long,too-many-public-methods
# pylint: disable=too-many-lines
class EngineLoadException(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY="ACME-SEC")
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.STORAGE = "thumbor.storages.no_storage"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.FILTERS = []
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(
8889, "localhost", "thumbor.conf", None, "info", None
)
server.security_key = "ACME-SEC"
return Context(server, cfg, importer)
@patch.object(Engine, "load", side_effect=ValueError)
@gen_test
async def test_should_error_on_engine_load_exception(self, _):
response = await self.async_fetch("/unsafe/image.jpg")
expect(response.code).to_equal(500)
@pytest.mark.skip(
reason="I disagree with this test. If the engine fails we "
"should get an error, otherwise we may end up caching an invalid crop."
)
@gen_test
async def test_should_release_ioloop_on_error_on_engine_exception(self):
response = await self.async_fetch("/unsafe/fit-in/134x134/940x2.png")
expect(response.code).to_equal(200)
@pytest.mark.skip(
reason="I disagree with this test. If the engine fails we "
"should get an error, otherwise we may end up caching an invalid crop."
)
@gen_test
async def test_should_exec_other_operations_on_error_on_engine_exception(
self,
): # NOQA
response = await self.async_fetch(
"/unsafe/fit-in/134x134/filters:equalize()/940x2.png"
)
expect(response.code).to_equal(200)
@patch.object(Engine, "read", side_effect=Exception)
@gen_test
async def test_should_fail_with_500_upon_engine_read_exception(
self, _
): # NOQA
response = await self.async_fetch("/unsafe/fit-in/134x134/940x2.png")
expect(response.code).to_equal(500)
|
from fastapi.testclient import TestClient
def test_create_tag(client: TestClient) -> None:
res = client.post("/api/tags/", json={"name": "tag name"})
assert res.status_code == 200
assert res.json()["name"] == "tag name"
def test_create_multiple_tags(client: TestClient) -> None:
res = client.post("/api/tags/", json={"name": "tag 1"})
assert res.status_code == 200
res = client.post("/api/tags/", json={"name": "tag 2"})
assert res.status_code == 200
res = client.get("/api/tags/")
assert res.status_code == 200
data = res.json()
assert len(data) == 2
assert set([x["name"] for x in data]) == {"tag 1", "tag 2"}
def test_get_id(client: TestClient) -> None:
res = client.post("/api/tags/", json={"name": "tag name"})
assert res.status_code == 200
id = res.json()["id"]
res = client.get(f"/api/tags/{id}")
assert res.status_code == 200
assert res.json()["name"] == "tag name"
def test_get_unknown_id(client: TestClient) -> None:
res = client.post("/api/tags/", json={"name": "tag name"})
assert res.status_code == 200
res = client.get("/api/tags/hi")
assert res.status_code == 404
def test_get_name(client: TestClient) -> None:
res = client.post("/api/tags/", json={"name": "tag name"})
assert res.status_code == 200
id = res.json()["id"]
res = client.get("/api/tags/name", params={"name": "tag name"})
assert res.status_code == 200
assert res.json()["id"] == id
def test_get_unknown_name(client: TestClient) -> None:
res = client.post("/api/tags/", json={"name": "tag name"})
assert res.status_code == 200
res = client.get("/api/tags/name", params={"name": "unknown name"})
assert res.status_code == 404
|
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.db import connection
from .models import Task
from .forms import TaskForm
def task_list(request):
user = request.GET.get('user', "me")
query = "SELECT * from sqli_task WHERE owner ='" + user + "'"
cursor = connection.cursor()
cursor.execute(query)
tasks = cursor.fetchall()
data = {
'tasks': tasks
}
return render(
request,
'tasklist.html',
data
)
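
# Note: task_list above builds its SQL by string concatenation from an
# unvalidated query parameter, so it is vulnerable to SQL injection (which
# appears to be intentional for this demo app). For reference, a sketch of the
# same lookup using a parameterized query (the function name is illustrative):
def task_list_parameterized(request):
    user = request.GET.get('user', "me")
    cursor = connection.cursor()
    # placeholders let the database driver escape the value instead of interpolating it
    cursor.execute("SELECT * FROM sqli_task WHERE owner = %s", [user])
    tasks = cursor.fetchall()
    return render(request, 'tasklist.html', {'tasks': tasks})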
def task_new(request):
    if request.method == 'POST':
        form = TaskForm(request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect('/')
    else:
        form = TaskForm()
    return render(request, 'task_form.html', {'form': form})
|
'''
Exception class
'''
class PAException(Exception):
'''
Custom exception class
'''
def __init__(self, status_code, message, errorCode):
super(PAException, self).__init__(message)
self.status_code = status_code
self.errorCode = errorCode
|
from ctypes import c_double, cdll, c_int, c_byte
from numpy.ctypeslib import ndpointer
# load library
lib = cdll.LoadLibrary("libtest.so")
# define args types
lib.ExportedFunction.argtypes = [c_double]*3 + [c_int]
# define a lovely function
def lovely_python_function(s0, s1, s2, N):
lib.ExportedFunction.restype = ndpointer(dtype = c_double, shape = (N,))
return lib.ExportedFunction(s0, s1, s2, N)
# test
a = lovely_python_function(2.0, 2.1, 2.2, 10)
# expected (2.0 + 2.1 + 2.2 = 6.3) (N=10 times)
print(a)
# prints [ 6.3 6.3 6.3 6.3 6.3 6.3 6.3 6.3 6.3 6.3]
|
##############################################################################
#
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
# Modified for Roundup:
#
# 1. more informative traceback info
"""Generic Python Expression Handler
"""
from TALES import CompilerError
from sys import exc_info
class getSecurityManager:
'''Null security manager'''
def validate(self, *args, **kwargs):
return 1
addContext = removeContext = validateValue = validate
class PythonExpr:
def __init__(self, name, expr, engine):
self.expr = expr = expr.strip().replace('\n', ' ')
try:
d = {}
exec 'def f():\n return %s\n' % expr.strip() in d
self._f = d['f']
except:
raise CompilerError, ('Python expression error:\n'
'%s: %s') % exc_info()[:2]
self._get_used_names()
def _get_used_names(self):
self._f_varnames = vnames = []
for vname in self._f.func_code.co_names:
if vname[0] not in '$_':
vnames.append(vname)
def _bind_used_names(self, econtext, _marker=[]):
# Bind template variables
names = {'CONTEXTS': econtext.contexts}
vars = econtext.vars
getType = econtext.getCompiler().getTypes().get
for vname in self._f_varnames:
val = vars.get(vname, _marker)
if val is _marker:
has = val = getType(vname)
if has:
val = ExprTypeProxy(vname, val, econtext)
names[vname] = val
else:
names[vname] = val
return names
def __call__(self, econtext):
__traceback_info__ = 'python expression "%s"'%self.expr
f = self._f
f.func_globals.update(self._bind_used_names(econtext))
return f()
def __str__(self):
return 'Python expression "%s"' % self.expr
def __repr__(self):
return '<PythonExpr %s>' % self.expr
class ExprTypeProxy:
'''Class that proxies access to an expression type handler'''
def __init__(self, name, handler, econtext):
self._name = name
self._handler = handler
self._econtext = econtext
def __call__(self, text):
return self._handler(self._name, text,
self._econtext.getCompiler())(self._econtext)
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import division # float division by default - integer division can still be done explicitly with '//'
import math
import numpy as np
from scipy.spatial import cKDTree,KDTree
from time import time
from pprint import pprint
size = 100
def benchmark_construction(constructor,data):
start_time = time()
n = 0
while True:
tree = constructor(data)
n += 1
        # check every 10 operations: abort if running for at least 5 seconds
if n % 10 == 0 and time() - start_time > 5:
break
if n >= 1000:
break
end_time = time()
time_per = (end_time-start_time) / n
op_per_sec = 1/time_per
# print "%s, constructions per second: %.2f" % (constructor.__name__, op_per_sec)
return op_per_sec
def benchmark_query(constructor,data,r,p,num_queries=1000):
tree = constructor(data)
queries = np.random.uniform(0,size,(num_queries,2))
start_time = time()
n = 0
while True:
x,y = queries[n]
# rect
xy_list = tree.query_ball_point((x,y),r,p)
n += 1
        # check every 10 operations: abort if running for at least 5 seconds
if n % 10 == 0 and time() - start_time > 5:
break
if n >= queries.shape[0]:
break
end_time = time()
time_per = (end_time-start_time) / n
op_per_sec = 1/time_per
# print "%s, operations per second: %.2f" % (constructor.__name__, op_per_sec)
return op_per_sec
def benchmark(constructor,data):
result = {
"construction":benchmark_construction(constructor,data),
"query_rect":benchmark_query(constructor,data,10,np.inf),
"query_circle":benchmark_query(constructor,data,10,2),
}
return result
def main(module_name):
if module_name == '__main__':
results = {}
for num_points in [0,10,100,1000,10000,100000]:
            data = np.random.uniform(0, size, (num_points, 2))
results[num_points] = (benchmark(KDTree, data), benchmark(cKDTree, data))
pprint(results)
main(__name__)
|
from sys import setrecursionlimit
"""
Given: he can hop 1 step, 2 steps, or 3 steps at a time.
1 <= N <= 30, so there is always at least one step.
Time : O(3^n) exponential
"""
def ways_to_reach(stairs) -> int:
# Base Case -> not particular to this question, as here it is given the N >= 1
if stairs == 0:
return 0 # 0 ways to reach
# Base Case
if stairs == 1:
return 1 # One possible way
elif stairs == 2:
return 2
elif stairs == 3: # as we have included stairs == 0 base case, we can exclude it
return 4
# Hypothesis
total_ways_to_reach___n_minus_1_stairs = ways_to_reach(stairs - 1)
total_ways_to_reach___n_minus_2_stairs = ways_to_reach(stairs - 2)
total_ways_to_reach___n_minus_3_stairs = ways_to_reach(stairs - 3)
# Induction
return total_ways_to_reach___n_minus_1_stairs \
+ total_ways_to_reach___n_minus_2_stairs \
+ total_ways_to_reach___n_minus_3_stairs
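
# A memoized sketch of the same recursion (illustrative only, not part of the
# original exercise): caching sub-results reduces the exponential O(3^n) time
# noted in the docstring to O(n).
def ways_to_reach_memoized(stairs, cache=None) -> int:
    if cache is None:
        cache = {}
    if stairs == 0:
        return 0
    if stairs == 1:
        return 1
    if stairs == 2:
        return 2
    if stairs == 3:
        return 4
    if stairs not in cache:
        cache[stairs] = (ways_to_reach_memoized(stairs - 1, cache)
                         + ways_to_reach_memoized(stairs - 2, cache)
                         + ways_to_reach_memoized(stairs - 3, cache))
    return cache[stairs]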
if __name__ == '__main__':
setrecursionlimit(11000)
staircases = int(input()) # no of stairs
print(ways_to_reach(staircases))
|
from unittest import TestCase
from web3 import HTTPProvider, Web3, Account
from zksync_sdk.zksync import ZkSync
class TestZkSyncContract(TestCase):
private_key = "0xa045b52470d306ff78e91b0d2d92f90f7504189125a46b69423dc673fd6b4f3e"
def setUp(self) -> None:
self.account = Account.from_key(self.private_key)
w3 = Web3(HTTPProvider(
endpoint_uri="https://rinkeby.infura.io/v3/bcf42e619a704151a1b0d95a35cb2e62"))
self.zksync = ZkSync(account=self.account,
web3=w3,
zksync_contract_address="0x82F67958A5474e40E1485742d648C0b0686b6e5D")
def test_deposit_eth(self):
tx = self.zksync.deposit_eth(self.account.address, 2 * 10 ** 12)
assert tx['transactionHash']
def test_full_exit(self):
tx = self.zksync.full_exit(1, "0x3B00Ef435fA4FcFF5C209a37d1f3dcff37c705aD")
assert tx['transactionHash']
def test_auth_facts(self):
tx = self.zksync.auth_facts(self.account.address, 2)
assert tx
|
from PIL import Image
from src.predictionAlgorithms.baseAlgorithm import BaseAlgorithm
from src.utilities.imageAnalysis.pixelsRainStrengthConverter import PixelsRainStrengthConverter
import numpy as np
from os import listdir, curdir
class CNN4L(BaseAlgorithm):
model = None
name = 'CNN 4 Layers'
def __init__(self,file='/app/src/savedModels/3l_3rand_1elev_96_64_3_3'):
self.model = self.load_ml_model(file)
# self.model = None
def reload(self, file='/app/src/savedModels/3l_3rand_1elev_96_64_3_3'):
self.model = self.load_ml_model(file)
def predict(self, source_images, count):
print('Predict ', self.name)
converted_images = PixelsRainStrengthConverter.convert_loaded(source_images[-4:])
window = np.array(converted_images)
print(np.max(window[0]))
print(np.mean(window[0]))
results = []
for i in range(count):
print('generating image ' + str(i))
temp = np.copy(window[:4])[np.newaxis, ...]
print('w', np.max(window))
print('w', np.mean(window))
print(temp.shape)
temp_expanded = self.get_model_input(temp, 1, 3)
forecast = self.model.predict(temp_expanded)
window[:-1] = window[1:]
window[-1] = np.copy(forecast)
img = Image.new('L', (self.size, self.size))
img.putdata(forecast.flatten())
resized = img.resize((128, 128), Image.BILINEAR)
results.append(resized)
print('mx', np.max(forecast))
return results
|
# Auto-generated. Do not edit.
from opendp._convert import *
from opendp._lib import *
from opendp.mod import *
from opendp.typing import *
__all__ = [
"make_cast",
"make_cast_default",
"make_is_equal",
"make_is_null",
"make_cast_inherent",
"make_cast_metric",
"make_clamp",
"make_unclamp",
"make_count",
"make_count_distinct",
"make_count_by",
"make_count_by_categories",
"make_split_lines",
"make_split_records",
"make_create_dataframe",
"make_split_dataframe",
"make_select_column",
"make_identity",
"make_impute_constant",
"make_impute_uniform_float",
"make_sized_bounded_mean",
"make_resize",
"make_bounded_resize",
"make_bounded_sum",
"make_sized_bounded_sum",
"make_sized_bounded_variance"
]
def make_cast(
TIA: RuntimeTypeDescriptor,
TOA: RuntimeTypeDescriptor
) -> Transformation:
"""Make a Transformation that casts a vector of data from type `TIA` to type `TOA`.
Failure to parse results in None, else Some<TOA>.
:param TIA: atomic input data type to cast from
:type TIA: RuntimeTypeDescriptor
:param TOA: atomic data type to cast into
:type TOA: RuntimeTypeDescriptor
:return: A cast step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
TIA = RuntimeType.parse(type_name=TIA)
TOA = RuntimeType.parse(type_name=TOA)
# Convert arguments to c types.
TIA = py_to_c(TIA, c_type=ctypes.c_char_p)
TOA = py_to_c(TOA, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_cast
function.argtypes = [ctypes.c_char_p, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(TIA, TOA), Transformation))
def make_cast_default(
TIA: RuntimeTypeDescriptor,
TOA: RuntimeTypeDescriptor
) -> Transformation:
"""Make a Transformation that casts a vector of data from type `TIA` to type `TOA`. If cast fails, fill with default.
:param TIA: atomic input data type to cast from
:type TIA: RuntimeTypeDescriptor
:param TOA: atomic data type to cast into
:type TOA: RuntimeTypeDescriptor
:return: A cast_default step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
TIA = RuntimeType.parse(type_name=TIA)
TOA = RuntimeType.parse(type_name=TOA)
# Convert arguments to c types.
TIA = py_to_c(TIA, c_type=ctypes.c_char_p)
TOA = py_to_c(TOA, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_cast_default
function.argtypes = [ctypes.c_char_p, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(TIA, TOA), Transformation))
def make_is_equal(
value: Any,
TIA: RuntimeTypeDescriptor = None
) -> Transformation:
"""Make a Transformation that checks if each element is equal to `value`.
:param value: value to check against
:type value: Any
:param TIA: atomic input data type
:type TIA: RuntimeTypeDescriptor
:return: A is_equal step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
TIA = RuntimeType.parse_or_infer(type_name=TIA, public_example=value)
# Convert arguments to c types.
value = py_to_c(value, c_type=AnyObjectPtr, type_name=TIA)
TIA = py_to_c(TIA, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_is_equal
function.argtypes = [AnyObjectPtr, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(value, TIA), Transformation))
def make_is_null(
DIA: RuntimeTypeDescriptor
) -> Transformation:
"""Make a Transformation that checks if each element in a vector is null.
:param DIA: atomic input domain
:type DIA: RuntimeTypeDescriptor
:return: A is_null step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
DIA = RuntimeType.parse(type_name=DIA)
# Convert arguments to c types.
DIA = py_to_c(DIA, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_is_null
function.argtypes = [ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(DIA), Transformation))
def make_cast_inherent(
TIA: RuntimeTypeDescriptor,
TOA: RuntimeTypeDescriptor
) -> Transformation:
"""Make a Transformation that casts a vector of data from type `TI` to a type that can represent nullity `TO`.
If cast fails, fill with `TO`'s null value.
:param TIA: input data type to cast from
:type TIA: RuntimeTypeDescriptor
:param TOA: data type to cast into
:type TOA: RuntimeTypeDescriptor
:return: A cast_inherent step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
TIA = RuntimeType.parse(type_name=TIA)
TOA = RuntimeType.parse(type_name=TOA)
# Convert arguments to c types.
TIA = py_to_c(TIA, c_type=ctypes.c_char_p)
TOA = py_to_c(TOA, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_cast_inherent
function.argtypes = [ctypes.c_char_p, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(TIA, TOA), Transformation))
def make_cast_metric(
MI: DatasetMetric,
MO: DatasetMetric,
TA: RuntimeTypeDescriptor
) -> Transformation:
"""Make a Transformation that converts the dataset metric from type `MI` to type `MO`.
:param MI: input dataset metric
:type MI: DatasetMetric
:param MO: output dataset metric
:type MO: DatasetMetric
:param TA: atomic type of data
:type TA: RuntimeTypeDescriptor
:return: A cast_metric step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
MI = RuntimeType.parse(type_name=MI)
MO = RuntimeType.parse(type_name=MO)
TA = RuntimeType.parse(type_name=TA)
# Convert arguments to c types.
MI = py_to_c(MI, c_type=ctypes.c_char_p)
MO = py_to_c(MO, c_type=ctypes.c_char_p)
TA = py_to_c(TA, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_cast_metric
function.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(MI, MO, TA), Transformation))
def make_clamp(
bounds: Tuple[Any, Any],
TA: RuntimeTypeDescriptor = None
) -> Transformation:
"""Make a Transformation that clamps numeric data in Vec<`T`> to `bounds`.
If datum is less than lower, let datum be lower.
If datum is greater than upper, let datum be upper.
:param bounds: Tuple of inclusive lower and upper bounds.
:type bounds: Tuple[Any, Any]
:param TA: atomic data type
:type TA: RuntimeTypeDescriptor
:return: A clamp step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
TA = RuntimeType.parse_or_infer(type_name=TA, public_example=get_first(bounds))
# Convert arguments to c types.
bounds = py_to_c(bounds, c_type=AnyObjectPtr, type_name=RuntimeType(origin='Tuple', args=[TA, TA]))
TA = py_to_c(TA, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_clamp
function.argtypes = [AnyObjectPtr, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(bounds, TA), Transformation))
def make_unclamp(
bounds: Tuple[Any, Any],
TA: RuntimeTypeDescriptor = None
) -> Transformation:
"""Make a Transformation that unclamps a VectorDomain<BoundedDomain<T>> to a VectorDomain<AllDomain<T>>.
:param bounds: Tuple of inclusive lower and upper bounds.
:type bounds: Tuple[Any, Any]
:param TA: atomic data type
:type TA: RuntimeTypeDescriptor
:return: A unclamp step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
TA = RuntimeType.parse_or_infer(type_name=TA, public_example=get_first(bounds))
# Convert arguments to c types.
bounds = py_to_c(bounds, c_type=AnyObjectPtr, type_name=RuntimeType(origin='Tuple', args=[TA, TA]))
TA = py_to_c(TA, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_unclamp
function.argtypes = [AnyObjectPtr, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(bounds, TA), Transformation))
def make_count(
TIA: RuntimeTypeDescriptor,
TO: RuntimeTypeDescriptor = "i32"
) -> Transformation:
"""Make a Transformation that computes a count of the number of records in data.
:param TIA: Atomic Input Type. Input data is expected to be of the form Vec<TIA>.
:type TIA: RuntimeTypeDescriptor
:param TO: Output Type. Must be an integer.
:type TO: RuntimeTypeDescriptor
:return: A count step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
TIA = RuntimeType.parse(type_name=TIA)
TO = RuntimeType.parse(type_name=TO)
# Convert arguments to c types.
TIA = py_to_c(TIA, c_type=ctypes.c_char_p)
TO = py_to_c(TO, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_count
function.argtypes = [ctypes.c_char_p, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(TIA, TO), Transformation))
def make_count_distinct(
TIA: RuntimeTypeDescriptor,
TO: RuntimeTypeDescriptor = "i32"
) -> Transformation:
"""Make a Transformation that computes a count of the number of unique, distinct records in data.
:param TIA: Atomic Input Type. Input data is expected to be of the form Vec<TIA>.
:type TIA: RuntimeTypeDescriptor
:param TO: Output Type. Must be an integer.
:type TO: RuntimeTypeDescriptor
:return: A count_distinct step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
TIA = RuntimeType.parse(type_name=TIA)
TO = RuntimeType.parse(type_name=TO)
# Convert arguments to c types.
TIA = py_to_c(TIA, c_type=ctypes.c_char_p)
TO = py_to_c(TO, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_count_distinct
function.argtypes = [ctypes.c_char_p, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(TIA, TO), Transformation))
def make_count_by(
size: int,
MO: SensitivityMetric,
TIA: RuntimeTypeDescriptor,
TOA: RuntimeTypeDescriptor = "i32"
) -> Transformation:
"""Make a Transformation that computes the count of each unique value in data.
This assumes that the category set is unknown.
This uses a restricted-sensitivity proof that takes advantage of known dataset size.
Use `make_resize` to establish dataset size.
Use meas.make_base_stability to release this query.
:param size: Number of records in input data.
:type size: int
:param MO: Output Metric.
:type MO: SensitivityMetric
:param TIA: Atomic Input Type. Categorical/hashable input data type. Input data must be Vec<TI>.
:type TIA: RuntimeTypeDescriptor
:param TOA: Atomic Output Type. Express counts in terms of this integral type.
:type TOA: RuntimeTypeDescriptor
:return: The carrier type is HashMap<TI, TO>- the counts for each unique data input.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
MO = RuntimeType.parse(type_name=MO)
TIA = RuntimeType.parse(type_name=TIA)
TOA = RuntimeType.parse(type_name=TOA)
# Convert arguments to c types.
size = py_to_c(size, c_type=ctypes.c_uint)
MO = py_to_c(MO, c_type=ctypes.c_char_p)
TIA = py_to_c(TIA, c_type=ctypes.c_char_p)
TOA = py_to_c(TOA, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_count_by
function.argtypes = [ctypes.c_uint, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(size, MO, TIA, TOA), Transformation))
def make_count_by_categories(
categories: Any,
MO: SensitivityMetric = "L1Distance<i32>",
TIA: RuntimeTypeDescriptor = None,
TOA: RuntimeTypeDescriptor = "i32"
) -> Transformation:
"""Make a Transformation that computes the number of times each category appears in the data.
This assumes that the category set is known.
:param categories: The set of categories to compute counts for.
:type categories: Any
:param MO: output sensitivity metric
:type MO: SensitivityMetric
:param TIA: categorical/hashable input type. Input data must be Vec<TIA>.
:type TIA: RuntimeTypeDescriptor
:param TOA: express counts in terms of this integral type
:type TOA: RuntimeTypeDescriptor
:return: A count_by_categories step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
MO = RuntimeType.parse(type_name=MO)
TIA = RuntimeType.parse_or_infer(type_name=TIA, public_example=next(iter(categories), None))
TOA = RuntimeType.parse(type_name=TOA)
# Convert arguments to c types.
categories = py_to_c(categories, c_type=AnyObjectPtr, type_name=RuntimeType(origin='Vec', args=[TIA]))
MO = py_to_c(MO, c_type=ctypes.c_char_p)
TIA = py_to_c(TIA, c_type=ctypes.c_char_p)
TOA = py_to_c(TOA, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_count_by_categories
function.argtypes = [AnyObjectPtr, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(categories, MO, TIA, TOA), Transformation))
def make_split_lines(
) -> Transformation:
"""Make a Transformation that takes a string and splits it into a Vec<String> of its lines.
:return: A split_lines step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# No type arguments to standardize.
# No arguments to convert to c types.
# Call library function.
function = lib.opendp_trans__make_split_lines
function.argtypes = []
function.restype = FfiResult
return c_to_py(unwrap(function(), Transformation))
def make_split_records(
separator: str
) -> Transformation:
"""Make a Transformation that splits each record in a Vec<String> into a Vec<Vec<String>>.
:param separator: The token(s) that separate entries in each record.
:type separator: str
:return: A split_records step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# No type arguments to standardize.
# Convert arguments to c types.
separator = py_to_c(separator, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_split_records
function.argtypes = [ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(separator), Transformation))
def make_create_dataframe(
col_names: Any,
K: RuntimeTypeDescriptor = None
) -> Transformation:
"""Make a Transformation that constructs a dataframe from a Vec<Vec<String>>.
:param col_names: Column names for each record entry.
:type col_names: Any
:param K: categorical/hashable data type of column names
:type K: RuntimeTypeDescriptor
:return: A create_dataframe step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
K = RuntimeType.parse_or_infer(type_name=K, public_example=next(iter(col_names), None))
# Convert arguments to c types.
col_names = py_to_c(col_names, c_type=AnyObjectPtr, type_name=RuntimeType(origin='Vec', args=[K]))
K = py_to_c(K, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_create_dataframe
function.argtypes = [AnyObjectPtr, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(col_names, K), Transformation))
def make_split_dataframe(
separator: str,
col_names: Any,
K: RuntimeTypeDescriptor = None
) -> Transformation:
"""Make a Transformation that splits each record in a String into a Vec<Vec<String>>,
and loads the resulting table into a dataframe keyed by `col_names`.
:param separator: The token(s) that separate entries in each record.
:type separator: str
:param col_names: Column names for each record entry.
:type col_names: Any
:param K: categorical/hashable data type of column names
:type K: RuntimeTypeDescriptor
:return: A split_dataframe step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
K = RuntimeType.parse_or_infer(type_name=K, public_example=next(iter(col_names), None))
# Convert arguments to c types.
separator = py_to_c(separator, c_type=ctypes.c_char_p)
col_names = py_to_c(col_names, c_type=AnyObjectPtr, type_name=RuntimeType(origin='Vec', args=[K]))
K = py_to_c(K, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_split_dataframe
function.argtypes = [ctypes.c_char_p, AnyObjectPtr, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(separator, col_names, K), Transformation))
def make_select_column(
key: Any,
TOA: RuntimeTypeDescriptor,
K: RuntimeTypeDescriptor = None
) -> Transformation:
"""Make a Transformation that retrieves the column `key` from a dataframe as Vec<`TOA`>.
:param key: categorical/hashable data type of the key/column name
:type key: Any
:param K: data type of the key
:type K: RuntimeTypeDescriptor
:param TOA: atomic data type to downcast to
:type TOA: RuntimeTypeDescriptor
:return: A select_column step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
K = RuntimeType.parse_or_infer(type_name=K, public_example=key)
TOA = RuntimeType.parse(type_name=TOA)
# Convert arguments to c types.
key = py_to_c(key, c_type=AnyObjectPtr, type_name=K)
K = py_to_c(K, c_type=ctypes.c_char_p)
TOA = py_to_c(TOA, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_select_column
function.argtypes = [AnyObjectPtr, ctypes.c_char_p, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(key, K, TOA), Transformation))
def make_identity(
D: RuntimeTypeDescriptor,
M: RuntimeTypeDescriptor
) -> Transformation:
"""Make a Transformation that simply passes the data through.
:param D: Domain of the identity function. Must be VectorDomain<AllDomain<_>> or AllDomain<_>
:type D: RuntimeTypeDescriptor
:param M: metric. Must be a dataset metric if D is a VectorDomain or a sensitivity metric if D is an AllDomain
:type M: RuntimeTypeDescriptor
:return: A transformation where the input and output domain are D and the input and output metric are M
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
D = RuntimeType.parse(type_name=D)
M = RuntimeType.parse(type_name=M)
# Convert arguments to c types.
D = py_to_c(D, c_type=ctypes.c_char_p)
M = py_to_c(M, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_identity
function.argtypes = [ctypes.c_char_p, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(D, M), Transformation))
def make_impute_constant(
constant: Any,
DA: RuntimeTypeDescriptor = "OptionNullDomain<AllDomain<TA>>"
) -> Transformation:
"""Make a Transformation that replaces null/None data with `constant`.
By default, the input type is Vec<Option<TA>>, as emitted by make_cast.
Set `DA` to InherentNullDomain<AllDomain<TA>> for imputing on types that have an inherent representation of nullity, like floats.
:param constant: Value to replace nulls with.
:type constant: Any
:param DA: domain of data being imputed. This is OptionNullDomain<AllDomain<TA>> or InherentNullDomain<AllDomain<TA>>
:type DA: RuntimeTypeDescriptor
:return: A impute_constant step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
DA = RuntimeType.parse(type_name=DA, generics=["TA"])
TA = get_domain_atom_or_infer(DA, constant)
DA = DA.substitute(TA=TA)
# Convert arguments to c types.
constant = py_to_c(constant, c_type=AnyObjectPtr, type_name=TA)
DA = py_to_c(DA, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_impute_constant
function.argtypes = [AnyObjectPtr, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(constant, DA), Transformation))
def make_impute_uniform_float(
bounds: Tuple[Any, Any],
TA: RuntimeTypeDescriptor = None
) -> Transformation:
"""Make a Transformation that replaces null/None data in Vec<`TA`> with uniformly distributed floats within `bounds`.
:param bounds: Tuple of inclusive lower and upper bounds.
:type bounds: Tuple[Any, Any]
:param TA: type of data being imputed
:type TA: RuntimeTypeDescriptor
:return: A impute_uniform_float step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
TA = RuntimeType.parse_or_infer(type_name=TA, public_example=get_first(bounds))
# Convert arguments to c types.
bounds = py_to_c(bounds, c_type=AnyObjectPtr, type_name=RuntimeType(origin='Tuple', args=[TA, TA]))
TA = py_to_c(TA, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_impute_uniform_float
function.argtypes = [AnyObjectPtr, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(bounds, TA), Transformation))
def make_sized_bounded_mean(
size: int,
bounds: Tuple[Any, Any],
T: RuntimeTypeDescriptor = None
) -> Transformation:
"""Make a Transformation that computes the mean of bounded data.
This uses a restricted-sensitivity proof that takes advantage of known dataset size.
Use `make_clamp` to bound data and `make_bounded_resize` to establish dataset size.
:param size: Number of records in input data.
:type size: int
:param bounds: Tuple of inclusive lower and upper bounds of the input data.
:type bounds: Tuple[Any, Any]
:param T: atomic data type
:type T: RuntimeTypeDescriptor
:return: A sized_bounded_mean step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
T = RuntimeType.parse_or_infer(type_name=T, public_example=get_first(bounds))
# Convert arguments to c types.
size = py_to_c(size, c_type=ctypes.c_uint)
bounds = py_to_c(bounds, c_type=AnyObjectPtr, type_name=RuntimeType(origin='Tuple', args=[T, T]))
T = py_to_c(T, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_sized_bounded_mean
function.argtypes = [ctypes.c_uint, AnyObjectPtr, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(size, bounds, T), Transformation))
def make_resize(
size: int,
constant: Any,
TA: RuntimeTypeDescriptor = None
) -> Transformation:
"""Make a Transformation that either truncates or imputes records with `constant` in a Vec<`TA`> to match a provided `size`.
:param size: Number of records in output data.
:type size: int
:param constant: Value to impute with.
:type constant: Any
:param TA: Atomic type.
:type TA: RuntimeTypeDescriptor
:return: A vector of the same type `TA`, but with the provided `size`.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
TA = RuntimeType.parse_or_infer(type_name=TA, public_example=constant)
# Convert arguments to c types.
size = py_to_c(size, c_type=ctypes.c_uint)
constant = py_to_c(constant, c_type=AnyObjectPtr, type_name=TA)
TA = py_to_c(TA, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_resize
function.argtypes = [ctypes.c_uint, AnyObjectPtr, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(size, constant, TA), Transformation))
def make_bounded_resize(
size: int,
bounds: Tuple[Any, Any],
constant,
TA: RuntimeTypeDescriptor = None
) -> Transformation:
"""Make a Transformation that either truncates or imputes records with `constant` in a Vec<`TA`> to match a provided `size`.
:param size: Number of records in output data.
:type size: int
:param bounds: Tuple of lower and upper bounds for data in the input domain
:type bounds: Tuple[Any, Any]
:param constant: Value to impute with.
:param TA: Atomic type. If not passed, TA is inferred from the lower bound.
:type TA: RuntimeTypeDescriptor
:return: A vector of the same type `TA`, but with the provided `size`.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
TA = RuntimeType.parse_or_infer(type_name=TA, public_example=get_first(bounds))
# Convert arguments to c types.
size = py_to_c(size, c_type=ctypes.c_uint)
bounds = py_to_c(bounds, c_type=AnyObjectPtr, type_name=RuntimeType(origin='Tuple', args=[TA, TA]))
constant = py_to_c(constant, c_type=ctypes.c_void_p, type_name=TA)
TA = py_to_c(TA, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_bounded_resize
function.argtypes = [ctypes.c_uint, AnyObjectPtr, ctypes.c_void_p, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(size, bounds, constant, TA), Transformation))
def make_bounded_sum(
bounds: Tuple[Any, Any],
T: RuntimeTypeDescriptor = None
) -> Transformation:
"""Make a Transformation that computes the sum of bounded data.
Use `make_clamp` to bound data.
:param bounds: Tuple of lower and upper bounds for data in the input domain
:type bounds: Tuple[Any, Any]
:param T: atomic type of data
:type T: RuntimeTypeDescriptor
:return: A bounded_sum step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
T = RuntimeType.parse_or_infer(type_name=T, public_example=get_first(bounds))
# Convert arguments to c types.
bounds = py_to_c(bounds, c_type=AnyObjectPtr, type_name=RuntimeType(origin='Tuple', args=[T, T]))
T = py_to_c(T, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_bounded_sum
function.argtypes = [AnyObjectPtr, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(bounds, T), Transformation))
def make_sized_bounded_sum(
size: int,
bounds: Tuple[Any, Any],
T: RuntimeTypeDescriptor = None
) -> Transformation:
"""Make a Transformation that computes the sum of bounded data with known dataset size.
This uses a restricted-sensitivity proof that takes advantage of known dataset size for better utility.
Use `make_clamp` to bound data and `make_bounded_resize` to establish dataset size.
:param size: Number of records in input data.
:type size: int
:param bounds: Tuple of lower and upper bounds for input data
:type bounds: Tuple[Any, Any]
:param T: atomic type of data
:type T: RuntimeTypeDescriptor
:return: A sized_bounded_sum step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
T = RuntimeType.parse_or_infer(type_name=T, public_example=get_first(bounds))
# Convert arguments to c types.
size = py_to_c(size, c_type=ctypes.c_uint)
bounds = py_to_c(bounds, c_type=AnyObjectPtr, type_name=RuntimeType(origin='Tuple', args=[T, T]))
T = py_to_c(T, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_sized_bounded_sum
function.argtypes = [ctypes.c_uint, AnyObjectPtr, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(size, bounds, T), Transformation))
def make_sized_bounded_variance(
size: int,
bounds: Tuple[Any, Any],
ddof: int = 1,
T: RuntimeTypeDescriptor = None
) -> Transformation:
"""Make a Transformation that computes the variance of bounded data.
This uses a restricted-sensitivity proof that takes advantage of known dataset size.
Use `make_clamp` to bound data and `make_bounded_resize` to establish dataset size.
:param size: Number of records in input data.
:type size: int
:param bounds: Tuple of lower and upper bounds for input data
:type bounds: Tuple[Any, Any]
:param ddof: Delta degrees of freedom. Set to 0 if not a sample, 1 for sample estimate.
:type ddof: int
:param T: atomic data type
:type T: RuntimeTypeDescriptor
:return: A sized_bounded_variance step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
T = RuntimeType.parse_or_infer(type_name=T, public_example=get_first(bounds))
# Convert arguments to c types.
size = py_to_c(size, c_type=ctypes.c_uint)
bounds = py_to_c(bounds, c_type=AnyObjectPtr, type_name=RuntimeType(origin='Tuple', args=[T, T]))
ddof = py_to_c(ddof, c_type=ctypes.c_uint)
T = py_to_c(T, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_sized_bounded_variance
function.argtypes = [ctypes.c_uint, AnyObjectPtr, ctypes.c_uint, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(size, bounds, ddof, T), Transformation))
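# Usage sketch (illustrative, not part of the generated bindings): assuming this module is
# importable as ``opendp.trans`` as in the 0.x releases, the constructors above are meant to
# be chained with ``>>`` into a single Transformation; exact names may differ by version.
#
#     from opendp.mod import enable_features
#     from opendp.trans import make_clamp, make_bounded_resize, make_sized_bounded_sum
#
#     enable_features("contrib")
#     bounds = (0.0, 10.0)
#     pipeline = (
#         make_clamp(bounds=bounds)
#         >> make_bounded_resize(size=100, bounds=bounds, constant=0.0)
#         >> make_sized_bounded_sum(size=100, bounds=bounds)
#     )
#     total = pipeline.invoke([1.0, 2.0, 3.0])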
|
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 1 15:33:37 2021
@author: shubhransu
"""
num = int(input("Enter any number: "))
if num % 2 == 0:
print("NUmber is Even ")
else:
print("Number is odd")
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import unittest
from swift.common.swob import Request, HTTPMovedPermanently
from swift.common.middleware import domain_remap
from swift.common import registry
class FakeApp(object):
def __call__(self, env, start_response):
start_response('200 OK', [])
if six.PY2:
return [env['PATH_INFO']]
else:
print(env)
return [env['PATH_INFO'].encode('latin-1')]
class RedirectSlashApp(object):
def __call__(self, env, start_response):
loc = env['PATH_INFO'] + '/'
return HTTPMovedPermanently(location=loc)(env, start_response)
def start_response(*args):
pass
class TestDomainRemap(unittest.TestCase):
def setUp(self):
self.app = domain_remap.DomainRemapMiddleware(FakeApp(), {})
def test_domain_remap_passthrough(self):
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
'SERVER_NAME': 'example.com'},
headers={'Host': None})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/'])
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/'])
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'example.com:8080'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/'])
def test_domain_remap_account(self):
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
'SERVER_NAME': 'AUTH_a.example.com'},
headers={'Host': None})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/'])
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/'])
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'AUTH-uuid.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_uuid/'])
def test_domain_remap_account_container(self):
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/c/'])
def test_domain_remap_extra_subdomains(self):
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'x.y.c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'Bad domain in host header'])
def test_domain_remap_account_with_path_root_container(self):
req = Request.blank('/v1', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/v1'])
def test_domain_remap_account_with_path_root_unicode_container(self):
req = Request.blank('/%E4%BD%A0%E5%A5%BD',
environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/\xe4\xbd\xa0\xe5\xa5\xbd'])
def test_domain_remap_account_container_with_path_root_obj(self):
req = Request.blank('/v1', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/c/v1'])
def test_domain_remap_account_container_with_path_obj_slash_v1(self):
# Include http://localhost because urlparse used in Request.__init__
# parses //v1 as http://v1
req = Request.blank('http://localhost//v1',
environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/c//v1'])
def test_domain_remap_account_container_with_root_path_obj_slash_v1(self):
req = Request.blank('/v1//v1',
environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/c/v1//v1'])
def test_domain_remap_account_container_with_path_trailing_slash(self):
req = Request.blank('/obj/', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/c/obj/'])
def test_domain_remap_account_container_with_path(self):
req = Request.blank('/obj', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/c/obj'])
def test_domain_remap_account_container_with_path_root_and_path(self):
req = Request.blank('/v1/obj', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/c/v1/obj'])
def test_domain_remap_with_path_root_and_path_no_slash(self):
req = Request.blank('/v1obj', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/c/v1obj'])
def test_domain_remap_account_matching_ending_not_domain(self):
req = Request.blank('/dontchange', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.aexample.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/dontchange'])
def test_domain_remap_configured_with_empty_storage_domain(self):
self.app = domain_remap.DomainRemapMiddleware(FakeApp(),
{'storage_domain': ''})
req = Request.blank('/test', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/test'])
def test_storage_domains_conf_format(self):
conf = {'storage_domain': 'foo.com'}
app = domain_remap.filter_factory(conf)(FakeApp())
self.assertEqual(app.storage_domain, ['.foo.com'])
conf = {'storage_domain': 'foo.com, '}
app = domain_remap.filter_factory(conf)(FakeApp())
self.assertEqual(app.storage_domain, ['.foo.com'])
conf = {'storage_domain': 'foo.com, bar.com'}
app = domain_remap.filter_factory(conf)(FakeApp())
self.assertEqual(app.storage_domain, ['.foo.com', '.bar.com'])
conf = {'storage_domain': 'foo.com, .bar.com'}
app = domain_remap.filter_factory(conf)(FakeApp())
self.assertEqual(app.storage_domain, ['.foo.com', '.bar.com'])
conf = {'storage_domain': '.foo.com, .bar.com'}
app = domain_remap.filter_factory(conf)(FakeApp())
self.assertEqual(app.storage_domain, ['.foo.com', '.bar.com'])
def test_domain_remap_configured_with_prefixes(self):
conf = {'reseller_prefixes': 'PREFIX'}
self.app = domain_remap.DomainRemapMiddleware(FakeApp(), conf)
req = Request.blank('/test', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.prefix_uuid.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/PREFIX_uuid/c/test'])
def test_domain_remap_configured_with_bad_prefixes(self):
conf = {'reseller_prefixes': 'UNKNOWN'}
self.app = domain_remap.DomainRemapMiddleware(FakeApp(), conf)
req = Request.blank('/test', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.prefix_uuid.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/test'])
def test_domain_remap_configured_with_no_prefixes(self):
conf = {'reseller_prefixes': ''}
self.app = domain_remap.DomainRemapMiddleware(FakeApp(), conf)
req = Request.blank('/test', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.uuid.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/uuid/c/test'])
def test_domain_remap_add_prefix(self):
conf = {'default_reseller_prefix': 'FOO'}
self.app = domain_remap.DomainRemapMiddleware(FakeApp(), conf)
req = Request.blank('/test', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'uuid.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/FOO_uuid/test'])
def test_domain_remap_add_prefix_already_there(self):
conf = {'default_reseller_prefix': 'AUTH'}
self.app = domain_remap.DomainRemapMiddleware(FakeApp(), conf)
req = Request.blank('/test', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'auth-uuid.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_uuid/test'])
def test_multiple_storage_domains(self):
conf = {'storage_domain': 'storage1.com, storage2.com'}
self.app = domain_remap.DomainRemapMiddleware(FakeApp(), conf)
def do_test(host):
req = Request.blank('/test', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': host})
return self.app(req.environ, start_response)
resp = do_test('auth-uuid.storage1.com')
self.assertEqual(resp, [b'/v1/AUTH_uuid/test'])
resp = do_test('auth-uuid.storage2.com')
self.assertEqual(resp, [b'/v1/AUTH_uuid/test'])
resp = do_test('auth-uuid.storage3.com')
self.assertEqual(resp, [b'/test'])
def test_domain_remap_redirect(self):
app = domain_remap.DomainRemapMiddleware(RedirectSlashApp(), {})
req = Request.blank('/cont', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'auth-uuid.example.com'})
resp = req.get_response(app)
self.assertEqual(resp.status_int, 301)
self.assertEqual(resp.headers.get('Location'),
'http://auth-uuid.example.com/cont/')
req = Request.blank('/cont/test', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'auth-uuid.example.com'})
resp = req.get_response(app)
self.assertEqual(resp.status_int, 301)
self.assertEqual(resp.headers.get('Location'),
'http://auth-uuid.example.com/cont/test/')
req = Request.blank('/test', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'cont.auth-uuid.example.com'})
resp = req.get_response(app)
self.assertEqual(resp.status_int, 301)
self.assertEqual(resp.headers.get('Location'),
'http://cont.auth-uuid.example.com/test/')
class TestDomainRemapClientMangling(unittest.TestCase):
def setUp(self):
self.app = domain_remap.DomainRemapMiddleware(FakeApp(), {
'mangle_client_paths': True})
def test_domain_remap_account_with_path_root_container(self):
req = Request.blank('/v1', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/'])
def test_domain_remap_account_container_with_path_root_obj(self):
req = Request.blank('/v1', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/c/'])
def test_domain_remap_account_container_with_path_obj_slash_v1(self):
# Include http://localhost because urlparse used in Request.__init__
# parses //v1 as http://v1
req = Request.blank('http://localhost//v1',
environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/c//v1'])
def test_domain_remap_account_container_with_root_path_obj_slash_v1(self):
req = Request.blank('/v1//v1',
environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/c//v1'])
def test_domain_remap_account_container_with_path_trailing_slash(self):
req = Request.blank('/obj/', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/c/obj/'])
def test_domain_remap_account_container_with_path_root_and_path(self):
req = Request.blank('/v1/obj', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/c/obj'])
def test_domain_remap_with_path_root_and_path_no_slash(self):
req = Request.blank('/v1obj', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/c/v1obj'])
class TestSwiftInfo(unittest.TestCase):
def setUp(self):
registry._swift_info = {}
registry._swift_admin_info = {}
def test_registered_defaults(self):
domain_remap.filter_factory({})
swift_info = registry.get_swift_info()
self.assertIn('domain_remap', swift_info)
self.assertEqual(swift_info['domain_remap'], {
'default_reseller_prefix': None})
def test_registered_nondefaults(self):
domain_remap.filter_factory({'default_reseller_prefix': 'cupcake',
'mangle_client_paths': 'yes'})
swift_info = registry.get_swift_info()
self.assertIn('domain_remap', swift_info)
self.assertEqual(swift_info['domain_remap'], {
'default_reseller_prefix': 'cupcake'})
if __name__ == '__main__':
unittest.main()
|
"""
Lambda handler for wanwu requests
"""
import json
from collections import namedtuple
Request = namedtuple(
'Request',
[
'method', # str 'GET' or 'POST'
'path', # str
'body', # None|str
'query', # dict
'headers', # dict
],
)
Response = namedtuple(
'Response',
[
'status', # int
'content_type', # str
'headers', # dict
'body', # str
],
)
def handler(event, context):
"""
The entry point for the lambda
:arg dict event: {
"isBase64Encoded": False,
"path": "/",
"body": None,
"resource": "/",
"requestContext": {
"stage": "prod",
"identity": {
"accountId": None,
"sourceIp": "66.234.34.100",
"cognitoAuthenticationProvider": None,
"cognitoIdentityId": None,
"apiKey": None,
"userArn": None,
"cognitoAuthenticationType": None,
"accessKey": None,
"caller": None,
"userAgent": "Mozilla/5.0 (X11; Linux x86_64) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/55.0.2883.87 Safari/537.36",
"user": None,
"cognitoIdentityPoolId": None
},
"accountId": "541056992659",
"requestId": "40fc7e97-d229-11e6-8949-99c4d5e523c8",
"httpMethod": "GET",
"resourcePath": "/",
"apiId": "1han4u97l0",
"resourceId": "z8npftrr5d"
},
"queryStringParameters": None,
"httpMethod": "GET",
"pathParameters": None,
"headers": {
"Accept-Encoding": "gzip, deflate, sdch, br",
"CloudFront-Forwarded-Proto": "https",
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 "
"(KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36",
"Accept": "text/html,application/xhtml+xml,application/xml;"
"q=0.9,image/webp,*/*;q=0.8",
"CloudFront-Viewer-Country": "US",
"X-Forwarded-For": "66.234.34.100, 204.246.180.48",
"CloudFront-Is-Mobile-Viewer": "false",
"CloudFront-Is-SmartTV-Viewer": "false",
"CloudFront-Is-Desktop-Viewer": "true",
"CloudFront-Is-Tablet-Viewer": "false",
"Accept-Language": "en-US,en;q=0.8",
"Via": "1.1 f348970492a18bf5c630c5acc86c1ee3.cloudfront.net "
"(CloudFront)",
"Upgrade-Insecure-Requests": "1",
"X-Forwarded-Port": "443",
"Host": "1han4u97l0.execute-api.us-east-1.amazonaws.com",
"X-Forwarded-Proto": "https",
"Referer": "https://console.aws.amazon.com/apigateway/home"
"?region=us-east-1",
"Cache-Control": "max-age=0",
"X-Amz-Cf-Id": "TqUFzq7aH3maNpH3Ih_98Hr8j4tz"
"7HjZrYf8BL7N2K1yNbtBkpjPBA=="
},
"stageVariables": None
}
:arg LambdaContext context: see
http://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html
"""
del context
request = request_from_lambda_event(event)
resource = route_request(request.path)
return response_to_handler_out(resource(request))
def request_from_lambda_event(event):
""" Build a Request from a lambda handler event """
return Request(
path=event['path'],
method=event['httpMethod'],
body=event['body'],
query=(
dict()
if event['queryStringParameters'] is None
else event['queryStringParameters']
),
headers=event['headers'],
)
def response_to_handler_out(response):
"""
Build expected handler return from a Response
"""
return dict(
statusCode=response.status,
body=response.body,
headers={
'Content-Language': 'en',
'Content-Length': str(len(response.body)),
'Content-Type': '{}; charset=utf-8'.format(
response.content_type,
),
},
)
def route_request(path):
"""
Map a path to a resource function
:arg str path: eg "/"
:return: A function Request -> Response
"""
if path == '/':
return resource_root
else:
return resource_not_found
def select_media_type(accept_header, available_types):
"""
Examine the Accept header and select from available_types
https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
"If an Accept header field is present, and if the server cannot send
a response which is acceptable according to the combined Accept
field value, then the server SHOULD send a 406 (not acceptable)
response."
https://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html
"HTTP/1.1 servers are allowed to return responses which are
not acceptable according to the accept headers sent in the request.
In some cases, this may even be preferable to sending a 406
response."
:arg str accept_header: Contents of Accept header
:arg []str available_types: List of available media types
:return: One of available types; the first supplied type will be
returned if no match is found in the Accept header
"""
    for accept_type in accept_types(accept_header):
        media_type = accept_type.partition(';')[0].strip()
        main_type = media_type.partition('/')[0]
        for available in available_types:
            # accept an exact match, a full wildcard, or a "type/*" wildcard
            if media_type in ('*/*', available) or media_type == main_type + '/*' and available.startswith(main_type + '/'):
                return available
    # no acceptable match: fall back to the first supplied type
    return available_types[0]
def accept_types(accept_header):
"""
Turn Accept header into a generator of acceptable types
Highest precedence comes first. Examples: "text/html", "text/*",
"application/xml", "*/*"
"""
parts = accept_header.split(',')
i = 0
qs = {}
while i < len(parts):
q = accept_q(parts[i])
if q > 0.99:
yield parts.pop(i)
else:
qs[parts[i]] = q
i += 1
parts.sort(key=qs.get, reverse=True)
for p in parts:
yield p
def accept_q(accept_part):
"""
The float "q" value for one comma-delimited Accept header part
"""
accept_type, sep, accept_params = [
p.strip() for p in accept_part.partition(';')
]
del accept_type
if not accept_params.startswith('q'):
return 1.0
literal_q, sep, qvalue_etc = [
p.strip() for p in accept_params.partition('=')
]
del sep, literal_q
q, _, __ = qvalue_etc.partition(';')
return float(q)
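# For example, accept_types('text/html,application/xml;q=0.9,*/*;q=0.8') yields
# 'text/html' first (implicit q=1.0), then 'application/xml;q=0.9', then '*/*;q=0.8'.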
def resource_root(request):
""" The top level path """
return Response(
status=200,
content_type='application/json',
headers=dict(),
body=json.dumps(dict(
path=request.path,
method=request.method,
body=request.body,
query=request.query,
headers=request.headers,
))
)
def resource_not_found(request):
""" A resource to use when one could not be found """
return resource_root(request)._replace(status=404)
|
import re
import time
import argparse
import collections
import unicodedata
def parseArgs():
parser = argparse.ArgumentParser()
egroup = parser.add_mutually_exclusive_group(required=True)
egroup.add_argument('-C', '--counting-words', action='store_true',
help='outputs the total number of words in FILE')
egroup.add_argument('-W', '--word-count', action='store_true',
help='outputs the occurrences of each word in FILE')
parser.add_argument('-b', '--benchmark', action='store_true',
help='logs the execution time')
parser.add_argument('FILE', help='input file', type=argparse.FileType('r'))
return parser.parse_args()
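# Example invocation (script name is illustrative):
#   python wordcount.py --word-count --benchmark corpus.txt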
def timing(func):
def wrapper(*arg, **kw):
ts = time.time()
res = func(*arg, **kw)
te = time.time()
return (te - ts), res
return wrapper
def findWords(text):
    text = text.lower()
    # Strip accents: decompose with NFKD, drop the non-ASCII combining marks, return to str.
    text = unicodedata.normalize('NFKD', text).encode('ASCII', 'ignore').decode('ASCII')
    return re.findall(r'\w+', text)
@timing
def countingWords(text):
return len(findWords(text))
@timing
def wordCount(text):
return collections.Counter(findWords(text))
if __name__ == '__main__':
args = parseArgs()
text = args.FILE.read()
args.FILE.close()
if args.counting_words:
time_taken, res = countingWords(text)
print('{} words'.format(res))
if args.word_count:
time_taken, res = wordCount(text)
res = [[key, value] for key, value in res.items()]
res.sort(key=lambda x: x[1], reverse=True)
print('; '.join([e[0] + ', ' + str(e[1]) for e in res]))
if args.benchmark:
print('Time taken {:.2f}s'.format(time_taken))
|
from numpy import random, dot, exp, array
class NeuralNetwork:
def __init__(self):
random.seed(1)
self.weights = 2 * random.random((3, 1)) - 1
def train(self, inputs, outputs, num):
for iteration in range(num):
output = self.think(inputs)
error = outputs - output
adjustment = dot(inputs.T, error * output * (1 - output))
self.weights += adjustment
def think(self, inputs):
return self.__sigmoid(dot(inputs, self.weights))
def __sigmoid(self, x):
return 1 / (1 + exp(-x))
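# Note: ``error * output * (1 - output)`` scales the prediction error by the sigmoid
# derivative, so each iteration of train() is one full-batch gradient-descent step
# (learning rate 1) on a squared-error loss for this single-layer network.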
def main():
network = NeuralNetwork()
inputs = array([[1, 1, 1], [1, 0, 1], [0, 1, 1]])
outputs = array([[1, 1, 0]]).T
network.train(inputs, outputs, 10000)
print(network.think(array([1, 0, 0])))
if __name__ == '__main__':
main()
|
def prepare_options_for_plugin(module, readopt):
"""Resets P::e.options with options from the C++ "read_options"
block *readopt* labeled for *module*.
"""
import psi4
options = psi4.core.get_options()
options.set_read_globals(True)
options.add_int("PBP_C_POW",0);
readopt(module, options)
options.set_read_globals(False)
|
from enum import Enum
class EnableStateEnum(Enum):
Enable = 2
Disable = 3
Shut_Down = 4
Offline = 6
Test = 7
Defer = 8
Quiesce = 9
Starting = 10
class HealthStateEnum(Enum):
OK = 5
Major_Failure = 20
Critical_Failure = 25
class OperationalStatusEnum(Enum):
Creating_Snapshot = (11, 32768)
Applying_Snapshot = (11, 32769)
Deleting_Snapshot = (11, 32770)
Waiting_Start = (11, 32771)
Merge_Disks = (11, 32772)
Export_Virtual_Machine = (11, 32773)
Migrate_Virtual_Machine = (11, 32774)
Operational_Status_OK = (2,)
class TaskStateEnum(Enum):
Completed = 0
TransitionStarted = 4096
AccessDeined = 32769
InvalidState = 32775
class ResponseEnum(Enum):
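# Human-readable (Chinese) status strings returned to the caller; the member names
# mirror the state/status enums defined above.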
Enable = '正在运行'
Disable = '关机'
Shut_Down = '关机状态'
Offline = '已保存'
Test = '测试状态'
Defer = '延迟状态'
Quiesce = '静止状态'
Starting = '正在启动'
OK = '正常'
Major_Failure = '主要故障'
Critical_Failure = '严重故障'
Creating_Snapshot = '创建快照'
Applying_Snapshot = '应用快照'
Deleting_Snapshot = '删除快照'
Waiting_Start = '正在等待'
Merge_Disks = '合并磁盘'
Export_Virtual_Machine = '导出虚拟机'
Migrate_Virtual_Machine = '迁移虚拟机'
Operational_Status_OK = '正常运行'
Completed = '完成'
TransitionStarted = '任务开始'
AccessDeined = '拒绝访问'
InvalidState = '非法状态'
class ApplicationEnum(Enum):
Browser = ('Google Chrome', 'Firefox', 'IE', 'Microsoft Edge')
Office = ('Microsoft Excel', 'Microsoft PowerPoint',
'Microsoft Publisher', 'Microsoft Outlook',
'Microsoft Word', 'Microsoft Access','Microsoft Office','Microsoft OneNote','Microsoft 365','WPS')
UiBot = ('UiBot',) |
from neo4j.v1 import GraphDatabase, basic_auth
from datetime import datetime
import re
job_1 = {
"title": "Senior .NET Developers",
"category": "\u0418\u0422 - \u0420\u0430\u0437\u0440\u0430\u0431\u043e\u0442\u043a\u0430/\u043f\u043e\u0434\u0434\u0440\u044a\u0436\u043a\u0430 \u043d\u0430 \u0441\u043e\u0444\u0442\u0443\u0435\u0440",
"level": "\u0415\u043a\u0441\u043f\u0435\u0440\u0442\u0438/\u0421\u043f\u0435\u0446\u0438\u0430\u043b\u0438\u0441\u0442\u0438",
"busy": "\u041f\u044a\u043b\u043d\u043e \u0440\u0430\u0431\u043e\u0442\u043d\u043e \u0432\u0440\u0435\u043c\u0435",
"place": "\u0421\u043e\u0444\u0438\u044f / \u0411\u044a\u043b\u0433\u0430\u0440\u0438\u044f",
"type": "\u041f\u043e\u0441\u0442\u043e\u044f\u043d\u043d\u0430 \u0440\u0430\u0431\u043e\u0442\u0430",
"publicated": "23.09.2016",
"description": "As we are currently growing our development team in Sofia, we are looking\nto hire skilled FullStack .NET developers eager to work on a variety of in-\nhouse projects and technologies according to the highest development\nstandards. As part of our team you will have the opportunity to use your\ncreativity and technical skills to develop and transform our leading platform\nand brands also take part in many other projects. \n \n \nWhat you will be doing: \n \n\u2022 You will participate in the development of web applications using Web Forms\nand MVC. \n\u2022 Collaborate with other team members to share ideas and coordinate efforts to\nensure that project deliverables are met within specific time frame. \n\u2022 Estimate, track and implement development tasks \n \nWhat you need for this position: \n \n\u2022 At least 5+ years of .Net / C# experience with Web applications \n \nRequired experience with: \n \no ASP.Net, Web Services, WCF, Web API \no JavaScript, Angular, HTML 5, CSS, Ajax- \no Minimum 2-3 years in backend software design in SQL Server , Stored\nprocedures, ++ASP.NET, C# , PHP \no Languages: C#, SQL/T-SQL, JavaScript/DHTML, VBScript, HTML, XML,PHP- \no Some experience with front end UI design preferred \n \n\u2022 Experience in Object Oriented Design and Programming, Multi-threading, web\nservices \n\u2022 Knowledge and experience in Design Patterns & Principles, System\nArchitecture and Distributed systems \n\u2022 Ability to complete all phases of software development life cycle with\nminimal supervision \n\u2022 Bachelor or higher degree in Computer Science or equivalent \n\u2022 Ability to develop large scale web/database applications and to work on\nmultiple projects with multiple deadlines. \n\u2022 Ability to communicate clearly with business users and project manager. \n\u2022 Ability to innovate and provide functional applications with intuitive\ninterfaces. \n\u2022 Excellent personal and communication skills \n\u2022 English language skills on advanced level \n \n \nIf you are willing to work in a fast paced, highly collaborative, dynamic work\nenvironment, then this is the right place for you! Please send us your CV in\nEnglish. Keep in mind that only shortlisted candidates will be contacted.",
"company": "\u0424\u0438\u0440\u043c\u0430/\u041e\u0440\u0433\u0430\u043d\u0438\u0437\u0430\u0446\u0438\u044f \u0434\u0438\u0440\u0435\u043a\u0442\u043d\u043e \u0442\u044a\u0440\u0441\u0435\u0449\u0430 \u0441\u043b\u0443\u0436\u0438\u0442\u0435\u043b\u0438."
}
job_2 = {
"title": "Software Engineer",
"category": "\u0418\u0422 - \u0420\u0430\u0437\u0440\u0430\u0431\u043e\u0442\u043a\u0430/\u043f\u043e\u0434\u0434\u0440\u044a\u0436\u043a\u0430 \u043d\u0430 \u0441\u043e\u0444\u0442\u0443\u0435\u0440",
"level": "\u0415\u043a\u0441\u043f\u0435\u0440\u0442\u0438/\u0421\u043f\u0435\u0446\u0438\u0430\u043b\u0438\u0441\u0442\u0438",
"busy": "\u041f\u044a\u043b\u043d\u043e \u0440\u0430\u0431\u043e\u0442\u043d\u043e \u0432\u0440\u0435\u043c\u0435",
"place": "\u0421\u043e\u0444\u0438\u044f / \u0411\u044a\u043b\u0433\u0430\u0440\u0438\u044f",
"type": "\u041f\u043e\u0441\u0442\u043e\u044f\u043d\u043d\u0430 \u0440\u0430\u0431\u043e\u0442\u0430",
"publicated": "21.09.2016",
"description": "This is a fantastic opportunity to join our Software Development team based\nin our Office in Sofia. \nWe are looking for an exceptional individual whose key responsibilities will\nbe to: \n\u2022 Design and develop web applications in a multi-tier Java EE environment \n\u2022 Support database schemas designs and query optimization \n\u2022 Identify and apply appropriate modern technologies and techniques for\nsoftware development \nYou should be able to demonstrate at least 1 year on the job working\nexperience with Java EE and Enterprise Design patterns. Advance experience in\nrelational database design and SQL is essential. Team player with strong\ncommunication skills and fluent in English are also expected. You should\npossess a BSc or similar computer/engineering degree. Key qualifications are: \n\u2022 Experience in MVC model and good knowledge on the following technologies:\nSpring, Struts2, Hibernate, Ibatis, Maven \n\u2022 Experience in relational database design and SQL \n\u2022 Knowledge of XML, XSL, CSS/3, Javascript \nIf you want to make a difference and work with a top-rated team of talented\nindividuals, come and join us. We offer a friendly, diverse work environment\nand a very competitive benefits package. \nIf this position is of interest to you and match your background and career\nneeds, we can\u2019t wait to hear from you! Please send your CV",
"company": "\u0424\u0438\u0440\u043c\u0430/\u041e\u0440\u0433\u0430\u043d\u0438\u0437\u0430\u0446\u0438\u044f \u0434\u0438\u0440\u0435\u043a\u0442\u043d\u043e \u0442\u044a\u0440\u0441\u0435\u0449\u0430 \u0441\u043b\u0443\u0436\u0438\u0442\u0435\u043b\u0438."
}
jobs = [job_1, job_2]
job_1['company'] = "РЕГУЛУС СЪРВИСИЗ ЕООД"
job_2['company'] = "МОТИВИАН ЕООД"
CAT_SOF_DEV = "Software Development"
CITY_SOF = "Sofia"
BUSY_FULL = "Full Time"
TYPE_PERM = "Permament"
LEVEL_EXPRTS = "Experts/Specialists"
DATAFORMAT = '%d.%m.%Y'
job_1['identificator'] = ""
job_2['identificator'] = ""
for job in jobs:
for key in job_1.keys():
job[key] = job[key].encode(encoding='UTF-8').decode("utf-8", "strict")
job['category'] = CAT_SOF_DEV
job['place'] = CITY_SOF
job['busy'] = BUSY_FULL
job['type'] = TYPE_PERM
job['level'] = LEVEL_EXPRTS
job['identificator'] = re.sub(r"(\s+)", "_", job['title']).lower()
# datetime is not supported in neo4j yet
# job['publicated'] = datetime.strptime(job['publicated'], DATAFORMAT)
# print(job)
# print(50 * '<>')
#
# Neo4j - Cypher
#
driver = GraphDatabase.driver("bolt://localhost:7687", auth=basic_auth("neo4j", "neo4j"))
session = driver.session()
# session.run("MATCH (n) OPTIONAL MATCH (n)-[r]-() DELETE n,r")
for job in jobs:
# creating Company
session.run(
"MERGE (n:Company {name: {company_name}})",
{'company_name': job['company']}
)
# creating Category
session.run(
"MERGE (n:Category {name: {category_name}})",
{'category_name': job['category']}
)
# creating City
session.run(
"MERGE (n:City {name: {city_name}})",
{'city_name': job['place']}
)
# creating JobOffer
session.run(
"MERGE (n:JobOffer {name:{headline}, level:{level}, busy:{busy}, type:{type}, description:{description}, publicated:{publicated}, identificator:{identificator}})",
{
'headline': job['title'],
'level': job['level'],
'busy': job['busy'],
'type': job['type'],
'description': job['description'],
'publicated': job['publicated'],
'identificator': job['identificator']
}
)
# creating relations
session.run(
"MATCH (a:JobOffer), (b:Company), (c:City), (d:Category)"
"WHERE a.name = {job_name} AND b.name = {company_name} AND c.name = {city_name} AND d.name = {category_name}"
"MERGE (a)-[:LOCATED_IN]->(c)"
"MERGE (a)-[:IS_IN]->(d)"
"MERGE (a)-[:PUBLISHED_BY]->(b)",
{
'job_name': job['title'],
'company_name': job['company'],
'city_name': job['place'],
'category_name': job['category']
}
)
session.close()
|
import argparse as ap
import hail
from pprint import pprint
import time
from hail_scripts.v01.utils.add_clinvar import download_and_import_latest_clinvar_vcf, CLINVAR_VDS_PATH
from hail_scripts.v01.utils.vds_utils import write_vds
p = ap.ArgumentParser()
p.add_argument("-g", "--genome-version", help="Genome build: 37 or 38", choices=["37", "38"], required=True)
args = p.parse_args()
hc = hail.HailContext(log="./hail_{}.log".format(time.strftime("%y%m%d_%H%M%S")))
vds = download_and_import_latest_clinvar_vcf(hc, args.genome_version)
pprint(vds.variant_schema)
output_vds_path = CLINVAR_VDS_PATH.format(genome_version=args.genome_version)
write_vds(vds, output_vds_path)
|
from itertools import permutations
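# Project Euler 336 ("Maximix Arrangements"): an arrangement of n carriages is a maximix
# if sorting it requires the maximal number of rotations, 2*n - 3. The call at the bottom
# asks for the 2011th such arrangement of 11 carriages; permutations() supplies candidates
# in lexicographic order.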
def find_maximix_arrangement(n, k):
chars = [chr(i) for i in range(65, 65 + n)]
maximum = 2 * n - 3
count = 0
for order in permutations(chars):
order = "".join(order)
if solve_order(order, n) == maximum:
count += 1
if count == k:
return order
def solve_order(order, n):
steps = 0
for i in range(n):
pos = order.index(chr(i + 65))
if pos == i:
pass
elif pos == n - 1:
order = order[:i] + order[i:][::-1]
steps += 1
else:
order = order[:pos] + order[pos:][::-1]
order = order[:i] + order[i:][::-1]
steps += 2
return steps
if __name__ == "__main__":
print(find_maximix_arrangement(11, 2011))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-28 20:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('acquisitions', '0011_auto_20160628_2013'),
]
operations = [
migrations.AlterModelOptions(
name='stage',
options={'ordering': ('order',)},
),
migrations.AddField(
model_name='stage',
name='order',
field=models.PositiveIntegerField(db_index=True, default=0, editable=False),
preserve_default=False,
),
]
|
'''
==================================================================================================
Script to calculate the IPA dielectric function of two-phase binary alloys (A_{1-x}B_{x})
==================================================================================================
method:
-- average: mixture of dielectric function of the two phases weighted by the alloy composition
-- refractive: mixture of refractive index of the two phases weighted by the alloy composition
-- bruggeman: compute dielectric function of the alloy using Bruggeman theory
sys.argv[1] = method
sys.argv[2] = composition (atomic fraction)
sys.argv[3] = A
sys.argv[4] = B
sys.argv[5] = file name with dielectric function of A
sys.argv[6] = file name with dielectric function of B
'''
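# Example invocation (script name is illustrative):
#   python alloy_dielectric.py bruggeman 0.25 Au Ag eps_Au.dat eps_Ag.dat
# where each .dat file holds three columns: energy (eV), Im(eps), Re(eps).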
import os, json, sys
import numpy as np
import math
import colour
procedure = sys.argv[1]
# atomic fraction of element B
x = float(sys.argv[2])
element_a = sys.argv[3]
element_b = sys.argv[4]
eps_filename_a = sys.argv[5]
eps_filename_b = sys.argv[6]
## Here I take the interband epsilon from the output file of Yambo
data_a = np.genfromtxt(eps_filename_a)
energies_a = data_a[:,0]
eps_im_a = data_a[:,1]
eps_re_a = data_a[:,2]
data_b = np.genfromtxt(eps_filename_b)
energies_b = data_b[:,0]
eps_im_b = data_b[:,1]
eps_re_b = data_b[:,2]
# Check that the energy intervals of the two elements are the same
if len(energies_a) != len(energies_b):
print 'Error: different number of energy values!'
sys.exit()
#for i in xrange(len(energies_a)-1):
# if round(energies_a[i],2) != round(energies_b[i],2):
# print 'Error: different energy values!'
# sys.exit()
energies = energies_a
if procedure == 'average':
eps_im = (1.0 - x)*eps_im_a + x*eps_im_b
eps_re = (1.0 - x)*eps_re_a + x*eps_re_b
norm_epsilon = np.sqrt(eps_re**2 + eps_im**2)
refractive_index = np.sqrt( ( eps_re + norm_epsilon ) / 2. )
extint_coeff = np.sqrt( ( -eps_re + norm_epsilon ) / 2. )
elif procedure == 'bruggeman':
eps_a = eps_re_a + 1j*eps_im_a
eps_b = eps_re_b + 1j*eps_im_b
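# eps_1/eps_2 below are the two roots of the Bruggeman effective-medium condition for a
# two-phase mixture with spherical inclusions,
#   (1-x)*(eps_a - eps)/(eps_a + 2*eps) + x*(eps_b - eps)/(eps_b + 2*eps) = 0,
# which reduces to the quadratic 2*eps**2 - B*eps - eps_a*eps_b = 0 with
# B = -3*eps_a*x + 2*eps_a + 3*eps_b*x - eps_b; the physical root is the one whose
# imaginary part is positive (selected in the loop below).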
eps_1 = 1./4. * ( np.sqrt( ( -3*eps_a*x +2*eps_a + 3*eps_b*x - eps_b )**2 + 8*eps_a*eps_b ) -3*eps_a*x + 2*eps_a + 3*eps_b*x - eps_b)
eps_2 = 1./4. * ( - np.sqrt( ( -3*eps_a*x +2*eps_a + 3*eps_b*x - eps_b )**2 + 8*eps_a*eps_b ) -3*eps_a*x + 2*eps_a + 3*eps_b*x - eps_b)
eps_re = []
eps_im = []
for i in xrange(len(energies)):
if eps_1.imag[i]>0.0 and eps_2.imag[i]<0.0:
eps_re.append(eps_1.real[i])
eps_im.append(eps_1.imag[i])
elif eps_2.imag[i]>0.0 and eps_1.imag[i]<0.0:
eps_re.append(eps_2.real[i])
eps_im.append(eps_2.imag[i])
else:
print i, eps_1.imag[i], eps_2.imag[i]
#sys.exit('Error in solving Bruggeman equation')
eps_re = np.array(eps_re)
eps_im = np.array(eps_im)
norm_epsilon = np.sqrt(eps_re**2 + eps_im**2)
refractive_index = np.sqrt( ( eps_re + norm_epsilon ) / 2. )
extint_coeff = np.sqrt( ( -eps_re + norm_epsilon ) / 2. )
elif procedure == 'refractive':
norm_epsilon_a = np.sqrt(eps_re_a**2 + eps_im_a**2)
refractive_index_a = np.sqrt( ( eps_re_a + norm_epsilon_a ) / 2. )
extint_coeff_a = np.sqrt( ( -eps_re_a + norm_epsilon_a ) / 2. )
norm_epsilon_b = np.sqrt(eps_re_b**2 + eps_im_b**2)
refractive_index_b = np.sqrt( ( eps_re_b + norm_epsilon_b ) / 2. )
extint_coeff_b = np.sqrt( ( -eps_re_b + norm_epsilon_b ) / 2. )
refractive_index = (1.0 - x)*refractive_index_a + x*refractive_index_b
extint_coeff = (1.0 - x)*extint_coeff_a + x*extint_coeff_b
else:
sys.exit()
reflectivity = ( (refractive_index - 1)**2 + extint_coeff**2 ) / ( (refractive_index + 1)**2 + extint_coeff**2 )
alloy = '{}{}{}{}'.format(element_a, 1.0-x, element_b, x)
if procedure == 'average' or procedure == 'bruggeman':
with open('epsilon_{}.dat'.format(alloy),'w') as o:
for i in xrange(len(energies)-1):
o.write(str(energies[i]))
o.write(' '+str(eps_im[i]))
o.write(' '+str(eps_re[i]))
o.write('\n')
## I write to file: reflectivity
with open('reflectivity_{}.dat'.format(alloy),'w') as o:
for i in xrange(len(energies)-1):
o.write(str(energies[i]))
o.write(' '+str(reflectivity[i]))
o.write('\n')
# Reflectivity as a function of the wavelength
wavelengths_nm = 1239.8 / energies # in nm
with open('reflectivity_{}_lambda.dat'.format(alloy),'w') as o:
for i in xrange(len(wavelengths_nm)-1):
o.write(str(wavelengths_nm[i]))
o.write(' '+str(reflectivity[i]))
o.write('\n')
### Calculate colour coordinates
# take the files with the D65 and the CMFs from the folder of the colour module
file_d65illuminant = '{}/D65_illuminant_1nm.dat'.format(os.path.dirname(colour.__file__))
file_cmf = '{}/cmf_1nm.dat'.format(os.path.dirname(colour.__file__))
# calculate colour using the colour module
colours = colour.calcColour(energies, reflectivity, file_d65illuminant, file_cmf, do_plot=False)
colours.pop('Fit_residuals')
# write colour coordinates to file in json format
with open('colour_{}.dat'.format(alloy),'w') as o:
json.dump(colours, o, indent=4)
|
class Service(object):
def __init__(self, host):
self._host = host
def start(self, service):
return self.do(service, 'start')
def stop(self, service):
return self.do(service, 'stop')
def restart(self, service):
return self.do(service, 'restart')
def status(self, service):
return self.do(service, 'status')
class Upstart(Service):
def __init__(self, host):
Service.__init__(self, host)
def do(self, service, action):
return self._host.execute('service', service, action)
class Systemd(Service):
def __init__(self, host):
Service.__init__(self, host)
def do(self, service, action):
return self._host.execute('systemctl', action, service)
class Initd(Service):
def __init__(self, host):
Service.__init__(self, host)
def do(self, service, action):
return self._host.execute('/etc/init.d/%s' % service, action)
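# Usage sketch: given a ``host`` object exposing ``execute(*args)`` (assumed by these
# classes), ``Systemd(host).restart('nginx')`` dispatches ``host.execute('systemctl',
# 'restart', 'nginx')``, while ``Initd(host).status('nginx')`` runs ``/etc/init.d/nginx status``.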
|
import rouge
from typing import List
def evaluate_rouge(hypo_texts: List[str], ref_texts: List[str]) -> dict:
print('Evaluating ROUGE...')
rouge_scorer = rouge.Rouge()
averaged_scores = rouge_scorer.get_scores(hyps=hypo_texts, refs=ref_texts, avg=True)
results = {}
for k, v in averaged_scores.items():
results[k] = v['f']
return results
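# Usage sketch (key names follow the ``rouge`` package convention and may vary by version):
#   scores = evaluate_rouge(["the cat sat on the mat"], ["a cat sat on a mat"])
#   # -> e.g. {'rouge-1': ..., 'rouge-2': ..., 'rouge-l': ...}, each the averaged F1 score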
|
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import io
import logging
import os
import re
import sys
from gunicorn.http.message import HEADER_RE
from gunicorn.http.errors import InvalidHeader, InvalidHeaderName
from gunicorn import SERVER_SOFTWARE
import gunicorn.util as util
# Send files in at most 1GB blocks as some operating systems can have problems
# with sending files in blocks over 2GB.
BLKSIZE = 0x3FFFFFFF
# exclude control characters; this would arguably read better as BAD_HEADER_VALUE_RE
HEADER_VALUE_RE = re.compile(r'[\x00-\x1F\x7F]')
log = logging.getLogger(__name__)
class FileWrapper(object):
def __init__(self, filelike, blksize=8192):
self.filelike = filelike
self.blksize = blksize
if hasattr(filelike, 'close'):
self.close = filelike.close
def __getitem__(self, key):
data = self.filelike.read(self.blksize)
if data:
return data
raise IndexError
# done: wraps an IO object that forwards error output to the error log's handlers
class WSGIErrorsWrapper(io.RawIOBase):
def __init__(self, cfg):
# There is no public __init__ method for RawIOBase so
# we don't need to call super() in the __init__ method.
# pylint: disable=super-init-not-called
errorlog = logging.getLogger("gunicorn.error")
handlers = errorlog.handlers
self.streams = []
if cfg.errorlog == "-":
self.streams.append(sys.stderr)
handlers = handlers[1:]  # the first handler already writes to stderr
for h in handlers:
if hasattr(h, "stream"):
self.streams.append(h.stream)
def write(self, data):
for stream in self.streams:
try:
stream.write(data)
except UnicodeError:
stream.write(data.encode("UTF-8"))
stream.flush()
# done
def base_environ(cfg):
return {
"wsgi.errors": WSGIErrorsWrapper(cfg),
"wsgi.version": (1, 0),
"wsgi.multithread": False,
"wsgi.multiprocess": (cfg.workers > 1),
"wsgi.run_once": False,
# the application's return value may be a FileWrapper (or any iterable)
"wsgi.file_wrapper": FileWrapper,
"wsgi.input_terminated": True,
"SERVER_SOFTWARE": SERVER_SOFTWARE,
}
# done
def default_environ(req, sock, cfg):
env = base_environ(cfg)
env.update({
"wsgi.input": req.body,
"gunicorn.socket": sock,
"REQUEST_METHOD": req.method,
"QUERY_STRING": req.query,
"RAW_URI": req.uri,
"SERVER_PROTOCOL": "HTTP/%s" % ".".join([str(v) for v in req.version])
})
return env
# done
def proxy_environ(req):
info = req.proxy_protocol_info
if not info:
return {}
return {
"PROXY_PROTOCOL": info["proxy_protocol"],
"REMOTE_ADDR": info["client_addr"],
"REMOTE_PORT": str(info["client_port"]),
"PROXY_ADDR": info["proxy_addr"],
"PROXY_PORT": str(info["proxy_port"]),
}
# environ example
"""
{'HTTP_ACCEPT': 'text/html,application/xhtml+xml,application/xml',
'HTTP_ACCEPT_ENCODING': 'gzip, deflate, br',
'HTTP_ACCEPT_LANGUAGE': 'zh,en-US;q=0.9,en;q=0.8,zh-CN;q=0.7',
'HTTP_CACHE_CONTROL': 'max-age=0',
'HTTP_CONNECTION': 'keep-alive',
'HTTP_COOKIE': 'Pycharm-209c82f=83317ddd-a999-436e-a535-3239a0441752',
'HTTP_DNT': '1',
'HTTP_HOST': 'localhost:8000',
'HTTP_SEC_FETCH_MODE': 'navigate',
'HTTP_SEC_FETCH_SITE': 'none',
'HTTP_SEC_FETCH_USER': '?1',
'HTTP_UPGRADE_INSECURE_REQUESTS': '1',
'HTTP_USER_AGENT': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6)',
'PATH_INFO': '/test-db/1',
'QUERY_STRING': '',
'RAW_URI': '/test-db/1',
'REMOTE_ADDR': '127.0.0.1',
'REMOTE_PORT': '56199',
'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8000',
'SERVER_PROTOCOL': 'HTTP/1.1',
'SERVER_SOFTWARE': 'gunicorn/20.0.0',
'gunicorn.socket': <socket.socket fd=9, family=AddressFamily.AF_INET, type=SocketKind.SOCK_STREAM, proto=0, laddr=('127.0.0.1', 8000), raddr=('127.0.0.1', 56199)>,
'wsgi.errors': <gunicorn.http.wsgi.WSGIErrorsWrapper object at 0x10e7484e0>,
'wsgi.file_wrapper': <class 'gunicorn.http.wsgi.FileWrapper'>,
'wsgi.input': <gunicorn.http.body.Body object at 0x10e7485f8>,
'wsgi.input_terminated': True,
'wsgi.multiprocess': False,
'wsgi.multithread': False,
'wsgi.run_once': False,
'wsgi.url_scheme': 'http',
'wsgi.version': (1, 0)}
"""
# done
def create(req, sock, client, server, cfg):
"""
:param req: the incoming ``.message.Request``
:param sock: client sock
:param client: client addr, e.g. ``('127.0.0.1', 10000)``
:param server: server sock, e.g. ``('127.0.0.1', 8000)``
"""
resp = Response(req, sock, cfg)
# set initial environ
environ = default_environ(req, sock, cfg)
# default variables
host = None
script_name = os.environ.get("SCRIPT_NAME", "")
# add the headers to the environ
for hdr_name, hdr_value in req.headers:
if hdr_name == "EXPECT":
# https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Expect
if hdr_value.lower() == "100-continue":
sock.send(b"HTTP/1.1 100 Continue\r\n\r\n")
elif hdr_name == 'HOST':
host = hdr_value
elif hdr_name == "SCRIPT_NAME":
script_name = hdr_value
elif hdr_name == "CONTENT-TYPE":
environ['CONTENT_TYPE'] = hdr_value
continue
elif hdr_name == "CONTENT-LENGTH":
environ['CONTENT_LENGTH'] = hdr_value
environ['wsgi.input_terminated'] = False
continue
# todo: why are the remaining request headers folded together like this?
key = 'HTTP_' + hdr_name.replace('-', '_')
if key in environ:
hdr_value = "%s,%s" % (environ[key], hdr_value)
environ[key] = hdr_value
# set the url scheme
environ['wsgi.url_scheme'] = req.scheme
# set the REMOTE_* keys in environ
# authors should be aware that REMOTE_HOST and REMOTE_ADDR
# may not qualify the remote addr: http://www.ietf.org/rfc/rfc3875
if isinstance(client, str):
environ['REMOTE_ADDR'] = client
elif isinstance(client, bytes):
environ['REMOTE_ADDR'] = client.decode()
else:
environ['REMOTE_ADDR'] = client[0]
environ['REMOTE_PORT'] = str(client[1])
# handle the SERVER_*
# Normally only the application should use the Host header but since the
# WSGI spec doesn't support unix sockets, we are using it to create
# viable SERVER_* if possible.
if isinstance(server, str):
server = server.split(":")
if len(server) == 1:
# unix socket
if host:
server = host.split(':')
if len(server) == 1:
if req.scheme == "http":
server.append(80)
elif req.scheme == "https":
server.append(443)
else:
server.append('')
else:
# no host header given which means that we are not behind a
# proxy, so append an empty port.
server.append('')
environ['SERVER_NAME'] = server[0]
environ['SERVER_PORT'] = str(server[1])
# set the path and script name
path_info = req.path
if script_name:
path_info = path_info.split(script_name, 1)[1]
environ['PATH_INFO'] = util.unquote_to_wsgi_str(path_info)
environ['SCRIPT_NAME'] = script_name
# override the environ with the correct remote and server address if
# we are behind a proxy using the proxy protocol.
environ.update(proxy_environ(req))
return resp, environ
class Response(object):
def __init__(self, req, sock, cfg):
self.req = req
self.sock = sock # client
self.version = SERVER_SOFTWARE
self.status = None
self.status_code = None
self.chunked = False
self.must_close = False
self.headers = []
self.headers_sent = False
self.response_length = None
self.sent = 0  # number of bytes sent so far
# Whether the connection is being upgraded. A connection may start on a common
# protocol (e.g. HTTP/1.1) and then be upgraded, via Upgrade negotiation, to
# HTTP/2 or even WebSockets. For a WebSocket upgrade the request headers would be:
# Connection: Upgrade
# Upgrade: websocket
# https://developer.mozilla.org/zh-CN/docs/Web/HTTP/Protocol_upgrade_mechanism
self.upgrade = False
self.cfg = cfg
# done
def force_close(self):
self.must_close = True
# done
def should_close(self):
if self.must_close or self.req.should_close():
return True
if self.response_length is not None or self.chunked:
return False
# todo: why do HEAD requests and 204/304 responses return False here?
if self.req.method == 'HEAD':
return False
if self.status_code < 200 or self.status_code in (204, 304):
return False
return True
# done
def start_response(self, status, headers, exc_info=None):
if exc_info:
try:
if self.status and self.headers_sent:
util.reraise(exc_info[0], exc_info[1], exc_info[2])
finally:
exc_info = None
elif self.status is not None:
raise AssertionError("Response headers already set!")
self.status = status
# get the status code from the response here so we can use it to check
# the need for the connection header later without parsing the string
# each time.
try:
self.status_code = int(self.status.split()[0])
except ValueError:
self.status_code = None
self.process_headers(headers)
self.chunked = self.is_chunked()
return self.write
# done
def process_headers(self, headers):
for name, value in headers:
if not isinstance(name, str):
raise TypeError('%r is not a string' % name)
if HEADER_RE.search(name): # bad header
raise InvalidHeaderName('%r' % name)
lname = name.lower().strip()
if not isinstance(value, str):
raise TypeError('%r is not a string' % value)
if HEADER_VALUE_RE.search(value): # bad header value
raise InvalidHeader('%r' % value)
value = value.strip()
if lname == "content-length":
# an explicit Content-Length was provided
self.response_length = int(value)
elif util.is_hoppish(name):
# hop-by-hop headers; only the WebSocket upgrade case is handled below
if lname == "connection":
# handle websocket
if value.lower().strip() == "upgrade":
self.upgrade = True
elif lname == "upgrade":
if value.lower().strip() == "websocket":
self.headers.append((name.strip(), value))
# ignore hopbyhop headers
continue
self.headers.append((name.strip(), value))
# done
def is_chunked(self):
# Only use chunked responses when the client is speaking HTTP/1.1
# or newer and there was no Content-Length header set.
if self.response_length is not None:
return False
elif self.req.version <= (1, 0):
return False
elif self.req.method == 'HEAD':
# Responses to a HEAD request MUST NOT contain a response body.
return False
elif self.status_code in (204, 304):
# Do not use chunked responses when the response is guaranteed to
# not have a response body.
return False
return True
# done
def default_headers(self):
# set the connection header
if self.upgrade:
connection = "upgrade"
elif self.should_close():
connection = "close"
else:
connection = "keep-alive"
headers = [
"HTTP/%s.%s %s\r\n" % (self.req.version[0],
self.req.version[1], self.status),
"Server: %s\r\n" % self.version,
"Date: %s\r\n" % util.http_date(),
"Connection: %s\r\n" % connection
]
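# e.g. for an HTTP/1.1 "200 OK" response on a keep-alive connection this yields:
#   ["HTTP/1.1 200 OK\r\n", "Server: gunicorn/<version>\r\n",
#    "Date: <http-date>\r\n", "Connection: keep-alive\r\n"]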
if self.chunked:
headers.append("Transfer-Encoding: chunked\r\n")
return headers
# done
def send_headers(self):
if self.headers_sent:
return
tosend = self.default_headers()
tosend.extend(["%s: %s\r\n" % (k, v) for k, v in self.headers])
header_str = "%s\r\n" % "".join(tosend)
util.write(self.sock, util.to_bytestring(header_str, "latin-1"))
self.headers_sent = True
# done
def write(self, arg):
self.send_headers()
if not isinstance(arg, bytes):
raise TypeError('%r is not a byte' % arg)
total = len(arg)
remain = total
if self.response_length is not None:
if self.sent >= self.response_length:
# Never write more than self.response_length bytes
return
remain = min(self.response_length - self.sent, remain)
if remain < total:
arg = arg[:remain]
# Sending an empty chunk signals the end of the
# response and prematurely closes the response
if self.chunked and remain == 0:
return
self.sent += remain
util.write(self.sock, arg, self.chunked)
# done
def can_sendfile(self):
return self.cfg.sendfile is not False
# sendfile() copies data between one file descriptor and another.
# Because this copying is done within the kernel, sendfile() is more
# efficient than the combination of read(2) and write(2), which would
# require transferring data to and from user space.
def sendfile(self, respiter):
# sendfile cannot be used over HTTPS because the payload must be encrypted first
# https://stackoverflow.com/questions/50792406/issue-when-using-sendfile-with-ssl
if self.cfg.is_ssl or not self.can_sendfile():
return False
if not util.has_fileno(respiter.filelike):
return False
fileno = respiter.filelike.fileno()
try:
offset = os.lseek(fileno, 0, os.SEEK_CUR)
if self.response_length is None:
filesize = os.fstat(fileno).st_size
# The file may be special and sendfile will fail.
# It may also be zero-length, but that is okay.
if filesize == 0:
return False
nbytes = filesize - offset
else:
nbytes = self.response_length
except (OSError, io.UnsupportedOperation):
return False
self.send_headers()
if self.is_chunked():
chunk_size = "%X\r\n" % nbytes
self.sock.sendall(chunk_size.encode('utf-8'))
sockno = self.sock.fileno()
sent = 0
while sent != nbytes:
count = min(nbytes - sent, BLKSIZE)
sent += os.sendfile(sockno, fileno, offset + sent, count)
if self.is_chunked():
self.sock.sendall(b"\r\n")
os.lseek(fileno, offset, os.SEEK_SET)
return True
# done
def write_file(self, respiter):
if not self.sendfile(respiter):
for item in respiter:
self.write(item)
# done
def close(self):
if not self.headers_sent:
self.send_headers()
if self.chunked:
# Sending an empty chunk signals the end of the
# response and prematurely closes the response
util.write_chunk(self.sock, b"")
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 29 11:28:56 2018
@author: wdm
"""
import cv2
import numpy as np
import SimpleITK as sitk
from argparse import ArgumentParser
from matplotlib import pyplot as plt
'''
img = sitk.GetArrayFromImage(sitk.ReadImage('./data/liver_image/test/LZH-Prob.nii'))
for i in range(img.shape[0]):
print ('slice: %d' %i)
a = img[i,:,:]
a = a[::-1]
a = np.expand_dims(a,2)
a = np.concatenate((a,a,a),2)
a = a - a.min()
a = a / a.max()
a = a*255.
a = cv2.resize(a, (512,512))
cv2.imwrite('./data/liver_image/test/ct/img/LZH_ct_%d.png'%i, a)
mask = sitk.GetArrayFromImage(sitk.ReadImage('./data/liver_image/test/LZH-Seg.nii.gz'))
for i in range(mask.shape[0]):
print ('slice: %d' %i)
a = mask[i,:,:]
a = np.float32(a)
a = a - mask.min()
a = a/mask.max()
a = a[::-1]*255.
a = np.expand_dims(a,2)
a = np.concatenate((a,a,a),2)
a = cv2.resize(a, (512,512))
#a = np.float32(a)
#plt.imsave('./data/liver_image/test/ct/mask/LZH_ct_%d.png'%i,a)
cv2.imwrite('./data/liver_image/test/ct/mask/LZH_ct_%d.png'%i,a)
'''
def threscut(subject_data, threshold_min=-500, threshold_max=500):
    # Clip the CT volume to the requested intensity window.
    subject_data[subject_data > threshold_max] = threshold_max
    subject_data[subject_data < threshold_min] = threshold_min
    return subject_data
def normalize(slice_i, nor_min=-500, nor_max=500):
slice_i = np.float32(slice_i)
slice_i = slice_i - nor_min
slice_i = slice_i / np.float32(nor_max-nor_min)
return slice_i
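# threscut() and normalize() together implement a simple intensity window: clip the CT
# volume to [-500, 500] Hounsfield units, then rescale that range to [0, 1].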
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--nii_path", type=str,
dest="nii_path", default='XXX',
help="training, validation (test) data path")
parser.add_argument("--test_subject", type=str,
dest="test_subject", default="XXX")
parser.add_argument("--save_path", type=str,
dest="save_path", default='/data/weidongming/Projects/CT_inpainting/data/ct_images',
help="save path")
parser.add_argument("--index",type=int,
dest="index", default=1,
help="save index")
args = parser.parse_args()
print ("[Info]: processing "+ args.nii_path)
subject = sitk.ReadImage(args.nii_path)
subject_data = sitk.GetArrayFromImage(subject)
threshold = 500
nor=threshold
subject_data = threscut(subject_data, -threshold, threshold)
for i in range(subject_data.shape[0]-2):
slice_i = subject_data[i,::-1,:]
slice_i = normalize(slice_i)
slice_i = cv2.resize(slice_i, (512,512))
slice_i = np.expand_dims(slice_i, axis=2)
slice_i_1 = subject_data[i+1,::-1,:]
slice_i_1 = normalize(slice_i_1)
slice_i_1 = cv2.resize(slice_i_1, (512,512))*255
#slice_i_1 = np.expand_dims(slice_i_1, axis=2)
slice_i_2 = subject_data[i+2,::-1,:]
slice_i_2 = normalize(slice_i_2)
slice_i_2 = cv2.resize(slice_i_2, (512,512))
slice_i_2 = np.expand_dims(slice_i_2, axis=2)
#slice_rgb = np.concatenate((slice_i_1, slice_i_1, slice_i_1), axis=-1)*255
cv2.imwrite(args.save_path + "/{}_ct_{}.png".format(args.index,i), slice_i_1,[int(cv2.IMWRITE_PNG_COMPRESSION), 0])
#plt.imsave(args.save_path + "/{}_ct_{}.png".format(args.index,i), slice_i_1, cmap='gray')
|
'''
Created on Sun 04/28/2020 18:42:19
Hangman Game
@author: MarsCandyBars
'''
import random
#All functions contain pictures for each
#phase of the game
def pic1():
print("""\
______
| |
| |
|
|
|
|
|
===========
""")
def pic2():
print("""\
______
| |
| |
O |
|
|
|
|
===========
""")
def pic3():
print("""\
______
| |
| |
O |
| |
|
|
|
===========
""")
def pic4():
print("""\
______
| |
| |
O |
|\ |
|
|
|
===========
""")
def pic5():
print("""\
______
| |
| |
O |
/|\ |
|
|
|
===========
""")
def pic6():
print("""\
______
| |
| |
O |
/|\ |
/ |
|
|
===========
""")
def pic7():
print("""\
______
| |
| |
O |
/|\ |
/ \ |
|
|
===========
""")
#Initializing blank list for spaces
blank = []
#Initializing list to hold wrong answers
wrong_list = []
#The word bank is for random word selection
word_bank = ['boots','neptune','castle','gorilla','pumpkin']
#This section chooses a random word and shows the initial
#picture
word_random = random.choice(word_bank)
word = list(word_random)
pic1()
#Initializing the number of tries and count for
#seeing if the answer was correct
tries = 6
count = 0
#Using len() to set spaces length for word
for i in range(0, len(word)):
blank.append('_')
#Repeats guess input
while True:
if str(blank) == str(word):
print('YOU WIN!')
quit()
guess = input('GUESS: ')
for i in range(0, len(word)):
if word[i] == guess:
blank[i] = guess
print('CORRECT:')
print(blank)
print('WRONG:')
print(wrong_list, '\n')
count += 1
#Prints each picture depending on the stage of the game
if count == 0:
tries -= 1
wrong_list.append(guess)
if tries == 5:
pic2()
elif tries == 4:
pic3()
elif tries == 3:
pic4()
elif tries == 2:
pic5()
elif tries == 1:
pic6()
print('CORRECT:')
print(blank)
print('WRONG:')
print(wrong_list, '\n')
else:
count = 0
if tries == 0:
pic7()
print('YOU LOSE!')
quit()
|
"""seed location
Revision ID: 7a620d7c68a9
Revises: ef9349c42eaa
Create Date: 2021-08-12 13:17:06.427729
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7a620d7c68a9'
down_revision = 'ef9349c42eaa'
branch_labels = None
depends_on = None
location_table = sa.table('location',
sa.column('tourist_attraction_id', sa.Integer),
sa.column('latitude', sa.String),
sa.column('longitude', sa.String)
)
def upgrade():
op.bulk_insert(location_table,
[
{"tourist_attraction_id": 1, "latitude": -8.2293562, "longitude": 110.958105},
{"tourist_attraction_id": 2, "latitude": -8.226365, "longitude": 110.954433},
{"tourist_attraction_id": 3, "latitude": -8.22479, "longitude": 110.939275},
{"tourist_attraction_id": 4, "latitude": -8.187629, "longitude": 110.954458},
{"tourist_attraction_id": 5, "latitude": -8.226802, "longitude": 110.940681},
{"tourist_attraction_id": 6, "latitude": -8.2245022, "longitude": 110.9408755},
{"tourist_attraction_id": 7, "latitude": -8.2247673, "longitude": 110.9452693},
{"tourist_attraction_id": 8, "latitude": -8.225086, "longitude": 110.950386},
{"tourist_attraction_id": 9, "latitude": -8.229294, "longitude": 110.960355},
{"tourist_attraction_id": 10,"latitude": -8.227047, "longitude": 110.940803},
{"tourist_attraction_id": 11,"latitude": -8.227969, "longitude": 110.956843},
{"tourist_attraction_id": 12,"latitude": -8.224971, "longitude": 110.951992},
{"tourist_attraction_id": 13,"latitude": -8.175843, "longitude": 110.9356253},
{"tourist_attraction_id": 14,"latitude": -8.167969, "longitude": 110.940685},
{"tourist_attraction_id": 15,"latitude": -8.218654, "longitude": 110.920055},
{"tourist_attraction_id": 16,"latitude": -8.219565, "longitude": 110.925031},
{"tourist_attraction_id": 17,"latitude": -8.221456, "longitude": 110.9310737},
{"tourist_attraction_id": 18,"latitude": -8.223411, "longitude": 110.937432},
{"tourist_attraction_id": 19,"latitude": -8.2216836, "longitude": 110.9332644}
]
)
def downgrade():
op.execute("DELETE FROM location")
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from solum.api.controllers import common_types
from solum.api.controllers.v1.datamodel import types as api_types
class Operation(api_types.Base):
"""An Operation resource represents an operation or action.
This is for defining actions that may change the state of the resource they
are related to. For example, the API already provides ways to register,
start, and stop your application (POST an Assembly to register+start, and
DELETE an Assembly to stop) but Operations provide a way to extend the
system to add your own actions such as "pause" and "resume", or "scale_up"
and "scale_down".
"""
documentation = common_types.Uri
"Documentation URI for the operation."
target_resource = common_types.Uri
"Target resource URI to the operation."
@classmethod
def sample(cls):
return cls(uri='http://example.com/v1/operations/resume',
name='resume',
type='operation',
tags=['small'],
project_id='1dae5a09ef2b4d8cbf3594b0eb4f6b94',
user_id='55f41cf46df74320b9486a35f5d28a11',
description='A resume operation',
documentation='http://example.com/docs/resume_op',
target_resource='http://example.com/instances/uuid')
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-05-05 05:57
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('FoBusinessProcess', '0007_businessprocess_fo_client_legal_entity'),
]
operations = [
migrations.RemoveField(
model_name='businessprocess',
name='fo_client_legal_entity',
),
migrations.RemoveField(
model_name='businessprocess',
name='fo_client_person',
),
]
|
import pytest
import numpy as np
import pandas as pd
from numpy.testing import assert_equal, assert_allclose
from sciparse import sampling_period, title_to_quantity, \
to_standard_quantity, frequency_bin_size, quantity_to_title, \
dict_to_string, string_to_dict, is_scalar, column_from_unit, \
cname_from_unit, assertDataDictEqual
from sciparse import assert_allclose_qt, assert_equal_qt, ureg
def test_sampling_period():
data = pd.DataFrame({'Time (ms)': [0, 0.1, 0.2, 0.3, 0.4],
'Values': [0, 1, 2, 3, 4]})
actual_period = sampling_period(data)
desired_period = ureg.ms * 0.1
assert actual_period == desired_period
def test_sampling_period_error():
data = pd.DataFrame({'Time (ms)': [0],
'Values': [0]})
with pytest.raises(ValueError):
actual_period = sampling_period(data)
def test_frequency_bin_error():
data = pd.DataFrame({'frequency (Hz)': [0],
'Values': [0]})
with pytest.raises(ValueError):
actual_bin = frequency_bin_size(data)
def test_quantity_to_title():
quantity = ureg.mV*1.0
desired_title = 'voltage (mV)'
actual_title = quantity_to_title(quantity)
assert_equal(actual_title, desired_title)
quantity = ureg.nA**2*1.0
desired_title = 'power (nA ** 2)'
actual_title = quantity_to_title(quantity)
assert_equal(actual_title, desired_title)
def test_quantity_to_title_with_name():
quantity = ureg.mV * 1.0
desired_name = 'photovoltage'
desired_title = 'photovoltage (mV)'
actual_title = quantity_to_title(quantity, desired_name)
assert_equal(actual_title, desired_title)
def testExtractTimeUnits():
unit_string = 'time (s)'
desired_unit = 1 * ureg.s
actual_unit = title_to_quantity(unit_string)
assert desired_unit == actual_unit
unit_string = 'time (ms)'
desired_unit = 1 * ureg.ms
actual_unit = title_to_quantity(unit_string)
assert desired_unit == actual_unit
unit_string = 'time (us)'
desired_unit = 1 * ureg.us
actual_unit = title_to_quantity(unit_string)
assert desired_unit == actual_unit
unit_string = 'time (ns)'
desired_unit = 1 * ureg.ns
actual_unit = title_to_quantity(unit_string)
assert desired_unit == actual_unit
unit_string = 'time (ps)'
desired_unit = 1 * ureg.ps
actual_unit = title_to_quantity(unit_string)
assert desired_unit == actual_unit
def testExtractElectricalUnits():
unit_string = 'Photocurrent (pA)'
desired_unit = 1 * ureg.pA
actual_unit = title_to_quantity(unit_string)
assert desired_unit == actual_unit
unit_string = 'Photocurrent (nA)'
desired_unit = 1 * ureg.nA
actual_unit = title_to_quantity(unit_string)
assert desired_unit == actual_unit
unit_string = 'current (uA)'
desired_unit = 1 * ureg.uA
actual_unit = title_to_quantity(unit_string)
assert desired_unit == actual_unit
unit_string = 'Jordans (mA)'
desired_unit = 1 * ureg.mA
actual_unit = title_to_quantity(unit_string)
assert desired_unit == actual_unit
unit_string = 'More Current (A)'
desired_unit = 1 * ureg.A
actual_unit = title_to_quantity(unit_string)
assert desired_unit == actual_unit
unit_string = 'Photovoltage (V)'
desired_unit = 1 * ureg.V
actual_unit = title_to_quantity(unit_string)
assert desired_unit == actual_unit
unit_string = 'Photovoltage (mV)'
desired_unit = 1 * ureg.mV
actual_unit = title_to_quantity(unit_string)
assert desired_unit == actual_unit
unit_string = 'Photovoltage (uV)'
desired_unit = 1 * ureg.uV
actual_unit = title_to_quantity(unit_string)
assert desired_unit == actual_unit
unit_string = 'Photovoltage (nV)'
desired_unit = 1 * ureg.nV
actual_unit = title_to_quantity(unit_string)
assert desired_unit == actual_unit
def testExtractSquaredUnits():
unit_string = 'voltage (mV^2)'
desired_unit = 1 * ureg.mV ** 2
actual_unit = title_to_quantity(unit_string)
assert desired_unit == actual_unit
def test_title_to_quantity_squared_2():
unit_string = 'voltage (mV ** 2)'
desired_unit = 1 * ureg.mV ** 2
actual_unit = title_to_quantity(unit_string)
assert desired_unit == actual_unit
def test_title_to_quantity_name():
unit_string = 'photovoltage (kV)'
desired_name = 'photovoltage'
desired_quantity = 1 * ureg.kV
actual_quantity, actual_name = title_to_quantity(
unit_string, return_name=True)
assert_equal_qt(actual_quantity, desired_quantity)
assert_equal(actual_name, desired_name)
def testToStandardUnit():
quantity = 0.1 * ureg.mV
desired_quantity = 0.1 * 1e-3 * ureg.V
actual_quantity = to_standard_quantity(quantity)
assert desired_quantity == actual_quantity
def test_to_standard_quantity_squared():
quantity = 0.1 * ureg.mV ** 2
desired_quantity = 0.1 * 1e-6 * ureg.V ** 2
actual_quantity = to_standard_quantity(quantity)
assert desired_quantity == actual_quantity
def test_to_standard_quantity_psd():
quantity = 0.1 * ureg.mA ** 2 / ureg.Hz
desired_quantity = 0.1 * 1e-6 * ureg.A ** 2 / ureg.Hz
actual_quantity = to_standard_quantity(quantity)
assert desired_quantity == actual_quantity
def test_frequency_bin_size():
psd_data = pd.DataFrame({
'frequency (Hz)': [1.5, 3.0, 4.5],
'power (V^2)': [0, 1, 2]})
actual_quantity = frequency_bin_size(psd_data)
desired_quantity = 1*ureg.Hz*1.5
assert actual_quantity == desired_quantity
def test_dict_to_string():
metadata = {
'wavelength': 10 * ureg.nm,
'material': 'Al',
'replicate': 2}
actual_string = dict_to_string(metadata)
desired_dict = {
'wavelength (nm)': 10,
'material': 'Al',
'replicate': 2}
assert_equal(actual_string, str(desired_dict))
def test_string_to_dict():
input_string = "{'wavelength (nm)': 10, 'material': 'Al', 'replicate': 2}"
desired_dict = {
'wavelength': 10*ureg.nm,
'material': 'Al',
'replicate': 2}
actual_dict = string_to_dict(input_string)
assertDataDictEqual(actual_dict, desired_dict)
def test_is_scalar():
quantity = 5 * ureg.Hz
data_scalar_actual = is_scalar(quantity)
data_scalar_desired = True
assert_equal(data_scalar_actual, data_scalar_desired)
quantity = 6.0 * ureg.Hz
data_scalar_actual = is_scalar(quantity)
data_scalar_desired = True
assert_equal(data_scalar_actual, data_scalar_desired)
quantity = 6.0
data_scalar_actual = is_scalar(quantity)
data_scalar_desired = True
assert_equal(data_scalar_actual, data_scalar_desired)
quantity = np.array([6.0, 2.5]) * ureg.Hz
data_scalar_actual = is_scalar(quantity)
data_scalar_desired = False
assert_equal(data_scalar_actual, data_scalar_desired)
def test_column_from_unit():
input_data = pd.DataFrame({
'Time (ms)': [0, 1, 2, 3],
'Photovoltage (nV)': [0, 1, 4, 5]})
desired_data = ureg.uV * 1e-3 * np.array([0., 1, 4, 5])
actual_data = column_from_unit(input_data, ureg.uV)
assert_allclose_qt(actual_data, desired_data, atol=1e-12)
desired_data = ureg.s * 1e-3 * np.array([0, 1, 2, 3])
actual_data = column_from_unit(input_data, ureg.s)
assert_allclose_qt(actual_data, desired_data, atol=1e-12)
def test_column_from_unit_extra():
input_data = pd.DataFrame({
'Time': [0, 1, 2, 3],
'Photovoltage (nV)': [0, 1, 4, 5],
'Sync': [0, 0, 2, 1]
})
desired_data = ureg.uV * 1e-3 * np.array([0., 1, 4, 5])
actual_data = column_from_unit(input_data, ureg.uV)
assert_allclose_qt(actual_data, desired_data, atol=1e-12)
def test_column_not_found():
input_data = pd.DataFrame({})
with pytest.raises(ValueError):
column_from_unit(input_data, ureg.ms)
def test_cname_from_unit():
input_data = pd.DataFrame({
'Time': [0, 1, 2, 3],
'Photovoltage (nV)': [0, 1, 4, 5],
'Sync': [0, 0, 2, 1]
})
desired_name = 'Photovoltage (nV)'
actual_name = cname_from_unit(input_data, ureg.V)
assert_equal(actual_name, desired_name)
|
"""
Expand the length of the password fields in the galaxy_user table to allow for other hashing schemes
"""
from sqlalchemy import *
from migrate import *
import logging
log = logging.getLogger( __name__ )
def upgrade( migrate_engine ):
meta = MetaData( bind=migrate_engine )
user = Table( 'galaxy_user', meta, autoload=True )
try:
user.c.password.alter(type=String(255))
except:
log.exception( "Altering password column failed" )
def downgrade(migrate_engine):
meta = MetaData( bind=migrate_engine )
user = Table( 'galaxy_user', meta, autoload=True )
try:
user.c.password.alter(type=String(40))
except:
log.exception( "Altering password column failed" )
|
"""Debugging support."""
# Copyright 2021 The IREE Authors
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
from typing import Optional
import logging
import os
import shutil
import sys
import threading
_thread_locals = threading.local()
_invocation_id = 0
def _get_temp_file_saver_stack():
try:
return _thread_locals.temp_file_saver_stack
except AttributeError:
stack = []
_thread_locals.temp_file_saver_stack = stack
return stack
def _interpolate_path_pattern(path_pattern: str, *, invocation_id: str):
  # We do not use str.format() because we do not know the provenance of
# path_pattern. Instead, handle a fixed set of replacements.
path_pattern = path_pattern.replace("{id}", str(invocation_id))
path_pattern = path_pattern.replace("{pid}", str(os.getpid()))
path_pattern = path_pattern.replace("{main}", os.path.basename(sys.argv[0]))
return path_pattern
class TempFileSaver:
"""Manages the saving of temp files resulting from tool invocations.
The TempFileSaver is a thread-local context bound object. An attempt to
create a new one will return the most recent instance created and entered
as a context manager. This allows up-stack callers to establish the
policy for saving temporaries and deep implementations will inherit it.
Proper usage from users wishing to establish a saver context:
with TempFileSaver():
# Do things with temp files.
Proper usage for implementors wishing to use an established saver context
or set up a new one:
with TempFileSaver.implicit() as tfs:
# Do things with temp files.
The outer-most creator can customize it with explicit arguments to __init__
but these will be ignored if an instance is already thread bound.
"""
TEMP_PATH_ENV_KEY = "IREE_SAVE_TEMPS"
@staticmethod
def implicit():
stack = _get_temp_file_saver_stack()
if stack:
return stack[-1]
return TempFileSaver()
def __init__(self,
               temp_path_pattern: Optional[str] = None,
*,
invocation_id: Optional[str] = None):
self.retained = False
self._refcount = 0
if temp_path_pattern is None:
temp_path_pattern = os.environ.get(TempFileSaver.TEMP_PATH_ENV_KEY)
if temp_path_pattern is None:
return
global _invocation_id
if invocation_id is not None:
self.invocation_id = invocation_id
else:
self.invocation_id = _invocation_id
_invocation_id += 1
self.retained_path = _interpolate_path_pattern(
temp_path_pattern, invocation_id=self.invocation_id)
self.retained = True
self._retained_file_names = set()
self._copy_on_finalize = list() # Of (source_path, target_path)
def __enter__(self):
_get_temp_file_saver_stack().append(self)
self._refcount += 1
return self
def __exit__(self, exc_type, exc_value, traceback):
del _get_temp_file_saver_stack()[-1]
self._refcount -= 1
if self._refcount == 0:
self._finalize()
@staticmethod
def current():
try:
return _get_temp_file_saver_stack()[-1]
    except IndexError:
raise RuntimeError("No current TempFileSaver")
def alloc_optional(self,
file_name: str,
*,
export_as: Optional[str] = None) -> Optional[str]:
"""Allocates an optional temporary file.
When in non-retained mode, the return value is 'export_as', meaning that the
file is just some user specified output file.
When in retained mode, the output file will be an index-mangled variant
of 'file_name' under the temp_path. In addition, a mapping will be added
so that upon finalization, the file is also exported to 'export_as' if
specified.
Returns None if neither a user-specified 'export_as' is specified nor in
retained mode.
The distinction between retained temporaries and exports is to help in
cases for when the caller has requested that an artifact be written to
a specific place (i.e. an output file) but for debuggability, we also
want to save it as a temporary. In this case, we save it to the temporary
location and then conclude by moving artifacts to their final location
once the saver goes out of scope.
"""
if not self.retained:
return export_as
alloced_path = self._alloc_retained_path(file_name)
if export_as:
self._copy_on_finalize.append((alloced_path, export_as))
return alloced_path
def _alloc_retained_path(self, file_name: str) -> str:
assert self.retained
index = 0
original_file_name = file_name
while True:
if file_name not in self._retained_file_names:
# First use of this name.
self._retained_file_names.add(file_name)
os.makedirs(self.retained_path, exist_ok=True)
return os.path.join(self.retained_path, file_name)
index += 1
stem, ext = os.path.splitext(original_file_name)
file_name = f"{stem}_{index}{ext}"
def _finalize(self):
if not self.retained:
return
# See which files were materialized.
was_materialized = []
for file_name in self._retained_file_names:
file_path = os.path.join(self.retained_path, file_name)
if os.path.exists(file_path):
was_materialized.append((file_name, file_path))
if was_materialized:
logging.info(
"**** IREE Compiler retained temporary files (%s)***:\n%s",
self.invocation_id, "\n".join([
f" * {file_name} : {file_path}"
for file_name, file_path in was_materialized
]))
for source_path, target_path in self._copy_on_finalize:
if os.path.exists(source_path):
logging.info("Copy retained file to output: %s -> %s", source_path,
target_path)
shutil.copyfile(source_path, target_path)
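# Usage sketch (illustrative; `user_requested_path` and `write_artifact` are
# hypothetical names, not part of this module). With IREE_SAVE_TEMPS set, the
# artifact is also retained under the interpolated temp path:
#
#   with TempFileSaver.implicit() as tfs:
#     out_path = tfs.alloc_optional("input.mlir", export_as=user_requested_path)
#     if out_path:
#       write_artifact(out_path)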
|
from fabric.api import run
from cbagent.collectors.libstats.remotestats import (
RemoteStats, multi_node_task)
class IOstat(RemoteStats):
METRICS = (
("rps", "r/s", 1),
("wps", "w/s", 1),
("rbps", "rkB/s", 1024), # kB -> B
("wbps", "wkB/s", 1024), # kB -> B
("avgqusz", "avgqu-sz", 1),
("await", "await", 1),
("util", "%util", 1),
)
def get_device_name(self, partition):
for path in (partition, '/'):
stdout = run("mount | grep '{} '".format(path),
warn_only=True, quiet=True)
if not stdout.return_code:
return stdout.split()[0]
def get_iostat(self, device):
stdout = run(
"iostat -xk 1 2 {} | grep -v '^$' | tail -n 2".format(device)
)
stdout = stdout.split()
        header = stdout[:len(stdout) // 2]
        data = dict()
        for i, value in enumerate(stdout[len(stdout) // 2:]):
data[header[i]] = value
return data
@multi_node_task
def get_samples(self, partitions):
samples = {}
for purpose, partition in partitions.items():
device = self.get_device_name(partition)
data = self.get_iostat(device)
for shorthand, metric, multiplier in self.METRICS:
key = "{}_{}".format(purpose, shorthand)
samples[key] = float(data[metric]) * multiplier
return samples
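# Sketch of the resulting sample shape (illustrative): for
# partitions={"data": "/data"}, get_samples() yields keys such as
# "data_rps", "data_wps", "data_rbps" and "data_wbps" (converted from kB/s to
# B/s), "data_avgqusz", "data_await" and "data_util".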
|
from manpy.simulation.imports import (
Machine,
BatchSource,
Exit,
Batch,
BatchDecomposition,
BatchReassembly,
Queue,
)
from manpy.simulation.Globals import runSimulation
# define the objects of the model
S = BatchSource(
"S",
"Source",
interArrivalTime={"Fixed": {"mean": 1.5}},
entity="manpy.Batch",
batchNumberOfUnits=100,
)
Q = Queue("Q", "StartQueue", capacity=100000)
BD = BatchDecomposition(
"BC",
"BatchDecomposition",
numberOfSubBatches=4,
processingTime={"Fixed": {"mean": 1}},
)
M1 = Machine("M1", "Machine1", processingTime={"Fixed": {"mean": 0.5}})
Q1 = Queue("Q1", "Queue1", capacity=2)
M2 = Machine("M2", "Machine2", processingTime={"Fixed": {"mean": 1}})
BRA = BatchReassembly(
"BRA",
"BatchReassembly",
numberOfSubBatches=4,
processingTime={"Fixed": {"mean": 0}},
)
M3 = Machine("M3", "Machine3", processingTime={"Fixed": {"mean": 1}})
E = Exit("E", "Exit")
# define the predecessors and successors for the objects
S.defineRouting([Q])
Q.defineRouting([S], [BD])
BD.defineRouting([Q], [M1])
M1.defineRouting([BD], [Q1])
Q1.defineRouting([M1], [M2])
M2.defineRouting([Q1], [BRA])
BRA.defineRouting([M2], [M3])
M3.defineRouting([BRA], [E])
E.defineRouting([M3])
def main(test=0):
# add all the objects in a list
objectList = [S, Q, BD, M1, Q1, M2, BRA, M3, E]
# set the length of the experiment
maxSimTime = 1440.0
# call the runSimulation giving the objects and the length of the experiment
runSimulation(objectList, maxSimTime)
# calculate metrics
working_ratio_M1 = (M1.totalWorkingTime / maxSimTime) * 100
blockage_ratio_M1 = (M1.totalBlockageTime / maxSimTime) * 100
waiting_ratio_M1 = (M1.totalWaitingTime / maxSimTime) * 100
working_ratio_M2 = (M2.totalWorkingTime / maxSimTime) * 100
blockage_ratio_M2 = (M2.totalBlockageTime / maxSimTime) * 100
waiting_ratio_M2 = (M2.totalWaitingTime / maxSimTime) * 100
working_ratio_M3 = (M3.totalWorkingTime / maxSimTime) * 100
blockage_ratio_M3 = (M3.totalBlockageTime / maxSimTime) * 100
waiting_ratio_M3 = (M3.totalWaitingTime / maxSimTime) * 100
# return results for the test
if test:
return {
"batches": E.numOfExits,
"working_ratio_M1": working_ratio_M1,
"blockage_ratio_M1": blockage_ratio_M1,
"waiting_ratio_M1": waiting_ratio_M1,
"working_ratio_M2": working_ratio_M2,
"blockage_ratio_M2": blockage_ratio_M2,
"waiting_ratio_M2": waiting_ratio_M2,
"working_ratio_M3": working_ratio_M3,
"blockage_ratio_M3": blockage_ratio_M3,
"waiting_ratio_M3": waiting_ratio_M3,
}
# print the results
    print("the system produced", E.numOfExits, "batches")
    print("the working ratio of", M1.objName, "is", working_ratio_M1)
    print("the blockage ratio of", M1.objName, "is", blockage_ratio_M1)
    print("the waiting ratio of", M1.objName, "is", waiting_ratio_M1)
    print("the working ratio of", M2.objName, "is", working_ratio_M2)
    print("the blockage ratio of", M2.objName, "is", blockage_ratio_M2)
    print("the waiting ratio of", M2.objName, "is", waiting_ratio_M2)
    print("the working ratio of", M3.objName, "is", working_ratio_M3)
    print("the blockage ratio of", M3.objName, "is", blockage_ratio_M3)
    print("the waiting ratio of", M3.objName, "is", waiting_ratio_M3)
if __name__ == "__main__":
main()
|
from math import sqrt, exp
from scipy.stats import norm
from opfu.bsm import N, bsm_price, d1, d2
def N_d(x):
return norm.pdf(x)
def delta(S0, K, r=0.01, sigma=0.1, T=1, ds=0, is_call=True):
if ds == 0:
        # the theoretical (closed-form) result
if is_call:
return N(d1(S0, K, r, sigma, T))
else:
return N(d1(S0, K, r, sigma, T)) - 1
    # finite-difference approximation (central difference)
p = bsm_price(S0 + ds / 2, K, r, sigma, T, is_call)
m = bsm_price(S0 - ds / 2, K, r, sigma, T, is_call)
return (p - m) / ds
def gamma(S0, K, r=0.01, sigma=0.1, T=1, ds=0, is_call=True):
if ds == 0:
return N_d(d1(S0, K, r, sigma, T)) / (S0 * sigma * sqrt(T))
    # finite-difference approximation (central difference)
p = delta(S0 + ds / 2, K, r, sigma, T, ds, is_call)
m = delta(S0 - ds / 2, K, r, sigma, T, ds, is_call)
return (p - m) / ds
def theta(S0, K, r=0.01, sigma=0.1, T=1, dt=0, is_call=True):
if dt == 0:
return -S0 * N_d(d1(S0, K, r, sigma, T)) * sigma / (2 * sqrt(T)) - r * K * exp(-r * T) * N(
d2(S0, K, r, sigma, T)) \
if is_call else \
-S0 * N_d(d1(S0, K, r, sigma, T)) * sigma / (2 * sqrt(T)) + r * K * exp(-r * T) * N(-d2(S0, K, r, sigma, T))
    # finite-difference approximation (central difference)
p = bsm_price(S0, K, r, sigma, T + dt / 2, is_call)
m = bsm_price(S0, K, r, sigma, T - dt / 2, is_call)
return -(p - m) / dt
def vega(S0, K, r=0.01, sigma=0.1, T=1, dsigma=0, is_call=True):
if dsigma == 0:
return S0 * sqrt(T) * N_d(d1(S0, K, r, sigma, T))
    # finite-difference approximation (central difference)
p = bsm_price(S0, K, r, sigma + dsigma / 2, T, is_call)
m = bsm_price(S0, K, r, sigma - dsigma / 2, T, is_call)
return (p - m) / dsigma
def rho(S0, K, r=0.01, sigma=0.1, T=1, dr=0, is_call=True):
if dr == 0:
return K * T * exp(-r * T) * N(d2(S0, K, r, sigma, T)) if is_call else -K * T * exp(-r * T) * N(
-d2(S0, K, r, sigma, T))
    # finite-difference approximation (central difference)
p = bsm_price(S0, K, r + dr / 2, sigma, T, is_call)
m = bsm_price(S0, K, r - dr / 2, sigma, T, is_call)
return (p - m) / dr
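if __name__ == "__main__":
    # Minimal sanity sketch (parameter values are illustrative): the closed-form
    # delta (ds=0) and its central-difference approximation should agree closely
    # for a small bump in the underlying price.
    analytic = delta(S0=100, K=100, r=0.01, sigma=0.2, T=1)
    numeric = delta(S0=100, K=100, r=0.01, sigma=0.2, T=1, ds=1e-4)
    print("analytic delta:", analytic, "finite-difference delta:", numeric)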
|
SPLUNK_SERVICE_BLOCK = 'splunk-services'
SPLUNK_SERVICE_NAME = 'name'
SPLUNK_SERVICE_HOST = 'host'
SPLUNK_SERVICE_PORT = 'port'
SPLUNK_SERVICE_USERNAME = 'username'
SPLUNK_SERVICE_PASSWORD = 'password'
SPLUNK_QUERY_RESPONSE = 'response'
SPLUNK_QUERY_RESULTS = 'query_results'
SPLUNK_QUERY_SERVICE = 'service'
SPLUNK_QUERY_TAG = 'tag'
SPLUNK_QUERY_DATA = 'data'
SPLUNK_QUERY_METADATA = 'metadata'
SPLUNK_MONGO_SERVICE = 'splunk_mongo_service'
SPLUNK_EXECUTOR_SERVICE = 'splunk_executor_service'
SPLUNK_SERVICE_CONFIGS = [
SPLUNK_SERVICE_NAME,
SPLUNK_SERVICE_HOST,
SPLUNK_SERVICE_PORT,
SPLUNK_SERVICE_USERNAME,
SPLUNK_SERVICE_PASSWORD,
SPLUNK_MONGO_SERVICE,
SPLUNK_EXECUTOR_SERVICE,
]
SPLUNK_QUERY_BLOCK = 'splunk-query'
SPLUNK_QUERY_NAME = 'name'
SPLUNK_QUERY_SENSITIVE = 'sensitive'
SPLUNK_QUERY_QUERY = 'query'
SPLUNK_QUERY_INSTANCES = 'instances'
SPLUNK_QUERY_TAGS = 'tags'
SPLUNK_QUERY_PARAMETER_NAMES = 'parameter_names'
LAMBDAS = 'lambdas'
SPLUNK_QUERY_CONFIGS = [
SPLUNK_QUERY_SENSITIVE,
SPLUNK_QUERY_QUERY,
SPLUNK_QUERY_INSTANCES,
SPLUNK_QUERY_PARAMETER_NAMES,
LAMBDAS,
]
SPLUNK_JOB_KEYWORDS = "job_keywords"
SPLUNK_QUERYS = 'splunk_querys'
BLOCKING = 'blocking'
EARLIEST_TIME = "earliest_time"
LATEST_TIME = "latest_time"
NOW = "now"
OUTPUT_MODE = "output_mode"
SEARCH_MODE = "search_mode"
NORMAL = "normal"
CONTACT = "contact"
CONTACTS = "contacts"
INVESTIGATORS = "investigators"
DESCRIPTION = "description"
START_DATE = "start_date"
END_DATE = "end_date"
SERVICES = "services"
USER_LIST = "user_list"
GIR_CASE = "gir_case"
EVERY_N_MINUTES = 'every_n_minutes'
DEFAULT_NHOURS_VALUE = 24 * 60
DEFAULT_NMINUTES_VALUE = 60
END_MONITORING = 'end_monitoring'
INITIATOR = 'initiator'
SUCCESS = 'success'
STATUS = 'status'
PENDING = 'pending'
RUNNING = 'running'
FAILED = 'failed'
COMPLETE = 'complete'
INIT = 'init'
HAS_RESULTS = 'has_results'
RESULTS = 'results'
LAST_CHECK = 'last_check'
ID = '_id'
SPLUNK_QUERY_RUNNING = 'splunk_query_running'
PURGE_AFTER_MINUTES_VALUE = 60*12 # 12 hours
JOB_ID = 'job_id'
STORED_RESULT_ID = 'stored_result_id'
FILENAME = 'filename'
BUFFERED_CONTENT = 'buffered_content'
EARLIEST = 'earliest'
LATEST = 'latest'
SPLUNK_QUERY = 'splunk_query'
SPLUNK_THREAD = 'splunk_thread'
SERVICE_THREAD = 'service-thread'
USE_SSL = 'use_ssl'
CERT_PEM = 'cert_pem'
KEY_PEM = 'key_pem'
USE_UWSGI = 'use_uwsgi'
PURGE_AFTER_HOURS = 'purge_after_hours'
DATA_KEY = 'data_key'
RESULTS_ENC = 'results_encrypted'
RESULT_DATA = 'result_data'
RESULTS_SHA256 = 'results_hash'
JSON = 'json'
CSV = 'csv'
XML = 'xml'
ALLOWED_SPLUNK_QUERY_RESULTS_FORMAT_VALUES = [JSON, CSV, XML]
SPLUNK_QUERY_RESULTS_FORMAT = 'results_format'
SPLUNK_QUERY_RESULTS_FORMAT_VALUE = JSON
DATA = 'data'
DATA_HASH = 'data_hash'
CIPHER = 'cipher'
SPLUNK_QUERY_NAMES = 'query_names'
TIME_FMT = '%m/%d/%Y:%H:%M:%S'
ADMIN_BLOCK = 'admin'
ADMIN_TOKENS = 'tokens'
ADMIN_USERS = 'users'
ADMIN_GROUPS = 'groups'
ADMIN_USERS_CONFIGS = [
ADMIN_TOKENS,
ADMIN_USERS,
ADMIN_GROUPS
]
ACCESS_CONTROL_BLOCK = 'access-control'
MANAGED_BY = 'managed_by'
USERS = 'users'
TOKENS = 'tokens'
GROUPS = 'groups'
ACCESS_CONTROL_CONFIGS = [
MANAGED_BY,
USERS,
TOKENS,
GROUPS,
]
ALLOWED_TOKENS_BLOCK = 'allowed-tokens'
TOKEN_NAME = 'token_name'
TOKEN_VALUE = 'token_value'
TOKEN_DESCRIPTION = 'token_description'
TOKEN_USERNAME = 'token_username'
TOKEN_ACCOUNT_TYPE = 'token_account_type'
TOKEN_EMAIL = 'token_email'
ALLOWED_TOKENS = 'allowed_tokens'
TOKEN_CONFIGS = [
TOKEN_NAME,
TOKEN_VALUE,
TOKEN_DESCRIPTION,
TOKEN_USERNAME,
TOKEN_ACCOUNT_TYPE,
TOKEN_EMAIL
]
MONGO_SERVICE_BLOCK = 'mongo-services'
MONGO_HOST = 'mongo_host'
MONGO_PORT = 'mongo_port'
MONGO_DB = 'mongo_db'
MONGO_USERNAME = 'mongo_username'
MONGO_PASSWORD = 'mongo_password'
MONGO_NAME = 'mongo_name'
ACCESS_CONTROL = 'access_control'
ACCESS_CONTROL_COLLECTION = 'access_control_collection'
ACCESS_CONTROL_MANAGED_BY = 'access_control_managed_by'
ACCESS_CONTROL_USERS = 'access_control_users'
ACCESS_CONTROL_GROUPS = 'access_control_groups'
ACCESS_CONTROL_TOKENS = 'access_control_tokens'
ADMINS = 'admins'
ADMIN_COLLECTION = 'admin_collection'
ADMIN_USERS = 'admin_users'
ADMIN_GROUPS = 'admin_groups'
ADMIN_TOKENS = 'admin_tokens'
ALLOWED_TOKENS_COLLECTION = 'allowed_tokens_collection'
JOBS_COLLECTION = 'jobs_collection'
QUERY_RESULTS_COLLECTION = 'jobs_query_results'
QUERY_RESULTS_COLLECTION_VALUE = 'query_results'
USE_MONGO_ACL = 'use_mongo_acl'
JOBS_COLLECTION_VALUE = 'jobs'
MONGO_ENCRYPT_DATA = 'encrypt_data'
MONGO_DATA_KEY = 'data_key'
MONGO_CONFIGS = [
MONGO_NAME,
MONGO_HOST,
MONGO_PORT,
MONGO_DB,
MONGO_PASSWORD,
MONGO_USERNAME,
USE_MONGO_ACL,
ACCESS_CONTROL_COLLECTION,
ACCESS_CONTROL_MANAGED_BY,
ACCESS_CONTROL_USERS,
ACCESS_CONTROL_GROUPS,
ACCESS_CONTROL_TOKENS,
ADMIN_COLLECTION,
ADMIN_USERS,
ADMIN_GROUPS,
ADMIN_TOKENS,
ALLOWED_TOKENS_COLLECTION,
JOBS_COLLECTION,
USE_SSL,
MONGO_DATA_KEY
]
# LDAP SEARCH
RAW_QUERY = 'raw_query'
SEARCH_BASE = 'search_base'
ATTRIBUTES = 'attributes'
SEARCH_FILTER = 'search_filter'
CREDENTIALS = 'auth'
# AUTHENTICATE
USERNAME = 'username'
PASSWORD = 'password'
SPLUNK_QUERY_JOB = 'job'
MONGO_DOCKER_NET = 'mongo_docker_net'
MONGO_DOCKER_SUBNET = 'mongo_docker_subnet'
MONGO_DOCKER_GATEWAY = 'mongo_docker_gateway'
MONGO_DOCKER_PORT = 'mongo_docker_port'
MONGO_DOCKER_DETACH = 'mongo_docker_detach'
MONGO_DOCKER_USERNAME = 'mongo_docker_username'
MONGO_DOCKER_PASSWORD = 'mongo_docker_password'
MONGO_DOCKER_NAME = 'mongo_docker_name'
MONGO_DOCKER_ENVIRONMENT = 'mongo_docker_environment'
MONGO_DOCKER_PORTS = 'mongo_docker_ports'
MONGO_DOCKER_IMAGE = 'mongo_docker_image'
MONGO_DOCKER_IP = 'mongo_docker_ip'
MONGO_CONFIGS = [
MONGO_DATA_KEY,
MONGO_ENCRYPT_DATA,
MONGO_NAME,
MONGO_HOST,
MONGO_PORT,
MONGO_DB,
MONGO_PASSWORD,
MONGO_USERNAME,
USE_MONGO_ACL,
ACCESS_CONTROL_COLLECTION,
ACCESS_CONTROL_MANAGED_BY,
ACCESS_CONTROL_USERS,
ACCESS_CONTROL_GROUPS,
ACCESS_CONTROL_TOKENS,
ADMIN_COLLECTION,
ADMIN_USERS,
ADMIN_GROUPS,
ADMIN_TOKENS,
ALLOWED_TOKENS_COLLECTION,
JOBS_COLLECTION,
USE_SSL,
MONGO_DOCKER_NET,
MONGO_DOCKER_SUBNET,
MONGO_DOCKER_GATEWAY,
MONGO_DOCKER_PORT,
MONGO_DOCKER_DETACH,
MONGO_DOCKER_USERNAME,
MONGO_DOCKER_PASSWORD,
MONGO_DOCKER_NAME,
MONGO_DOCKER_ENVIRONMENT,
MONGO_DOCKER_PORTS,
MONGO_DOCKER_IMAGE,
MONGO_DOCKER_IP,
]
MONGO_DOCKER_NET_VALUE = "mongo-jobinfo-net"
MONGO_DOCKER_SUBNET_VALUE = "1.20.1.0/24"
MONGO_DOCKER_GATEWAY_VALUE = "1.20.1.254"
MONGO_DOCKER_PORT_VALUE = 29017
MONGO_DOCKER_DETACH_VALUE = True
MONGO_DOCKER_USERNAME_VALUE = 'mongo_test_user'
MONGO_DOCKER_PASSWORD_VALUE = 'itsasekritssssh1234'
MONGO_DOCKER_NAME_VALUE = 'splunk-mongo-service'
MONGO_INITDB_ROOT_USERNAME = 'MONGO_INITDB_ROOT_USERNAME'
MONGO_INITDB_ROOT_PASSWORD = 'MONGO_INITDB_ROOT_PASSWORD'
MONGO_DOCKER_ENVIRONMENT_VALUE = {
MONGO_INITDB_ROOT_USERNAME: MONGO_DOCKER_USERNAME_VALUE,
MONGO_INITDB_ROOT_PASSWORD: MONGO_DOCKER_PASSWORD_VALUE,
}
MONGO_TCP_PORT = "27017/tcp"
MONGO_DOCKER_PORTS_VALUE = {
MONGO_TCP_PORT : [MONGO_DOCKER_PORT_VALUE],
}
MONGO_DOCKER_IMAGE_VALUE = 'mongo:latest'
MONGO_DOCKER_IP_VALUE = '1.20.1.3'
EXECUTOR_SERVICE_BLOCK = 'executor-services'
EXECUTOR_NUM_PROCS = 'executor_num_procs'
EXECUTOR_NAME = 'executor_name'
EXECUTOR_POLL_TIME = 'executor_poll_time'
EXECUTOR_POLL_TASK = 'executor_poll_task'
EXECUTOR_START_POLLING_WITH_SVC = 'executor_start_polling_with_service'
EXECUTOR_SERVICE_START = 'executor_service_start'
EXECUTOR_POLL_ARGS = 'executor_poll_args'
EXECUTOR_POLL_KARGS = 'executor_poll_kargs'
EXECUTOR_MAX_ITERATIONS = 'executor_max_iterations'
EXECUTOR_JOB_FUNC = "executor_job_func"
EXECUTOR_JOB_CALLBACK = "executor_job_callback"
EXECUTOR_JOB_ERROR_CALLBACK = "executor_job_error_callback"
EXECUTOR_JOB_ARGS = "executor_job_args"
EXECUTOR_JOB_KARGS = "executor_job_kargs"
ERROR = 'error'
CC_JOB_LIST = 'cc_job_list'
EXECUTOR_CONFIGS = [
EXECUTOR_NAME,
EXECUTOR_NUM_PROCS,
EXECUTOR_MAX_ITERATIONS,
EXECUTOR_POLL_TIME,
EXECUTOR_START_POLLING_WITH_SVC,
EXECUTOR_SERVICE_START,
]
FN_NAME = 'name'
FN_CODE = 'code'
FN_CALLABLE = 'callable'
SPLUNK_QUERY_TOML_BLOCK = """
[splunk-query.{name}]
name = '{name}'
sensitive = {sensitive}
tags = {tags}
query = '''{query_fmt}'''
parameter_names = {parameter_names}
{lambdas_block}
"""
SPLUNK_QUERY_TOML_LAMBDAS_BLOCK = """
[splunk-query.{name}.lambdas]
{lambdas}
"""
RUN_ONCE = 'run_once'
MAX_RUNS = 'max_runs'
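# Rendering sketch for the SPLUNK_QUERY_TOML_BLOCK template above (placeholder
# values, not a real query):
#   SPLUNK_QUERY_TOML_BLOCK.format(
#       name="example_query", sensitive="false", tags="['demo']",
#       query_fmt="search index=main | head 10",
#       parameter_names="[]", lambdas_block="")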
RUN_COUNT = 'run_count'
|
from django.shortcuts import render,redirect,get_object_or_404
from django.http import HttpResponse
from orders.models import Order
from products.models import Product
from users.models import User
from django.core.paginator import Paginator
from django.contrib import messages
from django.conf import settings
from speechText.run import Run
import json
from django.views.decorators.csrf import csrf_exempt
from chatbot1.shopping_bot import ShoppingBot
from cart.cart import Cart
# from django.shortcuts import render_to_response
sb = ShoppingBot()
global x
def initial(request):
# if(request.GET.get('mybtn')):
# x = Run.main()
# print(request)
# print("OH YEAH"+x)
# return redirect('/')
context = {
'title': 'About',
# 'click':run(),
}
return render(request,'ecom/index_tobe.html',context)
def convert(s):
dict_trivial = {"zero":"0", "one":"1", "two":"2", "three":"3", "four":"4", "five":"5", "six":"6", "seven":"7", "eight":"8","nine":"9", "ten":"10"}
dict_teen = {"eleven":"11", "twelve":"12", "thirteen":"13", "fourteen":"14", "fifteen":"15","sixteen":"16", "seventeen":"17", "eighteen":"18", "nineteen":"19"}
dict_large = {"twenty one":"21", "twenty two":"22", "twenty three":"23", "twenty four":"24", "twenty five":"25", "twenty six":"26","twenty seven":"27", "twenty eight":"28", "twenty nine":"29","twenty":"20",
"thirty one":"31", "thirty two":"32", "thirty three":"33", "thirty four":"34", "thirty five":"35", "thirty six":"36","thirty seven":"37", "thirty eight":"38", "thirty nine":"39","thirty":"30",
"fourty one":"41", "fourty two":"42", "fourty three":"43", "fourty four":"44", "fourty five":"45", "fourty six":"46","fourty seven":"47", "fourty eight":"48", "fourty nine":"49","fourty":"40",
"fifty one":"51", "fifty two":"52", "fifty three":"53", "fifty four":"54", "fifty five":"55", "fifty six":"56","fifty seven":"57", "fifty eight":"58", "fifty nine":"59","fifty":"50",
"sixty one":"61", "sixty two":"62", "sixty three":"63", "sixty four":"64", "sixty five":"65", "sixty six":"66","sixty seven":"67", "sixty eight":"68", "sixty nine":"69","sixty":"60",
"seventy one":"71", "seventy two":"72", "seventy three":"73", "seventy four":"74", "seventy five":"75", "seventy six":"76","seventy seven":"77", "seventy eight":"78", "seventy nine":"79","seventy":"70",
"eighty one":"81", "eighty two":"82", "eighty three":"83", "eighty four":"84", "eighty five":"85", "eighty six":"86","eighty seven":"87", "eighty eight":"88", "eighty nine":"89","eighty":"80",
"ninety one":"91", "ninety two":"92", "ninety three":"93", "ninety four":"94", "ninety five":"95", "ninety six":"96","ninety seven":"97", "ninety eight":"98", "ninety nine":"99","ninety":"90",
}
list = {"twenty":"1" , "thirty":"1" , "fourty":"1" , "fifty":"1","sixty":"1","seventy":"1","eighty":"1","ninety":"1"}
flag = 0
for i in list:
print(i)
x = s.find(i)
print(x)
if(s.find(i)>=0):
print(i)
            for k in dict_large:
                # str.find returns -1 when the phrase is absent; check >= 0 so a
                # match at index 0 is still replaced.
                if(s.find(k)>=0):
                    s = s.replace(k,dict_large[k])
flag = 1
print("FLAg")
print(flag)
flag1 = 0
if(flag==0):
# print("HIII")
for j in dict_teen:
if(s.find(j)>=0):
s = s.replace(j,dict_teen[j])
print(j)
flag1 = 1
print("FLAG 1")
print(flag1)
if(flag1==0):
# print("HIII")
        for x in dict_trivial:
            # Same check as above: find() returns -1 when absent and 0 on a
            # match at the start of the string.
            if(s.find(x)>=0):
                s = s.replace(x,dict_trivial[x])
return s
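# Illustrative behaviour of convert(): "add twenty five apples" becomes
# "add 25 apples", and "buy nine eggs" becomes "buy 9 eggs".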
# @csrf_exempt
# def myajaxtestview(request):
# # print(x + "OH YEAH")
# # print('myajaxtestview')
# # if(request.POST['text']):
# # dict = {"one":"1" , "two":"2"}
# x = Run.main()
# x = convert(x)
# print(x)
# print(request)
# # print("OH YEAH"+x)
# s = request
# if(sb.temp > 0 or s == "checkout"):
# # print("Checking out")
# sb.temp = sb.temp + 1
# if(sb.temp == 1):
# # print("Showing list")
# resp = sb.handle("show list")
# resp,sb.other = resp
# resp = "Processing.... \nEnter your phone number:"
# elif(sb.temp == 2):
# sb.phone = int(s)
# resp = "Processing.... \nEnter your address:"
# else:
# sb.address = s
# for product,quantity in sb.other:
# p = Product.objects.get(slug=product)
# u = User.objects.get(username=request.user.username)
# o = Order(item=product,quantity=quantity,price=p.price,total=(int(quantity)*int(p.price)),name=request.user.username,phone=sb.phone,email=u.email,address=sb.address,user_id=u.id)
# o.save()
# resp = "Adding... \nCheck Dashboard"
# sb.handle("clear list")
# sb.temp = 0
# # print(other)
# data = json.dumps({
# 'response': resp,
# 'user': x,
# })
# return HttpResponse(data)
# resp = sb.handle(x)
# if(resp == None):
# # print("Added")
# resp = "Done..."
# else:
# print(resp)
# data = json.dumps({
# 'response': resp,
# 'user': x,
# })
# # return redirect('/')
# # print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
# # print(request.POST['text'])
# # print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
# # s = str(request.POST['text'])
# # resp = sb.handle(s)
# # # print("*********************************************************************")
# # if(resp == None):
# # # print("Added")
# # resp = "Done..."
# # else:
# # print(resp)
# # print("*********************************************************************")
# return HttpResponse(data)
@csrf_exempt
def myajaxtestview(request):
# print(x + "OH YEAH")
# print('myajaxtestview')
# if(request.POST['text']):
# dict = {"one":"1" , "two":"2"}
print("hii")
x = Run.main()
x = convert(x)
print(x)
print(request)
# print("OH YEAH"+x)
s = request
if(sb.temp > 0 or s == "checkout"):
# print("Checking out")
sb.temp = sb.temp + 1
if(sb.temp == 1):
# print("Showing list")
resp = sb.handle("show list")
resp,sb.other = resp
sb.address = s
for product,quantity in sb.other:
print("YO")
p1 = Product.objects.filter(slug = product)[0]
p = Product.objects.get(productid=p1.productid)
cart = Cart(request)
cart.add(product = p , quantity = quantity)
resp = "Adding... \nCheck CART"
sb.handle("clear list")
sb.temp = 0
# print(other)
data = json.dumps({
'response': resp,
'user': x,
})
return HttpResponse(data)
resp = sb.handle(x)
if(resp == None):
# print("Added")
resp = "Done..."
else:
print(resp)
data = json.dumps({
'response': resp,
'user': x,
})
# return redirect('/')
# print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
# print(request.POST['text'])
# print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
# s = str(request.POST['text'])
# resp = sb.handle(s)
# # print("***********************")
# if(resp == None):
# # print("Added")
# resp = "Done..."
# else:
# print(resp)
# print("***********************")
return HttpResponse(data)
@csrf_exempt
def myajaxtestviewtext(request):
print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
print(request.POST['text'])
print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
s = str(request.POST['text'])
sb.temp = 0
if(sb.temp > 0 or s == "checkout"):
# print("Checking out")
sb.temp = sb.temp + 1
if(sb.temp == 1):
resp = sb.handle("show list")
resp , sb.other = resp
sb.address = s
flag=0
for product,quantity in sb.other:
print("YO")
p1 = Product.objects.filter(slug = product)[0]
p = Product.objects.get(productid=p1.productid)
cart = Cart(request)
qty = p.quantity
if quantity>qty:
resp = "Number of Items exceeded the stock..\nPlease Clear the list and try again."
return HttpResponse(resp)
p.quantity = qty-quantity
p.save()
cart.add(product = p , quantity = quantity)
# u = User.objects.get(username=request.user.username)
# o = Order(item=product,quantity=quantity,price=p.price,total=(int(quantity)*int(p.price)),name=request.user.username,phone=sb.phone,email=u.email,address=sb.address,user_id=u.id)
# o.save()
resp = "Adding... \nCheck CART"
sb.handle("clear list")
sb.temp = 0
# print(other)
return HttpResponse(resp)
if(s == "clear cart" or s=="empty cart"):
cart = Cart(request)
cart.clear()
resp = "CLEARING CART"
# print(other)
return HttpResponse(resp)
resp = sb.handle(s)
# print(resp)
if type(resp) is tuple:
resp,other = resp
print(resp)
# print("***********************")
if(resp == "None"):
resp = "Done..."
else:
print(resp)
# print("***********************")
return HttpResponse(resp)
def ordering(request, s):
    # Checkout helper: caller supplies the Django request and the user message s.
if(sb.temp == 1):
resp = sb.handle("show list")
resp,sb.other = resp
resp = "Processing.... \nEnter your phone number:"
elif(sb.temp == 2):
sb.phone = int(s)
resp = "Processing.... \nEnter your address:"
else:
sb.address = s
for product,quantity in sb.other:
p = Product.objects.get(slug=product)
u = User.objects.get(username=request.user.username)
o = Order(item=product,quantity=quantity,price=p.price,total=(int(quantity)*int(p.price)),name=request.user.username,phone=sb.phone,email=u.email,address=sb.address,user_id=u.id)
o.save()
resp = "Adding... \nCheck Dashboard"
sb.handle("clear list")
sb.temp = 0
# print(other)
return HttpResponse(resp)
@csrf_exempt
def search_titles(request):
if request.method == "POST":
search_text = request.POST['search_text']
else:
search_text = ''
articles = Product.objects.filter(title__contains = search_text , title__isnull = False)
return render(None,'ecom/search.html',{"articles":articles})
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
DEBUG = False
TESTING = False
CSRF_ENABLED = True
JWT_ALGORITHM = 'HS256'
JWT_SECRET_KEY = 'You and me knows very well it is secret'
JWT_BLACKLIST_ENABLED = True
JWT_BLACKLIST_TOKEN_CHECKS = ['access', 'refresh']
PASSWORD = os.getenv('APP_PASSWORD')
HOST = os.getenv('APP_HOST')
USER = os.getenv('APP_USER')
class ProductionConfig(Config):
DEBUG = False
DATABASE_NAME = os.getenv('DATABASE_NAME')
PASSWORD = os.getenv('PASSWORD')
HOST = os.getenv('HOST')
USER = os.getenv('USER')
DATABASE_URL = os.getenv('DATABASE_URL')
class DevelopmentConfig(Config):
DEVELOPMENT = True
DEBUG = True
DATABASE_NAME = os.getenv('APP_DATABASE')
class TestingConfig(Config):
TESTING = False
DEBUG = True
DATABASE_NAME = os.getenv('TEST_DATABASE')
configuration = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig
}
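# Usage sketch (hypothetical app factory, not part of this module):
#   from flask import Flask
#   app = Flask(__name__)
#   app.config.from_object(configuration['development'])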
|
"""
This provides a small set of effect handlers in NumPyro that are modeled
after Pyro's `poutine <http://docs.pyro.ai/en/stable/poutine.html>`_ module.
For a tutorial on effect handlers more generally, readers are encouraged to
read `Poutine: A Guide to Programming with Effect Handlers in Pyro
<http://pyro.ai/examples/effect_handlers.html>`_. These simple effect handlers
can be composed together or new ones added to enable implementation of custom
inference utilities and algorithms.
**Example**
As an example, we are using :class:`~numpyro.handlers.seed`, :class:`~numpyro.handlers.trace`
and :class:`~numpyro.handlers.substitute` handlers to define the `log_likelihood` function below.
We first create a logistic regression model and sample from the posterior distribution over
the regression parameters using :func:`~numpyro.mcmc.mcmc`. The `log_likelihood` function
uses effect handlers to run the model by substituting sample sites with values from the posterior
distribution and computes the log density for a single data point. The `expected_log_likelihood`
function computes the log likelihood for each draw from the joint posterior and aggregates the
results, but does so by using JAX's auto-vectorize transform called `vmap` so that we do not
need to loop over all the data points.
.. testsetup::
import jax.numpy as np
from jax import random, vmap
from jax.scipy.special import logsumexp
import numpyro.distributions as dist
from numpyro.handlers import sample, seed, substitute, trace
from numpyro.hmc_util import initialize_model
from numpyro.mcmc import mcmc
.. doctest::
>>> N, D = 3000, 3
>>> def logistic_regression(data, labels):
... coefs = sample('coefs', dist.Normal(np.zeros(D), np.ones(D)))
... intercept = sample('intercept', dist.Normal(0., 10.))
... logits = np.sum(coefs * data + intercept, axis=-1)
... return sample('obs', dist.Bernoulli(logits=logits), obs=labels)
>>> data = random.normal(random.PRNGKey(0), (N, D))
>>> true_coefs = np.arange(1., D + 1.)
>>> logits = np.sum(true_coefs * data, axis=-1)
>>> labels = dist.Bernoulli(logits=logits).sample(random.PRNGKey(1))
>>> init_params, potential_fn, constrain_fn = initialize_model(random.PRNGKey(2), logistic_regression, data, labels)
>>> num_warmup, num_samples = 1000, 1000
>>> samples = mcmc(num_warmup, num_samples, init_params,
... potential_fn=potential_fn,
... constrain_fn=constrain_fn) # doctest: +SKIP
warmup: 100%|██████████| 1000/1000 [00:09<00:00, 109.40it/s, 1 steps of size 5.83e-01. acc. prob=0.79]
sample: 100%|██████████| 1000/1000 [00:00<00:00, 1252.39it/s, 1 steps of size 5.83e-01. acc. prob=0.85]
mean sd 5.5% 94.5% n_eff Rhat
coefs[0] 0.96 0.07 0.85 1.07 455.35 1.01
coefs[1] 2.05 0.09 1.91 2.20 332.00 1.01
coefs[2] 3.18 0.13 2.96 3.37 320.27 1.00
intercept -0.03 0.02 -0.06 0.00 402.53 1.00
>>> def log_likelihood(rng, params, model, *args, **kwargs):
... model = substitute(seed(model, rng), params)
... model_trace = trace(model).get_trace(*args, **kwargs)
... obs_node = model_trace['obs']
... return np.sum(obs_node['fn'].log_prob(obs_node['value']))
>>> def expected_log_likelihood(rng, params, model, *args, **kwargs):
... n = list(params.values())[0].shape[0]
... log_lk_fn = vmap(lambda rng, params: log_likelihood(rng, params, model, *args, **kwargs))
... log_lk_vals = log_lk_fn(random.split(rng, n), params)
... return logsumexp(log_lk_vals) - np.log(n)
>>> print(expected_log_likelihood(random.PRNGKey(2), samples, logistic_regression, data, labels)) # doctest: +SKIP
-876.172
"""
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
from jax import random
_PYRO_STACK = []
class Messenger(object):
def __init__(self, fn=None):
self.fn = fn
def __enter__(self):
_PYRO_STACK.append(self)
def __exit__(self, *args, **kwargs):
assert _PYRO_STACK[-1] is self
_PYRO_STACK.pop()
def process_message(self, msg):
pass
def postprocess_message(self, msg):
pass
def __call__(self, *args, **kwargs):
with self:
return self.fn(*args, **kwargs)
class trace(Messenger):
"""
Returns a handler that records the inputs and outputs at primitive calls
inside `fn`.
**Example**
.. testsetup::
from jax import random
import numpyro.distributions as dist
from numpyro.handlers import sample, seed, trace
import pprint as pp
.. doctest::
>>> def model():
... sample('a', dist.Normal(0., 1.))
>>> exec_trace = trace(seed(model, random.PRNGKey(0))).get_trace()
>>> pp.pprint(exec_trace) # doctest: +SKIP
OrderedDict([('a',
{'args': (),
'fn': <numpyro.distributions.continuous.Normal object at 0x7f9e689b1eb8>,
'is_observed': False,
'kwargs': {'random_state': DeviceArray([0, 0], dtype=uint32)},
'name': 'a',
'type': 'sample',
'value': DeviceArray(-0.20584235, dtype=float32)})])
"""
def __enter__(self):
super(trace, self).__enter__()
self.trace = OrderedDict()
return self.trace
def postprocess_message(self, msg):
assert msg['name'] not in self.trace, 'all sites must have unique names'
self.trace[msg['name']] = msg.copy()
def get_trace(self, *args, **kwargs):
"""
Run the wrapped callable and return the recorded trace.
:param `*args`: arguments to the callable.
:param `**kwargs`: keyword arguments to the callable.
:return: `OrderedDict` containing the execution trace.
"""
self(*args, **kwargs)
return self.trace
class replay(Messenger):
"""
Given a callable `fn` and an execution trace `guide_trace`,
return a callable which substitutes `sample` calls in `fn` with
values from the corresponding site names in `guide_trace`.
:param fn: Python callable with NumPyro primitives.
:param guide_trace: an OrderedDict containing execution metadata.
**Example**
.. testsetup::
from jax import random
import numpyro.distributions as dist
from numpyro.handlers import replay, sample, seed, trace
.. doctest::
>>> def model():
... sample('a', dist.Normal(0., 1.))
>>> exec_trace = trace(seed(model, random.PRNGKey(0))).get_trace()
>>> print(exec_trace['a']['value']) # doctest: +SKIP
-0.20584235
>>> replayed_trace = trace(replay(model, exec_trace)).get_trace()
>>> print(exec_trace['a']['value']) # doctest: +SKIP
-0.20584235
>>> assert replayed_trace['a']['value'] == exec_trace['a']['value']
"""
def __init__(self, fn, guide_trace):
self.guide_trace = guide_trace
super(replay, self).__init__(fn)
def process_message(self, msg):
if msg['name'] in self.guide_trace:
msg['value'] = self.guide_trace[msg['name']]['value']
class block(Messenger):
"""
Given a callable `fn`, return another callable that selectively hides
primitive sites where `hide_fn` returns True from other effect handlers
on the stack.
:param fn: Python callable with NumPyro primitives.
:param hide_fn: function which when given a dictionary containing
site-level metadata returns whether it should be blocked.
**Example:**
.. testsetup::
from jax import random
from numpyro.handlers import block, sample, seed, trace
import numpyro.distributions as dist
.. doctest::
>>> def model():
... a = sample('a', dist.Normal(0., 1.))
... return sample('b', dist.Normal(a, 1.))
>>> model = seed(model, random.PRNGKey(0))
>>> block_all = block(model)
>>> block_a = block(model, lambda site: site['name'] == 'a')
>>> trace_block_all = trace(block_all).get_trace()
>>> assert not {'a', 'b'}.intersection(trace_block_all.keys())
>>> trace_block_a = trace(block_a).get_trace()
>>> assert 'a' not in trace_block_a
>>> assert 'b' in trace_block_a
"""
def __init__(self, fn=None, hide_fn=lambda msg: True):
self.hide_fn = hide_fn
super(block, self).__init__(fn)
def process_message(self, msg):
if self.hide_fn(msg):
msg['stop'] = True
class scale(Messenger):
"""
This messenger rescales the log probability score.
This is typically used for data subsampling or for stratified sampling of data
(e.g. in fraud detection where negatives vastly outnumber positives).
:param float scale_factor: a positive scaling factor
"""
def __init__(self, scale_factor):
if scale_factor <= 0:
raise ValueError("scale factor should be a positive number.")
self.scale = scale_factor
super(scale, self).__init__()
def process_message(self, msg):
msg["scale"] = self.scale * msg.get('scale', 1)
class seed(Messenger):
"""
JAX uses a functional pseudo random number generator that requires passing
in a seed :func:`~jax.random.PRNGKey` to every stochastic function. The
`seed` handler allows us to initially seed a stochastic function with a
:func:`~jax.random.PRNGKey`. Every call to the :func:`~numpyro.handlers.sample`
primitive inside the function results in a splitting of this initial seed
so that we use a fresh seed for each subsequent call without having to
explicitly pass in a `PRNGKey` to each `sample` call.
"""
def __init__(self, fn, rng):
self.rng = rng
super(seed, self).__init__(fn)
def process_message(self, msg):
if msg['type'] == 'sample':
msg['kwargs']['random_state'] = self.rng
self.rng, = random.split(self.rng, 1)
class substitute(Messenger):
"""
Given a callable `fn` and a dict `param_map` keyed by site names,
return a callable which substitutes all primitive calls in `fn` with
values from `param_map` whose key matches the site name. If the
site name is not present in `param_map`, there is no side effect.
:param fn: Python callable with NumPyro primitives.
:param dict param_map: dictionary of `numpy.ndarray` values keyed by
site names.
**Example:**
.. testsetup::
from jax import random
from numpyro.handlers import sample, seed, substitute, trace
import numpyro.distributions as dist
.. doctest::
>>> def model():
... sample('a', dist.Normal(0., 1.))
>>> model = seed(model, random.PRNGKey(0))
>>> exec_trace = trace(substitute(model, {'a': -1})).get_trace()
>>> assert exec_trace['a']['value'] == -1
"""
def __init__(self, fn=None, param_map=None):
self.param_map = param_map
super(substitute, self).__init__(fn)
def process_message(self, msg):
if msg['name'] in self.param_map:
msg['value'] = self.param_map[msg['name']]
def apply_stack(msg):
pointer = 0
for pointer, handler in enumerate(reversed(_PYRO_STACK)):
handler.process_message(msg)
# When a Messenger sets the "stop" field of a message,
# it prevents any Messengers above it on the stack from being applied.
if msg.get("stop"):
break
if msg['value'] is None:
msg['value'] = msg['fn'](*msg['args'], **msg['kwargs'])
# A Messenger that sets msg["stop"] == True also prevents application
# of postprocess_message by Messengers above it on the stack
# via the pointer variable from the process_message loop
for handler in _PYRO_STACK[-pointer-1:]:
handler.postprocess_message(msg)
return msg
def sample(name, fn, obs=None):
"""
Returns a random sample from the stochastic function `fn`. This can have
additional side effects when wrapped inside effect handlers like
:class:`~numpyro.handlers.substitute`.
:param str name: name of the sample site
:param fn: Python callable
:param numpy.ndarray obs: observed value
:return: sample from the stochastic `fn`.
"""
# if there are no active Messengers, we just draw a sample and return it as expected:
if not _PYRO_STACK:
return fn()
# Otherwise, we initialize a message...
initial_msg = {
'type': 'sample',
'name': name,
'fn': fn,
'args': (),
'kwargs': {},
'value': obs,
'is_observed': obs is not None,
}
# ...and use apply_stack to send it to the Messengers
msg = apply_stack(initial_msg)
return msg['value']
def identity(x, *args, **kwargs):
return x
def param(name, init_value, **kwargs):
"""
Annotate the given site as an optimizable parameter for use with
:mod:`jax.experimental.optimizers`. For an example of how `param` statements
can be used in inference algorithms, refer to :func:`~numpyro.svi.svi`.
:param str name: name of site.
:param numpy.ndarray init_value: initial value specified by the user. Note that
the onus of using this to initialize the optimizer is on the user /
inference algorithm, since there is no global parameter store in
NumPyro.
:return: value for the parameter. Unless wrapped inside a
handler like :class:`~numpyro.handlers.substitute`, this will simply
return the initial value.
"""
# if there are no active Messengers, we just draw a sample and return it as expected:
if not _PYRO_STACK:
return init_value
# Otherwise, we initialize a message...
initial_msg = {
'type': 'param',
'name': name,
'fn': identity,
'args': (init_value,),
'kwargs': kwargs,
'value': None,
}
# ...and use apply_stack to send it to the Messengers
msg = apply_stack(initial_msg)
return msg['value']
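# Usage sketch for handlers without inline doctests above (illustrative only;
# `dist` refers to numpyro.distributions as in the module-level example):
#
#   rng = random.PRNGKey(0)
#   def model():
#       with scale(10.):  # up-weight this likelihood term
#           return sample('obs', dist.Normal(0., 1.), obs=0.5)
#   tr = trace(seed(model, rng)).get_trace()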
|
# Copyright (c) 2011-2016 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tempfile import NamedTemporaryFile
from testlunr.functional import Struct
from contextlib import contextmanager
from webob import Request
import unittest
import urllib
import json
import os
import logging
from StringIO import StringIO
from lunr.common.logger import LunrLoggerAdapter, local
@contextmanager
def temp_disk_file(body=''):
path = None
try:
with NamedTemporaryFile('w', delete=False) as f:
path = f.name
f.write(body)
yield path
finally:
if path:
os.unlink(path)
@contextmanager
def patch(target, attr, new):
"""
Run in context with patched attribute on target.
:param target: real object to patch
:param attr: name of attribute to patch, a string
:param new: mock or stub to use in place
"""
original = getattr(target, attr)
setattr(target, attr, new)
try:
yield
finally:
setattr(target, attr, original)
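# Usage sketch (hypothetical target): temporarily stub out an attribute, e.g.
#   with patch(os, 'unlink', lambda path: None):
#       ...  # code under test that would otherwise delete files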
class WsgiTestBase(unittest.TestCase):
def request(self, uri, method='GET', params=None):
encoded = urllib.urlencode(params or {})
body = ''
req = Request.blank(uri)
if method in ('PUT', 'POST'):
body = encoded
req.content_type = 'application/x-www-form-urlencoded'
else:
uri = "%s?%s" % (uri, encoded)
req.method = method
req.body = body
resp = self.app(req)
return Struct(code=resp.status_int, body=json.loads(resp.body))
class MockLogger(object):
def __init__(self):
self.local = local
self.log_file = StringIO()
self.logger = None
def get_logger(self, name):
if not self.logger:
logger = logging.getLogger(name)
            logger.setLevel(1)  # capture everything
handler = logging.StreamHandler(self.log_file)
handler.setFormatter(
logging.Formatter('%(name)s:%(levelname)s:%(message)s'))
logger.addHandler(handler)
self.logger = LunrLoggerAdapter(logger)
return self.logger
def pop_log_messages(self):
rv = self.log_file.getvalue()
self.log_file.seek(0)
self.log_file.truncate()
return rv
def __getattribute__(self, name):
try:
return object.__getattribute__(self, name)
except AttributeError:
# prevent recursive lookup
logger = object.__getattribute__(self, 'logger')
if hasattr(logger, name):
return getattr(logger, name)
raise
class MockResourceLock(object):
def acquire(self, info):
pass
def remove(self):
pass
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, trace):
pass
|
import gym
from core.config import BaseConfig
class ClassicControlConfig(BaseConfig):
def __init__(self):
super(ClassicControlConfig, self).__init__(max_env_steps=int(2e5),
start_step=int(1e4),
lr=1e-3,
replay_memory_capacity=int(2e5),
fixed_action_repeat=1,
test_interval_steps=2000)
def new_game(self, seed=None):
env = gym.make(self.env_name)
if seed is not None:
env.seed(seed)
return env
run_config = ClassicControlConfig()
|
from contextlib import contextmanager
import nose
from pysics import *
@contextmanager
def manage_world():
world = World((0, 0), True)
yield world
@contextmanager
def manage_static_body(world, *args, **kwargs):
body = world.create_static_body(*args, **kwargs)
yield body
world.destroy_body(body)
@contextmanager
def manage_kinematic_body(world, *args, **kwargs):
body = world.create_kinematic_body(*args, **kwargs)
yield body
world.destroy_body(body)
@contextmanager
def manage_dynamic_body(world, *args, **kwargs):
body = world.create_dynamic_body(*args, **kwargs)
yield body
world.destroy_body(body)
@contextmanager
def manage_circle_fixture(body, *args, **kwargs):
fixture = body.create_circle_fixture(*args, **kwargs)
yield fixture
body.destroy_fixture(fixture)
@contextmanager
def manage_edge_fixture(body, *args, **kwargs):
fixture = body.create_edge_fixture(*args, **kwargs)
yield fixture
body.destroy_fixture(fixture)
@contextmanager
def manage_polygon_fixture(body, *args, **kwargs):
fixture = body.create_polygon_fixture(*args, **kwargs)
yield fixture
body.destroy_fixture(fixture)
@contextmanager
def manage_loop_fixture(body, *args, **kwargs):
vertex_array = VertexArray()
fixture = body.create_loop_fixture(vertex_array, *args, **kwargs)
yield fixture
body.destroy_fixture(fixture)
@contextmanager
def manage_revolute_joint(world, *args, **kwargs):
revolute_joint = world.create_revolute_joint(*args, **kwargs)
yield revolute_joint
world.destroy_joint(revolute_joint)
@contextmanager
def manage_prismatic_joint(world, *args, **kwargs):
prismatic_joint = world.create_prismatic_joint(*args, **kwargs)
yield prismatic_joint
world.destroy_joint(prismatic_joint)
@contextmanager
def manage_distance_joint(world, *args, **kwargs):
distance_joint = world.create_distance_joint(*args, **kwargs)
yield distance_joint
world.destroy_joint(distance_joint)
def test_exercise():
with manage_world() as world:
with manage_dynamic_body(world) as body:
with manage_circle_fixture(body) as fixture:
pass
with manage_edge_fixture(body) as fixture:
pass
with manage_polygon_fixture(body) as fixture:
pass
with manage_loop_fixture(body) as fixture:
pass
def test_create_revolute_joint():
with manage_world() as world:
with manage_static_body(world) as body_a:
with manage_dynamic_body(world) as body_b:
with manage_revolute_joint(world, body_a, body_b, (0, 0)) as revolute_joint:
pass
def test_create_prismatic_joint():
with manage_world() as world:
with manage_static_body(world) as body_a:
with manage_dynamic_body(world) as body_b:
with manage_prismatic_joint(world, body_a, body_b, (0, 0), (0, 0)) as prismatic_joint:
pass
def test_create_distance_joint():
with manage_world() as world:
with manage_static_body(world) as body_a:
with manage_dynamic_body(world) as body_b:
with manage_distance_joint(world, body_a, body_b, (0, 0), (0, 0)) as distance_joint:
pass
def _test_identity(a, b):
assert a is not b
assert id(a) != id(b)
assert a == b
assert not a != b
assert hash(a) == hash(b)
def test_body_identity():
with manage_world() as world:
with manage_dynamic_body(world):
_test_identity(world.bodies[0], world.bodies[0])
def test_fixture_identity():
with manage_world() as world:
with manage_dynamic_body(world) as body:
with manage_circle_fixture(body):
_test_identity(body.fixtures[0], body.fixtures[0])
def test_joint_identity():
with manage_world() as world:
with manage_static_body(world) as body_a:
with manage_dynamic_body(world) as body_b:
with manage_revolute_joint(world, body_a, body_b, (0, 0)):
_test_identity(body_a.joints[0], body_b.joints[0])
if __name__ == '__main__':
nose.main()
|
# pylint: disable=unused-argument
import glob
from dataclasses import asdict, dataclass
from datetime import datetime
from typing import List, Optional
import aiofiles
from redis import Redis
from sanic import Blueprint, Sanic, exceptions
from sanic.response import json
from sanic_ext import openapi
from sanic_jwt import protected
from labfunctions.conf.server_settings import settings
from labfunctions.core.core import nb_job_executor
from labfunctions.core.entities import (
ExecutionResult,
HistoryRequest,
NBTask,
ProjectData,
)
from labfunctions.core.managers import history, projects
from labfunctions.core.registers import register_history_db
from labfunctions.core.scheduler_deprecated import (
QueueExecutor,
SchedulerExecutor,
scheduler_dispatcher,
)
from labfunctions.defaults import API_VERSION
from labfunctions.utils import (
get_query_param,
parse_page_limit,
run_async,
secure_filename,
)
rqjobs_bp = Blueprint("rqjobs", url_prefix="rqjobs", version=API_VERSION)
def _get_scheduler(qname="default") -> SchedulerExecutor:
current_app = Sanic.get_app("labfunctions")
r = current_app.ctx.rq_redis
return SchedulerExecutor(r, qname=qname)
def _get_q_executor(qname="default") -> QueueExecutor:
current_app = Sanic.get_app("labfunctions")
r = current_app.ctx.rq_redis
return QueueExecutor(r, qname=qname)
def list_workflows():
notebooks = []
files = glob(f"{settings.BASE_PATH}/{settings.NB_WORKFLOWS}*")
for x in files:
if ".ipynb" or ".py" in x:
notebooks.append(x.split("/")[-1].split(".")[0])
return notebooks
@dataclass
class JobResponse:
wfid: str
workflow: NBTask
@dataclass
class JobDetail:
wfid: str
func_name: str
# workflow: NBTask
created_at: str
@rqjobs_bp.get("/")
@openapi.response(200, List[JobDetail], "Task Scheduled")
@protected()
def list_scheduled_redis(request):
"""
List the jobs scheduled in the scheduler
"""
# pylint: disable=unused-argument
scheduler = _get_scheduler()
jobs = scheduler.list_jobs()
return json(jobs, 200)
@rqjobs_bp.delete("/_cancel/<wfid>")
@openapi.parameter("wfid", str, "path")
@protected()
async def cancel_job(request, wfid):
"""delete a scheduler job from redis"""
# pylint: disable=unused-argument
scheduler = _get_scheduler()
await run_async(scheduler.cancel_job, wfid)
return json(dict(msg="done"), 200)
@rqjobs_bp.delete("/_cancel_all")
@protected()
async def schedule_cancel_all(request):
"""Cancel all the jobs in the queue"""
# pylint: disable=unused-argument
scheduler = _get_scheduler()
await run_async(scheduler.cancel_all)
return json(dict(msg="done"), 200)
@rqjobs_bp.get("/<wfid>")
@openapi.parameter("wfid", str, "path")
@protected()
def get_job_result(request, wfid):
"""Get job result from the queue"""
Q = _get_q_executor()
job = Q.fetch_job(wfid)
result = job.result
if result:
result = asdict(result)
return json(
dict(
wfid=job.id,
status=job.get_status(),
result=result,
position=job.get_position(),
)
)
@rqjobs_bp.get("/failed")
@protected()
def get_failed_jobs(request):
"""Get jobs failed in RQ"""
Q = _get_q_executor()
jobs = Q.get_jobs_ids("failed")
return json(dict(rows=jobs, total=len(jobs)))
@rqjobs_bp.delete("/failed")
@openapi.parameter("remove", bool, "query")
@protected()
def delete_failed_jobs(request):
"""Remove failed jobs from the queue"""
to_remove = get_query_param(request, "remove", False)
Q = _get_q_executor()
jobs = Q.remove_jobs("failed", to_remove)
return json(dict(rows=jobs, total=len(jobs)))
@rqjobs_bp.get("/running")
@protected()
def get_running_jobs(request):
"""Get jobs Running"""
Q = _get_q_executor()
jobs = Q.get_jobs_ids("started")
return json(dict(rows=jobs, total=len(jobs)))
|
from enum import Enum
from collections import namedtuple
def take_while(predicate, iterator):
for i in iterator:
yield i
if not predicate(i):
break
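# Note: take_while yields elements up to and *including* the first one that
# fails the predicate, e.g. (illustrative):
#   list(take_while(lambda c: c != ' ', iter("ab cd")))  ->  ['a', 'b', ' ']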
class TokenType(Enum):
TEXT = 0
VARIABLE = 1
INPUT = 2
CONDITIONAL = 3
BLOCK = 4
Token = namedtuple('Token', ['type', 'data'])
def tokenize(text: str) -> list:
text_iter = iter(text)
tokens = []
for char in text_iter:
if char.isspace():
# whitespace is skipped one character at a time by the outer loop; consuming
# it with take_while() here would also swallow the first non-space character,
# because take_while yields the element that fails the predicate before stopping
continue
elif char == '*':
data = list(take_while(lambda c: not c.isspace(), text_iter))
tokens.append(Token(TokenType.VARIABLE, ''.join(data[:-1]))) # cut off end symbol
tokens.append(Token(TokenType.TEXT, data[-1])) # and add it as separate token
elif char == '#':
data = list(take_while(lambda c: c != '\n', text_iter))
for d in ''.join(data).split():
tokens.append(Token(TokenType.INPUT, ''.join(d)))
elif char == '{':
data = list(char)
data.extend(take_while(lambda c: c != '}', text_iter))
if data[-1] != '}':
raise RuntimeError('Closing \'}\' is missing')
tokens.append(Token(TokenType.BLOCK, ''.join(data)))
elif char == '|':
data = list(char)
data.extend(take_while(lambda c: not c.isspace(), text_iter))
tokens.append(Token(TokenType.CONDITIONAL, ''.join(data)))
else:
data = list(char)
data.extend(take_while(lambda c: not c.isspace(), text_iter))
tokens.append(Token(TokenType.TEXT, ''.join(data)))
return tokens
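# Usage sketch (the sample string below is illustrative, not taken from the
# original project): tokenize produces a mix of TEXT, VARIABLE, BLOCK,
# CONDITIONAL and INPUT tokens depending on the leading character of each word.
#   for token in tokenize('see *price* {a block} |flag # field_a field_b'):
#       print(token)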
|
from elasticsearch import Elasticsearch
es = Elasticsearch()
# Get all the documents
doc={"query":{"match_all":{}}}
res=es.search(index="users",body=doc,size=10)
print(res['hits']['hits'][9]['_source'])
# Get Ronald Goodman
doc={"query":{"match":{"name":"Ronald Goodman"}}}
res=es.search(index="users",body=doc,size=10)
print(res['hits']['hits'][0]['_source'])
# Get Ronald using Lucene syntax
res=es.search(index="users",q="name:Ronald Goodman",size=10)
print(res['hits']['hits'][0]['_source'])
# Get City Jamesberg - Returns Jamesberg and Lake Jamesberg
doc={"query":{"match":{"city":"Jamesberg"}}}
res=es.search(index="users",body=doc,size=10)
print(res['hits']['hits'])
# Get Jamesberg and filter on zip so Lake Jamesberg is removed
doc={"query":{"bool":{"must":{"match":{"city":"Jamesberg"}},"filter":{"term":{"zip":"63792"}}}}}
res=es.search(index="users",body=doc,size=10)
print(res['hits']['hits'])
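# The same city+zip filter can also be expressed in Lucene query-string syntax
# (a sketch, assuming the same local "users" index as above)
res=es.search(index="users",q="city:Jamesberg AND zip:63792",size=10)
print(res['hits']['hits'])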
|
'''
Knowledge extractor module.
'''
import artm
import inspect
import operator
import os
import numpy as np
import pandas as pd
import sys
from sklearn.manifold import MDS
from sklearn.metrics import pairwise_distances
from knowledge_extractor.utils import text_prepare
class TopicModel(object):
def __init__(self, search_guid, documents, analyze_full_doc=True, num_of_topics=10):
self.search_guid = search_guid
self.documents = documents
self.analyze_full_doc = analyze_full_doc
self.num_of_topics = num_of_topics
self.training_done = False
def is_ready(self):
return self.training_done
def get_documents(self):
return self.documents
def train(self):
vocabulary_file = self._prepare_texts_full() if self.analyze_full_doc == True else self._prepare_texts_from_summary()
target_folder = self._get_bigARTM_dir()
batch_vectorizer = artm.BatchVectorizer(
data_path=vocabulary_file, data_format='vowpal_wabbit',
target_folder=target_folder, batch_size=100
)
dict_path = self._get_dictionary_path()
dict_file = '{}.dict'.format(dict_path)
if os.path.isfile(dict_file):
os.remove(dict_file)
my_dictionary = artm.Dictionary()
my_dictionary.gather(data_path=target_folder, vocab_file_path=vocabulary_file)
my_dictionary.save(dictionary_path=dict_path)
my_dictionary.load(dictionary_path=dict_file)
T = self.num_of_topics
topic_names=["sbj"+str(i) for i in range(T-1)]+["bcg"]
self.model_artm = artm.ARTM(
num_topics=T,
topic_names=topic_names,
class_ids={"text": 1, "doc_guid": 1},
dictionary=my_dictionary,
cache_theta=True
)
self.model_artm.initialize(dictionary=my_dictionary)
self.model_artm.scores.add(artm.TopTokensScore(name="text_words", num_tokens=15, class_id="text"))
self.model_artm.scores.add(artm.TopTokensScore(name="doc_guid_words", num_tokens=15, class_id="doc_guid"))
self.model_artm.regularizers.add(artm.SmoothSparsePhiRegularizer(name='SparsePhi', tau=1e5, dictionary=my_dictionary, class_ids="text", topic_names="bcg"))
self.model_artm.fit_offline(batch_vectorizer=batch_vectorizer, num_collection_passes=30)
self.model_artm.regularizers.add(
artm.SmoothSparsePhiRegularizer(
name='SparsePhi-1e5',
tau=-1e5,
dictionary=my_dictionary,
class_ids="text",
topic_names=["sbj"+str(i) for i in range(T-1)]
)
)
self.model_artm.fit_offline(batch_vectorizer=batch_vectorizer, num_collection_passes=15)
self.training_done = True
def get_top_words(self, score_name='text_words', count=None):
top_words = {}
for topic_name in self.model_artm.topic_names:
top_words[topic_name] = self.top_tokens_by_topic(score_name, topic_name, count=count)
return top_words
def get_topic_profile(self):
phi_a = self.model_artm.get_phi(class_ids='doc_guid')
theta = self.model_artm.get_theta()
p_t = theta.sum(axis=1)
df_p_t = pd.DataFrame(p_t / p_t.sum(axis=0))
df_p_t.columns = ['probability']
topic_profile = pd.DataFrame(index=phi_a.index.copy(), columns=df_p_t.index.copy())
for a_idx, _ in enumerate(topic_profile.index):
total = np.sum([phi_a.iloc[a_idx][col] * df_p_t.loc[col]['probability'] for col in topic_profile.columns])
for _, topic in enumerate(topic_profile.columns):
prob = (phi_a.iloc[a_idx][topic] * df_p_t.loc[topic]['probability']) / total
topic_profile.iloc[a_idx][topic] = prob
mds_cos_clstr = MDS(n_components=2)
MDS_transformed_cos = mds_cos_clstr.fit_transform(pairwise_distances(topic_profile, metric='cosine'))
result = []
for a_idx, doc_guid in enumerate(topic_profile.index):
sbj = topic_profile.columns[topic_profile.iloc[a_idx].values.argmax()]
coord = MDS_transformed_cos[a_idx]
docs = [d for d in self.documents if d['docGuid'] == doc_guid]
rank = -1
title = 'N/A'
summary = 'N/A'
description = 'N/A'
if len(docs):
summary = docs[0]['summary'] if 'summary' in docs[0] else description
description = self._get_doc_description_from_file(docs[0]['docGuid'])
rank = docs[0]['rank'] if 'rank' in docs[0] else rank
title = docs[0]['title'] if 'title' in docs[0] else title
else:
print('Not able to find the following doc: {}'.format(doc_guid))
result.append({
'doc_guid': doc_guid,
'rank': rank,
'title': title,
'sbj': sbj,
'description': description,
'summary': summary,
'x': coord[0],
'y': coord[1]
})
return result
def _get_doc_description_from_file(self, doc_guid):
docs_folder = self._get_documents_folder()
description = 'N/A'
with open('{}/{}.txt'.format(docs_folder, doc_guid), encoding='utf-8') as doc:
for line in doc:
description = line
break
return description
def top_tokens_by_topic(self, score_name, topic_name, count=None):
top_tokens = self.model_artm.score_tracker[score_name]
top_words = list()
if topic_name not in top_tokens.last_tokens \
or topic_name not in top_tokens.last_weights:
return []
for (token, weight) in zip(top_tokens.last_tokens[topic_name],
top_tokens.last_weights[topic_name]):
top_words.append((token, weight))
if count is None:
count = len(top_words)
top_words = sorted(top_words,key=operator.itemgetter(1), reverse=True)[:count]
return [x[0] for x in top_words]
def _prepare_texts_from_summary(self):
vocabulary_file = self._get_vocabulary_file_name()
vocabulary = open(vocabulary_file, 'w')
for doc in self.documents:
try:
vocabulary.write('document_{} |text '.format(doc['docGuid']))
prepared = text_prepare(doc['summary'] if 'summary' in doc else 'N/A')
vocabulary.write(prepared)
vocabulary.write(' |doc_guid {}\n'.format(doc['docGuid']))
except:
print('Exception occurred for doc_guid: {}, error: {}'.format(doc['docGuid'], sys.exc_info()))
vocabulary.flush()
vocabulary.close()
return vocabulary_file
def _prepare_texts_full(self):
vocabulary_file = self._get_vocabulary_file_name()
vocabulary = open(vocabulary_file, 'w')
docs_folder = self._get_documents_folder()
for guid in [doc['docGuid'] for doc in self.documents]:
with open('{}/{}.txt'.format(docs_folder, guid), encoding='utf-8') as doc:
vocabulary.write('document_{} |text '.format(guid))
for line in doc:
prepared = text_prepare(line)
vocabulary.write(prepared)
vocabulary.write(' |doc_guid {}\n'.format(guid))
vocabulary.flush()
vocabulary.close()
return vocabulary_file
def _get_bigARTM_dir(self):
parent_path = os.path.dirname(os.path.dirname(inspect.getfile(self.__class__)))
dir_path = os.path.join(
parent_path,
'bigARTM')
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
return dir_path
def _get_documents_folder(self):
parent_path = os.path.dirname(os.path.dirname(inspect.getfile(self.__class__)))
dir_path = os.path.join(
parent_path,
'documents')
return dir_path
def _get_dictionary_path(self):
parent_path = self._get_bigARTM_dir()
dir_path = os.path.join(
parent_path,
'dictionary_{}'.format(self.search_guid))
return dir_path
def _get_vocabulary_file_name(self):
dir_path = self._get_bigARTM_dir()
file_name = os.path.join(
dir_path,
'ALL_TEXT_{}.txt'.format(self.search_guid)
)
return file_name
|
from pathlib import Path
from unittest import TestCase
from eyecite import annotate, clean_text, get_citations
class AnnotateTest(TestCase):
def test_annotate(self):
def straighten_quotes(text):
return text.replace("’", "'")
def lower_annotator(before, text, after):
return before + text.lower() + after
test_pairs = (
# single cite
("1 U.S. 1", "<0>1 U.S. 1</0>", []),
# cite with extra text
("foo 1 U.S. 1 bar", "foo <0>1 U.S. 1</0> bar", []),
# cite with punctuation
("foo '1 U.S. 1' bar", "foo '<0>1 U.S. 1</0>' bar", []),
# law cite
(
"foo. Mass. Gen. Laws ch. 1, § 2. bar",
"foo. <0>Mass. Gen. Laws ch. 1, § 2</0>. bar",
[],
),
# journal cite
(
"foo. 1 Minn. L. Rev. 2. bar",
"foo. <0>1 Minn. L. Rev. 2</0>. bar",
[],
),
# Id. cite
(
"1 U.S. 1. Foo. Id. Bar. Id. at 2.",
"<0>1 U.S. 1</0>. Foo. <1>Id.</1> Bar. <2>Id. at 2</2>.",
[],
),
# Supra cite
(
"1 U.S. 1. Foo v. Bar, supra at 2.",
"<0>1 U.S. 1</0>. Foo v. Bar, <1>supra at 2</1>.",
[],
),
# whitespace and html -- no unbalanced tag check
(
"<body>foo <i>1 <b>U.S.</b></i> 1 bar</body>",
"<body>foo <i><0>1 <b>U.S.</b></i> 1</0> bar</body>",
["html", "inline_whitespace"],
),
# whitespace and html -- skip unbalanced tags
(
"foo <i>1 U.S.</i> 1; 2 <i>U.S.</i> 2",
"foo <i>1 U.S.</i> 1; <1>2 <i>U.S.</i> 2</1>",
["html", "inline_whitespace"],
{"unbalanced_tags": "skip"},
),
# whitespace and html -- wrap unbalanced tags
(
"<i>1 U.S.</i> 1; 2 <i>U.S.</i> 2",
"<i><0>1 U.S.</0></i><0> 1</0>; <1>2 <i>U.S.</i> 2</1>",
["html", "inline_whitespace"],
{"unbalanced_tags": "wrap"},
),
# tightly-wrapped html -- skip unbalanced tags (issue #54)
(
"foo <i>Ibid.</i> bar",
"foo <i><0>Ibid.</0></i> bar",
["html", "inline_whitespace"],
{"unbalanced_tags": "skip"},
),
# whitespace containing linebreaks
("1\nU.S. 1", "<0>1\nU.S. 1</0>", ["all_whitespace"]),
# multiple Id. tags
(
"1 U.S. 1. Id. 2 U.S. 2. Id.",
"<0>1 U.S. 1</0>. <1>Id.</1> <2>2 U.S. 2</2>. <3>Id.</3>",
[],
),
# replacement in cleaners
(
"1 Abbott’s Pr.Rep. 1",
"<0>1 Abbott’s Pr.Rep. 1</0>",
[straighten_quotes],
),
# custom annotator
(
"1 U.S. 1",
"<0>1 u.s. 1</0>",
[],
{"annotator": lower_annotator},
),
)
for source_text, expected, clean_steps, *annotate_kwargs in test_pairs:
annotate_kwargs = annotate_kwargs[0] if annotate_kwargs else {}
with self.subTest(
source_text,
clean_steps=clean_steps,
annotate_args=annotate_kwargs,
):
plain_text = clean_text(source_text, clean_steps)
cites = get_citations(plain_text)
annotations = [
(c.span(), f"<{i}>", f"</{i}>")
for i, c in enumerate(cites)
]
annotated = annotate(
plain_text,
annotations,
source_text=source_text,
**annotate_kwargs,
)
self.assertEqual(annotated, expected)
def test_long_diff(self):
"""Does diffing work across a long text with many changes?"""
opinion_text = (
Path(__file__).parent / "assets" / "opinion.txt"
).read_text()
cleaned_text = clean_text(opinion_text, ["all_whitespace"])
annotated_text = annotate(
cleaned_text, [((902, 915), "~FOO~", "~BAR~")], opinion_text
)
self.assertIn("~FOO~539\n U. S. 306~BAR~", annotated_text)
|
# Copyright (c) 2020 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
import nnef
import sys
import os
def _is_lambda(value):
LAMBDA = lambda: 0
return isinstance(value, type(LAMBDA)) and value.__name__ == LAMBDA.__name__
def _ensure_lambda(value):
return value() if not _is_lambda(value) else value
def uniform(min=0.0, max=1.0):
return lambda shape: np.random.uniform(min, max, shape).astype(np.float32)
def normal(mean=0.0, std=1.0):
return lambda shape: np.random.normal(mean, std, shape).astype(np.float32)
def bernoulli(prob=0.5):
return lambda shape: np.random.uniform(0.0, 1.0, shape) > prob
def integers(min=0, max=100):
return lambda shape: np.random.randint(min, max, shape).astype(np.int32)
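# Usage sketch (illustrative): each factory above returns a lambda mapping a
# shape to a random numpy array of the matching dtype, e.g.
#   sampler = normal(mean=0.0, std=0.1)
#   values = sampler([1, 3, 224, 224])   # float32 array of that shape
# main() below picks one such sampler per NNEF dtype via the distributions dict.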
def main(args):
if args.seed is not None:
np.random.seed(args.seed)
distributions = {
'scalar': uniform(0.0, 1.0),
'integer': integers(0, 100),
'logical': bernoulli(0.5),
}
try:
random = eval(args.random)
if isinstance(random, dict):
distributions.update({key: _ensure_lambda(value) for key, value in random.items()})
else:
random = _ensure_lambda(random)
if args.random.startswith('integers'):
distributions['integer'] = random
elif args.random.startswith('bernoulli'):
distributions['logical'] = random
else:
distributions['scalar'] = random
except Exception as e:
print("Could not evaluate distribution: " + str(e), file=sys.stderr)
return -1
graph = nnef.parse_file(os.path.join(args.model, 'graph.nnef'))
for op in graph.operations:
if args.weights and op.name == 'variable':
label = op.attribs['label']
shape = op.attribs['shape']
data = distributions[op.dtype](shape)
filename = os.path.join(args.model, label + '.dat')
os.makedirs(os.path.split(filename)[0], exist_ok=True)
with open(filename, 'wb') as file:
nnef.write_tensor(file, data)
if args.verbose:
print("Generated weight '{}'".format(filename))
if args.inputs and op.name == 'external':
name = op.outputs['output']
shape = op.attribs['shape']
data = distributions[op.dtype](shape)
filename = os.path.join(args.model, args.inputs, name + '.dat')
os.makedirs(os.path.split(filename)[0], exist_ok=True)
with open(filename, 'wb') as file:
nnef.write_tensor(file, data)
if args.verbose:
print("Generated input '{}'".format(filename))
return 0
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('model', type=str,
help='The model to generate')
parser.add_argument('--random', type=str, required=True,
help='Random distribution for input generation, possibly per dtype')
parser.add_argument('--seed', type=int, default=None,
help='Random seed for input generation')
parser.add_argument('--weights', action='store_true',
help='Generate weights')
parser.add_argument('--inputs', type=str, nargs='?', default=None, const='.',
help='Generate inputs')
parser.add_argument('--verbose', action='store_true',
help='Whether to print generated file names')
exit(main(parser.parse_args()))
|
class Sortable():
def sort(self, var, order, sorting):
class Sorting():
def __init__(self, var, order):
self._var = var
self._order = order
self._hash = hash((var, order))
@property
def var(self):
return self._var
@property
def order(self):
return self._order
def __str__(self):
pass
def __eq__(self, other):
pass
def __hash__(self):
pass |
from __future__ import annotations
import pytest
@pytest.fixture
def abc(tmpdir):
a = tmpdir.join("file_a")
a.write("a text")
b = tmpdir.join("file_b")
b.write("b text")
c = tmpdir.join("file_c")
c.write("c text")
yield a, b, c
def test_multiple_files(run, abc):
a, b, c = abc
with run(str(a), str(b), str(c)) as h:
h.await_text("file_a")
h.await_text("[1/3]")
h.await_text("a text")
h.press("Right")
h.await_cursor_position(x=1, y=1)
h.press("M-Right")
h.await_text("file_b")
h.await_text("[2/3]")
h.await_text("b text")
h.await_cursor_position(x=0, y=1)
h.press("M-Left")
h.await_text("file_a")
h.await_text("[1/3]")
h.await_cursor_position(x=1, y=1)
# wrap around
h.press("M-Left")
h.await_text("file_c")
h.await_text("[3/3]")
h.await_text("c text")
# make sure to clear statuses when switching files
h.press("^J")
h.await_text("unknown key")
h.press("M-Right")
h.await_text_missing("unknown key")
h.press("^J")
h.await_text("unknown key")
h.press("M-Left")
h.await_text_missing("unknown key")
# also make sure to clear statuses when exiting files
h.press("^J")
h.await_text("unknown key")
h.press("^X")
h.await_text("file_b")
h.await_text_missing("unknown key")
h.press("^X")
h.await_text("file_a")
h.press("^X")
h.await_exit()
def test_multiple_files_close_from_beginning(run, abc):
a, b, c = abc
with run(str(a), str(b), str(c)) as h:
h.press("^X")
h.await_text("file_b")
h.press("^X")
h.await_text("file_c")
h.press("^X")
h.await_exit()
def test_multiple_files_close_from_end(run, abc):
a, b, c = abc
with run(str(a), str(b), str(c)) as h:
h.press("M-Right")
h.await_text("file_b")
h.press("^X")
h.await_text("file_c")
h.press("^X")
h.await_text("file_a")
h.press("^X")
h.await_exit()
|
# -*- coding: utf-8 -*-
#
# Unit test for the actions module.
#
# Copyright (c) 2015 carlosperate https://github.com/carlosperate/
# Licensed under the Apache License, Version 2.0 (the "License"):
# http://www.apache.org/licenses/LICENSE-2.0
#
from __future__ import unicode_literals, absolute_import, print_function
import os
import sys
import codecs
import unittest
try:
import mock
from mock import patch
except ImportError:
from unittest.mock import MagicMock as mock
from unittest.mock import patch
try:
import ardublocklyserver.actions as actions
from ardublocklyserver.compilersettings import ServerCompilerSettings
except ImportError:
import sys
file_dir = os.path.dirname(os.path.realpath(__file__))
package_dir = os.path.dirname(os.path.dirname(file_dir))
sys.path.insert(0, package_dir)
import ardublocklyserver.actions as actions
from ardublocklyserver.compilersettings import ServerCompilerSettings
class ActionsTestCase(unittest.TestCase):
"""
Tests for actions module
"""
#
# Helper functions
#
def delete_default_settings_file(self):
"""
Checks if there is a settings file in the default location and deletes
it if it finds it.
This will DELETE a file from the directory this script is called from !!!
"""
default_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
# Accessing the class static variable does not initialise the singleton
settings_file = os.path.normpath(os.path.join(
default_dir,
ServerCompilerSettings._ServerCompilerSettings__settings_filename))
if os.path.exists(settings_file):
print('Removing settings file from %s' % settings_file)
os.remove(settings_file)
#
# Command line tests
#
@patch('ardublocklyserver.actions.subprocess.Popen', autospec=True)
#@patch.object(
# actions.ServerCompilerSettings, 'get_compiler_dir', autospec=True)
def test_load_arduino_cli_valid(self, mock_popen):
"""
Tests that a compiler path and arduino sketch path can be set
and that a command line can be launched to open the sketch in the
Arduino IDE.
"""
sketch_path = actions.create_sketch_default()
ServerCompilerSettings().load_ide_option = 'open'
with patch(
'ardublocklyserver.actions.ServerCompilerSettings.compiler_dir',
new_callable=mock.PropertyMock) as mock_compiler_dir:
mock_compiler_dir.return_value = 'true' # do nothing command
expected_command = ['true', sketch_path]
success, conclusion, out, error, exit_code = \
actions.load_arduino_cli(sketch_path)
mock_popen.assert_called_with(expected_command, shell=False)
self.assertTrue(success)
ServerCompilerSettings().load_ide_option = 'verify'
with patch(
'ardublocklyserver.actions.ServerCompilerSettings.compiler_dir',
new_callable=mock.PropertyMock) as mock_compiler_dir:
mock_compiler_dir.return_value = 'true' # do nothing command
mock_popen.return_value.communicate.return_value = ('test', 'test')
mock_popen.return_value.returncode = 0
expected_command = ['true', '--verify', sketch_path]
success, conclusion, out, error, exit_code = \
actions.load_arduino_cli(sketch_path)
mock_popen.assert_called_with(expected_command, shell=False,
stderr=-1, stdout=-1)
self.assertTrue(success)
ServerCompilerSettings().load_ide_option = 'upload'
with patch(
'ardublocklyserver.actions.ServerCompilerSettings.compiler_dir',
new_callable=mock.PropertyMock) as mock_compiler_dir:
mock_compiler_dir.return_value = 'true' # do nothing command
mock_popen.return_value.communicate.return_value = ('test', 'test')
mock_popen.return_value.returncode = 0
expected_command = [
'true', '--upload', '--port',
ServerCompilerSettings().get_serial_port_flag(), '--board',
ServerCompilerSettings().get_arduino_board_flag(), sketch_path]
success, conclusion, out, error, exit_code = \
actions.load_arduino_cli(sketch_path)
mock_popen.assert_called_with(expected_command, shell=False,
stderr=-1, stdout=-1)
self.assertTrue(success)
# Test for unicode strings as Py2 can be susceptible to fail there
ServerCompilerSettings().load_ide_option = 'upload'
with patch(
'ardublocklyserver.actions.ServerCompilerSettings.compiler_dir',
new_callable=mock.PropertyMock) as mock_compiler_dir:
mock_compiler_dir.return_value = 'いろはにほへとちり' # unicode
mock_popen.return_value.communicate.return_value = (
'Γαζέες καὶ μυρτιὲς', 'Âne ex aéquo au whist')
mock_popen.return_value.returncode = 0
expected_command = [
mock_compiler_dir.return_value, '--upload', '--port',
ServerCompilerSettings().get_serial_port_flag(), '--board',
ServerCompilerSettings().get_arduino_board_flag(), sketch_path]
success, conclusion, out, error, exit_code = \
actions.load_arduino_cli(sketch_path)
mock_popen.assert_called_with(expected_command, shell=False,
stderr=-1, stdout=-1)
self.assertTrue(success)
def test_load_arduino_cli_invalid(self):
# Test that a path that is not a file returns an error
success, conclusion, out, error, exit_code =\
actions.load_arduino_cli(os.getcwd())
self.assertFalse(success)
self.assertTrue('Provided sketch path is not a valid' in conclusion)
# Test for error if compiler dir is not set, default is None
self.delete_default_settings_file()
success, conclusion, out, error, exit_code = actions.load_arduino_cli()
self.assertFalse(success)
self.assertEqual(conclusion, 'Unable to find Arduino IDE')
# Test for error if compiler dir is not set
with patch(
'ardublocklyserver.actions.ServerCompilerSettings.compiler_dir',
new_callable=mock.PropertyMock) as mock_compiler_dir:
with patch(
'ardublocklyserver.actions.ServerCompilerSettings.'
'load_ide_option', new_callable=mock.PropertyMock) as \
mock_load_ide_option:
mock_compiler_dir.return_value = 'true' # do nothing command
mock_load_ide_option.return_value = None
success, conclusion, out, error, exit_code = \
actions.load_arduino_cli()
self.assertFalse(success)
self.assertEqual(conclusion,
'What should we do with the Sketch?')
# Test for error if serial port unset, only required when set to upload
ServerCompilerSettings().load_ide_option = 'upload'
with patch(
'ardublocklyserver.actions.ServerCompilerSettings.compiler_dir',
new_callable=mock.PropertyMock) as mock_compiler_dir:
with patch(
'ardublocklyserver.actions.ServerCompilerSettings.'
'get_serial_port_flag') as mock_get_serial_port_flag:
mock_compiler_dir.return_value = 'true' # do nothing command
mock_get_serial_port_flag.return_value = None
success, conclusion, out, error, exit_code = \
actions.load_arduino_cli()
self.assertFalse(success)
self.assertEqual(conclusion, 'Serial Port unavailable')
# Test for error if board type unset, only required when set to upload
ServerCompilerSettings().load_ide_option = 'upload'
with patch(
'ardublocklyserver.actions.ServerCompilerSettings.compiler_dir',
new_callable=mock.PropertyMock) as mock_compiler_dir:
with patch(
'ardublocklyserver.actions.ServerCompilerSettings.'
'get_arduino_board_flag') as mock_get_arduino_board_flag:
mock_compiler_dir.return_value = 'true' # do nothing command
mock_get_arduino_board_flag.return_value = None
success, conclusion, out, error, exit_code = \
actions.load_arduino_cli()
self.assertFalse(success)
self.assertEqual(conclusion, 'Unknown Arduino Board')
#
# Tests sketch creation
#
def test_create_sketch_default(self):
sketch_dir = actions.create_sketch_default()
self.assertTrue(os.path.isfile(sketch_dir))
def test_create_sketch_from_string(self):
sketch_content = 'test string sketch'
sketch_dir = actions.create_sketch_from_string(sketch_content)
self.assertTrue(os.path.isfile(sketch_dir))
f = codecs.open(sketch_dir, "r", "utf-8")
self.assertEqual(f.read(), sketch_content)
# Test for unicode file
sketch_content = 'いろはにほへとちり Γαζέες καὶ μυρτς Âne aéquo au whist'
sketch_dir = actions.create_sketch_from_string(sketch_content)
self.assertTrue(os.path.isfile(sketch_dir))
f = codecs.open(sketch_dir, "r", "utf-8")
self.assertEqual(f.read(), sketch_content)
#
# Tests compiler path setting functions
#
@patch('ardublocklyserver.gui.browse_file_dialog')
@patch('ardublocklyserver.compilersettings.os.path.isfile')
def test_set_compiler_path(self, mock_isfile, mock_file_dialog):
"""
Tests that the set_compiler_path method edits the settings based on the
output from the gui.browse_file_dialog() function only if it has not
been cancelled.
The return value is not tested as it is a direct call to the
actions.get_compiler_path() function and will be tested individually.
"""
self.delete_default_settings_file()
settings = ServerCompilerSettings()
new_compiler_dir = os.path.join(os.getcwd(), 'arduino_debug.exe')
mock_file_dialog.return_value = new_compiler_dir
# The settings.compiler_dir checks for file validity
mock_isfile.return_value = True
old_compiler_dir = settings.compiler_dir
actions.set_compiler_path()
self.assertNotEqual(old_compiler_dir, settings.compiler_dir)
# Using 'in' since each OS deals with the compiler path differently
self.assertTrue(new_compiler_dir in settings.compiler_dir)
# If the dialog is cancelled, the ServerCompilerSettings class should
# not be invoked at all
with patch(
'ardublocklyserver.actions.ServerCompilerSettings.__new__') \
as mock_settings:
# Avoid call to ServerCompilerSettings() in get_compiler_path
with patch('ardublocklyserver.actions.get_compiler_path') \
as mock_get_compiler_path:
mock_file_dialog.return_value = '' # Dialog cancel return value
mock_get_compiler_path.return_value = None # Don't care
old_compiler_dir = settings.compiler_dir
actions.set_compiler_path()
self.assertEqual(old_compiler_dir, settings.compiler_dir)
self.assertFalse(mock_settings.called)
def test_get_compiler_path(self):
#TODO: This test method
pass
#
# Test sketch setting functions
#
def test_set_sketch_path(self):
#TODO: This test method
pass
def test_get_sketch_path(self):
#TODO: This test method
pass
#
# Test arduino Board setting functions
#
def test_set_arduino_board(self):
#TODO: This test method
pass
def test_get_arduino_boards(self):
#TODO: This test method
pass
#
# Test serial Port setting functions
#
def test_set_serial_port(self):
#TODO: This test method
pass
def test_get_serial_ports(self):
#TODO: This test method
pass
#
# Test load IDE setting functions
#
def test_set_load_ide_only(self):
#TODO: This test method
pass
def test_get_load_ide_only(self):
#TODO: This test method
pass
if __name__ == '__main__':
unittest.main()
|
min = 0
max = 200
for i in range(min, max):
if i % 2 == 0:
print i, |
import pytest
from mtgjson.jsonproxy import JSONProxy
@pytest.fixture
def data():
return {'foo': 'bar', 'int': 42, 'boolean': True, 'sublist': [1, 2, 3], }
@pytest.fixture
def prox(data):
return JSONProxy(data)
def test_getattr_works(prox):
assert prox.foo == 'bar'
assert prox.int == 42
assert prox.boolean
assert prox.sublist == [1, 2, 3]
def test_setting_attributes(prox):
prox.baz = 'baz'
assert prox.baz == 'baz'
def test_overriding_attribute(prox):
prox.int = -1
assert prox.int == -1
del prox.int
assert prox.int == 42
def test_missing_attributes(prox):
with pytest.raises(AttributeError):
prox.not_set
def test_raw_retrieval(prox, data):
assert prox._get_raw_data() == data
|
import socket
from _thread import *
import pickle
from game import Game
server = "10.11.250.207"
port = 5555
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind((server, port))
except socket.error as e:
print(e)
s.listen(2)
print("Waiting for a connection, Server Started")
connected = set()
games = {}
idCount = 0
def threaded_client(conn, p, gameId):
global idCount
conn.send(str.encode(str(p)))
reply = ""
while True:
try:
data = conn.recv(4096).decode()
if gameId in games:
game = games[gameId]
if not data:
break
else:
if data == "reset":
game.resetWent()
elif data != "get":
game.play(p, data)
conn.sendall(pickle.dumps(game))
else:
break
except:
break
print("Lost connection")
try:
del games[gameId]
print("Closing Game", gameId)
except:
pass
idCount -= 1
conn.close()
while True:
conn, addr = s.accept()
print("Connected to:", addr)
idCount += 1
p = 0
gameId = (idCount - 1)//2
if idCount % 2 == 1:
games[gameId] = Game(gameId)
print("Creating a new game...")
else:
games[gameId].ready = True
p = 1
start_new_thread(threaded_client, (conn, p, gameId)) |
import logging
import os
import platform
import select
import socket
import struct
import sys
import threading
import time
from io import BytesIO
from weakref import proxy
from pydicom.uid import ExplicitVRLittleEndian, ImplicitVRLittleEndian, \
ExplicitVRBigEndian, UID
from pynetdicom.ACSEprovider import ACSEServiceProvider
from pynetdicom.DIMSEprovider import DIMSEServiceProvider
from pynetdicom.DIMSEparameters import *
from pynetdicom.DULprovider import DULServiceProvider
from pynetdicom.SOPclass import *
from pynetdicom.utils import PresentationContextManager, correct_ambiguous_vr, wrap_list
from pynetdicom.primitives import UserIdentityNegotiation, \
SOPClassExtendedNegotiation, \
MaximumLengthNegotiation, \
A_ASSOCIATE, A_RELEASE, A_ABORT, A_P_ABORT
logger = logging.getLogger('pynetdicom.assoc')
class Association(threading.Thread):
"""
Manages Associations with peer AEs. The actual low level work done for
Associations is performed by pynetdicom.ACSEprovider.ACSEServiceProvider
When the local AE is acting as an SCP, initialise the Association using
the socket to listen on for incoming Association requests. When the local
AE is acting as an SCU, initialise the Association with the details of the
peer AE
When AE is acting as an SCP:
assoc = Association(self, client_socket, max_pdu)
When AE is acting as an SCU:
assoc = Association(self,
peer_ae,
acse_timeout,
dimse_timeout,
max_pdu,
ext_neg)
Parameters
----------
local_ae - pynetdicom.applicationentity.ApplicationEntity
The local AE instance
client_socket - socket.socket, optional
If the local AE is acting as an SCP, this is the listen socket for
incoming connection requests
peer_ae - dict, optional
If the local AE is acting as an SCU this is the AE title, host and port
of the peer AE that we want to Associate with
acse_timeout - int, optional
The maximum amount of time to wait for a reply during association, in
seconds. A value of 0 means no timeout (default: 30)
dimse_timeout - int, optional
The maximum amount of time to wait for a reply during DIMSE, in
seconds. A value of 0 means no timeout (default: 0)
max_pdu - int, optional
The maximum PDU receive size in bytes for the association. A value of 0
means no maximum size (default: 16382 bytes).
ext_neg - list of extended negotiation parameters objects, optional
If the association requires an extended negotiation then `ext_neg` is
a list containing the negotiation objects (default: None)
Attributes
----------
acse - ACSEServiceProvider
The Association Control Service Element provider
ae - pynetdicom.applicationentity.ApplicationEntity
The local AE
dimse - DIMSEServiceProvider
The DICOM Message Service Element provider
dul - DUL
The DICOM Upper Layer service provider instance
is_aborted - bool
True if the association has been aborted
is_established - bool
True if the association has been established
is_released - bool
True if the association has been released
mode - str
Whether the local AE is acting as the Association 'Requestor' or
'Acceptor' (i.e. SCU or SCP)
peer_ae - dict
The peer Application Entity details (Port, Address, Title)
client_socket - socket.socket
The socket to use for connections with the peer AE
scu_supported_sop
A list of the supported SOP classes when acting as an SCU
scp_supported_sop
A list of the supported SOP classes when acting as an SCP
"""
def __init__(self, local_ae,
client_socket=None,
peer_ae=None,
acse_timeout=30,
dimse_timeout=0,
max_pdu=16382,
ext_neg=None):
# Why is the AE in charge of supplying the client socket?
# Hmm, perhaps because we can have multiple connections on the same
# listen port. Does that even work? Probably needs testing
# As SCP: supply port number to listen on (listen_port !=None)
# As SCU: supply addr/port to make connection on (peer_ae != None)
if [client_socket, peer_ae] == [None, None]:
raise ValueError("Association must be initialised with either "
"the client_socket or peer_ae parameters")
if client_socket and peer_ae:
raise ValueError("Association must be initialised with either "
"client_socket or peer_ae parameter not both")
# Received a connection from a peer AE
if client_socket:
self.mode = 'Acceptor'
# Initiated a connection to a peer AE
if peer_ae:
self.mode = 'Requestor'
# The socket.socket used for connections
self.client_socket = client_socket
# The parent AE object
self.ae = local_ae
# Why do we instantiate the DUL provider with a socket when acting
# as an SCU?
# Q. Why do we need to feed the DUL an ACSE timeout?
# A. ARTIM timer
self.dul = DULServiceProvider(client_socket,
dul_timeout=self.ae.network_timeout,
acse_timeout=acse_timeout,
local_ae=local_ae,
assoc=self)
# Dict containing the peer AE title, address and port
self.peer_ae = peer_ae
# Lists of pynetdicom.utils.PresentationContext items that the local
# AE supports when acting as an SCU and SCP
self.scp_supported_sop = []
self.scu_supported_sop = []
# Status attributes
self.is_established = False
self.is_refused = False
self.is_aborted = False
self.is_released = False
# Timeouts for the DIMSE and ACSE service providers
self.dimse_timeout = dimse_timeout
self.acse_timeout = acse_timeout
# Maximum PDU sizes (in bytes) for the local and peer AE
self.local_max_pdu = max_pdu
self.peer_max_pdu = None
# A list of extended negotiation objects
self.ext_neg = ext_neg
# Kills the thread loop in run()
self._Kill = False
# Thread setup
threading.Thread.__init__(self)
self.daemon = True
# Start the thread
self.start()
def kill(self):
"""
Kill the main association thread loop, first checking that the DUL has
been stopped
"""
self._Kill = True
self.is_established = False
while not self.dul.Stop():
time.sleep(0.001)
self.ae._cleanup_associations()
def release(self):
"""
Direct the ACSE to issue an A-RELEASE request primitive to the DUL
provider
"""
# A-RELEASE response primitive
response = self.acse.Release()
self.kill()
self.is_released = True
def abort(self):
"""
Direct the ACSE to issue an A-ABORT request primitive to the DUL
provider
DUL service user association abort. Always gives the source as the
DUL service user and sets the abort reason to 0x00 (not significant)
See PS3.8, 7.3-4 and 9.3.8.
"""
self.acse.Abort(source=0x00, reason=0x00)
self.kill()
self.is_aborted = True
def run(self):
"""
The main Association thread
"""
# Set new ACSE and DIMSE providers
self.acse = ACSEServiceProvider(self, self.dul, self.acse_timeout)
self.dimse = DIMSEServiceProvider(self.dul, self.dimse_timeout)
# When the AE is acting as an SCP (Association Acceptor)
if self.mode == 'Acceptor':
# needed because of some thread-related problem. To investigate.
time.sleep(0.1)
# Get A-ASSOCIATE request primitive from the DICOM UL
assoc_rq = self.dul.Receive(Wait=True)
if assoc_rq is None:
self.kill()
return
# If the remote AE initiated the Association then reject it if:
# Rejection reasons:
# a) DUL user
# 0x02 unsupported application context name
# b) DUL ACSE related
# 0x01 no reason given
# 0x02 protocol version not supported
# c) DUL Presentation related
# 0x01 temporary congestion
## DUL User Related Rejections
#
# [result, source, diagnostic]
reject_assoc_rsd = []
# Calling AE Title not recognised
if self.ae.require_calling_aet != '':
if self.ae.require_calling_aet != assoc_rq.calling_ae_title:
reject_assoc_rsd = [(0x01, 0x01, 0x03)]
# Called AE Title not recognised
if self.ae.require_called_aet != '':
if self.ae.require_called_aet != assoc_rq.called_ae_title:
reject_assoc_rsd = [(0x01, 0x01, 0x07)]
## DUL ACSE Related Rejections
#
# User Identity Negotiation (PS3.7 Annex D.3.3.7)
for ii in assoc_rq.user_information:
if isinstance(ii, UserIdentityNegotiation):
# Used to notify the association acceptor of the user
# identity of the association requestor. It may also
# request that the Acceptor response with the server
# identity.
#
# The Acceptor does not provide an A-ASSOCIATE response
# unless a positive response is requested and user
# authentication succeeded. If a positive response
# was requested, the A-ASSOCIATE response shall contain
# a User Identity sub-item. If a Kerberos ticket is used
# the response shall include a Kerberos server ticket
#
# A positive response must be requested if the association
# requestor requires confirmation. If the Acceptor does
# not support user identification it will accept the
# association without making a positive response. The
# Requestor can then decide whether to proceed
#user_authorised = self.ae.on_user_identity(ii.UserIdentityType,
# ii.PrimaryField,
# ii.SecondaryField)
# Associate with all requestors
assoc_rq.user_information.remove(ii)
# Testing
#if ii.PositiveResponseRequested:
# ii.ServerResponse = b''
# Extended Negotiation
for ii in assoc_rq.user_information:
if isinstance(ii, SOPClassExtendedNegotiation):
assoc_rq.user_information.remove(ii)
## DUL Presentation Related Rejections
#
# Maximum number of associations reached (local-limit-exceeded)
if len(self.ae.active_associations) > self.ae.maximum_associations:
reject_assoc_rsd = [(0x02, 0x03, 0x02)]
for (result, src, diag) in reject_assoc_rsd:
assoc_rj = self.acse.Reject(assoc_rq, result, src, diag)
self.debug_association_rejected(assoc_rj)
self.ae.on_association_rejected(assoc_rj)
self.kill()
return
## Presentation Contexts
self.acse.context_manager = PresentationContextManager()
self.acse.context_manager.requestor_contexts = \
assoc_rq.presentation_context_definition_list
self.acse.context_manager.acceptor_contexts = \
self.ae.presentation_contexts_scp
self.acse.presentation_contexts_accepted = \
self.acse.context_manager.accepted
# Set maximum PDU send length
#self.peer_max_pdu = assoc_rq.UserInformation[0].MaximumLengthReceived
self.peer_max_pdu = assoc_rq.maximum_length_received
# Set maximum PDU receive length
assoc_rq.maximum_length_received = self.local_max_pdu
#for user_item in assoc_rq.user_information:
# if isinstance(user_item, MaximumLengthNegotiation):
# user_item.maximum_length_received = self.local_max_pdu
# Issue the A-ASSOCIATE indication (accept) primitive using the ACSE
assoc_ac = self.acse.Accept(assoc_rq)
# Callbacks/Logging
self.debug_association_accepted(assoc_ac)
self.ae.on_association_accepted(assoc_ac)
if assoc_ac is None:
self.kill()
return
# No valid presentation contexts, abort the association
if self.acse.presentation_contexts_accepted == []:
self.acse.Abort(0x02, 0x00)
self.kill()
return
# Association established OK
self.is_established = True
# Main SCP run loop
# 1. Checks for incoming DIMSE messages
# If DIMSE message then run corresponding service class' SCP
# method
# 2. Checks for peer A-RELEASE request primitive
# If present then kill thread
# 3. Checks for peer A-ABORT request primitive
# If present then kill thread
# 4. Checks DUL provider still running
# If not then kill thread
# 5. Checks DUL idle timeout
# If timed out then kill thread
while not self._Kill:
time.sleep(0.001)
# Check with the DIMSE provider for incoming messages
# all messages should be a DIMSEMessage subclass
msg, msg_context_id = self.dimse.Receive(False, self.dimse_timeout)
# DIMSE message received
if msg:
# Convert the message's affected SOP class to a UID
uid = msg.AffectedSOPClassUID
# Use the UID to create a new SOP Class instance of the
# corresponding value
try:
sop_class = UID2SOPClass(uid.value)()
except:
sop_class = UID2SOPClass(uid)()
# Check that the SOP Class is supported by the AE
matching_context = False
for context in self.acse.presentation_contexts_accepted:
if context.ID == msg_context_id:
# New method - what is this even used for?
sop_class.presentation_context = context
# Old method
sop_class.pcid = context.ID
sop_class.sopclass = context.AbstractSyntax
sop_class.transfersyntax = context.TransferSyntax[0]
matching_context = True
if matching_context:
# Most of these shouldn't be necessary
sop_class.maxpdulength = self.peer_max_pdu
sop_class.DIMSE = self.dimse
sop_class.ACSE = self.acse
sop_class.AE = self.ae
# Run SOPClass in SCP mode
sop_class.SCP(msg)
# Check for release request
if self.acse.CheckRelease():
# Callback trigger
self.debug_association_released()
self.ae.on_association_released()
self.kill()
# Check for abort
if self.acse.CheckAbort():
# Callback trigger
self.debug_association_aborted()
self.ae.on_association_aborted(None)
self.kill()
# Check if the DULServiceProvider thread is still running
# DUL.is_alive() is inherited from threading.thread
if not self.dul.is_alive():
self.kill()
# Check if idle timer has expired
if self.dul.idle_timer_expired():
self.kill()
# If the local AE initiated the Association
elif self.mode == 'Requestor':
if self.ae.presentation_contexts_scu == []:
logger.error("No presentation contexts set for the SCU")
self.kill()
return
# Build role extended negotiation - needs updating
# in particular, when running a C-GET user the role selection
# needs to be set prior to association
#
# SCP/SCU Role Negotiation (optional)
#self.ext_neg = []
#for context in self.AE.presentation_contexts_scu:
# tmp = SCP_SCU_RoleSelectionParameters()
# tmp.SOPClassUID = context.AbstractSyntax
# tmp.SCURole = 0
# tmp.SCPRole = 1
#
# self.ext_neg.append(tmp)
local_ae = {'Address' : self.ae.address,
'Port' : self.ae.port,
'AET' : self.ae.ae_title}
# Request an Association via the ACSE
is_accepted, assoc_rsp = self.acse.Request(
local_ae,
self.peer_ae,
self.local_max_pdu,
self.ae.presentation_contexts_scu,
userspdu=self.ext_neg)
# Association was accepted or rejected
if isinstance(assoc_rsp, A_ASSOCIATE):
# Association was accepted
if is_accepted:
self.debug_association_accepted(assoc_rsp)
self.ae.on_association_accepted(assoc_rsp)
# No acceptable presentation contexts
if self.acse.presentation_contexts_accepted == []:
logger.error("No Acceptable Presentation Contexts")
self.acse.Abort(0x02, 0x00)
self.kill()
return
# Build supported SOP Classes for the Association
self.scu_supported_sop = []
for context in self.acse.presentation_contexts_accepted:
self.scu_supported_sop.append(
(context.ID,
UID2SOPClass(context.AbstractSyntax),
context.TransferSyntax[0]))
# Association established OK
self.is_established = True
# This seems like it should be event driven rather than
# driven by a loop
#
# Listen for further messages from the peer
while not self._Kill:
time.sleep(0.001)
# Check for release request
if self.acse.CheckRelease():
# Callback trigger
self.ae.on_association_released()
self.debug_association_released()
self.kill()
return
# Check for abort
if self.acse.CheckAbort():
# Callback trigger
self.ae.on_association_aborted()
self.debug_association_aborted()
self.kill()
return
# Check if the DULServiceProvider thread is
# still running. DUL.is_alive() is inherited from
# threading.thread
if not self.dul.is_alive():
self.kill()
return
# Check if idle timer has expired
if self.dul.idle_timer_expired():
self.kill()
return
# Association was rejected
else:
self.ae.on_association_rejected(assoc_rsp)
self.debug_association_rejected(assoc_rsp)
self.is_refused = True
self.dul.Kill()
return
# Association was aborted by peer
elif isinstance(assoc_rsp, A_ABORT):
self.ae.on_association_aborted(assoc_rsp)
self.debug_association_aborted(assoc_rsp)
self.is_aborted = True
self.dul.Kill()
return
# Association was aborted by DUL provider
elif isinstance(assoc_rsp, A_P_ABORT):
self.is_aborted = True
self.dul.Kill()
return
# Association failed for any other reason (No peer, etc)
else:
self.dul.Kill()
return
# DIMSE-C services provided by the Association
def send_c_echo(self, msg_id=1):
"""
Send a C-ECHO message to the peer AE to verify end-to-end communication
Parameters
----------
msg_id - int, optional
The message ID to use (default: 1)
Returns
-------
status : pynetdicom.SOPclass.Status or None
Returns None if no valid presentation context or no response
from the peer, Success (0x0000) otherwise.
"""
if self.is_established:
# Service Class - used to determine Status
service_class = VerificationServiceClass()
uid = UID('1.2.840.10008.1.1')
# Determine the Presentation Context we are operating under
# and hence the transfer syntax to use for encoding `dataset`
transfer_syntax = None
for context in self.acse.context_manager.accepted:
if uid == context.AbstractSyntax:
transfer_syntax = context.TransferSyntax[0]
context_id = context.ID
if transfer_syntax is None:
logger.error("No Presentation Context for: '%s'" %uid)
return None
# Build C-ECHO request primitive
primitive = C_ECHO_ServiceParameters()
primitive.MessageID = msg_id
primitive.AffectedSOPClassUID = uid
self.dimse.Send(primitive, context_id, self.acse.MaxPDULength)
# If Association is Aborted before we receive the response
# then we hang here
rsp, _ = self.dimse.Receive(True, self.dimse_timeout)
if rsp is None:
return None
return service_class.Code2Status(rsp.Status)
else:
raise RuntimeError("The association with a peer SCP must be "
"established before sending a C-ECHO request")
def send_c_store(self, dataset, msg_id=1, priority=2):
"""
Send a C-STORE request message to the peer AE Storage SCP
PS3.4 Annex B
Service Definition
==================
Two peer DICOM AEs implement a SOP Class of the Storage Service Class
with one serving in the SCU role and one serving in the SCP role.
SOP Classes are implemented using the C-STORE DIMSE service. A
successful completion of the C-STORE has the following semantics:
- Both the SCU and SCP support the type of information to be stored
- The information is stored in some medium
- For some time frame, the information may be accessed
(For JPIP Referenced Pixel Data transfer syntaxes, transfer may result
in storage of incomplete information in that the pixel data may be
partially or completely transferred by some other mechanism at the
discretion of the SCP)
Extended Negotiation
====================
Extended negotiation is optional, however SCUs requesting association
may include:
- one SOP Class Extended Negotiation Sub-Item for each supported SOP
Class of the Storage Service Class, as described in PS3.7 Annex D.3.3.5.
- one SOP Class Common Extended Negotiation Sub-Item for each supported
SOP Class of the Storage Service Class, as described in PS3.7 Annex
D.3.3.6
The SCP accepting association shall optionally support:
- one SOP Class Extended Negotiation Sub-Item for each supported SOP
Class of the Storage Service Class, as described in PS3.7 Annex D.3.3.5.
Use of Extended Negotiation is left up to the end user to implement via
the ``AE.extended_negotiation`` attribute.
SOP Class Extended Negotiation
------------------------------
Service Class Application Information (A-ASSOCIATE-RQ)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PS3.4 Table B.3-1 shows the format of the SOP Class Extended Negotiation
Sub-Item's service-class-application-information field when requesting
association.
Service Class Application Information (A-ASSOCIATE-AC)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PS3.4 Table B.3-2 shows the format of the SOP Class Extended Negotiation
Sub-Item's service-class-application-information field when accepting
association.
SOP Class Common Extended Negotiation
-------------------------------------
Service Class UID
~~~~~~~~~~~~~~~~~
The SOP-class-uid field of the SOP Class Common Extended Negotiation
Sub-Item shall be 1.2.840.10008.4.2
Related General SOP Classes
~~~~~~~~~~~~~~~~~~~~~~~~~~~
PS3.4 Table B.3-3 identifies the Standard SOP Classes that participate
in this mechanism. If a Standard SOP Class is not listed, then Related
General SOP Classes shall not be included.
Parameters
----------
dataset - pydicom.Dataset
The DICOM dataset to send to the peer
msg_id - int, optional
The message ID, must be between 0 and 65535, inclusive. (default: 1)
priority : int, optional
The C-STORE operation priority (if supported by the peer), one of:
2 - Low (default)
1 - High
0 - Medium
Returns
-------
status : pynetdicom.SOPclass.Status or None
The status for the requested C-STORE operation (see PS3.4 Annex
B.2.3), should be one of the following Status objects:
Success status
sop_class.Success
Success - 0000
Failure statuses
sop_class.OutOfResources
Refused: Out of Resources - A7xx
sop_class.DataSetDoesNotMatchSOPClassFailure
Error: Data Set does not match SOP Class - A9xx
sop_class.CannotUnderstand
Error: Cannot understand - Cxxx
Warning statuses
sop_class.CoercionOfDataElements
Coercion of Data Elements - B000
sop_class.DataSetDoesNotMatchSOPClassWarning
Data Set does not match SOP Class - B007
sop_class.ElementsDiscarded
Elements Discarded - B006
Returns None if the DIMSE service timed out before receiving a
response
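Examples
--------
A minimal sketch, assuming ``assoc`` is this established Association and
``ds`` is a pydicom Dataset read from a hypothetical file path:
>>> from pydicom import dcmread
>>> ds = dcmread('ct_image.dcm')  # illustrative path
>>> status = assoc.send_c_store(ds)
>>> if status is not None:
...     print(status.Type)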
"""
# pydicom can only handle uncompressed transfer syntaxes for conversion
if not dataset._is_uncompressed_transfer_syntax():
logger.warning('Unable to send the dataset due to pydicom not supporting compressed datasets')
logger.error('Sending file failed')
return 0xC000
if self.is_established:
# Service Class - used to determine Status
service_class = StorageServiceClass()
# Determine the Presentation Context we are operating under
# and hence the transfer syntax to use for encoding `dataset`
transfer_syntax = None
for context in self.acse.context_manager.accepted:
if dataset.SOPClassUID == context.AbstractSyntax:
transfer_syntax = context.TransferSyntax[0]
context_id = context.ID
if transfer_syntax is None:
logger.error("No Presentation Context for: '%s'"
%dataset.SOPClassUID)
logger.error("Store SCU failed due to there being no valid "
"presentation context for the current dataset")
return service_class.CannotUnderstand
# Set the correct VR for ambiguous elements
dataset = correct_ambiguous_vr(dataset, transfer_syntax)
# Build C-STORE request primitive
primitive = C_STORE_ServiceParameters()
primitive.MessageID = msg_id
primitive.AffectedSOPClassUID = dataset.SOPClassUID
primitive.AffectedSOPInstanceUID = dataset.SOPInstanceUID
# Message priority
if priority in [0x0000, 0x0001, 0x0002]:
primitive.Priority = priority
else:
logger.warning("C-STORE SCU: Invalid priority value "
"'%s'" %priority)
primitive.Priority = 0x0000
# Encode the dataset using the agreed transfer syntax
ds = encode(dataset,
transfer_syntax.is_implicit_VR,
transfer_syntax.is_little_endian)
if ds is not None:
primitive.DataSet = BytesIO(ds)
#for s in wrap_list(primitive.DataSet):
# print(s)
# If we failed to encode our dataset
else:
return service_class.CannotUnderstand
# Send C-STORE request primitive to DIMSE
self.dimse.Send(primitive, context_id, self.acse.MaxPDULength)
# Wait for C-STORE response primitive
# returns a C_STORE_ServiceParameters primitive
rsp, _ = self.dimse.Receive(True, self.dimse_timeout)
status = None
if rsp is not None:
status = service_class.Code2Status(rsp.Status)
return status
else:
raise RuntimeError("The association with a peer SCP must be "
"established before sending a C-STORE request")
def send_c_find(self, dataset, msg_id=1, priority=2, query_model='W'):
"""
Send a C-FIND request message to the peer AE
See PS3.4 Annex C - Query/Retrieve Service Class
Parameters
----------
dataset : pydicom.Dataset
The DICOM dataset containing the Key Attributes the peer AE
should perform the match against
msg_id : int, optional
The message ID
priority : int, optional
The C-FIND operation priority (if supported by the peer), one of:
2 - Low (default)
1 - High
0 - Medium
query_model : str, optional
The Query/Retrieve Information Model to use, one of the following:
'W' - Modality Worklist Information - FIND (default)
1.2.840.10008.5.1.4.31
'P' - Patient Root Information Model - FIND
1.2.840.10008.5.1.4.1.2.1.1
'S' - Study Root Information Model - FIND
1.2.840.10008.5.1.4.1.2.2.1
'O' - Patient Study Only Information Model - FIND
1.2.840.10008.5.1.4.1.2.3.1
Yields
------
status : pynetdicom.SOPclass.Status
The resulting status(es) from the C-FIND operation
dataset : pydicom.dataset.Dataset
The resulting dataset(s) from the C-FIND operation
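Examples
--------
A minimal sketch, assuming ``assoc`` is this established Association;
the query attributes below are illustrative only:
>>> from pydicom.dataset import Dataset
>>> query_ds = Dataset()
>>> query_ds.QueryRetrieveLevel = 'PATIENT'
>>> query_ds.PatientName = '*'
>>> for status, identifier in assoc.send_c_find(query_ds, query_model='P'):
...     if status.Type == 'Pending':
...         print(identifier)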
"""
if self.is_established:
service_class = QueryRetrieveFindServiceClass()
if query_model == 'W':
sop_class = ModalityWorklistInformationFind()
service_class = ModalityWorklistServiceSOPClass()
elif query_model == "P":
# Four level hierarchy, patient, study, series, composite object
sop_class = PatientRootQueryRetrieveInformationModelFind()
elif query_model == "S":
# Three level hierarchy, study, series, composite object
sop_class = StudyRootQueryRetrieveInformationModelFind()
elif query_model == "O":
# Retired
sop_class = PatientStudyOnlyQueryRetrieveInformationModelFind()
else:
raise ValueError("Association::send_c_find() query_model "
"must be one of ['W'|'P'|'S'|'O']")
# Determine the Presentation Context we are operating under
# and hence the transfer syntax to use for encoding `dataset`
transfer_syntax = None
for context in self.acse.context_manager.accepted:
if sop_class.UID == context.AbstractSyntax:
transfer_syntax = context.TransferSyntax[0]
context_id = context.ID
if transfer_syntax is None:
logger.error("No Presentation Context for: '%s'"
%sop_class.UID)
logger.error("Find SCU failed due to there being no valid "
"presentation context for the current dataset")
return service_class.IdentifierDoesNotMatchSOPClass
# Build C-FIND primitive
primitive = C_FIND_ServiceParameters()
primitive.MessageID = msg_id
primitive.AffectedSOPClassUID = sop_class.UID
primitive.Priority = priority
primitive.Identifier = BytesIO(encode(dataset,
transfer_syntax.is_implicit_VR,
transfer_syntax.is_little_endian))
logger.info('Find SCU Request Identifiers:')
logger.info('')
logger.info('# DICOM Dataset')
for elem in dataset:
logger.info(elem)
logger.info('')
# send c-find request
self.dimse.Send(primitive, context_id, self.acse.MaxPDULength)
# Get the responses from the peer
ii = 1
while True:
time.sleep(0.001)
# Wait for c-find responses
rsp, _ = self.dimse.Receive(False, self.dimse.dimse_timeout)
if not rsp:
continue
# Decode the dataset
d = decode(rsp.Identifier,
transfer_syntax.is_implicit_VR,
transfer_syntax.is_little_endian)
# Status may be 'Failure', 'Cancel', 'Success' or 'Pending'
status = service_class.Code2Status(rsp.Status)
# We want to exit the wait loop if we receive
# Failure, Cancel or Success
if status.Type != 'Pending':
    break
logger.debug('-' * 65)
logger.debug('Find Response: %s (%s)' %(ii, status.Type))
logger.debug('')
logger.debug('# DICOM Dataset')
for elem in d:
logger.debug(elem)
logger.debug('')
ii += 1
yield status, d
yield status, d
else:
raise RuntimeError("The association with a peer SCP must be "
"established before sending a C-FIND request")
def send_c_cancel_find(self, msg_id, query_model):
"""
See PS3.7 9.3.2.3
Parameters
----------
msg_id : int
The message ID of the C-FIND operation we want to cancel
"""
if self.is_established:
service_class = QueryRetrieveFindServiceClass()
# Build C-FIND primitive
primitive = C_FIND_ServiceParameters()
primitive.CommandField = 0x0fff
primitive.MessageIDBeingRespondedTo = msg_id
primitive.CommandDataSetType = 0x0101
# We need the Context ID unfortunately...
if query_model == 'W':
sop_class = ModalityWorklistInformationFind()
elif query_model == "P":
# Four level hierarchy, patient, study, series, composite object
sop_class = PatientRootQueryRetrieveInformationModelFind()
elif query_model == "S":
# Three level hierarchy, study, series, composite object
sop_class = StudyRootQueryRetrieveInformationModelFind()
elif query_model == "O":
# Retired
sop_class = PatientStudyOnlyQueryRetrieveInformationModelFind()
else:
raise ValueError("Association::send_c_cancel_find() "
"query_model must be one of ['W'|'P'|'S'|'O']")
# Determine the Presentation Context we are operating under
# and hence the transfer syntax to use for encoding `dataset`
transfer_syntax = None
for context in self.acse.context_manager.accepted:
if sop_class.UID == context.AbstractSyntax:
transfer_syntax = context.TransferSyntax[0]
context_id = context.ID
if transfer_syntax is None:
logger.error("No Presentation Context for: '%s'"
%sop_class.UID)
logger.error("Find SCU failed due to there being no valid "
"presentation context for the current dataset")
return service_class.IdentifierDoesNotMatchSOPClass
logger.info('Sending C-CANCEL-FIND')
# send c-find request
self.dimse.Send(primitive, context_id, self.acse.MaxPDULength)
def send_c_move(self, dataset, move_aet, msg_id=1, priority=2, query_model='P'):
"""
C-MOVE Service Procedure
------------------------
PS3.7 9.1.4.2
Invoker
~~~~~~~
The invoking DIMSE user requests a performing DIMSE user match an
Identifier against the Attributes of all SOP Instances known to the
performing user and generate a C-STORE sub-operation for each match.
Performer
~~~~~~~~~
For each matching composite SOP Instance, the C-MOVE performing user
initiates a C-STORE sub-operation on a different Association than the
C-MOVE. In this sub-operation the C-MOVE performer becomes the C-STORE
invoker. The C-STORE performing DIMSE user may or may not be the C-MOVE
invoking DIMSE user.
Parameters
----------
dataset : pydicom.dataset.Dataset
The dataset containing the Attributes to match against
move_aet : str
The AE title for the destination of the C-STORE operations performed
by the C-MOVE performing DIMSE user
msg_id : int, optional
The Message ID to use for the C-MOVE service
priority : int, optional
The C-MOVE operation priority (if supported by the peer), one of:
2 - Low (default)
1 - High
0 - Medium
query_model : str, optional
The Query/Retrieve Information Model to use, one of the following:
'P' - Patient Root Information Model - MOVE (default)
1.2.840.10008.5.1.4.1.2.1.2
'S' - Study Root Information Model - MOVE
1.2.840.10008.5.1.4.1.2.2.2
'O' - Patient Study Only Information Model - MOVE
1.2.840.10008.5.1.4.1.2.3.2
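Examples
--------
A minimal sketch, assuming ``assoc`` is this established Association,
``query_ds`` is a pydicom Dataset of matching Key Attributes and
'STORE_SCP' is a hypothetical move destination AE title; the method
yields ``(status, dataset)`` pairs as responses arrive:
>>> for status, identifier in assoc.send_c_move(query_ds, 'STORE_SCP',
...                                             query_model='P'):
...     print(status.Type)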
"""
if self.is_established:
if query_model == "P":
sop_class = PatientRootQueryRetrieveInformationModelMove()
elif query_model == "S":
sop_class = StudyRootQueryRetrieveInformationModelMove()
elif query_model == "O":
sop_class = PatientStudyOnlyQueryRetrieveInformationModelMove()
else:
raise ValueError("Association::send_c_move() query_model must "
"be one of ['P'|'S'|'O']")
service_class = QueryRetrieveMoveServiceClass()
# Determine the Presentation Context we are operating under
# and hence the transfer syntax to use for encoding `dataset`
transfer_syntax = None
for context in self.acse.context_manager.accepted:
if sop_class.UID == context.AbstractSyntax:
transfer_syntax = context.TransferSyntax[0]
context_id = context.ID
if transfer_syntax is None:
logger.error("No Presentation Context for: '%s'"
%sop_class.UID)
logger.error("Move SCU failed due to there being no valid "
"presentation context for the current dataset")
return service_class.IdentifierDoesNotMatchSOPClass
# Build C-MOVE primitive
primitive = C_MOVE_ServiceParameters()
primitive.MessageID = msg_id
primitive.AffectedSOPClassUID = sop_class.UID
primitive.MoveDestination = move_aet
primitive.Priority = priority
primitive.Identifier = BytesIO(encode(dataset,
transfer_syntax.is_implicit_VR,
transfer_syntax.is_little_endian))
logger.info('Move SCU Request Identifiers:')
logger.info('')
logger.info('# DICOM Dataset')
for elem in dataset:
logger.info(elem)
logger.info('')
# Send C-MOVE request to peer
self.dimse.Send(primitive, context_id, self.acse.MaxPDULength)
# Get the responses from peer
ii = 1
while True:
time.sleep(0.001)
rsp, context_id = self.dimse.Receive(False, self.dimse.dimse_timeout)
if rsp.__class__ == C_MOVE_ServiceParameters:
status = service_class.Code2Status(rsp.Status)
dataset = decode(rsp.Identifier,
transfer_syntax.is_implicit_VR,
transfer_syntax.is_little_endian)
# If the Status is "Pending" then the processing of
# matches and suboperations is initiated or continuing
if status.Type == 'Pending':
remain = rsp.NumberOfRemainingSuboperations
complete = rsp.NumberOfCompletedSuboperations
failed = rsp.NumberOfFailedSuboperations
warning = rsp.NumberOfWarningSuboperations
# Pending Response
logger.debug('')
logger.info("Move Response: %s (Pending)" %ii)
logger.info(" Sub-Operations Remaining: %s, "
"Completed: %s, Failed: %s, Warning: %s" %(
remain,
complete,
failed,
warning))
ii += 1
yield status, dataset
# If the Status is "Success" then processing is complete
elif status.Type == "Success":
break
# All other possible responses
elif status.Type == "Failure":
logger.debug('')
logger.error('Move Response: %s (Failure)' %ii)
logger.error(' %s' %status.Description)
break
elif status.Type == "Cancel":
logger.debug('')
logger.info('Move Response: %s (Cancel)' %ii)
logger.info(' %s' %status.Description)
break
elif status.Type == "Warning":
logger.debug('')
logger.warning('Move Response: %s (Warning)' %ii)
logger.warning(' %s' %status.Description)
for elem in dataset:
logger.warning('%s: %s' %(elem.name, elem.value))
break
yield status, dataset
else:
raise RuntimeError("The association with a peer SCP must be "
"established before sending a C-MOVE request")
def send_c_cancel_move(self, msg_id, query_model):
"""
See PS3.7 9.3.2.3
Parameters
----------
msg_id : int
The message ID of the C-MOVE operation we want to cancel
query_model : str
The query model SOP class to use (needed to identify context ID)
"""
if self.is_established:
service_class = QueryRetrieveMoveServiceClass()
# Build C-MOVE primitive
primitive = C_MOVE_ServiceParameters()
primitive.CommandField = 0x0fff
primitive.MessageIDBeingRespondedTo = msg_id
primitive.CommandDataSetType = 0x0101
# We need the Context ID unfortunately...
if query_model == "P":
sop_class = PatientRootQueryRetrieveInformationModelMove()
elif query_model == "S":
sop_class = StudyRootQueryRetrieveInformationModelMove()
elif query_model == "O":
sop_class = PatientStudyOnlyQueryRetrieveInformationModelMove()
else:
raise ValueError("Association::send_c_cancel_move() query_model "
"must be one of ['P'|'S'|'O']")
# Determine the Presentation Context we are operating under
# and hence the transfer syntax to use for encoding `dataset`
transfer_syntax = None
for context in self.acse.context_manager.accepted:
if sop_class.UID == context.AbstractSyntax:
transfer_syntax = context.TransferSyntax[0]
context_id = context.ID
if transfer_syntax is None:
logger.error("No Presentation Context for: '%s'"
%sop_class.UID)
logger.error("Move SCU failed due to there being no valid "
"presentation context for the current dataset")
return service_class.IdentifierDoesNotMatchSOPClass
logger.info('Sending C-CANCEL-MOVE')
# Send C-CANCEL-MOVE request
self.dimse.Send(primitive, context_id, self.acse.MaxPDULength)
def send_c_get(self, dataset, msg_id=1, priority=2, query_model='P'):
"""
Send a C-GET request message to the peer AE
See PS3.4 Annex C - Query/Retrieve Service Class
Parameters
----------
dataset : pydicom.Dataset
The DICOM dataset containing the Key Attributes the peer AE
should perform the match against
msg_id : int, optional
The message ID
priority : int, optional
The C-GET operation priority (if supported by the peer), one of:
2 - Low (default)
1 - High
0 - Medium
query_model : str, optional
The Query/Retrieve Information Model to use, one of the following:
'P' - Patient Root Information Model - GET
1.2.840.10008.5.1.4.1.2.1.3
'S' - Study Root Information Model - GET
1.2.840.10008.5.1.4.1.2.2.3
'O' - Patient Study Only Information Model - GET
1.2.840.10008.5.1.4.1.2.3.3
Yields
------
status : pynetdicom.SOPclass.Status
The resulting status(es) from the C-GET operation
dataset : pydicom.dataset.Dataset
The resulting dataset(s) from the C-GET operation
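Examples
--------
A minimal sketch, assuming ``assoc`` is this established Association,
``query_ds`` is a pydicom Dataset of matching Key Attributes and the
parent AE implements ``on_c_store`` to handle the C-STORE sub-operations:
>>> for status, identifier in assoc.send_c_get(query_ds, query_model='P'):
...     print(status.Type)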
"""
if self.is_established:
if query_model == "P":
# Four level hierarchy, patient, study, series, composite object
sop_class = PatientRootQueryRetrieveInformationModelGet()
elif query_model == "S":
# Three level hierarchy, study, series, composite object
sop_class = StudyRootQueryRetrieveInformationModelGet()
elif query_model == "O":
# Retired
sop_class = PatientStudyOnlyQueryRetrieveInformationModelGet()
else:
raise ValueError("Association::send_c_get() query_model "
"must be one of ['P'|'S'|'O']")
service_class = QueryRetrieveGetServiceClass()
# Determine the Presentation Context we are operating under
# and hence the transfer syntax to use for encoding `dataset`
transfer_syntax = None
for context in self.acse.context_manager.accepted:
if sop_class.UID == context.AbstractSyntax:
transfer_syntax = context.TransferSyntax[0]
context_id = context.ID
if transfer_syntax is None:
logger.error("No Presentation Context for: '%s'"
%sop_class.UID)
logger.error("Get SCU failed due to there being no valid "
"presentation context for the current dataset")
return service_class.IdentifierDoesNotMatchSOPClass
# Build C-GET primitive
primitive = C_GET_ServiceParameters()
primitive.MessageID = msg_id
primitive.AffectedSOPClassUID = sop_class.UID
primitive.Priority = priority
primitive.Identifier = BytesIO(encode(dataset,
transfer_syntax.is_implicit_VR,
transfer_syntax.is_little_endian))
# Send primitive to peer
self.dimse.Send(primitive, context_id, self.acse.MaxPDULength)
logger.info('Get SCU Request Identifiers:')
logger.info('')
logger.info('# DICOM Dataset')
for elem in dataset:
logger.info(elem)
logger.info('')
ii = 1
while True:
rsp, context_id = self.dimse.Receive(True, self.dimse.dimse_timeout)
# Received a C-GET response
if rsp.__class__ == C_GET_ServiceParameters:
status = service_class.Code2Status(rsp.Status)
dataset = decode(rsp.Identifier,
transfer_syntax.is_implicit_VR,
transfer_syntax.is_little_endian)
# If the Status is "Pending" then the processing of
# matches and suboperations is initiated or continuing
if status.Type == 'Pending':
remain = rsp.NumberOfRemainingSuboperations
complete = rsp.NumberOfCompletedSuboperations
failed = rsp.NumberOfFailedSuboperations
warning = rsp.NumberOfWarningSuboperations
# Pending Response
logger.debug('')
logger.info("Find Response: %s (Pending)" %ii)
logger.info(" Sub-Operations Remaining: %s, "
"Completed: %s, Failed: %s, Warning: %s" %(
remain,
complete,
failed,
warning))
ii += 1
yield status, dataset
# If the Status is "Success" then processing is complete
elif status.Type == "Success":
status = service_class.Success
break
# All other possible responses
elif status.Type == "Failure":
logger.debug('')
logger.error('Get Response: %s (Failure)' %ii)
logger.error(' %s' %status.Description)
# Print out the status information
for elem in dataset:
logger.error('%s: %s' %(elem.name, elem.value))
break
elif status.Type == "Cancel":
logger.debug('')
logger.info('Get Response: %s (Cancel)' %ii)
logger.info(' %s' %status.Description)
break
elif status.Type == "Warning":
logger.debug('')
logger.warning('Get Response: %s (Warning)' %ii)
logger.warning(' %s' %status.Description)
# Print out the status information
for elem in dataset:
logger.warning('%s: %s' %(elem.name, elem.value))
break
# Received a C-STORE request in response to the C-GET
elif rsp.__class__ == C_STORE_ServiceParameters:
c_store_rsp = C_STORE_ServiceParameters()
c_store_rsp.MessageIDBeingRespondedTo = rsp.MessageID
c_store_rsp.AffectedSOPInstanceUID = \
rsp.AffectedSOPInstanceUID
c_store_rsp.AffectedSOPClassUID = rsp.AffectedSOPClassUID
d = decode(rsp.DataSet,
transfer_syntax.is_implicit_VR,
transfer_syntax.is_little_endian)
# Callback for C-STORE SCP (user implemented)
status = self.ae.on_c_store(d)
# Send C-STORE confirmation back to peer
c_store_rsp.Status = int(status)
self.dimse.Send(c_store_rsp,
context_id,
self.acse.MaxPDULength)
yield status, dataset
else:
raise RuntimeError("The association with a peer SCP must be "
"established before sending a C-GET request")
def send_c_cancel_get(self, msg_id, query_model):
"""
See PS3.7 9.3.2.3
Parameters
----------
msg_id : int
The message ID of the C-GET operation we want to cancel
"""
if self.is_established:
service_class = QueryRetrieveGetServiceClass()
# Build C-GET primitive
primitive = C_GET_ServiceParameters()
primitive.CommandField = 0x0fff
primitive.MessageIDBeingRespondedTo = msg_id
primitive.CommandDataSetType = 0x0101
# We need the Context ID unfortunately...
if query_model == "P":
# Four level hierarchy, patient, study, series, composite object
sop_class = PatientRootQueryRetrieveInformationModelGet()
elif query_model == "S":
# Three level hierarchy, study, series, composite object
sop_class = StudyRootQueryRetrieveInformationModelGet()
elif query_model == "O":
# Retired
sop_class = PatientStudyOnlyQueryRetrieveInformationModelGet()
else:
raise ValueError("Association::send_c_cancel_get() query_model "
"must be one of ['P'|'S'|'O']")
# Determine the Presentation Context we are operating under
# and hence the transfer syntax to use for encoding `dataset`
transfer_syntax = None
for context in self.acse.context_manager.accepted:
if sop_class.UID == context.AbstractSyntax:
transfer_syntax = context.TransferSyntax[0]
context_id = context.ID
if transfer_syntax is None:
logger.error("No Presentation Context for: '%s'"
%sop_class.UID)
logger.error("Find SCU failed due to there being no valid "
"presentation context for the current dataset")
return service_class.IdentifierDoesNotMatchSOPClass
logger.info('Sending C-CANCEL-GET')
# Send c-cancel-get request
self.dimse.Send(primitive, context_id, self.acse.MaxPDULength)
# DIMSE-N services provided by the Association
def send_n_event_report(self):
raise NotImplementedError
def send_n_get(self, msg_id, dataset=None):
if self.is_established:
service_class = QueryRetrieveGetServiceClass()
# Determine the Presentation Context we are operating under
# and hence the transfer syntax to use for encoding `dataset`
transfer_syntax = None
for context in self.acse.context_manager.accepted:
if sop_class.UID == context.AbstractSyntax:
transfer_syntax = context.TransferSyntax[0]
context_id = context.ID
if transfer_syntax is None:
logger.error("No Presentation Context for: '%s'"
%sop_class.UID)
logger.error("Get SCU failed due to there being no valid "
"presentation context for the current dataset")
return service_class.IdentifierDoesNotMatchSOPClass
# Build N-GET primitive
primitive = N_GET_ServiceParameters()
primitive.MessageID = msg_id
# The SOP Class for which Attribute Values are to be retrieved
primitive.RequestedSOPClassUID = None
# The SOP Instance for which Attribute Values are to be retrieved
primitive.RequestedSOPInstanceUID = None
# A set of Attribute identifiers, if omitted then all identifiers are assumed
# The definitions of the Attributes are found in PS3.3
if dataset is not None:
primitive.AttributeIdentifierList = encode(dataset,
transfer_syntax.is_implicit_VR,
transfer_syntax.is_little_endian)
primitive.AttributeIdentifierList = BytesIO(primitive.AttributeIdentifierList)
# Send primitive to peer
self.dimse.Send(primitive, context_id, self.acse.MaxPDULength)
def send_n_set(self):
raise NotImplementedError
def send_n_action(self):
raise NotImplementedError
def send_n_create(self):
raise NotImplementedError
def send_n_delete(self):
raise NotImplementedError
# Association logging/debugging functions
def debug_association_requested(self, primitive):
"""
Called when an association is requested by a peer AE, used for
logging/debugging information
Parameters
----------
primitive - pynetdicom.DULparameters.A_ASSOCIATE_ServiceParameter
The A-ASSOCIATE request primitive received from the peer AE
"""
pass
def debug_association_accepted(self, assoc):
"""
Called when an association attempt is accepted by a peer AE, used for
logging/debugging information
Parameters
----------
assoc - pynetdicom.DULparameters.A_ASSOCIATE_ServiceParameter
The Association parameters negotiated between the local and peer AEs
#max_send_pdv = associate_ac_pdu.UserInformationItem[-1].MaximumLengthReceived
#logger.info('Association Accepted (Max Send PDV: %s)' %max_send_pdv)
pynetdicom_version = 'PYNETDICOM_' + ''.join(__version__.split('.'))
# Shorthand
assoc_ac = a_associate_ac
# Needs some cleanup
app_context = assoc_ac.ApplicationContext.__repr__()[1:-1]
pres_contexts = assoc_ac.PresentationContext
user_info = assoc_ac.UserInformation
responding_ae = 'resp. AP Title'
our_max_pdu_length = '[FIXME]'
their_class_uid = 'unknown'
their_version = 'unknown'
if user_info.ImplementationClassUID:
their_class_uid = user_info.ImplementationClassUID
if user_info.ImplementationVersionName:
their_version = user_info.ImplementationVersionName
s = ['Association Parameters Negotiated:']
s.append('====================== BEGIN A-ASSOCIATE-AC ================'
'=====')
s.append('Our Implementation Class UID: %s' %pynetdicom_uid_prefix)
s.append('Our Implementation Version Name: %s' %pynetdicom_version)
s.append('Their Implementation Class UID: %s' %their_class_uid)
s.append('Their Implementation Version Name: %s' %their_version)
s.append('Application Context Name: %s' %app_context)
s.append('Calling Application Name: %s' %assoc_ac.CallingAETitle)
s.append('Called Application Name: %s' %assoc_ac.CalledAETitle)
#s.append('Responding Application Name: %s' %responding_ae)
s.append('Our Max PDU Receive Size: %s' %our_max_pdu_length)
s.append('Their Max PDU Receive Size: %s' %user_info.MaximumLength)
s.append('Presentation Contexts:')
for item in pres_contexts:
context_id = item.PresentationContextID
s.append(' Context ID: %s (%s)' %(item.ID, item.Result))
s.append(' Abstract Syntax: =%s' %'FIXME')
s.append(' Proposed SCP/SCU Role: %s' %'[FIXME]')
if item.ResultReason == 0:
s.append(' Accepted SCP/SCU Role: %s' %'[FIXME]')
s.append(' Accepted Transfer Syntax: =%s'
%item.TransferSyntax)
ext_nego = 'None'
#if assoc_ac.UserInformation.ExtendedNegotiation is not None:
# ext_nego = 'Yes'
s.append('Requested Extended Negotiation: %s' %'[FIXME]')
s.append('Accepted Extended Negotiation: %s' %ext_nego)
usr_id = 'None'
if assoc_ac.UserInformation.UserIdentity is not None:
usr_id = 'Yes'
s.append('Requested User Identity Negotiation: %s' %'[FIXME]')
s.append('User Identity Negotiation Response: %s' %usr_id)
s.append('======================= END A-ASSOCIATE-AC =================='
'====')
for line in s:
logger.debug(line)
"""
pass
def debug_association_rejected(self, assoc_primitive):
"""
Called when an association attempt is rejected by a peer AE, used for
logging/debugging information
Parameters
----------
assoc_primitive - pynetdicom.primitives.A_ASSOCIATE
The A-ASSOCIATE primitive instance (RJ) received from the peer AE
"""
# See PS3.8 Section 7.1.1.9 but mainly Section 9.3.4 and Table 9-21
# for information on the result and diagnostic information
source = assoc_primitive.result_source
result = assoc_primitive.result
reason = assoc_primitive.diagnostic
source_str = { 1 : 'Service User',
2 : 'Service Provider (ACSE)',
3 : 'Service Provider (Presentation)'}
reason_str = [{ 1 : 'No reason given',
2 : 'Application context name not supported',
3 : 'Calling AE title not recognised',
4 : 'Reserved',
5 : 'Reserved',
6 : 'Reserved',
7 : 'Called AE title not recognised',
8 : 'Reserved',
9 : 'Reserved',
10 : 'Reserved'},
{ 1 : 'No reason given',
2 : 'Protocol version not supported'},
{ 0 : 'Reserved',
1 : 'Temporary congestion',
2 : 'Local limit exceeded',
3 : 'Reserved',
4 : 'Reserved',
5 : 'Reserved',
6 : 'Reserved',
7 : 'Reserved'}]
result_str = { 1 : 'Rejected Permanent',
2 : 'Rejected Transient'}
logger.error('Association Rejected:')
logger.error('Result: %s, Source: %s' %(result_str[result], source_str[source]))
logger.error('Reason: %s' %reason_str[source - 1][reason])
def debug_association_released(self):
logger.info('Association Released')
def debug_association_aborted(self, abort_primitive=None):
logger.error('Association Aborted')
|
#!/usr/bin/env python
"""
helpers.py
"""
import sys
import torch
import random
import numpy as np
import scipy.sparse as sp
def set_seeds(seed):
_ = random.seed(seed + 1)
_ = np.random.seed(seed + 2)
_ = torch.manual_seed(seed + 3)
_ = torch.cuda.manual_seed(seed + 4)
class SimpleEarlyStopping:
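"""Early stopping helper tracking the best accuracy and negated loss.
A sketch of the behaviour implemented below: patience is decremented only
when an epoch is worse than both the best accuracy and the best negative
loss seen so far; any improvement resets it, and ``should_stop`` returns
True once patience reaches zero."""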
def __init__(self, model, patience=100, store_weights=False):
self.model = model
self.patience = patience
self.max_patience = patience
self.store_weights = store_weights
self.record = None
self.best_acc = -np.inf
self.best_nloss = -np.inf
self.best_epoch = -1
self.best_epoch_score = (-np.inf, -np.inf)
def should_stop(self, acc, loss, epoch, record=None):
nloss = -1 * loss
if (acc < self.best_acc) and (nloss < self.best_nloss):
self.patience -= 1
return self.patience == 0
self.patience = self.max_patience
self.best_acc = max(acc, self.best_acc)
self.best_nloss = max(nloss, self.best_nloss)
if (acc, nloss) > self.best_epoch_score:
self.best_epoch = epoch
self.best_epoch_score = (acc, nloss)
if self.store_weights:
self.best_state = {k:v.cpu() for k,v in self.model.state_dict().items()}
if record:
self.record = record
return False
def calc_A_hat(adj, mode):
A = adj + sp.eye(adj.shape[0])
D = np.sum(A, axis=1).A1
if mode == 'sym':
D_inv = sp.diags(1 / np.sqrt(D))
return D_inv @ A @ D_inv
elif mode == 'rw':
    D_inv = sp.diags(1 / D)
    return D_inv @ A
else:
    raise ValueError("calc_A_hat() mode must be 'sym' or 'rw'")
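# Closed-form personalized PageRank (a sketch of what compute_ppr evaluates,
# assuming the usual PPNP-style formulation): PPR = alpha * (I - (1 - alpha) * A_hat)^-1,
# where A_hat is the normalized adjacency returned by calc_A_hat above.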
def compute_ppr(adj, alpha, mode='sym'):
A_hat = calc_A_hat(adj, mode=mode)
A_inner = sp.eye(adj.shape[0]) - (1 - alpha) * A_hat
return alpha * np.linalg.inv(A_inner.toarray()) |
#
# @lc app=leetcode.cn id=190 lang=python3
#
# [190] reverse-bits
#
# @lc code=start
class Solution:
    def reverseBits(self, n: int) -> int:
        result = 0
        for _ in range(32):
            result = (result << 1) | (n & 1)
            n >>= 1
        return result
# @lc code=end |
#!/usr/bin/env python
print "Content-type:text/html"
print
print "<html><head><title> Test URL Encoding </title></head><body>"
print "<a href=\"http://localhost:8888/test_urlencode.py?first=Jack&last=Trades\">Link</a>"
print "</body></html>" |
from simple_playgrounds.engine import Engine
from simple_playgrounds.playground.layouts import SingleRoom
from simple_playgrounds.element.elements.conditioning import ColorChanging, FlipReward
from simple_playgrounds.element.elements.activable import RewardOnActivation
from simple_playgrounds.common.timer import PeriodicTimer
def test_color_changing(base_forward_interactive_agent_external):
playground = SingleRoom(size=(200, 200))
agent = base_forward_interactive_agent_external
color_1 = (100, 100, 0)
color_2 = (0, 100, 100)
color_3 = (20, 200, 2)
colors = [color_1, color_2, color_3]
durations = [3, 4, 5]
elem = ColorChanging(textures=colors)
playground.add_agent(agent, ((80, 100), 0))
playground.add_element(elem, ((80 + agent.base_platform.radius + elem.radius + 2, 100), 0))
timer = PeriodicTimer(durations=durations)
playground.add_timer(timer, elem)
engine = Engine(playground, time_limit=100)
while engine.game_on:
index_color = 0
for d in durations:
for _ in range(d):
assert elem.texture.base_color == colors[index_color]
engine.step()
index_color += 1
assert elem.texture.base_color == colors[0]
def test_reward_changer(reward, base_forward_interactive_agent_external):
playground = SingleRoom(size=(200, 200))
agent = base_forward_interactive_agent_external
color_1 = (100, 100, 0)
color_2 = (0, 100, 100)
colors = [color_1, color_2]
durations = [3, 4]
roa = RewardOnActivation(reward=reward)
change = FlipReward(textures=colors, element_changed=roa)
timer = PeriodicTimer(durations=durations)
playground.add_agent(agent, ((80, 100), 0))
playground.add_element(roa, ((80 + agent.base_platform.radius + roa.radius + 2, 100), 0))
playground.add_element(change, ((40, 40), 0))
playground.add_timer(timer, change)
engine = Engine(playground, time_limit=100)
actions = {agent: {agent.activate: 1}}
index_color = 0
while engine.game_on:
sign = 1
for d in durations:
for t in range(d-1):
engine.step(actions)
assert change.texture.base_color == colors[index_color]
assert agent.reward == sign * reward
sign *= -1
index_color = (index_color + 1) % len(colors)
engine.step(actions)
assert change.texture.base_color == colors[index_color]
assert agent.reward == sign * reward
|
import gc
import random
import logging
import time
import sys
import os
import pandas as pd
import numpy as np
import config.constants as constants
import viz.plot_util as plot_util
def set_seed(seed=0):
random.seed(seed)
np.random.seed(seed)
def trigger_gc(logger):
"""
Trigger GC
"""
logger.info(f"Number of object collected [{gc.collect()}]")
def set_timezone():
"""
Sets the time zone to Kolkata.
"""
os.environ["TZ"] = "Asia/Calcutta"
time.tzset()
def get_logger(logger_name, model_number=None, run_id=None, path=None):
"""
Returns a logger with Stream & File Handler.
File Handler is created only if model_number, run_id, path
are not None.
https://realpython.com/python-logging/
https://www.kaggle.com/ogrellier/user-level-lightgbm-lb-1-4480
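Example (illustrative values; the log directory must already exist):
logger = get_logger('train', model_number='M001', run_id='run_01', path='logs')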
"""
FORMAT = "[%(levelname)s]%(asctime)s:%(name)s:%(message)s"
logger = logging.getLogger(logger_name)
logger.setLevel(logging.DEBUG)
s_handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter(FORMAT)
s_handler.setFormatter(formatter)
logger.addHandler(s_handler)
if all([model_number, run_id, path]):
f_handler = logging.FileHandler(f'{path}/{model_number}_{run_id}.log')
f_handler.setFormatter(formatter)
logger.addHandler(f_handler)
return logger
def update_tracking(run_id,
key,
value,
csv_file=constants.TRACKING_FILE,
is_integer=False,
no_of_digits=None,
drop_incomplete_rows=False):
"""
Function to update the tracking CSV with information about the model
https://github.com/RobMulla/kaggle-ieee-fraud-detection/blob/master/scripts/M001.py#L98
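Example (illustrative run id and metric name):
update_tracking('M001_run01', 'cv_score', 0.87654, no_of_digits=4)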
"""
try:
df = pd.read_csv(csv_file, index_col=[0])
df['lb_score'] = 0
# If the file exists, drop rows (without final results)
# for previous runs which has been stopped inbetween.
if drop_incomplete_rows and ('oof_score' in df.columns):
df = df.loc[~df['oof_score'].isna()]
except FileNotFoundError:
df = pd.DataFrame()
if is_integer:
value = round(value)
elif no_of_digits is not None:
value = round(value, no_of_digits)
# Model number is index
df.loc[run_id, key] = value
df.to_csv(csv_file)
def save_file(logger, df, dir_name, file_name):
"""
Common method to save submission, OOF files etc.
"""
logger.info(f'Saving {dir_name}/{file_name}')
df.to_csv(f'{dir_name}/{file_name}', index=False)
def save_artifacts(logger, is_test, is_plot_fi,
result_dict,
submission_df,
model_number,
run_id, sub_dir, oof_dir, fi_dir, fi_fig_dir):
"""
Save the submission, OOF predictions, feature importance values
and plots to different directories.
"""
score = result_dict['avg_cv_scores']
if is_test is False:
# Save submission file
submission_df.target = result_dict['prediction']
save_file(logger,
submission_df,
sub_dir,
f'sub_{model_number}_{run_id}_{score:.4f}.csv')
# Save OOF
oof_df = pd.DataFrame(result_dict['yoof'])
save_file(logger,
oof_df,
oof_dir,
f'oof_{model_number}_{run_id}_{score:.4f}.csv')
if is_plot_fi is True:
# Feature Importance
feature_importance_df = result_dict['feature_importance']
save_file(logger,
feature_importance_df,
fi_dir,
f'fi_{model_number}_{run_id}_{score:.4f}.csv')
# Save the plot
best_features = result_dict['best_features']
plot_util.save_feature_importance_as_fig(
best_features, fi_fig_dir,
f'fi_{model_number}_{run_id}_{score:.4f}.png')
|
# Object Classification with TensorRT using a pretrained EfficientNetB2 CNN on ImageNet.
# Please see References.md in this repository.
# This script will take images from camera and predict the class of object.
# Please refer to the LICENSE file in this repository.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# coding: utf-8
#Import Packages
import cv2 as cv
import numpy as np
from onnx_helper import ONNXClassifierWrapper,convert_onnx_to_engine
import torch
import json
from PIL import Image
from torchvision import transforms
#Set constants
BATCH_SIZE=1
N_CLASSES=1000
PRECISION=np.float32
image_size=224
TRT_PATH='models/efficientnetb2_batch1.trt'
#Load TensorRT Engine
print("Loading TRT Engine")
trt_model=ONNXClassifierWrapper(TRT_PATH,[BATCH_SIZE,N_CLASSES],target_dtype=PRECISION)
print("Loaded TRT Engine!!")
#Load Labels
print("Loading classification labels")
labels_map=json.load(open('labels_map.txt'))
labels_map=[labels_map[str(i)] for i in range(1000)]
# Function for Inferencing
def infer_objects(image):
img=cv.cvtColor(image,cv.COLOR_BGR2RGB)
img=Image.fromarray(img)
#img=Image.open(image)
image_size=224
tfms=transforms.Compose([transforms.Resize(image_size),
transforms.CenterCrop(image_size),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
#Pytorch tensor transform.
img=tfms(img)
img=img.unsqueeze(0)
#Convert to numpy.
trt_input=img.numpy()
#Convert input to shape required by TensorRT [batch,W,H,Channels] ->[1,224,224,3]
trt_input=trt_input.transpose((0,3,2, 1))
#Infer
predictions=trt_model.predict(trt_input)
#Convert numpy to Torch tensor to get the topK predictions.
predt=torch.from_numpy(predictions)
preds=torch.topk(predt,k=1).indices.squeeze(0).tolist()
for idx in preds:
label=labels_map[idx]
prob=torch.softmax(predt,dim=1)[0,idx].item()
return prob,label
#Starting the camera. Please modify the cv.VideoCapture(x)
# x is the camera number.
print("Start Classifying...")
capture=cv.VideoCapture(1) #USB camera number 1.
print("Camera started...")
#Loops until Enter Key is pressed on the keyboard
while (True):
ret, img=capture.read()
prob,label=infer_objects(img)
cv.putText(img, label, (int(50), int(30)), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 255), 2)
cv.imshow('Image',img)
if cv.waitKey(1) == 13: #13 is the Enter Key
break
print("Releasing Image Window !")
capture.release()
cv.destroyAllWindows()
print("Exiting !.")
exit()
|
import RPi.GPIO as GPIO
import random
import time
# import cv2 as cv
import numpy as np
rPin =26
gPin =19
bPin =13
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(rPin, GPIO.OUT)
GPIO.setup(gPin, GPIO.OUT)
GPIO.setup(bPin, GPIO.OUT)
GPIO.output(rPin, GPIO.LOW)
GPIO.output(gPin, GPIO.LOW)
GPIO.output(bPin, GPIO.LOW)
red= GPIO.PWM(rPin, 100)
green= GPIO.PWM(gPin, 100)
blue= GPIO.PWM(bPin, 100)
red.start(0)
green.start(0)
blue.start(0)
def changeColor(r_value, g_value, b_value):
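# Duty-cycle values are expected in the 0-100 range; each channel's
# brightness scales with its duty cycle on the 100 Hz PWM set up above.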
red.ChangeDutyCycle(r_value)
green.ChangeDutyCycle(g_value)
blue.ChangeDutyCycle(b_value)
r, g , b =0,0,0
while True:
# img = np.zeros((200,200, 3), dtype=np.uint8)
# cv.imshow('img', img)
# key =cv.waitKey(1)
for i in range(100):
g = (100 - i)
color = (0, g, i)
print(g)
changeColor(i, g,i)
time.sleep(0.1)
for i in range(100, 1, -1):
g = (100 - i)
print(g)
color = (0, g, i)
changeColor(i, g,i)
time.sleep(0.1)
|