Dataset schema (column types and observed value ranges, as reported by the dataset viewer):

| column | dtype | min | max |
| --- | --- | --- | --- |
| repo | string (length) | 1 | 99 |
| file | string (length) | 13 | 215 |
| code | string (length) | 12 | 59.2M |
| file_length | int64 | 12 | 59.2M |
| avg_line_length | float64 | 3.82 | 1.48M |
| max_line_length | int64 | 12 | 2.51M |
| extension_type | string (1 distinct value) | n/a | n/a |
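If the split is stored as line-delimited JSON with these columns, a preview like the one below can be reproduced with the Hugging Face `datasets` library. This is a minimal sketch under that assumption; the file name `code_dataset.jsonl` and the filter thresholds are placeholders, and only the column names come from the schema above.

```python
# Minimal loading sketch (assumption: the split is JSON Lines with the
# columns listed in the schema above; the file name is a placeholder).
from datasets import load_dataset

ds = load_dataset("json", data_files="code_dataset.jsonl", split="train")

# Example filter using the per-file statistics columns: keep Python files
# that are reasonably small and have no extremely long lines.
ds = ds.filter(
    lambda row: row["extension_type"] == "py"
    and row["file_length"] < 50_000
    and row["max_line_length"] < 200
)

for row in ds.select(range(min(3, len(ds)))):
    print(row["repo"], row["file"], row["file_length"])
```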
Sample rows (one row per file; the `code` column is truncated in this preview):

| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
| --- | --- | --- | --- | --- | --- | --- |
| NMTGMinor | NMTGMinor-master/onmt/legacy/UniversalTransformer/Layers.py | import math import torch import torch.nn as nn from torch.autograd import Variable import torch.nn.init as init import torch.nn.utils.weight_norm as WeightNorm import onmt import torch.nn.functional as F from onmt.modules.bottle import Bottle from onmt.models.transformer_layers import XavierLinear, MultiHeadAttention... | 11,195 | 37.740484 | 156 | py |
| NMTGMinor | NMTGMinor-master/onmt/legacy/UniversalTransformer/Models.py | import numpy as np import torch, math import torch.nn as nn from onmt.models.transformer_layers import PositionalEncoding from onmt.models.transformer_layers import EncoderLayer, DecoderLayer from onmt.legacy.UniversalTransformer.Layers import UniversalDecoderLayer, UniversalEncoderLayer #~ from onmt.modules.ParallelTr... | 13,602 | 38.428986 | 178 | py |
| NMTGMinor | NMTGMinor-master/onmt/legacy/ParallelTransformer/Layers.py | import math import torch import torch.nn as nn from torch.autograd import Variable import torch.nn.init as init import torch.nn.utils.weight_norm as WeightNorm import onmt import torch.nn.functional as F from onmt.modules.bottle import Bottle from onmt.models.transformer_layers import XavierLinear, MultiHeadAttention... | 9,252 | 40.124444 | 123 | py |
| NMTGMinor | NMTGMinor-master/onmt/legacy/ParallelTransformer/Models.py | import numpy as np import torch, math import torch.nn as nn from onmt.models.transformer_layers import PositionalEncoding from onmt.models.transformer_layers import EncoderLayer, DecoderLayer from onmt.legacy.ParallelTransformer.Layers import ParallelEncoderLayer from onmt.modules.base_seq2seq import NMTModel, Reconstr... | 25,098 | 39.417069 | 175 | py |
| NMTGMinor | NMTGMinor-master/onmt/legacy/old_models/distance_transformer_layers.py | import torch import torch.nn as nn import onmt from onmt.models.transformer_layers import PrePostProcessing, MultiHeadAttention, Linear from onmt.modules.relative_attention import RelPartialLearnableMultiHeadAttn from onmt.utils import flip from onmt.modules.bottle import Bottle from onmt.modules.linear import XavierL... | 9,073 | 40.43379 | 116 | py |
| NMTGMinor | NMTGMinor-master/onmt/legacy/old_models/relative_unified_transformer.py | import torch import torch.nn as nn import torch.nn.functional as F from onmt.models.transformer_layers import PositionalEncoding, PrePostProcessing from onmt.models.transformer_layers import EncoderLayer, DecoderLayer from onmt.models.transformers import TransformerEncoder, TransformerDecoder, TransformerDecodingState ... | 24,270 | 36.982786 | 116 | py |
| NMTGMinor | NMTGMinor-master/onmt/legacy/old_models/memory_transformer.py | import torch import torch.nn as nn import torch.nn.functional as F from onmt.modules.relative_attention import RelPartialLearnableMultiHeadAttn from onmt.models.transformer_layers import PositionalEncoding, PrePostProcessing from onmt.models.transformer_layers import EncoderLayer, DecoderLayer from onmt.models.transfor... | 32,849 | 37.06489 | 120 | py |
| NMTGMinor | NMTGMinor-master/onmt/legacy/old_models/reformer.py | # coding=utf-8 # Copyright 2020 The Trax Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at... | 5,517 | 41.446154 | 122 | py |
| NMTGMinor | NMTGMinor-master/onmt/legacy/old_models/relative_universal_transformer_layers.py | import torch import torch.nn as nn import onmt from onmt.models.transformer_layers import PrePostProcessing, MultiHeadAttention, Linear from onmt.modules.relative_attention import RelPartialLearnableMultiHeadAttn from onmt.modules.optimized.relative_self_attention import RelativeSelfMultiheadAttn from onmt.utils impor... | 10,170 | 45.231818 | 120 | py |
| NMTGMinor | NMTGMinor-master/onmt/legacy/old_models/distance_transformer.py | import torch import torch.nn as nn from onmt.models.transformer_layers import PositionalEncoding, PrePostProcessing from onmt.models.transformer_layers import EncoderLayer, DecoderLayer from onmt.models.transformers import TransformerEncoder, TransformerDecoder, Transformer, TransformerDecodingState import onmt from on... | 30,203 | 41.721358 | 127 | py |
| NMTGMinor | NMTGMinor-master/onmt/legacy/old_models/unified_transformer.py | import torch import torch.nn as nn from onmt.models.transformer_layers import PositionalEncoding, PrePostProcessing from onmt.models.transformers import TransformerEncoder, TransformerDecoder, TransformerDecodingState import onmt from onmt.modules.dropout import embedded_dropout from onmt.models.transformer_layers impo... | 19,467 | 40.866667 | 119 | py |
| NMTGMinor | NMTGMinor-master/onmt/legacy/old_models/universal_transformer.py | import torch import torch.nn as nn from onmt.models.transformer_layers import PositionalEncoding, PrePostProcessing from onmt.models.transformer_layers import EncoderLayer, DecoderLayer from onmt.models.transformers import TransformerEncoder, TransformerDecoder, Transformer, TransformerDecodingState import onmt from on... | 14,946 | 42.074928 | 120 | py |
| NMTGMinor | NMTGMinor-master/onmt/legacy/old_models/universal_transformer_layers.py | import math import torch import torch.nn as nn from torch.autograd import Variable import torch.nn.init as init import torch.nn.utils.weight_norm as WeightNorm import onmt import torch.nn.functional as F from onmt.modules.bottle import Bottle from onmt.modules.static_dropout import StaticDropout from onmt.modules.linea... | 4,861 | 33.48227 | 87 | py |
| NMTGMinor | NMTGMinor-master/onmt/legacy/old_models/relative_universal_transformer.py | import torch import torch.nn as nn from onmt.models.transformer_layers import PositionalEncoding, PrePostProcessing from onmt.models.transformer_layers import EncoderLayer, DecoderLayer from onmt.models.transformers import TransformerEncoder, TransformerDecoder, Transformer, TransformerDecodingState import onmt from on... | 16,566 | 41.155216 | 120 | py |
| NMTGMinor | NMTGMinor-master/onmt/legacy/FCTransformer/Layers.py | import math import torch import torch.nn as nn from torch.autograd import Variable import torch.nn.init as init import torch.nn.utils.weight_norm as WeightNorm import onmt import torch.nn.functional as F from onmt.modules.bottle import Bottle from onmt.models.transformer_layers import XavierLinear, MultiHeadAttention... | 21,054 | 38.801512 | 173 | py |
| NMTGMinor | NMTGMinor-master/onmt/legacy/FCTransformer/Models.py | import numpy as np import torch, math import torch.nn as nn from onmt.models.transformer_layers import PositionalEncoding from onmt.legacy.FCTransformer.Layers import FCTEncoderLayer, FCTDecoderLayer from onmt.modules.base_seq2seq import NMTModel, Reconstructor import onmt from onmt.modules.dropout import embedded_drop... | 12,177 | 38.411003 | 170 | py |
| NMTGMinor | NMTGMinor-master/onmt/legacy/LSTMLM/Models.py | import numpy as np import torch, math import torch.nn as nn from onmt.models.transformers import TransformerDecodingState from onmt.modules.base_seq2seq import NMTModel, Reconstructor, DecoderState import onmt from onmt.modules.dropout import embedded_dropout #~ from onmt.modules.Checkpoint import checkpoint from torch... | 9,163 | 29.751678 | 113 | py |
| NMTGMinor | NMTGMinor-master/onmt/legacy/FusionNetwork/Models.py | import numpy as np import torch, math import torch.nn as nn from onmt.modules.base_seq2seq import DecoderState from onmt.models.transformers import TransformerDecodingState from collections import defaultdict import torch.nn.functional as F class FusionNetwork(nn.Module): """Main model in 'Attention is all you ne... | 6,887 | 33.964467 | 109 | py |
| NMTGMinor | NMTGMinor-master/onmt/legacy/TransformerLM/Layers.py | import math import torch import torch.nn as nn import torch.nn.init as init import onmt import torch.nn.functional as F from onmt.models.transformer_layers import PrePostProcessing, MultiHeadAttention, Bottle, FeedForward class LMDecoderLayer(nn.Module): """Wraps multi-head attentions and position-wise feed fo... | 3,338 | 32.059406 | 113 | py |
| NMTGMinor | NMTGMinor-master/onmt/legacy/TransformerLM/Models.py | import numpy as np import torch, math import torch.nn as nn from onmt.models.transformers import TransformerDecodingState from onmt.modules.base_seq2seq import NMTModel, Reconstructor, DecoderState import onmt from onmt.modules.dropout import embedded_dropout #~ from onmt.modules.Checkpoint import checkpoint from torch... | 8,777 | 32.632184 | 112 | py |
| pixyz | pixyz-main/setup.py | import io import os import re from setuptools import setup, find_packages def read(*names, **kwargs): with io.open( os.path.join(os.path.dirname(__file__), *names), encoding=kwargs.get("encoding", "utf8") ) as fp: return fp.read() def find_version(*file_paths): version_file = rea... | 2,028 | 26.053333 | 68 | py |
| pixyz | pixyz-main/pixyz/utils.py | import functools import torch import sympy from IPython.display import Math import pixyz _EPSILON = 1e-07 _CACHE_MAXSIZE = 2 * 10 def set_epsilon(eps): """Set a `epsilon` parameter. Parameters ---------- eps : int or float Returns ------- Examples -------- >>> from unittest imp... | 10,567 | 24.965602 | 111 | py |
| pixyz | pixyz-main/pixyz/distributions/distributions.py | from __future__ import print_function import torch import re import networkx as nx from torch import nn from ..utils import get_dict_values, replace_dict_keys, delete_dict_values,\ tolist, sum_samples, convert_latex_name, lru_cache_for_sample_dict from ..losses import LogProb, Prob def _make_prob_text(dist_name,... | 70,384 | 36.800752 | 137 | py |
| pixyz | pixyz-main/pixyz/distributions/exponential_distributions.py | import torch from torch.distributions import Normal as NormalTorch from torch.distributions import Bernoulli as BernoulliTorch from torch.distributions import RelaxedBernoulli as RelaxedBernoulliTorch from torch.distributions import RelaxedOneHotCategorical as RelaxedOneHotCategoricalTorch from torch.distributions.one_... | 14,788 | 33.154734 | 117 | py |
| pixyz | pixyz-main/pixyz/distributions/poe.py | from __future__ import print_function import torch from torch import nn from ..utils import tolist, get_dict_values from ..distributions import Normal class ProductOfNormal(Normal): r"""Product of normal distributions. .. math:: p(z\|x,y) \propto p(z)p(z\|x)p(z\|y) In this models, :math:`p(z\|x)` and... | 16,619 | 38.856115 | 118 | py |
| pixyz | pixyz-main/pixyz/distributions/mixture_distributions.py | import torch from torch import nn from ..distributions.distributions import Distribution from ..utils import convert_latex_name class MixtureModel(Distribution): r"""Mixture models. .. math:: p(x) = \sum_i p(x\|z=i)p(z=i) Examples -------- >>> from pixyz.distributions import Normal, Cat... | 8,520 | 32.415686 | 116 | py |
| pixyz | pixyz-main/pixyz/distributions/custom_distributions.py | from ..utils import get_dict_values, sum_samples from .distributions import Distribution class CustomProb(Distribution): """This distribution is constructed by user-defined probability density/mass function. Note that this distribution cannot perform sampling. Examples -------- >>> import torch ... | 2,210 | 29.708333 | 90 | py |
| pixyz | pixyz-main/pixyz/distributions/moe.py | from __future__ import print_function import torch from torch import nn import numpy as np from ..utils import tolist, get_dict_values from ..distributions import Normal class MixtureOfNormal(Normal): r"""Mixture of normal distributions. .. math:: p(z\|x,y) = p(z\|x) + p(z\|y) In this models, :math:... | 5,758 | 32.876471 | 162 | py |
| pixyz | pixyz-main/pixyz/distributions/special_distributions.py | from __future__ import print_function from .distributions import Distribution class Deterministic(Distribution): """ Deterministic distribution (or degeneration distribution) Examples -------- >>> import torch >>> class Generator(Deterministic): ... def __init__(self): ... ... | 3,517 | 27.836066 | 98 | py |
| pixyz | pixyz-main/pixyz/distributions/flow_distribution.py | import torch from ..distributions import Distribution from ..utils import get_dict_values class TransformedDistribution(Distribution): r""" Convert flow transformations to distributions. .. math:: p(z=f_{flow}(x)), where :math:`x \sim p_{prior}(x)`. Once initializing, it can be handle... | 9,870 | 28.912121 | 119 | py |
| pixyz | pixyz-main/pixyz/flows/conv.py | import torch from torch import nn from torch.nn import functional as F import numpy as np import scipy as sp from .flows import Flow class ChannelConv(Flow): """ Invertible 1 × 1 convolution. Notes ----- This is implemented with reference to the following code. https://github.com/chaiyujin/... | 3,370 | 36.455556 | 115 | py |
| pixyz | pixyz-main/pixyz/flows/normalizations.py | import torch from torch import nn import numpy as np from .flows import Flow from ..utils import epsilon class BatchNorm1d(Flow): """ A batch normalization with the inverse transformation. Notes ----- This is implemented with reference to the following code. https://github.com/ikostrikov/pyt... | 5,961 | 29.731959 | 96 | py |
| pixyz | pixyz-main/pixyz/flows/coupling.py | import torch import numpy as np from .flows import Flow class AffineCoupling(Flow): r""" Affine coupling layer .. math:: :nowrap: \begin{eqnarray*} \mathbf{y}_{1:d} &=& \mathbf{x}_{1:d} \\ \mathbf{y}_{d+1:D} &=& \mathbf{x}_{d+1:D} \odot \exp(s(\mathbf{x}_{1:d})+t(\mathbf... | 6,754 | 27.263598 | 110 | py |
| pixyz | pixyz-main/pixyz/flows/normalizing_flows.py | import math import torch from torch import nn from torch.nn import functional as F from ..utils import epsilon from .flows import Flow class PlanarFlow(Flow): r""" Planar flow. .. math:: f(\mathbf{x}) = \mathbf{x} + \mathbf{u} h( \mathbf{w}^T \mathbf{x} + \mathbf{b}) """ def __init__(s... | 2,136 | 29.971014 | 95 | py |
| pixyz | pixyz-main/pixyz/flows/flows.py | from torch import nn class Flow(nn.Module): """Flow class. In Pixyz, all flows are required to inherit this class.""" def __init__(self, in_features): """ Parameters ---------- in_features : int Size of input data. """ super().__init__() s... | 2,914 | 23.291667 | 111 | py |
| pixyz | pixyz-main/pixyz/flows/operations.py | import torch import torch.nn.functional as F import numpy as np from .flows import Flow from ..utils import sum_samples class Squeeze(Flow): """ Squeeze operation. c * s * s -> 4c * s/2 * s/2 Examples -------- >>> import torch >>> a = torch.tensor([i+1 for i in range(16)]).view(1,1,4,4)... | 6,742 | 24.541667 | 98 | py |
| pixyz | pixyz-main/pixyz/models/vi.py | from torch import optim from ..models.model import Model from ..utils import tolist from ..losses import ELBO class VI(Model): """ Variational Inference (Amortized inference) The ELBO for given distributions (p, approximate_dist) is set as the loss class of this model. """ def __init__(self, p,... | 1,794 | 31.636364 | 98 | py |
| pixyz | pixyz-main/pixyz/models/model.py | from torch import optim, nn import torch from torch.nn.utils import clip_grad_norm_, clip_grad_value_ import re from ..utils import tolist from ..distributions.distributions import Distribution class Model(object): """ This class is for training and testing a loss class. It requires a defined loss class,... | 6,465 | 29.790476 | 122 | py |
| pixyz | pixyz-main/pixyz/models/vae.py | from torch import optim from ..models.model import Model from ..utils import tolist class VAE(Model): """ Variational Autoencoder. In VAE class, reconstruction loss on given distributions (encoder and decoder) is set as the default loss class. However, if you want to add additional terms, e.g., the ... | 2,227 | 34.935484 | 116 | py |
| pixyz | pixyz-main/pixyz/models/gan.py | from torch import optim from ..models.model import Model from ..losses import AdversarialJensenShannon from ..distributions import EmpiricalDistribution class GAN(Model): r""" Generative Adversarial Network (Adversarial) Jensen-Shannon divergence between given distributions (p_data, p) is set as the... | 5,491 | 30.745665 | 91 | py |
| pixyz | pixyz-main/pixyz/models/ml.py | from torch import optim from ..models.model import Model from ..utils import tolist class ML(Model): """ Maximum Likelihood (log-likelihood) The negative log-likelihood of a given distribution (p) is set as the loss class of this model. """ def __init__(self, p, other_distributi... | 1,624 | 30.862745 | 99 | py |
| pixyz | pixyz-main/pixyz/layers/resnet.py | import torch import torch.nn as nn import torch.nn.functional as F from .norm_util import WNConv2d class ResidualBlock(nn.Module): """ResNet basic block with weight norm.""" def __init__(self, in_channels, out_channels): super().__init__() self.in_norm = nn.BatchNorm2d(in_channels) s... | 2,757 | 33.475 | 109 | py |
| pixyz | pixyz-main/pixyz/layers/norm_util.py | import torch.nn as nn class WNConv2d(nn.Module): """Weight-normalized 2d convolution. Args: in_channels (int): Number of channels in the input. out_channels (int): Number of channels in the output. kernel_size (int): Side length of each convolutional kernel. padding (int): Padd... | 746 | 32.954545 | 90 | py |
| pixyz | pixyz-main/pixyz/losses/losses.py | import abc import sympy import torch from torch.nn import DataParallel from torch.nn.parallel import DistributedDataParallel import numbers from copy import deepcopy from ..utils import get_dict_values class Loss(torch.nn.Module, metaclass=abc.ABCMeta): """Loss class. In Pixyz, all loss classes are required to ... | 28,918 | 29.83049 | 128 | py |
| pixyz | pixyz-main/pixyz/losses/mmd.py | import torch import sympy from .losses import Divergence from ..utils import get_dict_values class MMD(Divergence): r""" The Maximum Mean Discrepancy (MMD). .. math:: D_{MMD^2}[p\|\|q] = \mathbb{E}_{p(x), p(x')}[k(x, x')] + \mathbb{E}_{q(x), q(x')}[k(x, x')] - 2\mathbb{E}_{p(x), q(x... | 3,976 | 31.598361 | 109 | py |
| pixyz | pixyz-main/pixyz/losses/adversarial_loss.py | import sympy from torch import optim, nn import torch from .losses import Divergence from ..utils import get_dict_values, detach_dict class AdversarialLoss(Divergence): def __init__(self, p, q, discriminator, optimizer=optim.Adam, optimizer_params={}): if set(p.var) != set(q.var): raise ValueE... | 19,715 | 33.650264 | 116 | py |
| pixyz | pixyz-main/pixyz/losses/divergences.py | import sympy import torch from torch.distributions import kl_divergence from ..utils import get_dict_values from .losses import Divergence def KullbackLeibler(p, q, dim=None, analytical=True, sample_shape=torch.Size([1])): r""" Kullback-Leibler divergence (analytical or Monte Carlo Apploximation). .. ma... | 4,222 | 37.045045 | 114 | py |
| pixyz | pixyz-main/pixyz/losses/iteration.py | from copy import deepcopy import sympy from .losses import Loss from ..utils import get_dict_values, replace_dict_keys class IterativeLoss(Loss): r""" Iterative loss. This class allows implementing an arbitrary model which requires iteration. .. math:: \mathcal{L} = \sum_{t=0}^{T-1}\mathca... | 5,840 | 34.186747 | 113 | py |
| pixyz | pixyz-main/pixyz/losses/wasserstein.py | from torch.nn.modules.distance import PairwiseDistance import sympy from .losses import Divergence from ..utils import get_dict_values class WassersteinDistance(Divergence): r""" Wasserstein distance. .. math:: W(p, q) = \inf_{\Gamma \in \mathcal{P}(x_p\sim p, x_q\sim q)} \mathbb{E}_{(x_p, x_q)... | 2,504 | 32.4 | 119 | py |
| pixyz | pixyz-main/pixyz/losses/pdf.py | import sympy import torch from .losses import Loss class LogProb(Loss): r""" The log probability density/mass function. .. math:: \log p(x) Examples -------- >>> import torch >>> from pixyz.distributions import Normal >>> p = Normal(loc=torch.tensor(0.), scale=torch.tensor(1... | 1,978 | 25.039474 | 115 | py |
| pixyz | pixyz-main/pixyz/losses/elbo.py | import torch def ELBO(p, q, sample_shape=torch.Size([1])): r""" The evidence lower bound (Monte Carlo approximation). .. math:: \mathbb{E}_{q(z\|x)}\left[\log \frac{p(x,z)}{q(z\|x)}\right] \approx \frac{1}{L}\sum_{l=1}^L \log p(x, z_l), \quad \text{where} \quad z_l \sim q(z\|x). Note:... | 985 | 33 | 114 | py |
| pixyz | pixyz-main/pixyz/losses/entropy.py | import sympy import torch from pixyz.losses.losses import Loss from pixyz.losses.divergences import KullbackLeibler def Entropy(p, analytical=True, sample_shape=torch.Size([1])): r""" Entropy (Analytical or Monte Carlo approximation). .. math:: H(p) &= -\mathbb{E}_{p(x)}[\log p(x)] \qquad \text... | 3,375 | 33.44898 | 126 | py |
| pixyz | pixyz-main/tests/test_example_usage.py | # flake8: noqa: F841 from __future__ import print_function # if you want to run all tests (contains below), type> pytest -m "performance or not performance" import pytest import torch import torch.utils.data from torch import nn, optim from torch.nn import functional as F from torch.utils.data import DataLoader import... | 126,799 | 29.517449 | 554 | py |
| pixyz | pixyz-main/tests/distributions/test_distribution.py | import pytest from os.path import join as pjoin import torch from pixyz.distributions import Normal, MixtureModel, Categorical, FactorizedBernoulli from pixyz.utils import lru_cache_for_sample_dict from pixyz.losses import KullbackLeibler from pixyz.models import VAE class TestGraph: def test_rename_atomdist(self... | 10,844 | 41.034884 | 120 | py |
| pixyz | pixyz-main/tests/distributions/test_expornential_distributions.py | import pytest import torch from pixyz.distributions.exponential_distributions import RelaxedBernoulli, Normal class TestNormal: def test_init_with_same_param(self): n = Normal(var=['x'], cond_var=['y'], loc='y', scale='y') result = n.sample({'y': torch.ones(2, 3)}) assert result['x'].shap... | 932 | 34.884615 | 103 | py |
| pixyz | pixyz-main/tests/models/test_model.py | import os import torch import torch.nn as nn from pixyz.distributions import Normal from pixyz.losses import CrossEntropy from pixyz.models import Model class TestModel: def _make_model(self, loc): class Dist(Normal): def __init__(self): super().__init__(loc=loc, scale=1) ... | 1,009 | 24.897436 | 55 | py |
| pixyz | pixyz-main/tests/losses/test_iteration.py | import torch from pixyz.losses import IterativeLoss, Parameter, Expectation from pixyz.distributions import Normal class TestIterativeLoss: def test_print_latex(self): t_max = 3 itr = IterativeLoss(Parameter('t'), max_iter=t_max, timestep_var='t') assert itr.loss_text == r"\sum_{t=0}^{" + ... | 1,412 | 43.15625 | 99 | py |
| pixyz | pixyz-main/tutorial/English/utils.py | from torch.utils.data import Dataset import pickle import numpy as np import torch import torchvision import matplotlib.pyplot as plt def imshow(img_tensors): img = torchvision.utils.make_grid(img_tensors) npimg = img.numpy() plt.figure(figsize=(16, 12)) plt.imshow(np.transpose(npimg, (1, 2, 0))) ... | 2,721 | 29.931818 | 117 | py |
| pixyz | pixyz-main/tutorial/Japanese/utils.py | from torch.utils.data import Dataset import pickle import numpy as np import torch import torchvision import matplotlib.pyplot as plt def imshow(img_tensors): img = torchvision.utils.make_grid(img_tensors) npimg = img.numpy() plt.figure(figsize=(16, 12)) plt.imshow(np.transpose(npimg, (1, 2, 0))) ... | 2,721 | 29.931818 | 117 | py |
| archive-query-log | archive-query-log-main/archive_query_log/results/test/test_facebook_serp_parsing.py | # flake8: noqa # This file is auto-generated by generate_tests.py. from archive_query_log.results.test.test_utils import verify_serp_parsing def test_parse_query_facebook_vanilla_1481832838(): verify_serp_parsing( "https://web.archive.org/web/20161215201358id_/https://www.facebook.com/search/photos/?q=%23... | 6,107 | 41.416667 | 596 | py |
| anticipatr | anticipatr-main/src/main.py | import os import argparse import random import numpy as np import time from pathlib import Path import json import datetime import pickle import torch from torch.utils.data import DataLoader import datasets import utils.misc as utils from datasets import build_dataset from models import build_model from engine import t... | 10,477 | 46.627273 | 208 | py |
| anticipatr | anticipatr-main/src/engine.py | import torch import torch.nn as nn import torch.nn.functional as F from torch import optim import os,sys import copy import numpy as np import math from typing import Iterable import time import utils.misc as utils import datasets from metrics.longfuture_metrics import AnticipationEvaluator def train_one_epoch(epo... | 4,794 | 37.36 | 141 | py |
| anticipatr | anticipatr-main/src/snippet_models/model.py | import torch import torch.nn.functional as F from torch import nn from .transformer import build_transformer from .joiner import build_joiner import numpy as np from utils.misc import accuracy, get_world_size, get_rank,is_dist_avail_and_initialized class MLP(nn.Module): """ Very simple multi-layer perceptron (al... | 6,287 | 35.77193 | 156 | py |
| anticipatr | anticipatr-main/src/snippet_models/position_encoding.py | """ Various positional encodings for the transformer. """ import math import torch from torch import nn class PositionEmbeddingSineIndex(nn.Module): """ Sinusoidal positional encodings based on sequence timestamps """ def __init__(self, num_pos_feats, temperature=10000, normalize=True, scale=... | 1,693 | 33.571429 | 97 | py |
| anticipatr | anticipatr-main/src/snippet_models/transformer.py | """ Transformer class. Copy-paste from torch.nn.Transformer with modifications: * positional encodings are passed in MHattention * extra LN at the end of encoder is removed * decoder returns a stack of activations from all decoding layers """ import copy from typing import Optional, List import torch impo... | 12,622 | 39.458333 | 139 | py |
| anticipatr | anticipatr-main/src/snippet_models/joiner.py | """ Joiner modules. """ from collections import OrderedDict import torch import torch.nn.functional as F import torchvision from torch import nn from torchvision.models._utils import IntermediateLayerGetter from typing import Dict, List from .position_encoding import build_position_encoding class Joiner(nn.Sequentia... | 959 | 29.967742 | 118 | py |
| anticipatr | anticipatr-main/src/models/matcher.py | import torch from scipy.optimize import linear_sum_assignment from torch import nn import numpy as np import utils.segment_utils as segment_utils class GreedyMatcher(nn.Module): """This class computes an assignment between the targets and the predictions of the network For efficiency reasons, the targets don'... | 3,980 | 46.392857 | 152 | py |
| anticipatr | anticipatr-main/src/models/antr.py | import torch import torch.nn.functional as F from torch import nn from .transformer import build_transformer from .joiner import build_joiner from .matcher import build_matcher import snippet_models import numpy as np from utils.misc import accuracy, get_world_size, get_rank,is_dist_avail_and_initialized from utils im... | 15,660 | 46.457576 | 163 | py |
| anticipatr | anticipatr-main/src/models/position_encoding.py | """ Various positional encodings for the transformer. """ import math import torch from torch import nn class PositionEmbeddingSineIndex(nn.Module): def __init__(self, num_pos_feats, temperature=10000, normalize=True, scale=None): super().__init__() self.num_pos_feats = num_pos_feats sel... | 1,604 | 33.891304 | 97 | py |
| anticipatr | anticipatr-main/src/models/transformer.py | """ Transformer class. Code inspired by torch.nn.Transformer with modifications: * positional encodings are passed in MHattention * decoder handles multiple encoders * decoder returns a stack of activations from all decoding layers """ import copy from typing import Optional, List import os, sys import tor... | 14,850 | 43.199405 | 180 | py |
| anticipatr | anticipatr-main/src/models/joiner.py | """ Joiner modules. """ from collections import OrderedDict import torch import torch.nn.functional as F import torchvision from torch import nn from torchvision.models._utils import IntermediateLayerGetter from typing import Dict, List from .position_encoding import build_position_encoding class Joiner(nn.Sequentia... | 873 | 28.133333 | 118 | py |
| anticipatr | anticipatr-main/src/metrics/longfuture_metrics.py | """ Evaluator class for action anticipation benchmarks """ import math import numpy as np import torch import warnings from collections import OrderedDict warnings.filterwarnings("ignore", category=UserWarning) import sklearn.metrics as skmetrics class AnticipationEvaluator(object): def __init__(self,dataset)... | 4,064 | 40.907216 | 151 | py |
| anticipatr | anticipatr-main/src/datasets/bf.py | """ Constructs a dataloader for breakfast dataset for the task of long term action anticipation. """ import numpy as np import lmdb from tqdm import tqdm from torch.utils.data import Dataset import pandas as pd from .baseds_longfuture import SequenceDatasetLongFuture def build_bf_anticipation(args,mode,override_mo... | 1,600 | 33.804348 | 126 | py |
| anticipatr | anticipatr-main/src/datasets/ek.py | """ Constructs a dataloader for Epic-Kitchens-55 for the task of long term action anticipation. """ import numpy as np import lmdb from tqdm import tqdm from torch.utils.data import Dataset import pandas as pd from .baseds_longfuture import SequenceDatasetLongFuture #verbs, nouns,action: 125,3522,3806 #train_many_sh... | 1,834 | 38.042553 | 153 | py |
| anticipatr | anticipatr-main/src/datasets/__init__.py | import torch.utils.data import torchvision def build_dataset(args, mode): if args.dataset == 'ek': from datasets.ek import build_ek_anticipation return build_ek_anticipation(args=args, mode=mode) elif args.dataset == 'bf': from datasets.bf import build_bf_anticipation return bu... | 363 | 27 | 58 | py |
| anticipatr | anticipatr-main/src/datasets/baseds_longfuture.py | import bisect import copy import os import os.path as osp import random from functools import partial import itertools import numpy as np import pickle as pkl import collections from collections import Sequence import tqdm import torch from torch.utils.data import Dataset from torchvision import transforms from PIL i... | 16,321 | 42.641711 | 280 | py |
| anticipatr | anticipatr-main/src/utils/misc.py | """ Misc functions, including distributed helpers. Mostly copy-paste from torchvision references and https://github.com/facebookresearch/detr """ import os import subprocess import time from collections import defaultdict, deque import datetime import pickle from typing import Optional, List import torch import torch... | 13,447 | 30.716981 | 137 | py |
| anticipatr | anticipatr-main/src/utils/segment_utils.py | import torch import numpy as np def segment_iou(target_segment,candidate_segments): tt1 = torch.max(target_segment[0], candidate_segments[:, 0]) tt2 = torch.min(target_segment[1], candidate_segments[:, 1]) # Intersection including Non-negative overlap score. segments_intersection = (tt2 - tt1).clamp(mi... | 1,137 | 33.484848 | 139 | py |
| anticipatr | anticipatr-main/pretraining/main_pretraining.py | import os import argparse import random import numpy as np import time from pathlib import Path import json import datetime import pickle import torch from torch.utils.data import DataLoader import utils.misc as utils from tasks import build_task from engine_pretraining import train_one_epoch, evaluate parser = argpa... | 9,503 | 45.817734 | 208 | py |
| anticipatr | anticipatr-main/pretraining/engine_pretraining.py | import torch import torch.nn as nn import torch.nn.functional as F from torch import optim import os,sys import copy import numpy as np import math from typing import Iterable import time import utils.misc as utils import datasets from metrics.longfuture_metrics import AnticipationEvaluator def train_one_epoch(epoch... | 5,666 | 39.769784 | 208 | py |
| anticipatr | anticipatr-main/pretraining/models/model.py | import torch import torch.nn.functional as F from torch import nn from .transformer import build_transformer from .joiner import build_joiner import numpy as np class MLP(nn.Module): """ Very simple multi-layer perceptron (also called FFN)""" def __init__(self, input_dim, hidden_dim, output_dim, num_layers):... | 6,367 | 38.308642 | 136 | py |
| anticipatr | anticipatr-main/pretraining/models/position_encoding.py | """ Various positional encodings for the transformer. """ import math import torch from torch import nn class PositionEmbeddingSineIndex(nn.Module): """ Sinusoidal positional encodings based on sequence timestamps """ def __init__(self, num_pos_feats, temperature=10000, normalize=True, scale=... | 1,693 | 33.571429 | 97 | py |
| anticipatr | anticipatr-main/pretraining/models/transformer.py | """ Transformer class. Copy-paste from torch.nn.Transformer with modifications: * positional encodings are passed in MHattention * extra LN at the end of encoder is removed * decoder returns a stack of activations from all decoding layers """ import copy from typing import Optional, List import torch impo... | 12,622 | 39.458333 | 139 | py |
| anticipatr | anticipatr-main/pretraining/models/joiner.py | """ Joiner modules. """ from collections import OrderedDict import torch import torch.nn.functional as F import torchvision from torch import nn from torchvision.models._utils import IntermediateLayerGetter from typing import Dict, List from .position_encoding import build_position_encoding class Joiner(nn.Sequentia... | 959 | 29.967742 | 118 | py |
| anticipatr | anticipatr-main/pretraining/metrics/longfuture_metrics.py | import math import numpy as np import torch import warnings from collections import OrderedDict warnings.filterwarnings("ignore", category=UserWarning) import sklearn.metrics as skmetrics class AnticipationEvaluator(object): """ The pretraining task is multilabel classification problem.""" def __init__(self):... | 1,809 | 31.909091 | 151 | py |
| anticipatr | anticipatr-main/pretraining/datasets/bf.py | """ Builds a dataloader class for snippet-level anticipation task """ import numpy as np import lmdb from tqdm import tqdm from torch.utils.data import Dataset import pandas as pd from .baseds_snippetprediction import SequenceDatasetLongFuture def build_bf_pretraining(args,mode,override_modality=None): path... | 1,770 | 36.680851 | 130 | py |
| anticipatr | anticipatr-main/pretraining/datasets/ek.py | import numpy as np import lmdb from tqdm import tqdm from torch.utils.data import Dataset import pandas as pd from .baseds_snippetprediction import SequenceDatasetLongFuture def build_ek_pretraining(args,mode,override_modality=None): path_to_features = "{}/{}/{}/features/".format(args.root, args.dataset, args... | 1,866 | 41.431818 | 157 | py |
| anticipatr | anticipatr-main/pretraining/datasets/baseds_snippetprediction.py | """ Implementation of dataloader for snippet anticipation. Code inspired by: https://github.com/facebookresearch/ego-topo """ import bisect import copy import os import os.path as osp import random from functools import partial import itertools import numpy as np import pickle as pkl import collections from colle... | 12,985 | 38.956923 | 246 | py |
| anticipatr | anticipatr-main/pretraining/datasets/__init__.py | import torch.utils.data import torchvision def build_dataset(args): if args.dataset == 'ek': from datasets.ek import build_ek_pretraining dataset_train = build_ek_pretraining(args,mode='train') dataset_val = build_ek_pretraining(args,mode='val') return dataset_train, dataset_val ... | 568 | 34.5625 | 63 | py |
| anticipatr | anticipatr-main/pretraining/utils/misc.py | """ Misc functions, including distributed helpers. Mostly copy-paste from torchvision references and https://github.com/facebookresearch/detr """ import os import subprocess import time from collections import defaultdict, deque import datetime import pickle from typing import Optional, List import torch import torch... | 13,549 | 30.807512 | 138 | py |
| anticipatr | anticipatr-main/pretraining/tasks/__init__.py | import torch from datasets import build_dataset from models import build_model def build_task(args): dataset_train,dataset_test = build_dataset(args) model, criterion = build_model(args) return dataset_train, dataset_test, model, criterion | 257 | 18.846154 | 56 | py |
| benchmarks | benchmarks-master/my_tests/reportLmdbError.py | from __future__ import unicode_literals from __future__ import absolute_import from __future__ import division import numpy as np import tensorflow as tf from tensorflow.python.framework import dtypes from six.moves import xrange # pylint: disable=redefined-builtin import lmdb import PIL.Image from StringIO import St... | 1,388 | 27.9375 | 65 | py |
| benchmarks | benchmarks-master/my_tests/LmdbInputImagePreprocessor.py | # Copyright 2017 Ioannis Athanasiadis(supernlogn). All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required ... | 5,750 | 29.754011 | 101 | py |
| Geometric_Transformation_CMR | Geometric_Transformation_CMR-main/dataloader.py | import random import shutil import cv2 import torch from PIL import Image from matplotlib import pylab as plt import nibabel as nib from nibabel import nifti1 import torchvision from torch.utils.data import Dataset, DataLoader import torchvision.transforms as transforms import os import numpy as np class MyData(Datas... | 5,273 | 34.635135 | 120 | py |
| Geometric_Transformation_CMR | Geometric_Transformation_CMR-main/GeoNet.py | import torch from torch import nn from torch.nn import Sequential, Conv2d, MaxPool2d, Flatten, Linear, BatchNorm2d, ReLU, BatchNorm1d class GeoNet(nn.Module): def __init__(self): super(GeoNet, self).__init__() self.conv1 = Conv2d(1, 32, kernel_size=5, padding=2) self.conv2 = Conv2d(32, 64... | 1,344 | 27.020833 | 99 | py |
| Geometric_Transformation_CMR | Geometric_Transformation_CMR-main/OtherExperiment.py | from torchvision.transforms import transforms from dataloader import * from GeoNet import * def predict(model): model.eval() total_LGE_accuracy = 0 total_C0_accuracy = 0 data_aug = transforms.Compose([ transforms.ToTensor(), transforms.Grayscale(num_output_channels=1), ... | 1,827 | 44.7 | 158 | py |
| Geometric_Transformation_CMR | Geometric_Transformation_CMR-main/train.py | import cv2 import torch from torch import nn from torch.utils.tensorboard import SummaryWriter from dataloader import * from GeoNet import * from d2l import torch as d2l def train(image_datasets, data_loaders, epochs, learning_rate, wt_decay): train_data_size = len(image_datasets['train']) test_data_size = le... | 4,003 | 33.817391 | 116 | py |
| ZeCon | ZeCon-main/optimization/losses.py | # PatchNCE loss from https://github.com/taesungp/contrastive-unpaired-translation from torch.nn import functional as F import torch import numpy as np import torch.nn as nn def d_clip_loss(x, y, use_cosine=False): x = F.normalize(x, dim=-1) y = F.normalize(y, dim=-1) if use_cosine: distance = 1 - ... | 4,600 | 29.879195 | 95 | py |
| ZeCon | ZeCon-main/optimization/augmentations.py | import torch from torch import nn import kornia.augmentation as K # import ipdb class ImageAugmentations(nn.Module): def __init__(self, output_size, aug_prob, p_min, p_max, patch=False): super().__init__() self.output_size = output_size self.aug_prob = aug_prob self.patch =... | 1,974 | 34.267857 | 105 | py |
| ZeCon | ZeCon-main/optimization/image_editor_zecon.py | import os from pathlib import Path from optimization.constants import ASSETS_DIR_NAME from utils.metrics_accumulator import MetricsAccumulator from numpy import random from optimization.augmentations import ImageAugmentations as ImageAugmentations from PIL import Image import torch from torchvision import transforms i... | 17,010 | 43.648294 | 152 | py |