seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
37751767465 | r"""Inference components such as estimators, training losses and MCMC samplers."""
import torch
import torch.nn as nn
import torch.nn.functional as F
from itertools import islice
from torch import Tensor, BoolTensor, Size
from typing import *
from .distributions import Distribution, DiagNormal, AffineTransform
from .nn import Affine, MLP
from .nn.flows import Buffer, FlowModule, MAF
from .utils import broadcast
class NRE(nn.Module):
    r"""Creates a neural ratio estimation (NRE) classifier network.
    The principle of neural ratio estimation is to train a classifier network
    :math:`d_\phi(\theta, x)` to discriminate between pairs :math:`(\theta, x)`
    equally sampled from the joint distribution :math:`p(\theta, x)` and the
    product of the marginals :math:`p(\theta)p(x)`. Formally, the optimization
    problem is
    .. math:: \arg\min_\phi
        \mathbb{E}_{p(\theta, x)} \big[ \ell(d_\phi(\theta, x)) \big] +
        \mathbb{E}_{p(\theta)p(x)} \big[ \ell(1 - d_\phi(\theta, x)) \big]
    where :math:`\ell(p) = - \log p` is the negative log-likelihood.
    For this task, the decision function modeling the Bayes optimal classifier is
    .. math:: d(\theta, x)
        = \frac{p(\theta, x)}{p(\theta, x) + p(\theta) p(x)}
    thereby defining the likelihood-to-evidence (LTE) ratio
    .. math:: r(\theta, x)
        = \frac{d(\theta, x)}{1 - d(\theta, x)}
        = \frac{p(\theta, x)}{p(\theta) p(x)}
        = \frac{p(x | \theta)}{p(x)}
        = \frac{p(\theta | x)}{p(\theta)} .
    To prevent numerical stability issues when :math:`d_\phi(\theta, x) \to 0`,
    the neural network returns the logit of the class prediction
    :math:`\text{logit}(d_\phi(\theta, x)) = \log r_\phi(\theta, x)`.
    References:
        Approximating Likelihood Ratios with Calibrated Discriminative Classifiers
        (Cranmer et al., 2015)
        https://arxiv.org/abs/1506.02169
        Likelihood-free MCMC with Amortized Approximate Ratio Estimators
        (Hermans et al., 2019)
        https://arxiv.org/abs/1903.04057
    Arguments:
        theta_dim: The dimensionality :math:`D` of the parameter space.
        x_dim: The dimensionality :math:`L` of the observation space.
        moments: The parameters moments :math:`\mu` and :math:`\sigma`. If provided,
            the moments are used to standardize the parameters.
        build: The network constructor (e.g. :class:`lampe.nn.ResMLP`).
        kwargs: Keyword arguments passed to the constructor.
    """
    def __init__(
        self,
        theta_dim: int,
        x_dim: int,
        moments: Tuple[Tensor, Tensor] = None,
        build: Callable[[int, int], nn.Module] = MLP,
        **kwargs,
    ):
        super().__init__()
        # Standardize the parameters when moments are provided;
        # otherwise pass them through unchanged.
        if moments is None:
            self.standardize = nn.Identity()
        else:
            mu, sigma = moments
            # Affine(shift, scale): theta -> theta / sigma - mu / sigma,
            # i.e. (theta - mu) / sigma.
            self.standardize = Affine(-mu / sigma, 1 / sigma)
        # The classifier maps the concatenation (theta, x) to a single
        # logit, interpreted as log r_phi(theta, x).
        self.net = build(theta_dim + x_dim, 1, **kwargs)
    def forward(self, theta: Tensor, x: Tensor) -> Tensor:
        r"""
        Arguments:
            theta: The parameters :math:`\theta`, with shape :math:`(*, D)`.
            x: The observation :math:`x`, with shape :math:`(*, L)`.
        Returns:
            The log-ratio :math:`\log r_\phi(\theta, x)`, with shape :math:`(*,)`.
        """
        theta = self.standardize(theta)
        # Align the batch dimensions of theta and x, ignoring the last
        # (feature) axis, so they can be concatenated.
        theta, x = broadcast(theta, x, ignore=1)
        return self.net(torch.cat((theta, x), dim=-1)).squeeze(-1)
class NRELoss(nn.Module):
    r"""Module computing the loss :math:`l` of a NRE classifier
    :math:`d_\phi`. For a batch of :math:`N` pairs :math:`\{ (\theta_i, x_i) \}`,
    it returns
    .. math:: l = \frac{1}{N} \sum_{i = 1}^N
        \ell(d_\phi(\theta_i, x_i)) + \ell(1 - d_\phi(\theta_{i+1}, x_i))
    where :math:`\ell(p) = - \log p` is the negative log-likelihood.
    Arguments:
        estimator: A classifier network :math:`d_\phi(\theta, x)`.
    """
    def __init__(self, estimator: nn.Module):
        super().__init__()
        self.estimator = estimator
    def forward(self, theta: Tensor, x: Tensor) -> Tensor:
        r"""
        Arguments:
            theta: The parameters :math:`\theta`, with shape :math:`(N, D)`.
            x: The observation :math:`x`, with shape :math:`(N, L)`.
        Returns:
            The scalar loss :math:`l`.
        """
        # Pairing each x_i with the parameters of the previous sample
        # emulates draws from the product of marginals p(theta) p(x).
        shifted = torch.roll(theta, 1, dims=0)
        # Evaluate joint and marginal pairs in a single estimator call.
        logits_joint, logits_marginal = self.estimator(torch.stack((theta, shifted)), x)
        loss_joint = -F.logsigmoid(logits_joint).mean()
        loss_marginal = -F.logsigmoid(-logits_marginal).mean()
        return loss_joint + loss_marginal
class BNRELoss(nn.Module):
    r"""Creates a module that calculates the loss :math:`l` of a balanced NRE (BNRE)
    classifier :math:`d_\phi`. Given a batch of :math:`N` pairs
    :math:`\{ (\theta_i, x_i) \}`, the module returns
    .. math::
        \begin{align}
            l & = \frac{1}{N} \sum_{i = 1}^N
                \ell(d_\phi(\theta_i, x_i)) + \ell(1 - d_\phi(\theta_{i+1}, x_i)) \\
            & + \gamma \left(1 - \frac{1}{N} \sum_{i = 1}^N
                d_\phi(\theta_i, x_i) + d_\phi(\theta_{i+1}, x_i)
            \right)^2
        \end{align}
    where :math:`\ell(p) = - \log p` is the negative log-likelihood.
    References:
        Towards Reliable Simulation-Based Inference with Balanced Neural Ratio Estimation
        (Delaunoy et al., 2022)
    Arguments:
        estimator: A classifier network :math:`d_\phi(\theta, x)`.
        gamma: The weight :math:`\gamma` of the balancing penalty.
    """
    def __init__(self, estimator: nn.Module, gamma: float = 42.0):
        super().__init__()
        self.estimator = estimator
        # Weight of the balancing penalty term.
        self.gamma = gamma
    def forward(self, theta: Tensor, x: Tensor) -> Tensor:
        r"""
        Arguments:
            theta: The parameters :math:`\theta`, with shape :math:`(N, D)`.
            x: The observation :math:`x`, with shape :math:`(N, L)`.
        Returns:
            The scalar loss :math:`l`.
        """
        # Pair each x_i with theta_{i+1} to emulate draws from p(theta)p(x).
        theta_prime = torch.roll(theta, 1, dims=0)
        log_r, log_r_prime = self.estimator(
            torch.stack((theta, theta_prime)),
            x,
        )
        l1 = -F.logsigmoid(log_r).mean()
        l0 = -F.logsigmoid(-log_r_prime).mean()
        # Balancing penalty: pushes E[d(joint)] + E[d(marginal)] towards 1.
        lb = (1 - torch.sigmoid(log_r) + torch.sigmoid(log_r_prime)).mean().square()
        return l1 + l0 + self.gamma * lb
class AMNRE(NRE):
    r"""Creates an arbitrary marginal neural ratio estimation (AMNRE) classifier
    network.
    The principle of AMNRE is to introduce, as input to the classifier, a binary mask
    :math:`b \in \{0, 1\}^D` indicating a subset of parameters :math:`\theta_b =
    (\theta_i: b_i = 1)` of interest. Intuitively, this allows the classifier to
    distinguish subspaces and to learn a different ratio for each of them. Formally,
    the classifier network takes the form :math:`d_\phi(\theta_b, x, b)` and the
    optimization problem becomes
    .. math:: \arg\min_\phi
        \mathbb{E}_{p(\theta, x) P(b)} \big[ \ell(d_\phi(\theta_b, x, b)) \big] +
        \mathbb{E}_{p(\theta)p(x) P(b)} \big[ \ell(1 - d_\phi(\theta_b, x, b)) \big],
    where :math:`P(b)` is a binary mask distribution. In this context, the Bayes
    optimal classifier is
    .. math:: d(\theta_b, x, b)
        = \frac{p(\theta_b, x)}{p(\theta_b, x) + p(\theta_b) p(x)}
        = \frac{r(\theta_b, x)}{1 + r(\theta_b, x)} .
    Therefore, a classifier network trained for AMNRE gives access to an estimator
    :math:`\log r_\phi(\theta_b, x, b)` of all marginal LTE log-ratios
    :math:`\log r(\theta_b, x)`.
    References:
        Arbitrary Marginal Neural Ratio Estimation for Simulation-based Inference
        (Rozet et al., 2021)
        https://arxiv.org/abs/2110.00449
    Arguments:
        theta_dim: The dimensionality :math:`D` of the parameter space.
        x_dim: The dimensionality :math:`L` of the observation space.
        args: Positional arguments passed to :class:`NRE`.
        kwargs: Keyword arguments passed to :class:`NRE`.
    """
    def __init__(
        self,
        theta_dim: int,
        x_dim: int,
        *args,
        **kwargs,
    ):
        # The network consumes theta (D components) plus the +/- 1 encoded
        # mask (D components), hence twice the parameter dimensionality.
        super().__init__(theta_dim * 2, x_dim, *args, **kwargs)
    def forward(self, theta: Tensor, x: Tensor, b: BoolTensor) -> Tensor:
        r"""
        Arguments:
            theta: The parameters :math:`\theta`, with shape :math:`(*, D)`, or
                a subset :math:`\theta_b`, with shape :math:`(*, |b|)`.
            x: The observation :math:`x`, with shape :math:`(*, L)`.
            b: A binary mask :math:`b`, with shape :math:`(*, D)`.
        Returns:
            The log-ratio :math:`\log r_\phi(\theta_b, x, b)`, with shape :math:`(*,)`.
        """
        # If theta only contains the subset theta_b, scatter it into a
        # zero-padded full-size vector at the positions where b is True.
        if theta.shape[-1] < b.shape[-1]:
            theta, b = broadcast(theta, b, ignore=1)
            theta = theta.new_zeros(b.shape).masked_scatter(b, theta)
        # Zero out the masked-off components after standardization.
        theta = self.standardize(theta) * b
        # Encode the boolean mask as +/- 1 and align batch dimensions.
        theta, x, b = broadcast(theta, x, b * 2.0 - 1.0, ignore=1)
        return self.net(torch.cat((theta, x, b), dim=-1)).squeeze(-1)
class AMNRELoss(nn.Module):
    r"""Creates a module that calculates the loss :math:`l` of a AMNRE classifier
    :math:`d_\phi`. Given a batch of :math:`N` pairs :math:`\{ (\theta_i, x_i) \}`,
    the module returns
    .. math:: l = \frac{1}{N} \sum_{i = 1}^N
        \ell(d_\phi(\theta_i \odot b_i, x_i, b_i)) +
        \ell(1 - d_\phi(\theta_{i+1} \odot b_i, x_i, b_i))
    where the binary masks :math:`b_i` are sampled from a distribution :math:`P(b)`.
    Arguments:
        estimator: A classifier network :math:`d_\phi(\theta, x, b)`.
        mask_dist: A binary mask distribution :math:`P(b)`.
    """
    def __init__(
        self,
        estimator: nn.Module,
        mask_dist: Distribution,
    ):
        super().__init__()
        self.estimator = estimator
        self.mask_dist = mask_dist
    def forward(self, theta: Tensor, x: Tensor) -> Tensor:
        r"""
        Arguments:
            theta: The parameters :math:`\theta`, with shape :math:`(N, D)`.
            x: The observation :math:`x`, with shape :math:`(N, L)`.
        Returns:
            The scalar loss :math:`l`.
        """
        # Pair each x_i with theta_{i+1} to emulate draws from p(theta)p(x).
        theta_prime = torch.roll(theta, 1, dims=0)
        # One mask per batch element, shared by the joint and marginal pairs.
        b = self.mask_dist.sample(theta.shape[:-1])
        log_r, log_r_prime = self.estimator(
            torch.stack((theta, theta_prime)),
            x,
            b,
        )
        l1 = -F.logsigmoid(log_r).mean()
        l0 = -F.logsigmoid(-log_r_prime).mean()
        return l1 + l0
class NPE(nn.Module):
    r"""Creates a neural posterior estimation (NPE) normalizing flow.
    The principle of neural posterior estimation is to train a parametric conditional
    distribution :math:`p_\phi(\theta | x)` to approximate the posterior distribution
    :math:`p(\theta | x)`. The optimization problem is to minimize the Kullback-Leibler
    (KL) divergence between the two distributions or, equivalently,
    .. math:: \arg\min_\phi \mathbb{E}_{p(\theta, x)} \big[ -\log p_\phi(\theta | x) \big] .
    Normalizing flows are typically used for :math:`p_\phi(\theta | x)` as they are
    differentiable parametric distributions enabling gradient-based optimization
    techniques.
    References:
        https://wikipedia.org/wiki/Kullback-Leibler_divergence
    Arguments:
        theta_dim: The dimensionality :math:`D` of the parameter space.
        x_dim: The dimensionality :math:`L` of the observation space.
        moments: The parameters moments :math:`\mu` and :math:`\sigma`. If provided,
            the moments are used to standardize the parameters.
        build: The flow constructor (e.g. :class:`lampe.nn.flows.NSF`).
        kwargs: Keyword arguments passed to the constructor.
    """
    def __init__(
        self,
        theta_dim: int,
        x_dim: int,
        moments: Tuple[Tensor, Tensor] = None,
        build: Callable[[int, int], FlowModule] = MAF,
        **kwargs,
    ):
        super().__init__()
        self.flow = build(theta_dim, x_dim, **kwargs)
        if moments is not None:
            mu, sigma = moments
            # Prepend an affine standardization transform,
            # theta -> theta / sigma - mu / sigma, i.e. (theta - mu) / sigma,
            # so the flow operates on standardized parameters.
            self.flow.transforms.insert(0, Buffer(AffineTransform, -mu / sigma, 1 / sigma))
    def forward(self, theta: Tensor, x: Tensor) -> Tensor:
        r"""
        Arguments:
            theta: The parameters :math:`\theta`, with shape :math:`(*, D)`.
            x: The observation :math:`x`, with shape :math:`(*, L)`.
        Returns:
            The log-density :math:`\log p_\phi(\theta | x)`, with shape :math:`(*,)`.
        """
        # Align the batch dimensions of theta and x (last axis is features).
        theta, x = broadcast(theta, x, ignore=1)
        return self.flow(x).log_prob(theta)
    @torch.no_grad()
    def sample(self, x: Tensor, shape: Size = ()) -> Tensor:
        r"""
        Arguments:
            x: The observation :math:`x`, with shape :math:`(*, L)`.
            shape: The shape :math:`S` of the samples.
        Returns:
            The samples :math:`\theta \sim p_\phi(\theta | x)`,
            with shape :math:`S + (*, D)`.
        """
        return self.flow(x).sample(shape)
class NPELoss(nn.Module):
    r"""Module computing the loss :math:`l` of a NPE normalizing flow
    :math:`p_\phi`. For a batch of :math:`N` pairs :math:`\{ (\theta_i, x_i) \}`,
    it returns
    .. math:: l = \frac{1}{N} \sum_{i = 1}^N -\log p_\phi(\theta_i | x_i) .
    Arguments:
        estimator: A normalizing flow :math:`p_\phi(\theta | x)`.
    """
    def __init__(self, estimator: nn.Module):
        super().__init__()
        self.estimator = estimator
    def forward(self, theta: Tensor, x: Tensor) -> Tensor:
        r"""
        Arguments:
            theta: The parameters :math:`\theta`, with shape :math:`(N, D)`.
            x: The observation :math:`x`, with shape :math:`(N, L)`.
        Returns:
            The scalar loss :math:`l`.
        """
        # Expected negative log-density over the batch.
        return -self.estimator(theta, x).mean()
class AMNPE(NPE):
    r"""Creates an arbitrary marginal neural posterior estimation (AMNPE)
    normalizing flow.
    The flow is conditioned on a binary mask :math:`b \in \{0, 1\}^D` in
    addition to the observation :math:`x`, modeling :math:`p_\phi(\theta | x, b)`.
    The mask, encoded as :math:`\pm 1`, is concatenated to the observation,
    hence the conditioning dimensionality :math:`L + D`. Samples for a subset
    of parameters :math:`\theta_b` are obtained by sampling the full parameter
    vector and keeping the components where :math:`b = 1`.
    Arguments:
        theta_dim: The dimensionality :math:`D` of the parameter space.
        x_dim: The dimensionality :math:`L` of the observation space.
        args: Positional arguments passed to :class:`NPE`.
        kwargs: Keyword arguments passed to :class:`NPE`.
    """
    def __init__(
        self,
        theta_dim: int,
        x_dim: int,
        *args,
        **kwargs,
    ):
        # The flow is conditioned on (x, b), so the context size is L + D.
        super().__init__(theta_dim, x_dim + theta_dim, *args, **kwargs)
    def forward(self, theta: Tensor, x: Tensor, b: BoolTensor) -> Tensor:
        r"""
        Arguments:
            theta: The parameters :math:`\theta`, with shape :math:`(*, D)`.
            x: The observation :math:`x`, with shape :math:`(*, L)`.
            b: A binary mask :math:`b`, with shape :math:`(*, D)`.
        Returns:
            The log-density :math:`\log p_\phi(\theta | x, b)`, with shape :math:`(*,)`.
        """
        # Encode the boolean mask as +/- 1 and align batch dimensions.
        theta, x, b = broadcast(theta, x, b * 2.0 - 1.0, ignore=1)
        return self.flow(torch.cat((x, b), dim=-1)).log_prob(theta)
    @torch.no_grad()
    def sample(self, x: Tensor, b: BoolTensor, shape: Size = ()) -> Tensor:
        r"""
        Arguments:
            x: The observation :math:`x`, with shape :math:`(*, L)`.
            b: A binary mask :math:`b`, with shape :math:`(D,)`.
            shape: The shape :math:`S` of the samples.
        Returns:
            The samples :math:`\theta_b \sim p_\phi(\theta_b | x, b)`,
            with shape :math:`S + (*, D)`.
        """
        # Condition on the +/- 1 encoded mask, sample the full parameter
        # vector, then select the masked components.
        x, b_ = broadcast(x, b * 2.0 - 1.0, ignore=1)
        return self.flow(torch.cat((x, b_), dim=-1)).sample(shape)[..., b]
class AMNPELoss(nn.Module):
    r"""Creates a module that calculates the loss :math:`l` of an AMNPE normalizing flow
    :math:`p_\phi`. Given a batch of :math:`N` pairs :math:`\{ (\theta_i, x_i) \}`,
    the module returns
    .. math:: l = \frac{1}{N} \sum_{i = 1}^N
        -\log p_\phi(\theta_i \odot b_i + \theta_{i + 1} \odot (1 - b_i) | x_i, b_i)
    where the binary masks :math:`b_i` are sampled from a distribution :math:`P(b)`.
    Arguments:
        estimator: A normalizing flow :math:`p_\phi(\theta | x, b)`.
        mask_dist: A binary mask distribution :math:`P(b)`.
    """
    def __init__(
        self,
        estimator: nn.Module,
        mask_dist: Distribution,
    ):
        super().__init__()
        self.estimator = estimator
        self.mask_dist = mask_dist
    def forward(self, theta: Tensor, x: Tensor) -> Tensor:
        r"""
        Arguments:
            theta: The parameters :math:`\theta`, with shape :math:`(N, D)`.
            x: The observation :math:`x`, with shape :math:`(N, L)`.
        Returns:
            The scalar loss :math:`l`.
        """
        theta_prime = torch.roll(theta, 1, dims=0)
        # One mask per batch element.
        b = self.mask_dist.sample(theta.shape[:-1])
        # Replace the masked-out components with parameters from another
        # pair, implementing theta_i * b_i + theta_{i+1} * (1 - b_i).
        theta = torch.where(b, theta, theta_prime)
        log_prob = self.estimator(theta, x, b)
        return -log_prob.mean()
class MetropolisHastings:
    r"""Creates a batched Metropolis-Hastings sampler.
    Metropolis-Hastings is a Markov chain Monte Carlo (MCMC) sampling algorithm used to
    sample from intractable distributions :math:`p(x)` whose density is proportional to a
    tractable function :math:`f(x)`, with :math:`x \in \mathcal{X}`. The algorithm
    consists in repeating the following routine for :math:`t = 1` to :math:`T`, where
    :math:`x_0` is the initial sample and :math:`q(x' | x)` is a pre-defined transition
    distribution.
    1. sample :math:`x' \sim q(x' | x_{t-1})`
    2. :math:`\displaystyle \alpha \gets \frac{f(x')}{f(x_{t-1})} \frac{q(x_{t-1} | x')}{q(x' | x_{t-1})}`
    3. sample :math:`u \sim \mathcal{U}(0, 1)`
    4. :math:`x_t \gets \begin{cases} x' & \text{if } u \leq \alpha \\ x_{t-1} & \text{otherwise} \end{cases}`
    Asymptotically, i.e. when :math:`T \to \infty`, the distribution of samples
    :math:`x_t` is guaranteed to converge towards :math:`p(x)`. In this implementation,
    a Gaussian transition :math:`q(x' | x) = \mathcal{N}(x'; x, \Sigma)` is used, which
    can be modified by sub-classing :class:`MetropolisHastings`.
    Wikipedia:
        https://wikipedia.org/wiki/Metropolis-Hastings_algorithm
    Arguments:
        x_0: A batch of initial points :math:`x_0`, with shape :math:`(*, L)`.
        f: A function :math:`f(x)` proportional to a density function :math:`p(x)`.
        log_f: The logarithm :math:`\log f(x)` of a function proportional
            to :math:`p(x)`.
        sigma: The standard deviation of the Gaussian transition.
            Either a scalar or a vector.
    Example:
        >>> x_0 = torch.randn(128, 7)
        >>> log_f = lambda x: -(x**2).sum(dim=-1) / 2
        >>> sampler = MetropolisHastings(x_0, log_f=log_f, sigma=0.5)
        >>> samples = [x for x in sampler(256, burn=128, step=4)]
        >>> samples = torch.stack(samples)
        >>> samples.shape
        torch.Size([32, 128, 7])
    """
    def __init__(
        self,
        x_0: Tensor,
        f: Callable[[Tensor], Tensor] = None,
        log_f: Callable[[Tensor], Tensor] = None,
        sigma: Union[float, Tensor] = 1.0,
    ):
        super().__init__()
        self.x_0 = x_0
        assert f is not None or log_f is not None, \
            "Either 'f' or 'log_f' has to be provided."
        # Work in log-space internally, wrapping f when only f is given.
        if f is None:
            self.log_f = log_f
        else:
            self.log_f = lambda x: f(x).log()
        self.sigma = sigma
    def q(self, x: Tensor) -> Distribution:
        r"""Returns the transition distribution :math:`q(x' | x)`,
        here a diagonal Gaussian centered at :math:`x`."""
        return DiagNormal(x, torch.ones_like(x) * self.sigma)
    @property
    def symmetric(self) -> bool:
        r"""Whether :math:`q(x' | x) = q(x | x')`. True for the Gaussian
        transition, which allows skipping the Hastings correction term."""
        return True
    def __iter__(self) -> Iterator[Tensor]:
        r"""Yields one batch of chain states per iteration, indefinitely."""
        x = self.x_0
        # log f(x)
        log_f_x = self.log_f(x)
        while True:
            # y ~ q(y | x)
            y = self.q(x).sample()
            # log f(y)
            log_f_y = self.log_f(y)
            #     f(y)   q(x | y)
            # a = ---- * --------
            #     f(x)   q(y | x)
            log_a = log_f_y - log_f_x
            if not self.symmetric:
                log_a = log_a + self.q(y).log_prob(x) - self.q(x).log_prob(y)
            a = log_a.exp()
            # u ~ U(0, 1), drawn directly with the device/dtype of a
            u = torch.rand_like(a)
            # if u < a, x <- y
            # else x <- x
            mask = u < a
            # Accept/reject per chain; the mask is broadcast over the
            # feature axis for the states.
            x = torch.where(mask.unsqueeze(-1), y, x)
            log_f_x = torch.where(mask, log_f_y, log_f_x)
            yield x
    def __call__(self, stop: int, burn: int = 0, step: int = 1) -> Iterable[Tensor]:
        r"""
        Arguments:
            stop: The total number of iterations, including burn-in.
            burn: The number of leading iterations to discard.
            step: The thinning factor between kept batches.
        Returns:
            An iterable over the retained batches of samples.
        """
        return islice(self, burn, stop, step)
| ADelau/lampe | lampe/inference.py | inference.py | py | 20,743 | python | en | code | null | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"lin... |
import argparse
from auxilliaries.settings_reader import SettingsReader
from graph_indexing.graph_indexing_components.graph_name_handler import GraphNameHandler
from graph_indexing.graph_indexing_components.graph_printer import GraphPrinter
from graph_indexing.graph_indexing_components.graph_relation_counter import GraphRelationCounter
from graph_indexing.graph_indexing_components.graph_relation_filter import GraphRelationFilter
from graph_indexing.graph_indexing_components.graph_relation_indexer import GraphRelationIndexer
from graph_indexing.graph_indexing_components.graph_type_handler import GraphTypeHandler
from graph_indexing.graph_indexing_components.graph_vertex_indexer import GraphVertexIndexer
from graph_indexing.graph_iterators.graph_file_iterator import GraphFileIterator
from indexes.element_cache import ElementCache
from indexes.element_index import ElementIndex
import sys
"""
Future-proof graph indexing system
"""
# Command-line interface: the graph location and a settings file.
parser = argparse.ArgumentParser(description='Indexes a graph')
parser.add_argument('--graph', type=str, help='The location of the graph')
parser.add_argument('--preprocessor_settings', type=str, help='Settings file for preprocessor')
args = parser.parse_args()
# Read cache/index locations and filter files from the settings file.
settings_reader = SettingsReader()
settings = settings_reader.read(args.preprocessor_settings)
name_cache = settings["cache_locations"]["name_cache"]
name_relation = settings["other"]["name_relation"]
relation_index_location = settings["cache_locations"]["relation_index"]
vertex_index_location = settings["cache_locations"]["vertex_index"]
vertex_type_cache = settings["cache_locations"]["vertex_type_cache"]
discarded_name_file = settings["filters"]["names"]
discarded_relation_file = settings["filters"]["relations"]
# Build the processing pipeline: each component wraps the previous one
# (decorator pattern); GraphRelationFilter ends up as the outermost wrapper
# and GraphPrinter as the innermost.
graph_processor = GraphPrinter()
relation_index = ElementIndex(relation_index_location)
graph_processor = GraphRelationIndexer(graph_processor, relation_index)
graph_processor = GraphRelationCounter(graph_processor)
vertex_index = ElementIndex(vertex_index_location)
graph_processor = GraphVertexIndexer(graph_processor, vertex_index)
# NOTE: 'name_cache' is rebound from the path string to the cache object.
name_cache = ElementCache(name_cache)
type_cache = ElementCache(vertex_type_cache)
graph_processor = GraphTypeHandler(graph_processor, type_cache, name_cache)
graph_processor = GraphRelationFilter(graph_processor, discarded_relation_file, name_relation)
graph_iterator = GraphFileIterator(args.graph)
# NOTE(review): 'non_events' and the GraphNameHandler import appear unused
# in the visible code — confirm before removing.
non_events = []
print("Indexing graph", file=sys.stderr)
# Stream the graph through the pipeline, reporting progress to stderr
# every 10 million records.
for i,graph in enumerate(graph_iterator.iterate()):
    if i % 10000000 == 0:
        print(str(i), file=sys.stderr)
    processed = graph_processor.process(graph)
    if processed is not None:
print("\t".join(processed)) | MichSchli/QARehash | graph_indexing/index_graph.py | index_graph.py | py | 2,658 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "auxilliaries.settings_reader.SettingsReader",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "graph_indexing.graph_indexing_components.graph_printer.GraphPrinter",
"li... |
24339594409 | import scrapy
class GoStdlibSpider(scrapy.Spider):
    """Spider for scraping the Go standard libraries."""
    name = "go-stdlib-spider"
    def start_requests(self):
        """Start making requests.

        Queues the godoc.org index pages for the core standard library and
        the sub-repositories, tagging each request with its kind so the
        parse callback can label the scraped items.
        """
        for url, kind in [
            ("https://godoc.org/-/go", "core"),
            ("https://godoc.org/-/subrepo", "subrepo"),
        ]:
            request = scrapy.Request(url, callback=self._parse)
            # Carry the library kind through to the parse callback.
            request.meta["kind"] = kind
            yield request
    def _parse(self, response):
        """Parse the response for the index.

        Yields one item per library link found in the table cells, with
        trailing slashes stripped from the href.
        """
        library_metadata = [response.meta["kind"]]
        for library_name in response.css("td > a::attr(href)").getall():
            yield {
                "library_name": library_name.strip("/"),
                "lang": "go",
                "library_metadata": library_metadata,
            }
| src-d/ml-mining | sourced/ml/mining/spiders/go_stdlib.py | go_stdlib.py | py | 861 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "scrapy.Spider",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "scrapy.Request",
"line_number": 15,
"usage_type": "call"
}
] |
import json as js, pandas, os, re
from matplotlib import pyplot as plt
# NOTE(review): hard-coded absolute Windows paths — consider CLI arguments.
dataset_completo_path = "C:/Users/Mark/Marco/Magistrale/Anno I/Secondo semestre/DS & ML/Progetto/Social-Mapper-Extended/social_mapper2/dataset/dataset_completo.json"
cf_path = "C:/Users/Mark/Marco/Magistrale/Anno I/Secondo semestre/DS & ML/Progetto/Social-Mapper-Extended/social_mapper2/dataset/cf.json"
# NOTE(review): 'f' is never closed — prefer a 'with' block.
f = open(dataset_completo_path, "r")
dataset = js.load(f)
dictionary = dict()
# For each account keep only the location(s) with the highest occurrence count.
for account, info in dataset.items():
    # This account is skipped deliberately — presumably bad data; confirm.
    if account == "dylanchristopher":
        continue
    locations = []
    max_location = None
    if len(info["info_locations"]) > 0:
        max_location = info["info_locations"][0]
    for loc in info["info_locations"]:
        if loc == max_location:
            continue
        if loc["occurrences"] > max_location["occurrences"]:
            # New maximum found; stale ties are pruned below.
            max_location = loc
        elif loc["occurrences"] == max_location["occurrences"]:
            locations.append(loc)
    # Drop ties collected against an older, smaller maximum.
    remove_obj = []
    for locs in locations:
        if locs["occurrences"] < max_location["occurrences"]:
            remove_obj.append(locs)
    for obj in remove_obj:
        locations.remove(obj)
    # Put the winning location first in the list.
    if max_location != None:
        locations.insert(0,max_location)
    if len(locations) > 0:
        dictionary[account] = {"name": info["info_name"], "locations": locations}
with open(cf_path, "w") as cf:
js.dump(dictionary, cf) | gaelix98/progetto-fdsml | codici aggiunti/generate_cf.py | generate_cf.py | py | 1,504 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 45,
"usage_type": "call"
}
] |
23472198434 | import pygame
from bullet import Bullet
from settings import *
life = 100
class Player(pygame.sprite.Sprite):
    """Player sprite: arrow-key movement, SPACE fires bullets, and holding
    LSHIFT destroys tiles the player collides with."""
    def __init__(self, pos):
        # Reads the module-level 'life' value as the starting life total.
        global life
        super().__init__()
        '''
        # size of player
        self.image = pygame.Surface((32, 64))
        # color of player
        self.image.fill("orange")
        '''
        self.image = pygame.image.load("graphics/player/ddcritbcharacter-1.png (4).png")
        self.rect = self.image.get_rect(topleft=pos)
        # x and y axis
        self.direction = pygame.math.Vector2(0, 0)
        self.speed = 8
        # shooting
        self.bullets = pygame.sprite.Group()
        self.firing = False
        self.life = life
        self.score = 0
        # Last horizontal direction (-1 left, 1 right); bullets travel this way.
        self.last_direct = 0
        self.shoot = pygame.mixer.Sound("sounds/zapsplat_foley_rubber_tube_swish_whoosh_through_air_002_96127.mp3")
        self.hit = pygame.mixer.Sound("sounds/zapsplat_multimedia_beep_digital_soft_click_delayed_ascending_87487.mp3")
    def horizontal_movement_collision(self, tiles):
        """Move along x, push the player out of any hit tile, and destroy
        the tile when LSHIFT is held."""
        self.rect.x += self.direction.x * self.speed
        keys = pygame.key.get_pressed()
        for tile in tiles.sprites():
            if tile.rect.colliderect(self.rect):
                if self.direction.x < 0:
                    self.rect.left = tile.rect.right
                    if keys[pygame.K_LSHIFT]:
                        tile.kill()
                elif self.direction.x > 0:
                    self.rect.right = tile.rect.left
                    if keys[pygame.K_LSHIFT]:
                        tile.kill()
    def vertical_movement_collision(self, tiles):
        """Move along y, push the player out of any hit tile, and destroy
        the tile when LSHIFT is held."""
        self.rect.y += self.direction.y * self.speed
        keys = pygame.key.get_pressed()
        for tile in tiles.sprites():
            if tile.rect.colliderect(self.rect):
                if self.direction.y < 0:
                    self.rect.top = tile.rect.bottom
                    if keys[pygame.K_LSHIFT]:
                        tile.kill()
                elif self.direction.y > 0:
                    self.rect.bottom = tile.rect.top
                    if keys[pygame.K_LSHIFT]:
                        tile.kill()
    def get_input(self, tiles):
        """Translate the current keyboard state into a movement direction
        and firing. NOTE(review): 'tiles' is unused here."""
        keys = pygame.key.get_pressed()
        # going left and right
        # player coordinates are (self.rect.x, self.rect.y)
        if keys[pygame.K_RIGHT]:
            self.direction.x = 1
            self.last_direct = 1
        elif keys[pygame.K_LEFT]:
            self.direction.x = -1
            self.last_direct = -1
        # going up and down
        elif keys[pygame.K_UP]:
            self.direction.y = -1
        elif keys[pygame.K_DOWN]:
            self.direction.y = 1
        # not moving
        else:
            self.direction.x = 0
            self.direction.y = 0
        # preventing multiple bullets from being shot at once
        if keys[pygame.K_SPACE] and not self.firing:
            self.fire()
            self.firing = True
        elif not keys[pygame.K_SPACE] and self.firing:
            self.firing = False
    def fire(self):
        """Spawn a bullet at the player's centre travelling in the last
        horizontal direction, and play the shooting sound."""
        # we need to send it x and y coordinates
        # center x means it fires from the center of the player
        bullet = Bullet((self.rect.centerx, self.rect.centery), self.last_direct)
        self.bullets.add(bullet)
        pygame.mixer.Sound.play(self.shoot)
    def draw_bullets(self, surface):
        """Draw all live bullets onto the given surface."""
        self.bullets.draw(surface)
    def removelife(self):
        """Decrement the player's life by one."""
        self.life -= 1
    def getlife(self):
        """Return the player's current life total."""
        return self.life
    def editscore(self):
        """Award points for a kill: 10 when life is below 50, else 15."""
        # play sound effect when enemy is killed
        pygame.mixer.Sound.play(self.hit)
        if self.life < 50:
            self.score += 10
        else:
            self.score += 15
    def update(self, tiles, enemies):
        """Per-frame update: read input, resolve bullet/enemy collisions,
        move with collisions, and clamp the player to the screen bounds."""
        self.get_input(tiles)
        self.enemies = enemies
        # Bullets that hit an enemy remove both sprites; any hit scores.
        collided2 = pygame.sprite.groupcollide(self.bullets, self.enemies, True, True)
        if len(collided2) > 0:
            self.editscore()
        self.horizontal_movement_collision(tiles)
        self.vertical_movement_collision(tiles)
        self.bullets.update(tiles)
        # stops player from leaving the screen
        if self.rect.left < 0:
            self.rect.left = 0
        if self.rect.right > screen_width:
            self.rect.right = screen_width
        if self.rect.top <= 0:
            self.rect.top = 0
        if self.rect.bottom >= screen_height:
            self.rect.bottom = screen_height
| emilia-jura/summativeunit3 | player.py | player.py | py | 4,459 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.sprite",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "pygame.math.Vec... |
72440795944 | #!/usr/bin/env python
from urllib.parse import urlparse, parse_qs
import json
import os
import logging
import re
import bs4
import trackleaders_scraper.common as common
def parse_riders_from_race_page(race_page_text):
    """Extract the riders listed on a TrackLeaders race page.

    Arguments:
        race_page_text: The HTML of the race page.
    Returns:
        A list of dicts with keys 'url_fragment' (the ``name`` query
        parameter of the rider link) and 'name' (the link text).
    """
    race_page = bs4.BeautifulSoup(race_page_text, "html.parser")
    # The rider links live under the element following the "All Riders" heading.
    rider_links = (
        race_page
        .find(string=re.compile('All Riders.*'))
        .find_parent('h3')
        .next_sibling
        .find_all('a', title="Click for individual history")
    )
    return [
        {
            'url_fragment': parse_qs(urlparse(rider_link['href']).query)['name'][0],
            'name': rider_link.string,
        }
        for rider_link in rider_links
    ]
def main():
    """Fetch a race page, parse its riders and write them out as JSON."""
    args = common.get_base_argparser().parse_args()
    common.configure_logging(args)
    logging.info('Getting Riders')
    session = common.get_trackleaders_session()
    race_page_response = session.get('http://trackleaders.com/{}f.php'.format(args.race))
    race_page_response.raise_for_status()
    riders = parse_riders_from_race_page(race_page_response.text)
    race_path = common.get_race_path(args.race)
    # Create the per-race output directory on first run.
    if not os.path.exists(race_path):
        os.mkdir(race_path)
    with open(common.get_riders_path(race_path), 'w') as f:
        json.dump(riders, f, indent=2, sort_keys=True)
| garyvdm/trackleaders_scraper | trackleaders_scraper/getriders.py | getriders.py | py | 1,355 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "bs4.BeautifulSoup",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "urllib.parse.parse_qs",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "urllib.parse.urlpa... |
import os
from celery import Celery
from celery.schedules import crontab
# Set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sendmails.settings')
app = Celery('sendmails')
# All Celery settings come from the Django settings, prefixed with CELERY_.
app.config_from_object('django.conf:settings', namespace='CELERY')
# Load task modules from all registered Django apps.
app.autodiscover_tasks()
app.conf.beat_schedule = {
    'generate_weekly_triggers': {
        'task': 'app.tasks.send_scheduled_mails',
        # NOTE(review): a numeric schedule is in seconds, so this fires every
        # second despite the 'weekly' entry name — confirm intent ('crontab'
        # is imported but unused in the visible code).
        'schedule': 1,
    }
}
@app.task(bind=True, ignore_result=True)
def debug_task(self):
print(f'Request: {self.request!r}') | sama50/sendmails | sendmails/celery.py | celery.py | py | 635 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.environ.setdefault",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "celery.Celery",
"line_number": 9,
"usage_type": "call"
}
] |
27366167047 | import torch
from torch.utils.data import Dataset
from gpt3.utils import add_special_tokens
from data_utils import *
tokenizer = add_special_tokens()
class GPT21024Dataset(Dataset):
    """Dataset yielding fixed-length token sequences of the form
    ``article <sep> abstract`` padded with the pad token up to ``max_len``,
    together with the index where the article ends."""
    def __init__(self, records, max_len=2048):
        # records: sequence of dicts with pre-tokenized 'text' and 'summary'.
        self.data = records
        self.max_len = max_len
        self.tokenizer = add_special_tokens()
        # NOTE(review): assumes encode() returns lists of token ids
        # (HuggingFace-style tokenizer) — confirm.
        self.sep_token = self.tokenizer.encode(self.tokenizer.sep_token)
        self.pad_token = self.tokenizer.encode(self.tokenizer.pad_token)
    def __len__(self):
        return len(self.data)
    def _truncate(self, article, abstract, addit_symbols_count = 1):
        # Shorten the article so article + separator + abstract fits within
        # max_len tokens; addit_symbols_count reserves room for the separator.
        len_abstract = len(abstract)
        max_len_article = self.max_len - len_abstract - addit_symbols_count
        return article[:max_len_article], abstract
    def __getitem__(self, idx):
        sample = self.data[idx]
        # Pre-fill the whole sequence with padding, then overwrite the prefix.
        # NOTE(review): assumes the pad token encodes to exactly one id — confirm.
        text = self.pad_token * self.max_len
        article, abstract = self._truncate(sample['text'], sample['summary'])
        content = article + self.sep_token + abstract
        text[:len(content)] = content
        text = torch.tensor(text)
        # 'sum_idx' marks where the article ends (the separator position).
        sample = {'article': text, 'sum_idx': len(article)}
        return sample
if __name__ == "__main__":
    # Smoke test: load and tokenize the three Gazeta splits.
    train_records = read_gazeta_records("gazeta_train.jsonl")
    val_records = read_gazeta_records("gazeta_val.jsonl")
    test_records = read_gazeta_records("gazeta_test.jsonl")
    train_records = tokenize_gazeta(train_records)
    val_records = tokenize_gazeta(val_records)
    test_records = tokenize_gazeta(test_records)
| Paleontolog/summarizer_service | train_model/gpt3/dataset.py | dataset.py | py | 1,569 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "gpt3.utils.add_special_tokens",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.Dataset",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "gpt3.utils.add_special_tokens",
"line_number": 14,
"usage_type": "call"
},
{
... |
10702184707 | # Import the dependencies.
import numpy as np
import pandas as pd
import datetime as dt
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
# Engine over the bundled SQLite climate database (relative to the app's cwd).
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
#################################################
# Database Setup
#################################################
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(autoload_with=engine)
# Save references to each table
Measurement = Base.classes.measurement
Station = Base.classes.station
# Create our session (link) from Python to the DB
# NOTE(review): this module-level session is never used — every route opens
# (and closes) its own Session. Kept for backward compatibility.
session = Session(engine)
#################################################
# Flask Setup
#################################################
app = Flask(__name__)
#################################################
# Flask Routes
#################################################
@app.route("/")
def climate():
    """Landing page: list every available API route."""
    routes = [
        "/api/v1.0/percipitation",
        "/api/v1.0/stations",
        "/api/v1.0/tobs",
        "/api/v1.0/ start",
        "/api/v1.0/start/end",
    ]
    return "".join(route + "<br/>" for route in routes)
@app.route("/api/v1.0/percipitation")
def percipitation():
    """Precipitation observations for the final year of data, keyed by date."""
    session = Session(engine)
    # One year back from the last date in the dataset (2017-08-23).
    year_ago = dt.date(2017, 8, 23) - dt.timedelta(days=365)
    rows = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= year_ago).all()
    # Build {date: precipitation} from the (date, prcp) result tuples.
    precip_by_date = {date: prcp for date, prcp in rows}
    session.close()
    return jsonify(precip_by_date)
##object is a unit of code that computer understands, computer understands c
##jsonify converts into object computer understands, similar to translation tool
##/api/v1.0/stations
@app.route("/api/v1.0/stations")
def stations():
    """All station identifiers as a flat JSON list."""
    session = Session(engine)
    rows = session.query(Station.station).all()
    # Flatten the list of one-element row tuples.
    station_ids = list(np.ravel(rows))
    session.close()
    return jsonify(station_ids)
##/api/v1.0/tobs
@app.route("/api/v1.0/tobs")
def tobs():
    """Last year of temperature observations for station USC00519281."""
    session = Session(engine)
    year_ago = dt.date(2017, 8, 23) - dt.timedelta(days=365)
    rows = (
        session.query(Measurement.tobs)
        .filter(Measurement.station == "USC00519281")
        .filter(Measurement.date >= year_ago)
        .all()
    )
    temps = list(np.ravel(rows))
    session.close()
    return jsonify(temps)
##runs the app(variable created substantiating flask)
##dynamic route means that user can choose
##dynamic route means based on user input
##m, d y allows user to input date and year, uppercase year is full year, lowercase is last two digits
@app.route ("/api/v1.0/<start>")
@app.route ("/api/v1.0/<start>/<end>")
def stats(start = None, end = None):
    """TMIN/TMAX/TAVG for station USC00519281 from `start`, optionally to `end`.

    Dates arrive as MM-DD-YYYY path segments.
    """
    session = Session(engine)
    selection = [func.min(Measurement.tobs), func.max(Measurement.tobs), func.avg(Measurement.tobs)]
    start = dt.datetime.strptime(start, "%m-%d-%Y")
    query = session.query(*selection).filter(Measurement.station == "USC00519281").filter(Measurement.date >= start)
    if end:
        # Bounded range: also cap at the parsed end date.
        end = dt.datetime.strptime(end, "%m-%d-%Y")
        query = query.filter(Measurement.date <= end)
    results = query.all()
    session.close()
    return(jsonify(list(np.ravel(results))))
if __name__ == "__main__":
    # Start the Flask development server (debug mode enables auto-reload).
    app.run(debug=True)
| isabelleroet/sqlalchemy-challenge | app.py | app.py | py | 4,486 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlalchemy.create_engine",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ext.automap.automap_base",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 27,
"usage_type": "call"
},
{
... |
14411049043 | # coding: utf-8
import os
import sys
import logging
#logging.basicConfig()
from traceback import print_exc
# Directory containing this module, resolved against the current working dir.
__location__ = os.path.realpath(
    os.path.join(os.getcwd(), os.path.dirname(__file__)))
def getLogger(name):
    """Return a logger writing to stderr and, if ./logs exists, to a file.

    logging.getLogger returns a cached instance per name, so without the
    guard below every call would attach another handler and duplicate
    every log line. Repeated calls now return the configured logger as-is.
    """
    logger = logging.getLogger(name)
    if logger.handlers:
        # Already configured by a previous call — do not stack handlers.
        return logger
    formatter = logging.Formatter(
        fmt='%(asctime)s %(filename)s:%(lineno)s: %(levelname)-8s %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')
    console_handler = logging.StreamHandler(stream=sys.stderr)
    console_handler.setFormatter(formatter)
    logger.setLevel(logging.DEBUG) # default level
    logger.addHandler(console_handler)
    log_file_path = os.path.join(
        __location__,
        'logs/common-logs.txt')
    log_dir = os.path.dirname(log_file_path)
    # Only attach the file handler when the logs directory already exists.
    if os.path.isdir(log_dir):
        file_handler = logging.FileHandler(log_file_path,mode='a')
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    return logger
if __name__ == '__main__':
    # Manual smoke test; logger.warning replaces the deprecated logger.warn.
    logger = getLogger(__name__)
    logger.warning('Hello, World')
| rhee/browser-websocket-tts-server | getlogger.py | getlogger.py | py | 1,049 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.realpath",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": ... |
29296064672 | from django.urls import path
from . import views
# URL routes for the lifestyle app: a list view plus a pk-keyed detail view
# for each of the three categories (health, fashion, cooking).
urlpatterns = [
    path('healths/', views.HealthListView.as_view(), name='healths'),
    path('health/<int:pk>', views.HealthDetailView.as_view(), name='healths-detail'),
    path('fashions/', views.FashionListView.as_view(), name='fashions'),
    path('fashion/<int:pk>', views.FashionDetailView.as_view(), name='fashion-detail'),
    path('cooks/', views.CookListView.as_view(), name='cooks'),
    path('cook/<int:pk>', views.CookDetailView.as_view(), name='cook-detail'),
]
| anowar143/django-news-frontend | src/lifestyle/urls.py | urls.py | py | 534 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
21644919031 | import glob
import os
import sys
import platform
import typer
import shutil
from .utils import runCommand, loadPedaUserConfig
loadPedaUserConfig()
app = typer.Typer()
@app.command(
    context_settings={"allow_extra_args": True, "ignore_unknown_options": True}
)
def genbuild(ctx: typer.Context, buildDir: str = './build'):
    # Generate the CMake build tree in buildDir; unknown CLI args pass through to cmake.
    runCommand(["cmake", ".", "-B", buildDir] + ctx.args)
@app.command()
def delbuild(buildDir: str = './build'):
    # Delete the whole build tree (raises if buildDir does not exist).
    shutil.rmtree(buildDir)
@app.command(
    context_settings={"allow_extra_args": True, "ignore_unknown_options": True}
)
def build(ctx: typer.Context, buildDir: str = './build'):
    # Drive the underlying build system via cmake; extra args forwarded verbatim.
    runCommand(["cmake", "--build", buildDir] + ctx.args)
@app.command()
def clean(buildDir: str = './build'):
    # Invoke the generator's "clean" target without removing the tree itself.
    runCommand(["cmake", "--build", buildDir, "--target", "clean"])
@app.command()
def sln(buildDir: str = './build'):
    """Open the first Visual Studio solution in the build tree (Windows only)."""
    if platform.system() != 'Windows':
        # Solutions only exist for the Visual Studio generator.
        return
    solutions = glob.glob(buildDir + '/*.sln')
    if not solutions:
        print("No solution files found!", file=sys.stderr)
        return
    os.startfile(solutions[0])
@app.command(
    context_settings={"allow_extra_args": True, "ignore_unknown_options": True}
)
def run(ctx: typer.Context, prog: str, buildDir: str = './build', buildConfig: str = 'Debug'):
    """Run a built program, forwarding any extra CLI args to it.

    On Windows the executable is <buildDir>/<buildConfig>/<prog>.exe;
    elsewhere it is <buildDir>/<prog>.
    """
    if prog is None:  # identity check, not `== None`; typer normally guarantees prog is set
        print("Program name missing", file=sys.stderr)
        return
    if platform.system() == 'Windows':
        exePath = os.path.join(buildDir, buildConfig, prog + '.exe')
    else:
        exePath = os.path.join(buildDir, prog)
    runCommand([exePath] + ctx.args)
| abhishekmishra/peda | pypeda/peda/cmk.py | cmk.py | py | 1,892 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "utils.loadPedaUserConfig",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "typer.Typer",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "typer.Context",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "utils.runComm... |
43489935655 | import inspect
import logging
from logging.handlers import TimedRotatingFileHandler
# Module-wide 'server' logger: DEBUG level, file rotated every minute, 5 backups kept.
log_server = logging.getLogger('server')
log_server.setLevel(logging.DEBUG)
# NOTE(review): Windows-style backslash path; assumes log\logs exists — confirm.
rotate_handler = TimedRotatingFileHandler("log\logs\server.log", when='m', interval=1, backupCount=5)
rotate_handler.suffix = '%Y%m%d'
formatter = logging.Formatter("%(asctime)s %(levelname)-5s %(module)s %(message)s")
rotate_handler.setFormatter(formatter)
log_server.addHandler(rotate_handler)
def log_server_deco(func):
    """Decorator: log the caller's function name on every invocation of func.

    functools.wraps preserves func's __name__/__doc__ on the wrapper (the
    previous version reported every decorated function as 'wrapper').
    """
    from functools import wraps  # local import keeps the module import block untouched
    @wraps(func)
    def wrapper(*args, **kwargs):
        log_server.info(f"Функция {func.__name__} вызвана из функции {inspect.currentframe().f_back.f_code.co_name}")
        return func(*args, **kwargs)
    return wrapper
| solovyova-1996/async_chat | chat/log/server_log_config.py | server_log_config.py | py | 719 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "logging.handlers.TimedRotatingFileHandler",
"line_number": 7,
"usage_type": "call"
},
{
"api_n... |
8555925095 | import os
import sys
import time
import jsbsim
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
torch.set_num_threads(8)  # cap intra-op CPU parallelism
# Make the pFCM package under the JSBSim root importable.
sys.path.append(str(jsbsim.get_default_root_dir()) + '/pFCM/')
from repLearning.repLearning import Representation
from src.environments.jsbsim.jsbsimEnv import DogfightEnv as Env
class DogfightDataset(Dataset):
    """Wraps parallel status/property/label containers as a torch Dataset.

    status:   per-sample state tensors, size [num_of_data, 9, 10, 50, 50]
    property: per-sample property vectors, size [num_of_data, num_of_property]
    label:    per-sample targets, size [num_of_data]
    """
    def __init__(
        self,
        status,
        property,
        label,
    ):
        self.status = status
        self.property = property
        self.label = label
    def __len__(self):
        return len(self.status)
    def __getitem__(self, index):
        return {
            "status": self.status[index],
            "property": self.property[index],
            "label": self.label[index],
        }
class IDSD():
    """Self-play dogfight trainer: rolls episodes in the JSBSim env and fits
    the representation CNN on the winner's recorded trajectory."""
    def __init__(
        self,
        modelPath='/data/wnn_data/bestModel/',
    ) -> None:
        from repLearning.cnn import Model
        self.model = Model(
        )
        self.modelPath = modelPath
        # Create the checkpoint directory on first use.
        if not os.path.exists(self.modelPath):
            os.mkdir(self.modelPath)
        # Resume from an existing checkpoint when the directory is non-empty.
        if os.listdir(self.modelPath) != []:
            try:
                self.model.load_state_dict(torch.load(self.modelPath + 'Epoch.pt'))
            except:
                # NOTE(review): bare except hides the real load failure; the
                # sleep only gives the operator time to see the message.
                print("Model Loading Error!")
                time.sleep(1)
    def episode(
        self,
        device,
        optimizer,
    ):
        # Run one full dogfight, record both fighters' actions every 12th
        # frame, then train on whichever side won.
        env = Env()
        print("**********Nof: {}**********".format(env.getNof()))
        wins_record1 = []
        wins_record2 = []
        while True:
            terminate = env.step(playSpeed=0)
            if terminate != 0:
                break
            rep1 = Representation(env)
            rep2 = Representation(env)
            # Fighter 1 gets exploration noise on its first 4 action dims.
            rl1 = rep1.getRepresentation('IDSD', self.model, device, 1) + torch.cat([torch.rand([1, 4]) / 2, torch.zeros([1, 1])], dim=1).to(device)
            rl2 = rep2.getRepresentation('IDSD', self.model, device, 2)
            if env.getNof() % 12 == 0:
                # NOTE(review): after the first snapshot wins_record1 is a
                # Tensor, so `== []` is no longer a plain emptiness check —
                # verify this comparison behaves as intended.
                if wins_record1 == []:
                    wins_record1 = torch.cat([rl1, torch.ones([1, 1]).to(device)], dim=1).unsqueeze(0)
                    input1 = rep1.getStatus()
                    inputp_1 = torch.Tensor(rep1.getProperty(1)).unsqueeze(0)
                    wins_record2 = torch.cat([rl2, torch.ones([1, 1]).to(device)], dim=1).unsqueeze(0)
                    input2 = rep2.getStatus()
                    inputp_2 = torch.Tensor(rep2.getProperty(2)).unsqueeze(0)
                else:
                    wins_record1 = torch.cat([wins_record1, torch.cat([rl1, torch.ones(1, 1).to(device)], dim=1).unsqueeze(0)], dim=0)
                    input1 = torch.cat([input1, rep1.getStatus()])
                    inputp_1 = torch.cat([inputp_1, torch.Tensor(rep1.getProperty(1)).unsqueeze(0)])
                    wins_record2 = torch.cat([wins_record2, torch.cat([rl2, torch.ones(1, 1).to(device)], dim=1).unsqueeze(0)], dim=0)
                    input2 = torch.cat([input2, rep2.getStatus()])
                    inputp_2 = torch.cat([inputp_2, torch.Tensor(rep2.getProperty(2)).unsqueeze(0)])
            env.getFdm(1).sendAction(rl1.tolist())
            env.getFdm(2).sendAction(rl2.tolist())
        # Keep only the winner's trajectory; -1 means no winner (abort).
        if terminate == 1:
            wins_data = wins_record1
            wins_input = input1
            wins_inputp = inputp_1
        elif terminate == 2:
            wins_data = wins_record2
            wins_input = input2
            wins_inputp = inputp_2
        elif terminate == -1:
            return
        else:
            raise Exception("Return code error!", terminate)
        fullDataset = DogfightDataset(wins_input, wins_inputp, wins_data)
        trainLoader = DataLoader(dataset=fullDataset, batch_size=1, shuffle=True)
        for _ in range(1):
            for batch in trainLoader:
                self.model.train()
                status = batch['status']
                property = batch['property']
                label = batch['label']
                status = status.to(device)
                property = property.to(device)
                label = label.to(device)
                pred = self.model(status, property) # torch.Size([1, 7])
                pred = pred.to(device)
                # for i in range(0, 3):
                #     label[0, i] = min(max(label[0, i], 1), -1)
                # for i in range(3, 6):
                #     label[0, i] = min(max(label[0, i], 1), 0)
                # print(batch_pred.size())
                # pred = F.normalize(pred, dim=0)
                # label = F.normalize(pred, dim=0)
                print(pred.size())
                # Squared error over the first 5 action dimensions only.
                loss = 0
                label = label.squeeze().unsqueeze(dim=0)
                print(label)
                for i in range(5):
                    # loss = loss + np.log(np.abs(pred[0, i].item()) + .0001) * np.abs(label[0, i].item())
                    loss = loss + (label[0, i].item() - pred[0, i].item()) ** 2
                # loss = loss_function(pred, label)
                print("pred: {}\nlabel: {}".format(pred, label))
                print("loss: {}".format(loss))
                # NOTE(review): wrapping a Python float in torch.tensor(...)
                # detaches the loss from the model graph — backward() cannot
                # reach the model parameters this way; verify gradients flow.
                loss = torch.tensor(loss, requires_grad=True)
                self.model.zero_grad()
                loss.backward(retain_graph=True)
                optimizer.step()
    def train(
        self,
        epochs=20000,
        cuda='0',
        optimizer='SGD',
        lr=1e-2,
        momentum=0.9,
        weight_decay=1e-3
    ):
        # Run `epochs` self-play episodes, checkpointing after each one.
        device = torch.device("cuda:{}".format(cuda) if torch.cuda.is_available() else "cpu")
        self.model.to(device)
        if optimizer == 'SGD':
            # NOTE(review): positional args map to SGD(params, lr, momentum,
            # dampening, ...), so weight_decay actually lands in `dampening`
            # here — confirm and prefer keyword arguments.
            optimizer = optim.SGD(self.model.parameters(), lr, momentum, weight_decay)
        elif optimizer == 'Adam':
            raise Exception("Optimizer Adam isn't supported yet :(")
        else:
            raise Exception("Optimizer {} doesn't exist.".format(optimizer))
        for _ in tqdm(range(epochs)):
            self.episode(device, optimizer)
            torch.save(self.model.state_dict(), self.modelPath + 'Epoch.pt')
if __name__ == '__main__':
    # Entry point: build the trainer and run self-play on GPU 3.
    model = IDSD(
    )
    model.train(
        cuda='3',
    )
| mrwangyou/IDSD | src/model/idsd.py | idsd.py | py | 6,469 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "torch.set_num_threads",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "jsbsim.get_defaul... |
7667813533 | import glob
import pathlib
import pandas as pd
import xlsxwriter
import ghgscenario as gs
import lukeghg.crf.ghginventory as ghg
# Maps each UID seen in scenario files to its source file name
# (populated as a side effect of scen_files_to_uid_set).
uid_file_dict = dict()
def uidmatrix_cell_count(uid_set:set):
    """Number of UID cells in the given set."""
    count = len(uid_set)
    return count
def possible_missing_uid(scenuid_set,uidmatrix_set):
    """UIDs present in the scenario files but absent from the UID matrix."""
    return {uid for uid in scenuid_set if uid not in uidmatrix_set}
def scen_files_to_uid_set(files:str,uid_mapping_file:str):
    """Collect every UID found in the scenario inventory files.

    files: wildcard pattern of scenario inventory files.
    uid_mapping_file: mapping file passed through to the GHG parser.
    Side effect: records each UID's source file name in the module-level
    uid_file_dict.
    """
    file_ls = glob.glob(files)
    uid_set = set()
    for fname in file_ls:
        p=pathlib.Path(fname)
        ls = ghg.ParseGHGInventoryFile(fname,uid_mapping_file)
        for x in ls:
            if len(x) == 0:
                print("Empty line")
            else:
                # First element of each parsed row is the UID.
                uid = x.pop(0)
                uid_set.add(uid)
                uid_file_dict[uid]=p.name
    return uid_set
def uid_matrix_to_uid_cells(uid_matrix_file:str):
    """Set of all cell values from the UID matrix, starting at column FL-FL.

    Unlike uid_matrix_to_uid_set, empty (NaN) cells are NOT filtered out,
    so this is used for the raw cell count.
    """
    df = gs.read_uid_matrix_file(uid_matrix_file)
    df = df.loc[:,'FL-FL':]
    a = df.to_numpy()
    a = a.flatten()
    uid_set = set(a)
    return uid_set
def uid_matrix_to_uid_set(uid_matrix_file:str):
    """Return the set of non-empty UID cells from a UID matrix file.

    Unlike uid_matrix_to_uid_cells, empty (NaN) spreadsheet cells are
    dropped, so the result contains only real UIDs.
    """
    df = gs.read_uid_matrix_file(uid_matrix_file)
    # Start column is FL-FL
    df = df.loc[:,'FL-FL':]
    # Flatten the cell matrix to a single array of values.
    a = df.to_numpy().flatten()
    # Filter out empty cells. (A dead `uid_set = set(a)` pre-assignment,
    # immediately overwritten by this comprehension, was removed.)
    uid_set = {x for x in a if pd.notna(x)}
    return uid_set
def create_uid_df(uidmatrix_count,uidmatrix_set,scenuid_set,missing_uid_set):
    """Build a side-by-side report frame: matrix UIDs, missing UIDs (with
    their source files) and scenario-file UIDs (with their source files).

    Column headers embed the per-column counts; the index name carries the
    raw cell count of the UID matrix.
    """
    matrix_uids = sorted(uidmatrix_set)
    missing_uids = sorted(missing_uid_set)
    scen_uids = sorted(scenuid_set)
    # Source file for each UID, from the module-level uid_file_dict.
    missing_files = [uid_file_dict[uid] for uid in missing_uids]
    scen_files = [uid_file_dict[uid] for uid in scen_uids]
    report_columns = [matrix_uids, missing_uids, missing_files, scen_uids, scen_files]
    df = pd.DataFrame(report_columns).transpose()
    df.columns = [
        "In UIDMatrix: " + str(len(matrix_uids)),
        "Missing from UIDMatrix: " + str(len(missing_uids)),
        "File",
        "In Scenario files: " + str(len(scen_uids)),
        "File",
    ]
    df.index.name = "All UIDMatrix entries: " + str(uidmatrix_count)
    return df
def create_missing_scen_uid_excel(uid_matrix_file:str,scen_files:str,uid_mapping_file:str,excel_file:str):
    """Create excel file containing missing uid from the inventory
    uid_matrix_file: template uid matrix file
    scen_files: scenario inventory files (wild card search)
    uid_mapping_file: the uid mapping file
    excel_file: output file for missing uid
    """
    # Raw cell count (includes empty cells) vs. the filtered UID set.
    uidmatrix_count = uidmatrix_cell_count(uid_matrix_to_uid_cells(uid_matrix_file))
    uidmatrix_set = uid_matrix_to_uid_set(uid_matrix_file)
    # Also populates uid_file_dict as a side effect (used by create_uid_df).
    scenuid_set = scen_files_to_uid_set(scen_files,uid_mapping_file)
    missing_uid_set = possible_missing_uid(scenuid_set,uidmatrix_set)
    df = create_uid_df(uidmatrix_count,uidmatrix_set,scenuid_set,missing_uid_set)
    writer = pd.ExcelWriter(excel_file,engine='xlsxwriter')
    df.to_excel(writer,sheet_name='UIDMatrix')
    writer.close()
| jariperttunen/lukeghg | lukeghg/lukeghg/scen/missinguid.py | missinguid.py | py | 2,888 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "glob.glob",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "lukeghg.crf.ghginventory.ParseGHGInventoryFile",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": ... |
72640397223 | import librosa
import tensorflow as tf
import numpy as np
from copy import deepcopy
from tensorflow.keras.layers import Dense, Activation, Dropout, Conv1D, MaxPooling1D, BatchNormalization
from tensorflow.contrib.rnn import GRUCell, RNNCell
from util.hparams import *
def pre_net(input_data, training):
    """Tacotron pre-net: two Dense+ReLU stages (256 then 128 units),
    each followed by dropout controlled by `training`."""
    hidden = input_data
    for units in (256, 128):
        hidden = Dense(units)(hidden)
        hidden = Activation('relu')(hidden)
        hidden = Dropout(0.5)(hidden, training=training)
    return hidden
def CBHG(input_data, sequence_length, K, conv_dim):
    """Tacotron CBHG module: conv bank + highway network + bidirectional GRU.

    input_data: [batch, time, channels] features.
    sequence_length: per-example valid lengths for the dynamic RNN.
    K: number of conv-bank kernel widths (1..K).
    conv_dim: channel sizes of the two projection convolutions.
    Returns the bidirectional GRU outputs concatenated over directions.
    """
    # Convolution bank: K parallel Conv1D branches with kernel widths 1..K,
    # each batch-normalized and ReLU-activated, concatenated on channels.
    x = tf.concat([
        Activation('relu')(BatchNormalization()(
            Conv1D(128, kernel_size=k, strides=1, padding='same')(input_data))) for k in range(1, K+1)], axis=-1)
    x = MaxPooling1D(pool_size=2, strides=1, padding='same')(x)
    # Two projection convolutions back down to conv_dim channels.
    x = Conv1D(conv_dim[0], kernel_size=3, strides=1, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv1D(conv_dim[1], kernel_size=3, strides=1, padding='same')(x)
    x = BatchNormalization()(x)
    # Residual connection around the conv stack.
    highway_input = input_data + x
    # When used as the post-net (K == 8) the channel count must be matched
    # to the 128-unit highway layers first.
    if K == 8:
        highway_input = Dense(128)(highway_input)
    # Four highway layers: gated mix of transform H and carry (identity).
    for _ in range(4):
        H = Dense(128)(highway_input)
        H = Activation('relu')(H)
        # Negative gate bias starts the network biased toward carry.
        T = Dense(128, bias_initializer=tf.constant_initializer(-1.0))(highway_input)
        T = Activation('sigmoid')(T)
        highway_input = H * T + highway_input * (1.0 - T)
    x, _ = tf.nn.bidirectional_dynamic_rnn(
        GRUCell(128),
        GRUCell(128),
        highway_input,
        sequence_length=sequence_length,
        dtype=tf.float32)
    # Concatenate forward and backward GRU outputs on the channel axis.
    x = tf.concat(x, axis=2)
    return x
class ConcatWrapper(RNNCell):
    """RNNCell wrapper that appends the attention context vector to the
    wrapped cell's output at every step (state handling is delegated)."""
    def __init__(self, cell):
        super(ConcatWrapper, self).__init__()
        self.cell = cell
    @property
    def state_size(self):
        # Same state as the wrapped cell.
        return self.cell.state_size
    @property
    def output_size(self):
        # Output grows by the attention-context width.
        return self.cell.output_size + self.cell.state_size.attention
    def zero_state(self, batch_size, dtype):
        return self.cell.zero_state(batch_size, dtype)
    def call(self, inputs, state):
        output, res_state = self.cell(inputs, state)
        # Concatenate the step output with the attention context from the
        # (attention-wrapper) state on the feature axis.
        return tf.concat([output, res_state.attention], axis=-1), res_state
def griffin_lim(spectrogram, n_iter=50):
    """Reconstruct a time-domain waveform from a magnitude spectrogram.

    Runs `n_iter` Griffin-Lim phase-estimation iterations (default 50,
    matching the previously hard-coded count). STFT parameters
    (n_fft, hop_length, win_length) come from util.hparams.

    spectrogram: STFT-magnitude array of shape [n_fft//2+1, frames].
    Returns the real-valued waveform as a numpy array.
    """
    spec = deepcopy(spectrogram)
    for i in range(n_iter):
        # Invert the current estimate, re-analyze, and keep only its phase.
        est_wav = librosa.istft(spec, hop_length=hop_length, win_length=win_length)
        est_stft = librosa.stft(est_wav, n_fft=n_fft, hop_length=hop_length, win_length=win_length)
        phase = est_stft / np.maximum(1e-8, np.abs(est_stft))
        # Combine the known magnitude with the refined phase estimate.
        spec = spectrogram * phase
    wav = librosa.istft(spec, hop_length=hop_length, win_length=win_length)
    return np.real(wav)
| chldkato/Tacotron-Korean | models/modules.py | modules.py | py | 2,717 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "tensorflow.keras.layers.Dense",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.layers.Activation",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.layers.Dropout",
"line_number": 13,
"usage_type": "call... |
21628491364 | from django.db import models
from django.conf import settings
from django.db import models, transaction
from django.core.mail import send_mail
import secrets
import string
from translation.metadata import TRANSLATION_LANGUAGE_CHOICES
from django.contrib.postgres.fields import ArrayField
# Django choice tuples shared by the models below:
# each entry is (stored value, human-readable label).
TRANSCRIPT_TYPE = (
    ("ORIGINAL_SOURCE", "Original Source"),
    ("MACHINE_GENERATED", "Machine Generated"),
    ("MANUALLY_CREATED", "Manually Created"),
    ("MANUALLY_UPLOADED", "Manually Uploaded"),
)
TRANSLATION_TYPE_CHOICES = (
    ("MACHINE_GENERATED", "Machine Generated"),
    ("MANUALLY_CREATED", "Manually Created"),
    ("ORIGINAL_SOURCE", "Original Source"),
)
VOICEOVER_TYPE_CHOICES = (
    ("MACHINE_GENERATED", "Machine Generated"),
    ("MANUALLY_CREATED", "Manually Created"),
)
# Workflow stages a task can represent.
TASK_TYPE = (
    ("TRANSCRIPTION_EDIT", "Transcription Edit"),
    ("TRANSCRIPTION_REVIEW", "Transcription Review"),
    ("TRANSLATION_EDIT", "Translation Edit"),
    ("TRANSLATION_REVIEW", "Translation Review"),
    ("VOICEOVER_EDIT", "VoiceOver Edit"),
)
class Organization(models.Model):
    """
    Model for organizations

    Holds per-organization identity plus default workflow settings
    (default assignees, task types and target languages) that seed
    new projects created under the organization.
    """
    title = models.CharField(
        verbose_name="organization_title", max_length=512, null=False, unique=True
    )
    email_domain_name = models.CharField(
        verbose_name="organization_email_domain", max_length=512, null=True
    )
    is_active = models.BooleanField(
        verbose_name="organization_is_active",
        default=True,
        help_text=("Designates whether an organization is active or not."),
    )
    created_by = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.SET_NULL,
        null=True,
        related_name="organization_created",
        verbose_name="created_by",
    )
    organization_owner = models.OneToOneField(
        settings.AUTH_USER_MODEL,
        on_delete=models.SET_NULL,
        null=True,
        related_name="organization_owned",
        verbose_name="organization_owner",
    )
    # Default assignees applied to newly created tasks; all optional.
    default_transcript_editor = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        verbose_name="transcript editor",
        related_name="transcript_editor",
        on_delete=models.SET_NULL,
        default=None,
        null=True,
        blank=True,
    )
    default_transcript_reviewer = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        verbose_name="transcript reviewer",
        related_name="transcript_reviewer",
        on_delete=models.SET_NULL,
        default=None,
        null=True,
        blank=True,
    )
    default_translation_editor = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        verbose_name="translation editor",
        related_name="translation_editor",
        on_delete=models.SET_NULL,
        default=None,
        null=True,
        blank=True,
    )
    default_translation_reviewer = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        verbose_name="translation reviewer",
        related_name="translation_reviewer",
        on_delete=models.SET_NULL,
        default=None,
        null=True,
        blank=True,
    )
    # Default source types for generated artifacts (see choice tuples above).
    default_transcript_type = models.CharField(
        choices=TRANSCRIPT_TYPE,
        max_length=35,
        default=None,
        verbose_name="default transcript type",
        null=True,
        blank=True,
    )
    default_translation_type = models.CharField(
        choices=TRANSLATION_TYPE_CHOICES,
        max_length=35,
        verbose_name="Default Translation Type",
        default=None,
        null=True,
        blank=True,
    )
    created_at = models.DateTimeField(verbose_name="created_at", auto_now_add=True)
    updated_at = models.DateTimeField(verbose_name="updated_at", auto_now=True)
    default_voiceover_type = models.CharField(
        choices=VOICEOVER_TYPE_CHOICES,
        max_length=35,
        default=None,
        verbose_name="Project Default VoiceOver Type",
        null=True,
        blank=True,
    )
    # Workflow stages enabled by default for new projects.
    default_task_types = ArrayField(
        models.CharField(
            choices=TASK_TYPE,
            blank=True,
            default=None,
            null=True,
            max_length=50,
        ),
        blank=True,
        default=None,
        null=True,
        verbose_name="Organization Default Task Types",
    )
    default_target_languages = ArrayField(
        models.CharField(
            choices=TRANSLATION_LANGUAGE_CHOICES,
            blank=True,
            default=None,
            null=True,
            max_length=50,
        ),
        blank=True,
        default=None,
        null=True,
        verbose_name="Organization Default Target Languages",
    )
    description = models.TextField(
        max_length=1000, null=True, blank=True, help_text=("Organization Description")
    )
    enable_upload = models.BooleanField(
        verbose_name="enable_upload",
        default=False,
        help_text=("Indicates whether CSV upload is enable or not."),
    )
    def __str__(self):
        return self.title + ", id=" + str(self.pk)
class Invite(models.Model):
    """
    Invites to invite users to organizations.

    One invite per user; create_invite (re)issues the invite code and
    emails the invite link.
    """
    user = models.OneToOneField(
        settings.AUTH_USER_MODEL,
        related_name="invite_users",
        on_delete=models.CASCADE,
        null=True,
    )
    organization = models.ForeignKey(
        Organization,
        on_delete=models.CASCADE,
        null=True,
        related_name="invite_oganization",
        verbose_name="organization",
    )
    invite_code = models.CharField(
        verbose_name="invite_code", max_length=256, null=True, unique=True
    )
    def __str__(self):
        return str(self.user.email)
    @classmethod
    def create_invite(cls, organization=None, users=None):
        """Issue (or re-issue) invite codes for `users` and email each link.

        organization: target Organization; None means an org-owner invite.
        users: iterable of user instances (None is treated as empty).
        """
        users = users or []
        with transaction.atomic():
            for user in users:
                try:
                    invite = Invite.objects.get(user=user)
                except Invite.DoesNotExist:
                    # Narrowed from a bare `except:` so real DB errors are
                    # no longer silently treated as "invite missing".
                    invite = Invite.objects.create(organization=organization, user=user)
                invite.invite_code = cls.generate_invite_code()
                invite.save()
                if organization is not None:
                    organization_name = organization.title
                else:
                    organization_name = "be the Org Owner."
                send_mail(
                    "Invitation to join Organization",
                    f"Hello! You are invited to {organization_name}. Your Invite link is: https://chitralekha.ai4bharat.org/#/invite/{invite.invite_code}",
                    settings.DEFAULT_FROM_EMAIL,
                    [user.email],
                )
    # def has_permission(self, user):
    #     if self.organization.created_by.pk == user.pk or user.is_superuser:
    #         return True
    #     return False
    @classmethod
    def generate_invite_code(cls):
        # Cryptographically secure 10-character alphanumeric code.
        return "".join(
            secrets.choice(string.ascii_uppercase + string.digits) for _ in range(10)
        )
| AI4Bharat/Chitralekha-Backend | backend/organization/models.py | models.py | py | 6,964 | python | en | code | 18 | github-code | 36 | [
{
"api_name": "django.db.models.Model",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 42,
"usage_type": "call"
},
{
"api_name"... |
1938145805 | import pathlib
from typing import List
import kclvm.config
import kclvm.internal.util as util
import kclvm.internal.gpyrpc.gpyrpc_pb2 as pb2
KCL_MOD_PATH_ENV = "${KCL_MOD}"
def load_settings_files(
    work_dir: str, files: List[str]
) -> pb2.LoadSettingsFiles_Result:
    """Load KCL CLI config from the setting files.
    Parameter
    ---------
    work_dir : str
        The kcl run work directory.
    files:
        The setting YAML files.
    Returns
    -------
    result: LoadSettingsFiles_Result
        The merged kcl singleton config.
    """
    from kclvm.compiler.vfs import GetPkgRoot
    # No setting files: return an empty config/options pair.
    if not files:
        return pb2.LoadSettingsFiles_Result(
            kcl_cli_configs=pb2.CliConfig(), kcl_options=[]
        )
    # Parsing the setting files mutates the kclvm.config singletons; the
    # merged -D options become key/value pairs for the RPC result.
    key_value_pairs = [
        pb2.KeyValuePair(key=k, value=v)
        for k, v in util.merge_option_same_keys(
            kclvm.config.KCLCLISettingAction().deal(files)[0]
        ).items()
    ]
    if work_dir or kclvm.config.current_path:
        # Resolve each configured input file against work_dir, expanding the
        # ${KCL_MOD} placeholder to the detected package root.
        files = [
            str(
                pathlib.Path(work_dir)
                .joinpath(
                    str(x).replace(
                        KCL_MOD_PATH_ENV,
                        GetPkgRoot(work_dir or kclvm.config.current_path or files[0])
                        or "",
                    )
                )
                .resolve()
            )
            for x in kclvm.config.input_file
        ]
    # Snapshot the (now-populated) singleton config into the RPC message.
    return pb2.LoadSettingsFiles_Result(
        kcl_cli_configs=pb2.CliConfig(
            files=files,
            output=kclvm.config.output,
            overrides=kclvm.config.overrides,
            path_selector=kclvm.config.path_selector,
            strict_range_check=kclvm.config.strict_range_check,
            disable_none=kclvm.config.disable_none,
            verbose=kclvm.config.verbose,
            debug=kclvm.config.debug,
        ),
        kcl_options=key_value_pairs,
    )
| kcl-lang/kcl-py | kclvm/config/settings.py | settings.py | py | 1,916 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "kclvm.internal.gpyrpc.gpyrpc_pb2.LoadSettingsFiles_Result",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "kclvm.internal.gpyrpc.gpyrpc_pb2",
"line_number": 32,
"usage_type":... |
5313931877 | import datetime
from flask import Blueprint
from core import Utils
from exts import db, APIResponse
from models import Interview, Person
listing_resources = Blueprint('listing_resources', __name__)
@listing_resources.route('/interview/<interview_id>', methods=['GET'])
def get_interview(interview_id):
    """Return the details of a single interview looked up by its id."""
    interview = (
        db.session.query(Interview)
        .filter_by(interview_id=interview_id)
        .one()
    )
    return APIResponse.respond(Utils.extract_details(interview))
@listing_resources.route('/interviews', methods=['GET'])
def list_interviews():
    """Return all upcoming interviews, ordered by start time."""
    now = datetime.datetime.now()
    interviews = db.session.query(Interview).filter(Interview.start >= now).order_by(Interview.start).all()
    # Leftover debug print of each record removed; serialize via the shared helper.
    interview_list = [Utils.extract_details(interview) for interview in interviews]
    return APIResponse.respond(interview_list)
@listing_resources.route('/persons', methods=['GET'])
def list_person():
    """Return every person's email and name."""
    persons = db.session.query(Person).all()
    persons_list = [{'email': person.email, 'name': person.name} for person in persons]
    return APIResponse.respond(persons_list)
| msiddhu/interview_portal | backend/controllers/ViewController.py | ViewController.py | py | 1,172 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "flask.Blueprint",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "exts.db.session.query",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "models.Interview",
"line_number": 12,
"usage_type": "argument"
},
{
"api_name": "exts.db.ses... |
21503702095 | # -*- coding: utf-8 -*-
import os
import telebot
import time
import random
import threading
from emoji import emojize
from telebot import types
from pymongo import MongoClient
import game_classes
import lobbys
import cards
from tools import medit
import traceback
# Shared registry of active games, keyed by chat id (owned by lobbys).
games=lobbys.games
from game_classes import codetoclass, findallenemy, findnearenemy
# Bot credentials come from the environment; never hardcode the token.
token = os.environ['TELEGRAM_TOKEN']
bot = telebot.TeleBot(token)
#client=MongoClient(os.environ['database'])
#db=client.unknown
#users=db.users
@bot.message_handler(commands=['creategame'])
def creategame(m):
    """Open a new game lobby in this chat with a join button and a
    30-second auto-cancel timer. Refuses private chats."""
    # Only one game per chat.
    if m.chat.id not in games:
        # chat.id == from_user.id means a private chat — not allowed.
        if m.chat.id!=m.from_user.id:
            game=game_classes.Game(m)
            games.update({game.id:game})
            kb=types.InlineKeyboardMarkup()
            kb.add(types.InlineKeyboardButton(text='Присоединиться',callback_data='join'))
            msg=bot.send_message(m.chat.id,'Набор участников для экспедиции открыт! Жмите "Присоединиться" для вступления в игру.',reply_markup=kb)
            game=games[m.chat.id]
            # Keep the lobby message so it can be edited as players join.
            game.message=msg
            # Cancel the lobby automatically if nobody starts it in 30 s.
            t=threading.Timer(30,game.cancelgame)
            t.start()
            game.canceltimer=t
        else:
            bot.send_message(m.chat.id, 'В эту игру нельзя играть в личке! Добавьте бота в какой-нибудь чат.')
@bot.message_handler(commands=['del'])
def delett(m):
    """Force-delete the game bound to this chat, if any.

    Replaces a bare try/except that silently swallowed every error; now
    only the "no game in this chat" case is tolerated (by the membership
    check), while send failures surface normally.
    """
    if m.chat.id in games:
        del games[m.chat.id]
        bot.send_message(m.chat.id, 'Игра была удалена!')
@bot.message_handler(commands=['startgame'])
def startgame(m):
    """Start the pending game in this chat and disarm the auto-cancel timer."""
    # dict.get replaces the try/except-assign-None dance; identity check
    # (`is not None`) replaces `!= None`.
    game = games.get(m.chat.id)
    if game is not None:
        game.canceltimer = None
        game.startgame()
@bot.callback_query_handler(func=lambda call:True)
def inline(call):
    """Single dispatcher for every inline-button press.

    ``call.data`` is either the literal ``'join'`` (lobby join button) or a
    space-separated string of the form ``'<action> <game_id> [<card_code>
    [<target_id>]]'`` for in-game actions (playcard / info / trade / usecard).
    """
    if call.data=='join':
        # Lobby join: add the user to the not-yet-started game of this chat.
        try:
            game=games[call.message.chat.id]
        except:
            game=None
        if game!=None:
            if game.started==False:
                if call.from_user.id not in game.playerlist:
                    game.createplayer(call.from_user)
                    bot.send_message(call.message.chat.id,call.from_user.first_name+' присоединился!')
                    game.m_update()
        else:
            bot.send_message(call.message.chat.id, call.from_user.first_name+', в этом чате нет запущенной игры! Сначала начните её '+
                             'командой /creategame.')
    else:
        # In-game action: second token of call.data is the game (chat) id.
        try:
            kb=types.InlineKeyboardMarkup()
            game=games[int(call.data.split(' ')[1])]
            # Resolve the pressing user to their Player object.
            # NOTE(review): 'user' stays unbound when the caller is not in
            # game.playerlist; later references then raise NameError, which is
            # absorbed by the outer except below — confirm this is intended.
            for ids in game.playerlist:
                if game.playerlist[ids].id==call.from_user.id:
                    user=game.playerlist[ids]
            chat=game
            if 'playcard' in call.data:
                # Show the user's hand; each button opens the card's info view.
                for ids in user.cards:
                    print(ids)
                    print(ids.name)
                    kb.add(types.InlineKeyboardButton(text=ids.name, callback_data='info '+str(chat.id)+' '+ids.code))
                medit('Выберите карту:', call.message.chat.id, call.message.message_id, reply_markup=kb)
            if 'info' in call.data:
                # Card detail view; offers a "use" button for playable card types.
                x=call.data.split(' ')[2]
                text='none'
                card=None
                for ids in user.cards:
                    if ids.code==x:
                        card=ids
                print('Карта: ')
                print(card)
                if card!=None:
                    text=card.info
                    if card.type!='unknown' and card.type!='infection':
                        kb.add(types.InlineKeyboardButton(text='⚡️Использовать карту', callback_data='usecard '+str(chat.id)+' '+card.code))
                kb.add(types.InlineKeyboardButton(text='↩️Назад', callback_data='mainmenu '+str(chat.id)))
                medit(text, call.message.chat.id, call.message.message_id, reply_markup=kb, parse_mode='markdown')
            if 'trade' in call.data:
                # Mark one of the user's cards as the trade offer.
                x=call.data.split(' ')[2]
                card=None
                for ids in user.cards:
                    if ids.code==x:
                        card=ids
                if card!=None:
                    user.fortrade=card
                    medit('Выбрано для обмена: "'+card.name+'".',call.message.chat.id, call.message.message_id)
                else:
                    bot.answer_callback_query(call.id, 'У вас этого нет!')
            if 'usecard' in call.data:
                # Play a card, optionally against a target given as a 4th token.
                x=call.data.split(' ')[2]
                card=None
                for ids in user.cards:
                    if ids.code==x:
                        card=ids
                try:
                    trgt=call.data.split(' ')[3]
                    for ids in chat.playerlist:
                        if chat.playerlist[ids].id==int(trgt):
                            target=chat.playerlist[ids]
                except:
                    target=None
                if card!=None:
                    print(card)
                    if card.type=='action' or card.type=='barrier':
                        if user.active:
                            if card.targetable:
                                if target==None:
                                    # No target chosen yet: build a target-picker keyboard.
                                    if card.targetall:
                                        enemies=findallenemy(user, game)
                                    else:
                                        enemies=findnearenemy(user, game)
                                    if card.target_self:
                                        enemies.append(user)
                                    for ids in enemies:
                                        kb.add(types.InlineKeyboardButton(text=ids.name, callback_data='usecard '+str(chat.id)+' '+x+' '+str(ids.id)))
                                    # NOTE(review): this 'mainmenu' callback lacks the chat id
                                    # suffix used everywhere else — confirm it still routes.
                                    kb.add(types.InlineKeyboardButton(text='Назад', callback_data='mainmenu'))
                                    medit('Выберите цель для карты "'+card.name+'":', call.message.chat.id, call.message.message_id, reply_markup=kb)
                                else:
                                    # Cancellable cards take effect after a 10 s window in
                                    # which the target may respond via their defence menu.
                                    if card.cancancelled!=[]:
                                        t=threading.Timer(10, card.use, args=[user, target, chat])
                                        t.start()
                                        target.defmenu(card)
                                    else:
                                        card.use(user, target, chat)
                                    medit('Выбрано: "'+card.name+'".', call.message.chat.id, call.message.message_id)
                            else:
                                # Untargetable card: apply to self unless a 4th token names someone.
                                try:
                                    enm=call.data.split(' ')[3]
                                except:
                                    enm=None
                                enemy=None
                                if enm!=None:
                                    for ids in chat.playerlist:
                                        if chat.playerlist[ids].id==int(enm):
                                            enemy=chat.playerlist[ids]
                                else:
                                    enemy=user
                                if card.cancancelled!=[]:
                                    t=threading.Timer(10, card.use, args=[user, enemy, chat])
                                    t.start()
                                    enemy.defmenu(card)
                                else:
                                    card.use(user, enemy, chat)
                                medit('Выбрано: "'+card.name+'".', call.message.chat.id, call.message.message_id)
                        else:
                            bot.answer_callback_query(call.id, 'Сейчас не ваш ход!')
                    elif card.type=='defence':
                        # Defence cards are only meaningful as a reaction; no effect yet.
                        if user.active==False and user.attacked:
                            pass
                        else:
                            bot.answer_callback_query(call.id, 'Эту карту можно сыграть только в ответ на сыгранную на вас карту!')
        except Exception as e:
            # Log locally and forward the traceback to the maintainer's chat id.
            print('Ошибка:\n', traceback.format_exc())
            bot.send_message(441399484, traceback.format_exc())
# Startup marker in the logs, then block forever long-polling Telegram for updates.
print('7777')
bot.polling(none_stop=True,timeout=600)
| egor5q/Unknown-table-game- | bot.py | bot.py | py | 8,507 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "lobbys.games",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "telebot.TeleBot",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "game_classes.Game",... |
22782893938 | # Graph Valid Tree
# Description
# Given n nodes labeled from 0 to n - 1 and a list of undirected edges
# (each edge is a pair of nodes), write a function to check whether these
# edges make up a valid tree.
# You can assume that no duplicate edges will appear in edges. Since all
# edges are undirected, [0, 1] is the same as [1, 0] and thus will not
# appear together in edges.
# Example
# Example 1:
# Input: n = 5 edges = [[0, 1], [0, 2], [0, 3], [1, 4]]
# Output: true.
# Example 2:
# Input: n = 5 edges = [[0, 1], [1, 2], [2, 3], [1, 3], [1, 4]]
# Output: false.
from collections import defaultdict, deque
class Solution_BFS:
    """BFS solution.

    @param n: An integer
    @param edges: a list of undirected edges
    @return: true if it's a valid tree, or false
    """

    def __init__(self):
        self.neighbors = defaultdict(list)

    def validTree(self, n, edges):
        """A graph is a tree iff it has n - 1 edges and is fully connected."""
        if len(edges) != n - 1:
            return False
        self._build_neighbors(edges)
        # BFS from node 0, counting every node we can reach.
        visited = {0}
        frontier = deque([0])
        while frontier:
            current = frontier.popleft()
            for nxt in self.neighbors[current]:
                if nxt in visited:
                    continue
                visited.add(nxt)
                frontier.append(nxt)
        return len(visited) == n

    def _build_neighbors(self, edges):
        """Record each undirected edge in both directions."""
        for u, v in edges:
            self.neighbors[u].append(v)
            self.neighbors[v].append(u)
class UnionFind():
    """Disjoint-set with path compression, tracking the component count."""

    def __init__(self, n):
        # Every node starts as its own root: n disjoint components.
        self.father = {node: node for node in range(n)}
        self.count = n

    def find(self, other):
        """Return the root of *other*'s component, compressing the path."""
        trail = []
        root = other
        while self.father[root] != root:
            trail.append(root)
            root = self.father[root]
        # Point every node on the walked path directly at the root.
        for node in trail:
            self.father[node] = root
        return root

    def union(self, a, b):
        """Merge the components of a and b; decrement count if they differed."""
        root_a = self.find(a)
        root_b = self.find(b)
        if root_a != root_b:
            self.father[root_a] = root_b
            self.count -= 1
class Solution_UnionFind:
    """Union-find solution.

    @param n: An integer
    @param edges: a list of undirected edges
    @return: true if it's a valid tree, or false
    """

    def validTree(self, n, edges):
        """A graph is a tree iff it has n - 1 edges and one connected component."""
        if len(edges) != n - 1:
            return False
        components = UnionFind(n)
        for u, v in edges:
            components.union(u, v)
        return components.count == 1
{
"api_name": "collections.defaultdict",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 42,
"usage_type": "call"
}
] |
70474066663 | import time
import datetime
from timeConvert import dayInSeconds
from futureTides import HighTideKeeper, LowTideKeeper
import json
# Reference Unix timestamps (seconds) anchoring the tide predictions.
dayTimeTimeStamp = 1602907205
capeCharlesTimeStamp = 1592057307  # Low tide, morning of 6/13/2020, Cape Charles Harbor
jamesTimeStamp = capeCharlesTimeStamp + 27480  # James River Locks is 458 minutes behind Cape Charles
piankatankTimeStamp = capeCharlesTimeStamp + 3660  # Cherry Point tide is 61 minutes behind Cape Charles
yorkTimeStamp = capeCharlesTimeStamp - 240  # York River Spit: 240 s offset; original note said "behind" but the code subtracts — confirm sign
def mergeDict(hDict, lDict):
    """Return a new dict combining both inputs; lDict wins on key collisions."""
    merged = dict(hDict)
    merged.update(lDict)
    return merged
#jDict = {'a': '44', 'b' : '77', 'c': '23'}
# Script entry point: for each gauge, write a JSON file merging the upcoming
# high-tide and low-tide tables computed from that gauge's reference timestamp.
# HighTideKeeper/LowTideKeeper are assumed to return dicts (required by
# mergeDict's ** unpacking) — defined in futureTides.
if __name__ == ('__main__'):
    # James River Locks tides.
    with open('jTide.json', 'w') as f1:
        json.dump(mergeDict(HighTideKeeper(jamesTimeStamp), LowTideKeeper(jamesTimeStamp)), f1, indent=2)
    # Piankatank (Cherry Point) tides.
    with open('pTide.json', 'w') as f2:
        json.dump(mergeDict(HighTideKeeper(piankatankTimeStamp), LowTideKeeper(piankatankTimeStamp)), f2, indent=2)
    # York River Spit tides.
    with open('yTide.json', 'w') as f3:
        json.dump(mergeDict(HighTideKeeper(yorkTimeStamp), LowTideKeeper(yorkTimeStamp)), f3, indent=2)
{
"api_name": "json.dump",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "futureTides.HighTideKeeper",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "futureTides.LowTideKeeper",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "json.d... |
34024325932 | import PySimpleGUI as sg
import os
from PIL import Image as I
import datetime
import time
from COCO import Coco, Image, Anno
import utilities
def worm(dir_path: str, category: str, coco):
"""
recursive flow that adds image objects to the coco object
:param dir_path: the dir of the folder with all the data
:param category: the name of the category until this run
:param coco: the coco object
:return: coco object
"""
# get all folders in the dir_path
folders = [f for f in os.listdir(dir_path) if os.path.isdir(os.path.join(dir_path, f))]
if folders:
for f in folders:
coco = worm(dir_path=os.path.join(dir_path, f), category=f'{category}_{f}' if category else f, coco=coco)
# get all files in the dir_path
files = [f for f in os.listdir(dir_path) if os.path.isfile(os.path.join(dir_path, f))]
if not files: # if there are no images...
return coco
im_exist = False # lets check if there is an im in folder
for i in files:
ext = i.split('.')[-1].lower() # ending of file
valid_types = ["bmp", "jpg", 'jpeg', 'png']
if ext in valid_types:
im_exist = True
break
if not im_exist:
return coco
# create a new category
elif not category:
sg.popup_error('There is an image file which is not in a folder!', background_color='#181c21')
return False # abort process
coco.categories.append(
{
'supercategory': 'None',
'id': len(coco.categories) + 1,
'name': category,
'status': 'Pass'
}
)
for f in files:
ext = f.split('.')[-1].lower() # ending of file
valid_types = ["bmp", "jpg", 'jpeg', 'png']
if ext not in valid_types:
# raise TypeError("Invalid type of file")
pass
else:
full_path = os.path.join(dir_path, f)
im = I.open(full_path)
width, height = im.size
# date and time the image was created
created_time = os.path.getctime(full_path)
date_captured = time.ctime(created_time)
date_obj = time.strptime(date_captured)
date_captured = time.strftime("%d/%m/%Y , %H:%M", date_obj)
# create Image object
im_o = Image(
id=len(coco.output_images) + 1,
path=full_path, # full path
name=f,
width=width,
height=height,
license=0,
date_captured=date_captured
)
# load annotations, in this case we dont care about seg, area, bbox, iscrowd
im_o.add_anno(
segmentation=[[width / 2 - 10, height / 2 - 10,
width / 2 - 10, height / 2 + 10,
width / 2 + 10, height / 2 + 10,
width / 2 + 10, height / 2 - 10]],
area=100,
bbox=[width / 2 - 10, height / 2 - 10, width / 2 + 10, height / 2 + 10],
iscrowd=0,
id=len(coco.output_images) + 1,
image_id=len(coco.output_images) + 1,
category_id=len(coco.categories),
)
coco.output_images.append(im_o)
return coco
def GUI():
"""
build the window object
:return: Window object
"""
# setting up theme for gui window
sg.LOOK_AND_FEEL_TABLE['MyCreatedTheme'] = {'BACKGROUND': '#181c21',
'TEXT': '#FFFFFF',
'INPUT': '#181c21',
'TEXT_INPUT': '#FFFFFF',
'SCROLL': '# 99CC99',
'BUTTON': ('#FFFFFF', '#181c21'),
'PROGRESS': ('# 99CC99', '#FFFFFF'),
'BORDER': 1, 'SLIDER_DEPTH': 0,
'PROGRESS_DEPTH': 0, }
sg.theme('MyCreatedTheme') # this is implementing the custom them that was build earlier
# UI setup
font: tuple = ("Arial", 20)
frame_width: int = 1000
frame = [[sg.Text('Main folder', font=font, background_color='#181c21'),
sg.Input(key='-IMPORT DIR-', font=font, size=(32, 1)),
sg.FolderBrowse('Browse',
font=font, # read only 'json' files
button_color='#181c21')],
[sg.Text('New folder name', font=font, background_color='#181c21'),
sg.Input(key='-COCO NAME-', font=font, size=(35, 1))],
[sg.Text('Folder to export to', font=font, background_color='#181c21'),
sg.Input(key='-EXPORT DIR-', font=font, size=(29, 1)),
sg.FolderBrowse('Browse',
font=font, # read only 'json' files
button_color='#181c21')
],
[sg.Push(), sg.Button('Export to COCO', font=font, key='-run-', button_color='#181c21')]
]
layout = [
[sg.Frame('Select folder with classified folders',
frame,
font=font,
background_color='#181c21',
size=(frame_width, 270))
]
]
# GUI object
window = sg.Window("Bright Machines Classification tool",
layout=layout,
icon='bm_logo.ico',
finalize=True,
return_keyboard_events=True,
grab_anywhere=True,
background_color='#181c21')
return window
def run_gui(window: sg.Window):
"""
will iterate until a run command is made
:param window: sg object
:return:
"""
while True:
event, values = window.read(timeout=100)
if event == sg.WIN_CLOSED:
break
elif event == '-run-':
if values['-IMPORT DIR-'] == '' or values['-EXPORT DIR-'] == '':
sg.popup_error('Please enter a folder direction', background_color='#181c21')
elif values['-COCO NAME-'] == '':
sg.popup_error('Please enter wanted coco file name', background_color='#181c21')
else:
dir_name, base_name = values['-EXPORT DIR-'], values['-COCO NAME-']
full_dir: str = os.path.join(dir_name, base_name)
run_coco_build(folder=values['-IMPORT DIR-'], dir_for_coco=full_dir)
def copy_images(ims: list, destination: str):
ims_d: list = [im.path for im in ims] # list of full dirs of all images
ok: bool = utilities.move_files_to_folder_sg(list_of_files=ims_d, destination_folder=destination)
if ok:
sg.popup('Images where copied to a single folder', background_color='#181c21',
auto_close=True, auto_close_duration=3)
return
def run_coco_build(folder: str, dir_for_coco: str):
"""
this will be the backbone for building and exporting the coco file
:param folder: the dir of the folder with all the data
:param dir_for_coco: the full dir of the location and name for the new coco json file
:return:
"""
# create coco object
new_coco = Coco(coco_dir=dir_for_coco)
time_now = datetime.datetime.now()
new_coco.coco_info = {
"year": time_now.year,
"version": "1.0",
"description": "BMA",
"contributor": "",
"url": "",
"date_created": "%s/%s/%s , %s:%s" % (
time_now.day, time_now.month, time_now.year, time_now.hour, time_now.minute)
}
new_coco.licenses = [{
"id": 0,
"name": "Unknown License",
"url": ""
}]
coco = worm(dir_path=folder, category='', coco=new_coco)
if not isinstance(coco, bool): # the process went OK
# create a folder
# checking if the directory demo_folder
# exist or not.
while os.path.exists(dir_for_coco):
dir_for_coco += '_new'
os.makedirs(dir_for_coco)
# save coco
coco.export_coco('/' + os.path.basename(dir_for_coco))
sg.popup('Export is done, the file was created', background_color='#181c21',
auto_close=True, auto_close_duration=3)
# copy all images to the same folder
os.makedirs(os.path.join(dir_for_coco, 'images'))
copy_images(ims=coco.output_images, destination=os.path.join(dir_for_coco, 'images'))
def main():
window = GUI()
run_gui(window)
if __name__ == "__main__":
main()
| OmriHerzfeld1/FinalProject | Classificator.py | Classificator.py | py | 8,713 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.listdir",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number... |
12993263023 | import json
import time
import sys
import requests
def try_get_json(url, timeout=20):
t = time.time()
try:
with Timer("requests.get(%s)" % url):
response = requests.get(url, timeout=timeout)
except requests.exceptions.Timeout:
log("GET %s timed out after %s." % (url, time.time()-t))
raise
except requests.exceptions.MissingSchema:
log("%s is not a valid URL" % url)
raise
except requests.exceptions.ConnectionError as e:
log("GET %s failed: %s" % (url, e))
raise
except:
log("Unexpected error from %s : %s" % (url, sys.exc_info()[0]))
raise
if response.status_code == 200:
with Timer("get text"):
t = response.content
log("len(t): %d" % len(t))
with Timer("json.loads(%s)" % url):
return json.loads(t)
else:
log("GET %s failed - Non 200 HTTP Error" % url)
return False
def log(message):
ts = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
print('%s %s' % (ts, message))
class Timer:
def __init__(self, name):
self.name = name
def __enter__(self):
log("%s starting" % (self.name))
self.time = time.time()
def __exit__(self, type, value, traceback):
return log("%s took %ss" % (self.name, time.time() - self.time))
| opentable/mesos_stats | mesos_stats/util.py | util.py | py | 1,358 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "time.time",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "requests.exceptions",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_... |
25978662424 | from flask import url_for, redirect, request
from flask_admin import BaseView, expose
from app.controllers import interceptors_controller
from app.models.models.http_method import HTTPMethod
from app.utils.utils import toast, call
class View(BaseView):
    """Flask-Admin view exposing CRUD-style routes for a response's interceptors.

    All routes are nested under /<mock_id>/<response_id>/interceptors/... and
    delegate business logic to ``interceptors_controller``; ``call``/``toast``
    wrap the controller call and flash a result message.
    """

    def is_visible(self):
        """Hide this view from the admin navigation menu."""
        return False
    @expose('/')
    def index(self):
        """Render the bare interceptors template (no context)."""
        return self.render('admin/interceptors/interceptors.html')
    @expose('/<mock_id>/<response_id>/interceptors/back')
    def interceptor_back(self, mock_id, response_id):
        """Navigate back to the parent mock response page."""
        return redirect(url_for('mocks.mock_response', mock_id=mock_id, response_id=response_id))
    @expose('/<mock_id>/<response_id>/interceptors/<interceptor_id>')
    def interceptor(self, mock_id, response_id, interceptor_id):
        """Render the detail page for one interceptor, including its
        type-specific configuration example and configurability flag."""
        interceptor = interceptors_controller.interceptor(mock_id, response_id, interceptor_id)
        interceptor_configuration_example = interceptors_controller.interceptor_configuration_example(interceptor.type.value)
        interceptor_is_configurable = interceptors_controller.interceptor_is_configurable(interceptor.type.value)
        return self.render('admin/interceptors/interceptors.html', mock_id=mock_id, response_id=response_id,
                           interceptor=interceptor, interceptor_configuration_example=interceptor_configuration_example,
                           interceptor_is_configurable=interceptor_is_configurable)
    # interceptors
    @expose('/<mock_id>/<response_id>/interceptors/<interceptor_id>/update', methods=[HTTPMethod.POST.value])
    def interceptor_update(self, mock_id, response_id, interceptor_id):
        """Rename an interceptor from the posted form, then redirect to it."""
        name = request.form.get('interceptors_definition_form_name')
        call(
            lambda: interceptors_controller.interceptor_update(mock_id, response_id, interceptor_id, name),
            lambda: toast('Interceptors has been updated', category='success')
        )
        return redirect(url_for('interceptors.interceptor', mock_id=mock_id, response_id=response_id,
                                interceptor_id=interceptor_id))
    @expose('/<mock_id>/<response_id>/interceptors/<interceptor_id>/enable', methods=[HTTPMethod.POST.value])
    def interceptor_enable(self, mock_id, response_id, interceptor_id):
        """Enable an interceptor; returns the detail URL (not a redirect)."""
        call(
            lambda: interceptors_controller.interceptor_enable(mock_id, response_id, interceptor_id),
            lambda: toast('Interceptor has been enabled', category='success')
        )
        return url_for('interceptors.interceptor', mock_id=mock_id, response_id=response_id,
                       interceptor_id=interceptor_id)
    @expose('/<mock_id>/<response_id>/interceptors/<interceptor_id>/disable', methods=[HTTPMethod.POST.value])
    def interceptor_disable(self, mock_id, response_id, interceptor_id):
        """Disable an interceptor; returns the detail URL (not a redirect)."""
        call(
            lambda: interceptors_controller.interceptor_disable(mock_id, response_id, interceptor_id),
            lambda: toast('Interceptor has been disabled', category='success')
        )
        return url_for('interceptors.interceptor', mock_id=mock_id, response_id=response_id,
                       interceptor_id=interceptor_id)
    @expose('/<mock_id>/<response_id>/interceptors/<interceptor_id>/configuration', methods=[HTTPMethod.POST.value])
    def interceptor_update_configuration(self, mock_id, response_id, interceptor_id):
        """Replace an interceptor's configuration with the posted value."""
        configuration = request.form.get("configuration")
        call(
            lambda: interceptors_controller.interceptor_update_configuration(mock_id, response_id, interceptor_id,
                                                                             configuration),
            lambda: toast('Interceptor has been updated', category='success'))
        return url_for('interceptors.interceptor', mock_id=mock_id, response_id=response_id,
                       interceptor_id=interceptor_id)
| sayler8182/MockServer | app/views/interceptors/interceptors_view.py | interceptors_view.py | py | 3,836 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "flask_admin.BaseView",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "flask_admin.expose",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "flask.url_for",... |
9237399008 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 10 14:15:37 2022
@author: sunlin
@实现声音震动的单通道模型准确率计算和混淆矩阵生成
"""
from cProfile import label
from utils.confusionMatrixGenerator import confusionMatrixGenerator,plot_confusion_matrix
from utils.accCalculator import accCalculateFrame
from sklearn.metrics import classification_report
import torch
import numpy as np
def classificationPerform(model_aco,model_seis,frame_data_aco, frame_data_seis):
    """Run the acoustic and seismic single-channel models on one frame.

    Each frame is converted to a (1, -1) float tensor, passed through its
    model, and the prediction is returned as a flat numpy array.
    """
    def _infer(model, frame):
        # Batch of one: shape the frame as (1, n_features) before the forward pass.
        batch = torch.tensor(frame, dtype=torch.float).reshape(1, -1)
        return model(batch).data.numpy().reshape(-1)

    return _infer(model_aco, frame_data_aco), _infer(model_seis, frame_data_seis)
def resultClassifier(predict_aco,predict_seis,predict_aco_mfcc,predict_seis_medium,predict_aco_wavelet,
                     predict_seis_wavelet,label_per):
    """Build and plot a confusion matrix for each of the six single-channel
    classifiers and return their accuracies.

    Returns a list of six accuracies in the order: aco, seis, aco_mfcc,
    seis_medium, aco_wavelet, seis_wavelet.
    """
    cm_aco,acc_aco = confusionMatrixGenerator(predict_aco, label_per)
    cm_seis,acc_seis = confusionMatrixGenerator(predict_seis, label_per)
    cm_aco_mfcc,acc_aco_mfcc = confusionMatrixGenerator(predict_aco_mfcc, label_per)
    cm_seis_medium,acc_seis_medium = confusionMatrixGenerator(predict_seis_medium, label_per)
    cm_aco_wavelet,acc_aco_wavelet = confusionMatrixGenerator(predict_aco_wavelet, label_per)
    cm_seis_wavelet,acc_seis_wavelet = confusionMatrixGenerator(predict_seis_wavelet, label_per)
    # Figures 1-6: one confusion-matrix plot per classifier.
    plot_confusion_matrix(cm_aco,1,'confusion matrix of acoustic classifier')
    plot_confusion_matrix(cm_seis,2,'confusion matrix of seismic classifier')
    plot_confusion_matrix(cm_aco_mfcc,3,'confusion matrix of acoustic classifier with MFCC')
    plot_confusion_matrix(cm_seis_medium,4,'confusion matrix of seismic classifier with medium scale')
    plot_confusion_matrix(cm_aco_wavelet,5,'confusion matrix of acoustic classifier with wavelet')
    plot_confusion_matrix(cm_seis_wavelet,6,'confusion matrix of seismic classifier with wavelet')
    # Return the accuracies.
    return [acc_aco,acc_seis,acc_aco_mfcc,acc_seis_medium,acc_aco_wavelet,acc_seis_wavelet ]
def resultPerform(n,info,label_per,predict,target_names):
    """Print a classification report and plot the confusion matrix for one classifier.

    n: figure number for the plot; info: human-readable classifier name;
    predict: per-sample score vectors, reduced to class indices via argmax.
    label_per is assumed to already hold integer class labels — TODO confirm
    (the commented-out lines suggest it was once one-hot).
    """
    #label_per = np.argmax(np.array(label_per),axis=1)
    #label_per = [np.argmax(i) for i in label_per]
    predict = [np.argmax(i) for i in predict]
    #predict = np.argmax(np.array(predict),axis=1)
    #y = (predict == predict.max(axis=1,keepdims=1)).astype(int)
    #print(predict,label_per)
    #print(label_per,predict)
    print(classification_report(label_per,predict,target_names = target_names))
    cm_aco,acc_aco = confusionMatrixGenerator(predict, label_per)
    plot_confusion_matrix(cm_aco,n,'confuse_matrix of '+info)
def classificationProject(label_per,
                          predict_aco,predict_aco_mfcc,predict_aco_wavelet,
                          predict_seis,predict_seis_medium,predict_seis_wavelet,
                          target_names):
    """Report per-frame performance of all six single-channel classifiers.

    Delegates to resultPerform once per classifier (figures 1-6); fusion
    performance reporting is not implemented yet (see trailing notes).
    """
    # frame classifier performance
    print(" aco AlexNet classifier performance")
    resultPerform(1,'acoustic classifier',label_per=label_per, predict=predict_aco, target_names=target_names)
    print(" seis AlexNet classifier performance")
    resultPerform(2,'seismic classifier',label_per=label_per,predict=predict_seis,target_names=target_names)
    print(" aco_mfcc classifier performance")
    resultPerform(3,'mfcc classifier',label_per=label_per,predict=predict_aco_mfcc,target_names=target_names)
    print(" aco_wavelet classifier performance")
    resultPerform(4,'ACO wavelet classifier',label_per=label_per,predict=predict_aco_wavelet,target_names=target_names)
    print(" seis_wavelet classifier performance")
    resultPerform(5,'SEIS wavelet classifier',label_per=label_per,predict=predict_seis_wavelet,target_names=target_names)
    print(" medium classifier performance")
    resultPerform(6,'SEIS medium scale classifier',label_per=label_per,predict=predict_seis_medium,target_names=target_names)
    # frame fusion performance
    #LTCFN performance
if __name__ == "__main__":
y_true = [0,1,2,2,2]
y_pred = [0,0,2,2,1]
target_names = ['class 0', 'class 1', 'class 2']
print(classification_report(y_true,y_pred,target_names = target_names))
| Seafood-SIMIT/Long-Term-Correlation-Feature-Network | utils/classificationPerformance.py | classificationPerformance.py | py | 4,309 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "torch.tensor",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.float",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.float",
"line_nu... |
2671700806 | import numpy as np
from const import PitchExtractorType
from voice_changer.DiffusionSVC.pitchExtractor.PitchExtractor import PitchExtractor
from voice_changer.RVC.deviceManager.DeviceManager import DeviceManager
import onnxruntime
class RMVPOnnxEPitchExtractor(PitchExtractor):
    """Pitch extractor backed by an RMVPE model exported to ONNX.

    Loads the ONNX session on the execution provider matching *gpu* and
    converts the model's f0 output into coarse (1-255 mel-binned) and
    continuous pitch tracks.
    """

    def __init__(self, file: str, gpu: int):
        super().__init__()
        self.file = file
        self.pitchExtractorType: PitchExtractorType = "rmvpe_onnx"
        # f0 range and its mel-scale bounds used for coarse quantization.
        self.f0_min = 50
        self.f0_max = 1100
        self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700)
        self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700)
        (
            onnxProviders,
            onnxProviderOptions,
        ) = DeviceManager.get_instance().getOnnxExecutionProvider(gpu)
        self.onnxProviders = onnxProviders
        self.onnxProviderOptions = onnxProviderOptions
        so = onnxruntime.SessionOptions()
        so.log_severity_level = 3  # suppress onnxruntime info/warning logs
        self.onnx_session = onnxruntime.InferenceSession(self.file, sess_options=so, providers=onnxProviders, provider_options=onnxProviderOptions)
    def extract(self, audio, pitchf, f0_up_key, sr, window, silence_front=0):
        """Run RMVPE on *audio* and write the (pitch-shifted) f0 into *pitchf*.

        Returns (f0_coarse, pitchf): integer mel bins in 1-255 plus the
        continuous f0 track. Accepts torch tensors or 1-D numpy arrays.
        """
        try:
            # Convert torch tensors to numpy.
            if isinstance(audio, np.ndarray) is False:
                audio = audio = audio.cpu().numpy()
            if isinstance(pitchf, np.ndarray) is False:
                pitchf = pitchf.cpu().numpy().astype(np.float32)
            if audio.ndim != 1:
                raise RuntimeError(f"Exeption in {self.__class__.__name__} audio.ndim is not 1 (size :{audio.ndim}, {audio.shape})")
            if pitchf.ndim != 1:
                raise RuntimeError(f"Exeption in {self.__class__.__name__} pitchf.ndim is not 1 (size :{pitchf.ndim}, {pitchf.shape})")
            # Trim the leading silence (rounded down to whole analysis windows)
            # but keep at least 10 ms of audio.
            silenceFrontFrame = silence_front * sr
            startWindow = int(silenceFrontFrame / window)  # truncate fractional windows
            slienceFrontFrameOffset = startWindow * window
            targetFrameLength = len(audio) - slienceFrontFrameOffset
            minimumFrames = 0.01 * sr
            targetFrameLength = max(minimumFrames, targetFrameLength)
            audio = audio[-targetFrameLength:]
            audio = np.expand_dims(audio, axis=0)
            output = self.onnx_session.run(
                ["f0", "uv"],
                {
                    "waveform": audio.astype(np.float32),
                    "threshold": np.array([0.3]).astype(np.float32),
                },
            )
            f0 = output[0].squeeze()
            # Apply pitch shift in semitones, then splice into the tail of pitchf.
            f0 *= pow(2, f0_up_key / 12)
            pitchf[-f0.shape[0]:] = f0[: pitchf.shape[0]]
            # Quantize to 255 mel-scaled bins (0 is reserved for unvoiced).
            f0_mel = 1127.0 * np.log(1.0 + pitchf / 700.0)
            f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - self.f0_mel_min) * 254 / (self.f0_mel_max - self.f0_mel_min) + 1
            f0_mel[f0_mel <= 1] = 1
            f0_mel[f0_mel > 255] = 255
            f0_coarse = np.rint(f0_mel).astype(int)
        except Exception as e:
            raise RuntimeError(f"Exeption in {self.__class__.__name__}", e)
        return f0_coarse, pitchf
| w-okada/voice-changer | server/voice_changer/RVC/pitchExtractor/RMVPOnnxEPitchExtractor.py | RMVPOnnxEPitchExtractor.py | py | 3,109 | python | en | code | 12,673 | github-code | 36 | [
{
"api_name": "voice_changer.DiffusionSVC.pitchExtractor.PitchExtractor.PitchExtractor",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "const.PitchExtractorType",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "numpy.log",
"line_number": 16,
"usage_typ... |
1842661731 | import os
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
# Short ticker code -> CSV base name (see symbol_to_path) for each instrument.
symbol_dict = {'cell': 'Celltrion',
               'hmotor': 'HyundaiMotor',
               'naver': 'NAVER',
               'kakao': 'Kakao',
               'lgchem': 'LGChemical',
               'lghnh': 'LGH&H',
               'bio': 'SamsungBiologics',
               'samsung1': 'SamsungElectronics',
               'samsung2': 'SamsungElectronics2',
               'sdi': 'SamsungSDI',
               'sk': 'SKhynix',
               'kospi': 'KOSPI', }
def symbol_to_path(symbol, base_dir="../data"):
    """Return the CSV file path for *symbol* under *base_dir*."""
    filename = str(symbol) + ".csv"
    return os.path.join(base_dir, filename)
def merge_data(start_date, end_date, symbols):
    """Load OHLCV CSVs for *symbols*, join them on a common date index, and
    return one wide DataFrame with <symbol>_open/high/low/close/volume columns.

    NAVER prices before the 2018-10-12 5:1 stock split are divided by 5
    (volume excluded) so the series is continuous. Rows with any NaN are
    dropped afterwards.
    """
    dates = pd.date_range(start_date, end_date)
    df = pd.DataFrame(index=dates)
    for symbol in symbols:
        df_temp = pd.read_csv(symbol_to_path(symbol), index_col="Date", parse_dates=True,
                              usecols=['Date', 'Open', 'High', 'Low', 'Close', 'Volume'], na_values=['nan'])
        df_temp = df_temp.rename(columns={'Open': symbol + '_open', 'High': symbol + '_high', 'Low': symbol + '_low',
                                          'Close': symbol + '_close', 'Volume': symbol + '_volume'})
        if symbol == 'NAVER':
            # Adjust pre-split prices: fill zero placeholders, then scale by 1/5.
            stock_split_date_index = df_temp.index.get_loc(pd.to_datetime('2018-10-12'), method='nearest')
            before_stock_split_df = df_temp.iloc[:stock_split_date_index]
            before_stock_split_df = before_stock_split_df.replace(to_replace=0, method='ffill')
            before_stock_split_df.loc[:, before_stock_split_df.columns != 'NAVER_volume'] =\
                before_stock_split_df.loc[:, before_stock_split_df.columns != 'NAVER_volume']/5
            after_stock_split_df = df_temp.iloc[stock_split_date_index:]
            df_temp = pd.concat([before_stock_split_df, after_stock_split_df], axis=0)
        df = df.join(df_temp)
    # TODO: cleaning or filling missing value
    df = df.dropna()
    # Reformat KOSPI_volume strings (e.g. "296,548K" -> 296548000.0).
    if 'KOSPI' in symbols:
        df['KOSPI_volume'] = df['KOSPI_volume'].apply(lambda x: float(x.replace(',', '').replace('K', '') + '000'))
    return df
def rsi(df, period):
    """Return the RSI of price series *df* as a one-column DataFrame.

    Uses simple rolling means of gains/losses over *period* samples
    (Cutler's RSI); the first period-1 rows are NaN.
    """
    delta = df.diff(1)
    gains = np.where(delta > 0, delta, 0)
    losses = np.where(delta < 0, -delta, 0)
    avg_gain = pd.DataFrame(gains, index=df.index).rolling(window=period).mean()
    avg_loss = pd.DataFrame(losses, index=df.index).rolling(window=period).mean()
    return avg_gain / (avg_loss + avg_gain) * 100
def macd(m_Df, m_NumFast=12, m_NumSlow=26, m_NumSignal=9):
    """Append MACD indicator columns for every '*_close' column of m_Df.

    For each close series adds <name>_EMAFast, _EMASlow, _MACD, _MACDSignal
    and _MACDDiff columns (exponential moving averages with the given spans).
    Mutates m_Df in place and returns it.
    """
    close_columns = [c for c in m_Df.columns if 'close' in c]
    for col in close_columns:
        name = col.split('_close')[0]
        fast = m_Df[name + '_close'].ewm(span=m_NumFast, min_periods=m_NumFast - 1).mean()
        slow = m_Df[name + '_close'].ewm(span=m_NumSlow, min_periods=m_NumSlow - 1).mean()
        m_Df[name + '_EMAFast'] = fast
        m_Df[name + '_EMASlow'] = slow
        m_Df[name + '_MACD'] = fast - slow
        m_Df[name + '_MACDSignal'] = m_Df[name + '_MACD'].ewm(span=m_NumSignal,
                                                              min_periods=m_NumSignal - 1).mean()
        m_Df[name + '_MACDDiff'] = m_Df[name + '_MACD'] - m_Df[name + '_MACDSignal']
    return m_Df
def make_features(trade_company_list, start_date, end_date, is_training):
    """Build the model feature matrix plus open/close price arrays.

    Args:
        trade_company_list: short company keys actually traded (priced columns).
        start_date, end_date: date range passed to ``merge_data``.
        is_training: when False, only the last ``test_days`` rows are returned.

    Returns:
        (open_prices, close_prices, features) as numpy arrays; features are
        standardized with a ``StandardScaler`` fitted on the full window set.
    """
    feature_company_list = ['cell', 'hmotor', 'naver', 'lgchem', 'lghnh', 'samsung1', 'sdi', 'sk', 'kakao', 'kospi']
    # kospi has the lowest volatility and acts as a buffer asset
    symbol_list = [symbol_dict[c] for c in feature_company_list]
    table = merge_data(start_date, end_date, symbol_list)
    # Derive RSI for every *_close column.  Snapshot the column list first:
    # new columns are inserted inside the loop.
    for col in list(table.columns):
        if 'close' in col:
            comp_name = col.split('_close')[0]
            table[comp_name + '_rsi'] = rsi(table[col], 14)
    table = macd(table)
    # DO NOT CHANGE
    test_days = 10
    open_prices = np.asarray(table[[symbol_dict[c] + '_open' for c in trade_company_list]])
    close_prices = np.asarray(table[[symbol_dict[c] + '_close' for c in trade_company_list]])
    # Per-company feature series, keyed by (company, feature-name).
    data = dict()
    for c in feature_company_list:
        data[c, 'close'] = table[symbol_dict[c] + '_close']
        # open prices are omitted: strongly correlated with close prices
        data[c, 'close_ema'] = table[symbol_dict[c] + '_close'].ewm(alpha=0.5).mean()
        data[c, 'rsi'] = table[symbol_dict[c] + '_rsi']
        data[c, 'macd'] = table[symbol_dict[c] + '_MACD']
        data[c, 'macd_diff'] = table[symbol_dict[c] + '_MACDDiff']
        data[c, 'close_pc'] = table[symbol_dict[c] + '_close'].pct_change().fillna(0)
    input_days = 1

    def _window(companies, key, start):
        # Concatenate the `input_days`-long slice of feature `key` for each
        # company into one flat vector (replaces six copy-pasted loops).
        return np.concatenate([data[c, key][start:start + input_days] for c in companies], axis=0)

    features = list()
    for a in range(data['kospi', 'close'].shape[0] - input_days):
        features.append(np.concatenate([
            data['kospi', 'close'][a:a + input_days],   # KOSPI close window
            _window(trade_company_list, 'close', a),    # close of traded stocks only
            _window(feature_company_list, 'close_ema', a),
            _window(feature_company_list, 'rsi', a),
            _window(feature_company_list, 'macd', a),
            _window(feature_company_list, 'macd_diff', a),
            _window(feature_company_list, 'close_pc', a),
        ], axis=0))
    scaler = StandardScaler()
    scaler.fit(features)
    features = scaler.transform(features)
    if not is_training:
        return open_prices[-test_days:], close_prices[-test_days:], features[-test_days:]
    return open_prices[input_days:], close_prices[input_days:], features
# Script entry point: build evaluation-mode features and print a sample row.
if __name__ == "__main__":
    trade_company_list = ['lgchem', 'samsung1']  # companies to actually trade
    # NOTE(review): `open` and `close` shadow builtins here; consider renaming.
    open, close, feature = make_features(trade_company_list, '2010-01-01', '2019-05-08', False)
    print(open, '\n')
    print(close, '\n')
    print(*feature[0], sep=' / ') | jaewonlee-728/2020-lfd | LFD_Project4/src/DataGenerator.py | DataGenerator.py | py | 7,789 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "pandas.date_range",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"l... |
3558679885 | from django.shortcuts import render
from django.http import HttpResponse
from .models import *
from django.shortcuts import redirect
import re
from django.utils.html import escape
def index(request, id=-1):
    """Render the file-browser page.

    Anonymous users get the bare template; authenticated users get all
    files and directories with no current directory selected.
    (The original also built unused `dir_tree`/`file_content` strings and an
    unused `dirs` queryset; querysets are lazy, so removing them changes
    nothing observable.)
    """
    if not request.user.is_authenticated:
        return render(request, 'utils/base.html')
    return render(request, 'utils/base.html', {
        'files': File.objects.all(),
        'directories': Directory.objects.all(),
        'currentDir': None,
    })
def add_dir(request, id=-1):
    """Create a Directory from the POSTed form, then redirect to the index.

    POST fields: ``dir_name``, ``dir_desc``, ``dest_for_dir`` (parent id,
    -1 means top level).  Creation is silently skipped when the destination
    does not exist or was soft-deleted.
    """
    if request.method != 'POST':
        return redirect('/')
    name = request.POST.get('dir_name')
    desc = request.POST.get('dir_desc')
    user = request.user
    dest_id = int(request.POST.get('dest_for_dir'))
    parent_set = Directory.objects.filter(id=dest_id)
    if dest_id == -1:
        # -1 marks a top-level directory with no parent.
        new_dir = Directory(name=name, description=desc, owner=user)
    elif parent_set.exists():
        parent = parent_set[0]
        if not parent.availability_flag:
            # Target directory was soft-deleted; refuse to create under it.
            return redirect('/')
        new_dir = Directory(name=name, description=desc, owner=user, parent_dir=parent)
    else:
        return redirect('/')
    new_dir.save()
    return redirect('/')
def add_file(request, id=-1):
    """Create a File from the POSTed form and upload, then redirect.

    POST fields: ``file_name``, ``file_desc``, ``dest_for_file`` (parent id,
    -1 means top level); the upload itself arrives as ``file_file``.
    Creation is silently skipped when the destination does not exist or was
    soft-deleted.
    """
    if request.method != 'POST':
        return redirect('/')
    name = request.POST.get('file_name')
    desc = request.POST.get('file_desc')
    f = request.FILES.get('file_file')
    user = request.user
    dest_id = int(request.POST.get('dest_for_file'))
    parent_set = Directory.objects.filter(id=dest_id)
    if dest_id == -1:
        # -1 marks a top-level file with no parent directory.
        new_file = File(name=name, description=desc, owner=user, content=f)
    elif parent_set.exists():
        parent = parent_set[0]
        if not parent.availability_flag:
            # Target directory was soft-deleted; refuse to create under it.
            return redirect('/')
        new_file = File(name=name, description=desc, owner=user, parent_dir=parent, content=f)
    else:
        return redirect('/')
    new_file.save()
    return redirect('/')
def delete_all(root_dir):
    """Recursively soft-delete everything nested under *root_dir*.

    Clears ``availability_flag`` on every descendant directory and file;
    rows are never actually removed from the database.
    """
    for child_dir in Directory.objects.filter(parent_dir=root_dir):
        child_dir.availability_flag = False
        child_dir.save()
        delete_all(child_dir)
    for child_file in File.objects.filter(parent_dir=root_dir):
        child_file.availability_flag = False
        child_file.save()
def delete(request):
if request.method == 'POST':
id = request.POST.get("to_delete")
dir = Directory.objects.all().filter(id=id)
if dir.exists():
d = dir[0]
d.availability_flag = False
d.save()
delete_all(dir[0])
return redirect("/")
file = File.objects.all().filter(id=id)
if file.exists():
f = file[0]
f.availability_flag = False
f.save()
return redirect("/")
return redirect("/")
else:
return redirect('/') | KacperSzczepanski/awww-webapp | webapp/utils/views.py | views.py | py | 3,284 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.shortcuts.render",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 38,
"usage_type": "call"
},
{
"api_nam... |
39207358817 | # --- carolin schieferstein & jose c. garcia alanis
# --- utf-8
# --- Python 3.7 / mne 0.20
#
# --- eeg pre-processing for dpx-r40
# --- version: january 2020
#
# --- detect and annotate artifact distorted segments in continuous data,
# --- average reference
# ========================================================================
# ------------------- import relevant extensions -------------------------
import os.path as op
from os import mkdir
from glob import glob
from re import findall
import numpy as np
import pandas as pd
from mne.io import read_raw_fif
from mne import pick_types, Annotations
# ========================================================================
# --- global settings
# --- prompt user to set project path
root_path = input("Type path to project directory: ")
# look for directory; abort early when the path is wrong
# NOTE(review): NameError is an odd choice here; FileNotFoundError would be
# more conventional.
if op.isdir(root_path):
    print("Setting 'root_path' to ", root_path)
else:
    raise NameError('Directory not found!')
# derivatives path
derivatives_path = op.join(root_path, 'derivatives')
# path to eeg files
data_path = op.join(derivatives_path, 'extract_blocks')
# create directory for output
if not op.isdir(op.join(derivatives_path, 'artifact_detection')):
    mkdir(op.join(derivatives_path, 'artifact_detection'))
# path for saving output
output_path = op.join(derivatives_path, 'artifact_detection')
# files to be analysed
files = sorted(glob(op.join(data_path, 'sub-*', '*-raw.fif')))
# ========================================================================
# ----------- loop through files and detect artifacts --------------------
for file in files:
    # --- 1) set up paths and file names -----------------------
    file_path, filename = op.split(file)
    # subject in question (first number in the file name, zero-padded)
    subj = findall(r'\d+', filename)[0].rjust(3, '0')
    # --- 2) import the preprocessed data ----------------------
    raw = read_raw_fif(file, preload=True)
    # index of eogs and stim channels
    picks_no_eeg = pick_types(raw.info,
                              eeg=False,
                              eog=True,
                              stim=True)
    # channels which are of type "eeg"
    picks_eeg = pick_types(raw.info,
                           eeg=True,
                           eog=False,
                           stim=False)
    # channel names
    channels = raw.info['ch_names']
    # sampling frequency
    sfreq = raw.info['sfreq']
    # channels that should be ignored during the artifact detection procedure
    # (frontal channels dominated by blinks, plus EOG/stim below)
    ignore_ch = {'Fp1', 'Fpz', 'Fp2', 'AF7', 'AF3', 'AFz', 'AF4', 'AF8'}
    # update set with the non-EEG channel names
    ignore_ch.update({raw.info['ch_names'][chan] for chan in picks_no_eeg})
    # --- 3.1) filter the data ---------------------------------
    # copy the file
    raw_copy = raw.copy()
    # apply 0.1-40 Hz band-pass filter
    raw_copy = raw_copy.filter(l_freq=0.1, h_freq=40.0,
                               picks=['eeg', 'eog'],
                               filter_length='auto',
                               l_trans_bandwidth='auto',
                               h_trans_bandwidth='auto',
                               method='fir',
                               phase='zero',
                               fir_window='hamming',
                               fir_design='firwin')
    # --- 3.2) find distorted segments in data -----------------
    # copy of data
    data = raw_copy.get_data(picks_eeg)
    # channels to be checked by artifact detection procedure
    ch_ix = [channels.index(ch) for ch in channels if ch not in ignore_ch]
    # detect artifacts (i.e., absolute amplitude >= 200 microV)
    times = []
    # NOTE(review): annotations_df is created but never used afterwards.
    annotations_df = pd.DataFrame(times)
    onsets = []
    duration = []
    annotated_channels = []
    bad_chans = []
    # loop through samples; after a hit, skip one second before testing again
    for sample in range(0, data.shape[1]):
        if len(times) > 0:
            if sample <= (times[-1] + int(1 * sfreq)):
                continue
        peak = []
        for channel in ch_ix:
            peak.append(abs(data[channel][sample]))
        if max(peak) >= 200e-6:
            times.append(float(sample))
            # remember which channel produced the maximal deflection
            annotated_channels.append(channels[ch_ix[int(np.argmax(peak))]])
    # if artifact found create annotations for raw data
    if len(times) > 0:
        # get first time
        # NOTE(review): `_first_time` is a private MNE attribute; prefer the
        # public `raw_copy.first_time` if available in the installed version.
        first_time = raw_copy._first_time
        # column names
        annot_infos = ['onset', 'duration', 'description']
        # save onsets
        onsets = np.asarray(times)
        # convert samples to seconds and include one second before artifact onset
        onsets = ((onsets / sfreq) + first_time) - 1
        # durations (2 s each) and labels
        duration = np.repeat(2, len(onsets))
        description = np.repeat('Bad', len(onsets))
        # get annotations in data
        artifacts = np.array((onsets, duration, description)).T
        # to pandas data frame
        artifacts = pd.DataFrame(artifacts,
                                 columns=annot_infos)
        # annotations from data
        annotations = pd.DataFrame(raw_copy.annotations)
        annotations = annotations[annot_infos]
        # merge artifacts and previous annotations
        # NOTE(review): DataFrame.append was removed in pandas 2.0; this
        # requires pandas < 2 (pd.concat is the modern equivalent).
        artifacts = artifacts.append(annotations, ignore_index=True)
        # create new annotation info
        annotations = Annotations(artifacts['onset'],
                                  artifacts['duration'],
                                  artifacts['description'],
                                  orig_time=raw_copy.annotations.orig_time)
        # apply to raw data
        raw_copy.set_annotations(annotations)
    # save total annotated time (seconds)
    total_time = sum(duration)
    # save frequency of annotation per channel (2 s per hit)
    frequency_of_annotation = {x: annotated_channels.count(x)*2
                               for x in annotated_channels}
    # if a channel is annotated for more than 1% of total time --> mark bad
    threshold = raw_copy.times[-1] * .01
    # save bads in info structure
    bad_chans = [chan for chan, value in frequency_of_annotation.items()
                 if value >= int(threshold)]
    raw_copy.info['bads'] = bad_chans
    # --- 3.3) plot data and check for inconsistencies ----------
    # (interactive: blocks until the plot window is closed)
    raw_copy.plot(scalings=dict(eeg=50e-6),
                  n_channels=len(raw.info['ch_names']),
                  bad_color='red',
                  block=True)
    # save bad channels for summary
    interpolated = raw_copy.info['bads'].copy()
    # --- if bad channels were found, repeat preprocessing ---------
    # (second pass duplicates the detection above after interpolating bads)
    if bad_chans:
        # re-run artifact detection
        raw_copy = raw.copy()
        raw_copy.info['bads'] = bad_chans
        # interpolate bads
        raw_copy.interpolate_bads(reset_bads=True,
                                  verbose=False,
                                  mode='accurate')
        # apply filter
        raw_copy = raw_copy.filter(l_freq=0.1, h_freq=40.0,
                                   picks=['eeg', 'eog'],
                                   filter_length='auto',
                                   l_trans_bandwidth='auto',
                                   h_trans_bandwidth='auto',
                                   method='fir',
                                   phase='zero',
                                   fir_window='hamming',
                                   fir_design='firwin')
        # --- find distorted segments in data ----------------------
        # copy of data
        data = raw_copy.get_data(picks_eeg)
        # channels to be checked by artifact detection procedure
        ch_ix = [channels.index(chan) for chan in channels
                 if chan not in ignore_ch]
        # detect artifacts (i.e., absolute amplitude >= 200 microV)
        times = []
        annotations_df = pd.DataFrame(times)
        onsets = []
        duration = []
        annotated_channels = []
        bad_chans = []
        # loop through samples
        for sample in range(0, data.shape[1]):
            if len(times) > 0:
                if sample <= (times[-1] + int(1 * sfreq)):
                    continue
            peak = []
            for channel in ch_ix:
                peak.append(abs(data[channel][sample]))
            if max(peak) >= 200e-6:
                times.append(float(sample))
                annotated_channels.append(channels[ch_ix[int(np.argmax(peak))]])
        # if artifact found create annotations for raw data
        if len(times) > 0:
            # get first time
            first_time = raw_copy._first_time
            # column names
            annot_infos = ['onset', 'duration', 'description']
            # save onsets
            onsets = np.asarray(times)
            # include one second before artifact onset
            onsets = ((onsets / sfreq) + first_time) - 1
            # durations and labels
            duration = np.repeat(2, len(onsets))
            description = np.repeat('Bad', len(onsets))
            # get annotations in data
            artifacts = np.array((onsets, duration, description)).T
            # to pandas data frame
            artifacts = pd.DataFrame(artifacts,
                                     columns=annot_infos)
            # annotations from data
            annotations = pd.DataFrame(raw_copy.annotations)
            annotations = annotations[annot_infos]
            # merge artifacts and previous annotations
            artifacts = artifacts.append(annotations, ignore_index=True)
            # create new annotation info
            annotations = Annotations(artifacts['onset'],
                                      artifacts['duration'],
                                      artifacts['description'],
                                      orig_time=raw_copy.annotations.orig_time)
            # apply to raw data
            raw_copy.set_annotations(annotations)
    # --- 4) re-reference data to average of 64 electrodes ----
    raw_copy.set_eeg_reference(ref_channels='average',
                               projection=True)
    # --- 5) plot data and check for inconsistencies ----------
    raw_copy.plot(scalings=dict(eeg=50e-6),
                  n_channels=len(raw.info['ch_names']),
                  bad_color='red',
                  block=True)
    # --- 6) save segmented data -----------------------------
    # create directory for save
    if not op.exists(op.join(output_path, 'sub-%s' % subj)):
        mkdir(op.join(output_path, 'sub-%s' % subj))
    # save file
    raw_copy.save(op.join(output_path, 'sub-%s' % subj,
                          'sub-%s_artifact_detection-raw.fif' % subj),
                  overwrite=True)
    # write plain-text summary of interpolated channels and annotation stats
    name = 'sub-%s_artifact_detection.txt' % subj
    sfile = open(op.join(output_path, 'sub-%s', name) % subj, 'w')
    # channels info
    sfile.write('Channels_interpolated:\n')
    for ch in interpolated:
        sfile.write('%s\n' % ch)
    # frequency of annotation
    sfile.write('Frequency_of_annotation:\n')
    for ch, f in frequency_of_annotation.items():
        sfile.write('%s, %f\n' % (ch, f))
    sfile.write('total_annotated:\n')
    sfile.write(str(round(total_time / raw_copy.times[-1], 3) * 100) + ' %\n')
    sfile.close()
    # free memory before the next subject
    del raw, raw_copy
| CarolinSchieferstein/Master-DPX-EEG | python_scripts/02_artefact_detection.py | 02_artefact_detection.py | py | 11,112 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.path.isdir",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 36,
... |
2891422631 | """
api.middleware
~~~~~~~~~~~~~~
    WSGI middleware for the API: content negotiation, response field
    limiting, JSON pretty-printing, and JSON-formatted HTTP errors.
"""
import json
import warnings

from werkzeug.local import Local, release_local
from werkzeug.wrappers import Request, Response
from werkzeug.exceptions import BadRequest, NotAcceptable, HTTPException, abort
class BeforeAfterMiddleware(object):
    """A simple middleware base class providing a before/after interface.
    A werkzeug.Local instance called `local` is bound to the middleware for
    saving state in a thread-safe way between the `before` and `after` calls.
    """
    def __init__(self, app):
        # Keep a reference to the wsgi app we're wrapping.
        # super().__setattr__ is used because __setattr__ below is replaced
        # with mutate_error to forbid post-construction mutation.
        super(BeforeAfterMiddleware, self).__setattr__('app', app)
        super(BeforeAfterMiddleware, self).__setattr__('local', Local())
    def before(self, request):
        """Do stuff before deferring to the wrapped app."""
    def after(self, request, response):
        """Do more stuff after getting a response from the wrapped app"""
    def __call__(self, environ, start_response):
        """Process a request"""
        # Set up the request and do our pre-processing
        request = Request(environ)
        self.before(request)
        # Defer to the wrapped app (Response.from_app buffers its output),
        # then do our cleanup n stuff
        response = Response.from_app(self.app, environ)
        self.after(request, response)
        # Wipe this request's thread-local state before responding.
        release_local(self.local)
        # finally, emit the (possibly modified) response
        return response(environ, start_response)
    def mutate_error(self, *args, **kwargs):
        # Bound to __setattr__/__delattr__ so instances are effectively
        # frozen; per-request state must go through self.local instead.
        raise TypeError('Mutating a BeforeAfterMiddleware is (usually) not thread-safe. '
                        'Use the thread-safe `self.local` property.')
    __setattr__ = mutate_error
    __delattr__ = mutate_error
class DataTransformer(BeforeAfterMiddleware):
    """Flexible accept, nice and normalized for internal use.

    Requests:
     * Form-encoded POST requests are transformed to flat key/value json.
     * requests with JSON bodies are passed through

    Responses:
     * JSON-encoded response bodies are transformed to whatever the client
       accepts.
    """
    def before(self, request):
        # Negotiate the response type up front; only JSON is currently
        # supported, so anything else is answered with 406 Not Acceptable.
        self.local.target = request.accept_mimetypes.best_match(['application/json'])
        if self.local.target is None:
            raise NotAcceptable()

    def after(self, request, response):
        body = response.get_data(as_text=True)
        if response.headers.get('Content-Type') != 'application/json':
            # Non-JSON payloads are passed through untouched as a string.
            # (The module previously used `warnings` without importing it,
            # which raised NameError here; it is now imported at file level.)
            warnings.warn('leaving non-JSON data as a string')
            data = body
        else:
            data = json.loads(body)
        if self.local.target == 'application/json':
            cereal = json.dumps(data)
            response.set_data(cereal)
class FieldLimiter(BeforeAfterMiddleware):
    """Pares response data down to that set by a ?field= query parameter.
    Assumes JSON response data from app.

    Limit fields by providing field= query args. EG:
    GET http://whatever/?field=code&field=subject
    The limits only work for top-level keys in structured response bodies.
    """
    def limit(self, data, fields):
        # Requesting a field the payload doesn't carry is a client error.
        missing = [field for field in fields if field not in data]
        if missing:
            raise BadRequest()
        return {key: data[key] for key in fields}

    def after(self, request, response):
        if 'field' not in request.args:
            return
        wanted = [value.lower() for value in request.args.getlist('field')]
        payload = json.loads(response.get_data(as_text=True))
        # Lists are limited element-by-element; objects are limited directly.
        if isinstance(payload, list):
            trimmed = [self.limit(item, wanted) for item in payload]
        else:
            trimmed = self.limit(payload, wanted)
        response.set_data(json.dumps(trimmed))
class PrettyJSON(BeforeAfterMiddleware):
    """Re-serialize JSON response bodies with indentation for readability."""

    def after(self, request, response):
        # Only touch JSON payloads; everything else passes through as-is.
        if response.headers.get('Content-Type') != 'application/json':
            return
        parsed = json.loads(response.get_data(as_text=True))
        response.set_data(json.dumps(parsed, indent=2))
class JsonifyHttpException(object):
    """Format http errors as json, but keep the error status in the response.

    Should wrap the highest level possible so that any errors thrown in nested
    wrapped apps will be caught.
    """
    def __init__(self, app, error_prefixes=[4, 5]):
        # Keep a reference to the wsgi app we're wrapping.  `error_prefixes`
        # lists the leading status digits treated as errors (4xx, 5xx); the
        # shared default list is never mutated, so it is safe here.
        self.app = app
        self.local = Local()
        self.error_prefixes = error_prefixes

    def jsonify_error(self, http_err, environ):
        """Build the error response for *http_err* with a JSON body."""
        data = {
            'status code': http_err.code,
            'error name': http_err.name,
            'description': http_err.description
        }
        response = http_err.get_response(environ)
        response.data = json.dumps(data)
        response.headers['content-type'] = 'application/json'
        return response

    def __call__(self, environ, start_response):
        """Process a request"""
        try:
            # Set up the request
            request = Request(environ)
            # Defer to the wrapped app, then do our cleanup
            response = Response.from_app(self.app, environ)
            # Use floor division: on Python 3, `status_code / 100` yields a
            # float (404/100 == 4.04) that can never match the prefix list,
            # so error responses would silently pass through un-jsonified.
            if response.status_code // 100 in self.error_prefixes:
                abort(response.status_code)
            # NOTE(review): `local` is only released on the success path; the
            # exception path below leaves it untouched (original behavior).
            release_local(self.local)
            return response(environ, start_response)
        except HTTPException as err:
            response = self.jsonify_error(err, environ)
            return response(environ, start_response)
| Queens-Hacks/qcumber-api | api/middleware.py | middleware.py | py | 5,677 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "werkzeug.local.Local",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "werkzeug.wrappers.Request",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "werkzeug.wrappers.Response.from_app",
"line_number": 39,
"usage_type": "call"
},
{
... |
73339046183 | '''
Created on Sep 19, 2012
@author: jluker
'''
import logging
from config import config
from solrdoc import SolrDocument
from flask import request as current_request, current_app as app
from flask.ext.solrquery import SearchResponseMixin
__all__ = ['SolrResponse']
class SolrResponse(SearchResponseMixin):
    # Wraps a raw Solr JSON response plus the originating request, exposing
    # search results, facets and pagination helpers.  Methods such as
    # get_query/get_qtime/get_hits/get_docset/get_error presumably come from
    # SearchResponseMixin (defined elsewhere) -- confirm against that class.
    # NOTE(review): this class uses Python-2-only constructs (xrange,
    # dict.has_key, dict.iteritems); it will not run on Python 3 as-is.
    def __init__(self, data, request, http_response, **kwargs):
        # data: parsed Solr JSON; request: the originating search request
        # object; http_response: underlying HTTP response for status access.
        self.raw = data
        self.request = request
        self.http_response = http_response
        self.meta = {}
    def is_error(self):
        # Non-zero Solr status signals an error; missing header counts as ok.
        return self.raw.get('responseHeader',{}).get('status', False)
    def get_http_status(self):
        return self.http_response.status_code
    def search_response(self):
        # Assemble the API-facing payload: meta info, documents and
        # (optionally) facets.
        self.meta.update({
            'query': self.get_query(),
            'qtime': self.get_qtime(),
            'hits': self.get_hits(),
            'count': self.get_count(),
        })
        resp = {
            'meta': self.meta,
            'results': {
                'docs': self.get_docset(),
            }
        }
        if self.request.facets_on():
            resp['results']['facets'] = self.get_all_facets()
        return resp
    def record_response(self, idx=0):
        # Single-document payload; None when the index is out of range.
        try:
            return self.get_docset()[idx]
        except IndexError:
            return None
    def get_error_message(self):
        """Function to remove the useless part of the error message coming from SOLR"""
        error_message = self.get_error()
        if error_message:
            if error_message.startswith('org.apache.lucene'):
                # Strip the leading Java exception class name, keep the rest.
                error_split = error_message.split(':', 1)
                if len(error_split) > 1:
                    return (''.join(error_split[1])).strip()
                else:
                    return 'Unspecified error from search engine.'
            else:
                return error_message
        else:
            return None
    def add_meta(self, key, value):
        # Extra key/value pairs surface in search_response()['meta'].
        self.meta[key] = value
    def raw_response(self):
        return self.raw
    def get_docset_objects(self):
        # Documents wrapped as SolrDocument helpers instead of plain dicts.
        return [SolrDocument(x) for x in self.get_docset()]
    def get_doc_object(self, idx):
        # Returns None implicitly when the document is missing/falsy.
        doc = self.get_doc(idx)
        if doc:
            return SolrDocument(doc)
    def get_all_facet_queries(self):
        return self.get_all_facets().get('facet_queries',{})
    def get_facets_fields(self, facet_name, hierarchical=False):
        """
        Returns the facets list for a specific facet.
        It takes care of checking if the facet has been selected
        """
        if not self.request.facets_on():
            return []
        # Translate the web-facing facet name to the Solr field name.
        solr_field_name = config.ALLOWED_FACETS_FROM_WEB_INTERFACE.get(facet_name, None)
        #I extract the facets from the raw response
        if self.raw.has_key('facet_counts'):
            raw_facet_fields = self.raw['facet_counts']['facet_fields']
        else:
            raw_facet_fields = {}
        facets_list = raw_facet_fields.get(solr_field_name, [])
        #I split the flat [value, count, value, count, ...] list in tuples
        facets_tuples_list = [tuple(facets_list[i:i+2]) for i in xrange(0, len(facets_list), 2)]
        #I extract the facet parameter submitted
        query_parameters = self.get_facet_param_field(facet_name)
        if not hierarchical:
            # (value, count, 'selected'|'') sorted by count desc, then value.
            return sorted([(elem[0], elem[1], 'selected') if elem[0] in query_parameters else (elem[0], elem[1], '') for elem in facets_tuples_list], key= lambda x: (-x[1], x[0]))
        else:
            # Hierarchical values are '/'-separated paths; the original path
            # is kept as the last tuple element.
            return sorted([tuple(elem[0].split('/') + [elem[1], 'selected', elem[0]]) if elem[0] in query_parameters else tuple(elem[0].split('/') + [elem[1], '', elem[0]]) for elem in facets_tuples_list], key= lambda x: (-x[-3], x[-4]))
    def get_hier_facets_fields(self, facet_name):
        """
        Like get_facets_fields but returns a more complex structure for the hierarchical facets
        """
        def update_multidict(fac_dict, hier_facet):
            """ Function to create the data structure for the facets"""
            # Walk down the nested dict to the facet's level and insert a
            # (count, selection, children) triple at the leaf.
            x = fac_dict
            level = hier_facet[0]
            fac_list = hier_facet[1:-2]
            last_value = hier_facet[-2]
            selection = hier_facet[-1]
            for i in range(int(level) +1):
                if i != int(level):
                    x = x[fac_list[i]][2]
                else:
                    x[fac_list[i]] = (last_value, selection, {})
        def fac_dict_to_tuple(fac_dict):
            """Returns a tuple version of the dictionary of facets"""
            tuple_facets = sorted(fac_dict.items(), key= lambda x: (-x[1][0], x[0]))
            ret_list = []
            for elem in tuple_facets:
                if not elem[1][2]:
                    ret_list.append(elem)
                else:
                    # Recurse into non-empty children.
                    ret_list.append((elem[0], (elem[1][0], elem[1][1], fac_dict_to_tuple(elem[1][2]))))
            return tuple(ret_list)
        if not self.request.facets_on():
            return []
        solr_field_name = config.ALLOWED_FACETS_FROM_WEB_INTERFACE.get(facet_name, None)
        raw_facet_fields = self.raw['facet_counts']['facet_fields']
        facets_list = raw_facet_fields.get(solr_field_name, [])
        #I split the list in tuples
        facets_tuples_list = [tuple(facets_list[i:i+2]) for i in xrange(0, len(facets_list), 2)]
        #I extract the facet parameter submitted
        query_parameters = self.get_facet_param_field(facet_name)
        #then I put all the levels of the hierarchical facets in a unique tuple
        hier_facets_split = [tuple(elem[0].split('/') + [elem[1], 'selected']) if elem[0] in query_parameters else tuple(elem[0].split('/') + [elem[1], '']) for elem in facets_tuples_list]
        #I sort the tuples because I need them in the right order to fill in the dictionary
        hier_facets_split.sort(key = lambda x: (x[0], -x[-2]))
        #I re organize the facets
        final_facets = {}
        for elem in hier_facets_split:
            update_multidict(final_facets, elem)
        #then I convert them back to lists of tuples and I return it
        return fac_dict_to_tuple(final_facets)
    def get_facet_param_field(self, facet_name):
        """
        Returns the list of query parameters for the current facet name
        """
        return [elem[1] for elem in self.get_facet_parameters() if elem[0] == facet_name]
    def get_facet_parameters(self):
        """
        Returns the list of query parameters
        """
        # Parsed once per response and cached on the instance.
        if not hasattr(self, 'request_facet_params'):
            facet_params = []
            #first I extract the query parameters excluding the default ones
            search_filters = self.request.get_param('ui_filters')
            #I extract only the parameters of the allowed facets
            inverted_allowed_facet_dict = dict((v,k) for k,v in config.ALLOWED_FACETS_FROM_WEB_INTERFACE.iteritems())
            for filter_val in search_filters:
                filter_split = filter_val.split(':', 1)
                filter_query_name = filter_split[0]
                #if the filter starts with a "-" sign (exclude), need to remove it and prepend to the value
                negative_filter = filter_query_name.startswith(u'-')
                if negative_filter:
                    filter_query_name = filter_query_name[1:]
                #remove the filter query parser if present
                if config.SOLR_FILTER_QUERY_PARSER:
                    if filter_query_name.startswith(u"{!%s}" % config.SOLR_FILTER_QUERY_PARSER):
                        filter_query_name = filter_query_name[len(u"{!%s}" % config.SOLR_FILTER_QUERY_PARSER):]
                if filter_query_name in inverted_allowed_facet_dict:
                    facet_name = inverted_allowed_facet_dict[filter_query_name]
                    filter_value = filter_split[1].strip('"')
                    if negative_filter:
                        filter_value = u'-%s' % filter_value
                    facet_params.append((facet_name, filter_value))
            self.request_facet_params = facet_params
        return self.request_facet_params
    def get_pagination(self):
        """
        wrap default pagination but use our row count setting
        """
        # Fall back to the configured default when the request carries no
        # usable integer row count.
        try:
            num_rows = int(self.request.params.rows)
        except (ValueError, TypeError):
            num_rows = int(config.SEARCH_DEFAULT_ROWS)
        return super(SolrResponse, self).get_pagination(rows_per_page=num_rows)
| adsabs/adsabs | adsabs/core/solr/response.py | response.py | py | 8,723 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "flask.ext.solrquery.SearchResponseMixin",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "solrdoc.SolrDocument",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "solrdoc.SolrDocument",
"line_number": 85,
"usage_type": "call"
},
{
... |
33149860797 | import argparse
import time
import io
import zenoh
import json
from servo import *
from pycdr2 import IdlStruct
from pycdr2.types import int8, int32, uint32, float64
@dataclass
class Vector3(IdlStruct, typename="Vector3"):
    """CDR-serializable 3-component vector (field order defines the wire layout)."""
    x: float64
    y: float64
    z: float64
@dataclass
class Twist(IdlStruct, typename="Twist"):
    """CDR-serializable velocity command with linear and angular parts."""
    linear: Vector3
    angular: Vector3
# Serial link settings for the motor controller
# (values presumably match the Dynamixel-style servo firmware -- confirm).
DEVICENAME = '/dev/ttyACM0'
PROTOCOL_VERSION = 2.0
BAUDRATE = 115200
MOTOR_ID = 200
# --- command-line interface ------------------------------------------------
parser = argparse.ArgumentParser(
    prog='drive_motors',
    description='zenoh drive_motors example')
parser.add_argument('-m', '--mode', type=str, choices=['peer', 'client'],
                    help='The zenoh session mode.')
parser.add_argument('-e', '--connect', type=str, metavar='ENDPOINT', action='append',
                    help='zenoh endpoints to connect to.')
parser.add_argument('-l', '--listen', type=str, metavar='ENDPOINT', action='append',
                    help='zenoh endpoints to listen on.')
parser.add_argument('-d', '--delay', type=float, default=0.1,
                    help='delay between each iteration in seconds')
parser.add_argument('-p', '--prefix', type=str, default='rt/turtle1',
                    help='resources prefix')
parser.add_argument('-c', '--config', type=str, metavar='FILE',
                    help='A zenoh configuration file.')
args = parser.parse_args()
# Heartbeat counter (wraps at 255 to fit a single byte) and the latest
# velocity command, initially all-zero.
count = 0
cmd = Twist(Vector3(0.0, 0.0, 0.0), Vector3(0.0, 0.0, 0.0))
# Build the zenoh configuration from the optional file plus CLI overrides.
conf = zenoh.config_from_file(args.config) if args.config is not None else zenoh.Config()
if args.connect is not None:
    conf.insert_json5(zenoh.config.CONNECT_KEY, json.dumps(args.connect))
if args.mode is not None:
    conf.insert_json5(zenoh.config.MODE_KEY, json.dumps(args.mode))
if args.listen is not None:
    conf.insert_json5(zenoh.config.LISTEN_KEY, json.dumps(args.listen))
print('[INFO] Open zenoh session...')
zenoh.init_logger()
z = zenoh.open(conf)
publ = z.declare_publisher('{}/heartbeat'.format(args.prefix))
def listener(sample):
    # Subscriber callback: replace the shared command with the freshly
    # deserialized Twist.  Rebinding the whole object keeps the main loop
    # from ever seeing a half-updated command.
    global cmd
    cmd = Twist.deserialize(sample.value.payload)
print('[INFO] Connect to motor...')
servo = Servo(DEVICENAME, PROTOCOL_VERSION, BAUDRATE, MOTOR_ID)
# NOTE(review): constructors normally never return None, so this guard looks
# dead -- verify how Servo reports a failed connection.
if servo is None:
    print('[WARN] Unable to connect to motor.')
else:
    servo.write1ByteTxRx(IMU_RE_CALIBRATION, 1)
sub = z.declare_subscriber('{}/cmd_vel'.format(args.prefix), listener)
time.sleep(3.0)
print('[INFO] Running!')
while True:
    if servo is not None:
        servo.write1ByteTxRx(HEARTBEAT, count)
        servo.write4ByteTxRx(CMD_VELOCITY_LINEAR_X, int(cmd.linear.x))
        servo.write4ByteTxRx(CMD_VELOCITY_LINEAR_Y, int(cmd.linear.y))
        servo.write4ByteTxRx(CMD_VELOCITY_LINEAR_Z, int(cmd.linear.z))
        servo.write4ByteTxRx(CMD_VELOCITY_ANGULAR_X, int(cmd.angular.x))
        servo.write4ByteTxRx(CMD_VELOCITY_ANGULAR_Y, int(cmd.angular.y))
        servo.write4ByteTxRx(CMD_VELOCITY_ANGULAR_Z, int(cmd.angular.z))
        # Reset to zero after each write: the motor only keeps moving while
        # fresh cmd_vel messages arrive (dead-man behavior).
        cmd = Twist(Vector3(0.0, 0.0, 0.0), Vector3(0.0, 0.0, 0.0))
    publ.put(count)
    count += 1
    if count > 255:
        count = 0
    time.sleep(args.delay)
| eclipse-zenoh/zenoh-demos | turtlebot3/zdrive-python/zdrive.py | zdrive.py | py | 3,143 | python | en | code | 27 | github-code | 36 | [
{
"api_name": "pycdr2.IdlStruct",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "pycdr2.types.float64",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "pycdr2.types.float64",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "pycdr2.typ... |
35674891165 | """
*Prefix*
A prefix.
"""
from abc import ABCMeta
from typing import TypeVar
__all__ = ["Prefix"]
class Prefix(metaclass=ABCMeta):
    """Abstract marker for a key prefix (modifier key family).

    Each attribute is a ``TypeVar`` placeholder naming one modifier key
    (Meta, Control, Shift, Hyper, Super, Alt).
    """

    # NOTE: the original assigned ``__metaclass__ = ABCMeta``, which is the
    # Python 2 spelling and is silently ignored on Python 3 (this module
    # uses Python 3 typing).  Declaring the metaclass in the class header
    # actually applies it, without changing how callers use the class.
    Meta = TypeVar("Meta")
    Control = TypeVar("Control")
    Shift = TypeVar("Shift")
    Hyper = TypeVar("Hyper")
    Super = TypeVar("Super")
    Alt = TypeVar("Alt")
| jedhsu/text | text/_elisp/key/_prefix/_prefix.py | _prefix.py | py | 328 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "abc.ABCMeta",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "typing.TypeVar",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "typing.TypeVar",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "typing.TypeVar",
"line_... |
29432719201 | """
构造MeshSegNet数据集
"""
import os
import vtk
from shutil import copyfile
def convert(dataset: str, img_path: str, label_path: str):
    """Convert every PLY file below ``dataset`` to VTK legacy format.

    Files whose name contains ``label`` go to ``label_path``, all others to
    ``img_path``; output names are ``<case>_<stem>.vtk`` where ``<case>`` is
    the part of the folder name before the first underscore.
    """
    reader = vtk.vtkPLYReader()
    writer = vtk.vtkPolyDataWriter()
    # Visit every sub-folder of the dataset directory.
    for folder in os.listdir(dataset):
        folder_dir = os.path.join(dataset, folder)
        if not os.path.isdir(folder_dir):
            continue
        case_id = folder.split('_')[0]
        for file in os.listdir(folder_dir):
            reader.SetFileName(os.path.join(folder_dir, file))
            reader.Update()
            # Grab the PLY geometry and re-emit it as VTK polydata.
            ply_data = reader.GetOutput()
            target_dir = label_path if 'label' in file else img_path
            writer.SetFileName(os.path.join(target_dir,
                                            case_id + '_' + file.split('.')[0] + '.vtk'))
            writer.SetInputData(ply_data)
            writer.Write()
def copyto(dataset: str, img_path: str, label_path: str):
    """Copy the raw files from ``dataset`` into flat image/label folders.

    Files whose name contains ``label`` are copied to ``label_path``, all
    others to ``img_path``; the destination name is ``<case>_<stem>.ply``
    where ``<case>`` is the folder name up to the first underscore.
    """
    # Visit every sub-folder of the dataset directory.
    for folder in os.listdir(dataset):
        folder_dir = os.path.join(dataset, folder)
        if not os.path.isdir(folder_dir):
            continue
        case_id = folder.split('_')[0]
        for file in os.listdir(folder_dir):
            target_dir = label_path if 'label' in file else img_path
            copyfile(os.path.join(folder_dir, file),
                     os.path.join(target_dir,
                                  case_id + '_' + file.split('.')[0] + '.ply'))
def main():
    """Entry point: copy every configured source dataset into the flat
    img/label folder layout expected by the MeshSegNet pipeline."""
    img_path = '/media/why/77B8B456EE73FE06/users/xsf_ubuntu/Dataset/OralScan/img'
    label_path = '/media/why/77B8B456EE73FE06/users/xsf_ubuntu/Dataset/OralScan/label'
    sources = ['/media/why/77B8B456EE73FE06/users/xsf_ubuntu/Dataset/OralScan/dataset_labelled_cell_color_downsampled_10000']
    for source in sources:
        copyto(source, img_path, label_path)
if __name__ == '__main__':
main() | XiShuFan/MeshSegNet | step0_prepare.py | step0_prepare.py | py | 2,615 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "vtk.vtkPLYReader",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "vtk.vtkPolyDataWriter",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
... |
''' Write a Python program that generates a random (9x9) matrix with numbers
between 0 and 10 and prints it. Then ask for the desired quadrant and print
the elements of that quadrant. '''
from random import randint
from termcolor import colored
# Build and print the 9x9 matrix; row/column index 4 acts as the divider
# between the four quadrants and is highlighted in blue.
m = [0] * 9
for i in range(9):
    m[i] = [0] * 9
    for j in range(9):
        m[i][j] = randint(0,10)
        if i == 4 or j == 4: # Drawing the "divider" between the quadrants
            print(colored(f"{m[i][j]:02}","blue"),end=" ")
        else:
            print(f"{m[i][j]:02}",end=" ")
    print()
# Keep prompting until the user enters a quadrant number between 1 and 4.
while True:
    quadrante = int(input("\nInforme o quadrante [1-4] >> "))
    if quadrante in range(1,5):
        break
    print("Valor inválido!! Digite novamente...")
# Print only the chosen quadrant; the divider row/column (index 4) is
# excluded from every quadrant.
if quadrante == 1:
    for i in range(4):
        for j in range(4):
            print(f"{m[i][j]:02}", end=" ")
        print()
elif quadrante == 2:
    for i in range(4):
        for j in range(5,9):
            print(f"{m[i][j]:02}", end=" ")
        print()
elif quadrante == 3:
    for i in range(5,9):
        for j in range(4):
            print(f"{m[i][j]:02}", end=" ")
        print()
else:
    for i in range(5,9):
        for j in range(5,9):
            print(f"{m[i][j]:02}", end=" ")
        print()
# The string literal below preserves an alternative implementation kept by
# the original author for reference; it is never executed.
'''
from random import randint
from termcolor import colored
m = []
for i in range(9):
    m.append([])
    for j in range(9):
        m[i].append(randint(0,10))
        print(f'{m[i][j]:02}', end=' ')
    print()
while True:
    q = int(input('\nInforme o quadrante >> '))
    if 1 <= q <= 4:
        break
    print('Quadrante inválido. Digite um valor entre 1 e 4')
print()
if q == 1:
    for i in range(9):
        for j in range(9):
            if i < 4 and j < 4:
                print(colored(f'{m[i][j]:02}','red'), end=' ')
            else:
                print(f'{m[i][j]:02}', end=' ')
        print()
elif q == 2:
    for i in range(9):
        for j in range(9):
            if i < 4 and j > 4:
                print(colored(f'{m[i][j]:02}','red'), end=' ')
            else:
                print(f'{m[i][j]:02}', end=' ')
        print()
elif q == 3:
    for i in range(9):
        for j in range(9):
            if i > 4 and j < 4:
                print(colored(f'{m[i][j]:02}','red'), end=' ')
            else:
                print(f'{m[i][j]:02}', end=' ')
        print()
else:
    for i in range(9):
        for j in range(9):
            if i > 4 and j > 4:
                print(colored(f'{m[i][j]:02}','red'), end=' ')
            else:
                print(f'{m[i][j]:02}', end=' ')
        print()
''' | danibassetto/Python | pythonProjectListasExercicio/Lista7/L7_E15.py | L7_E15.py | py | 2,579 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "random.randint",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "termcolor.colored",
"line_number": 14,
"usage_type": "call"
}
] |
14076176482 | from collections import Counter
import numpy as np
import pandas as pd
from matplotlib import pyplot
from backfit.BackfitUtils import init_objects
from backfit.utils.utils import load_new_diffs, load_mcmc_diffs
from utils.utils import extract_runs_w_timestamp
if __name__ == '__main__':
    # Load question/category lookup structures; -1 presumably means
    # "all users" — TODO confirm against init_objects.
    n_users = -1
    cats, cat_lookup, all_qids, users, _stretches_, levels, cat_ixs = init_objects(n_users, seed=666)
    passdiffs, stretches, passquals, all_qids = load_new_diffs()
    all_qids = list(all_qids)
    users = np.unique(users)
    n_users = len(users)
    print("kickoff for n_users?",n_users)
    n_qids = 1+len(all_qids) #+1 to represent ROOT
    # NOTE(review): this handle is opened but never written to or closed.
    usersf = open("direct_mcmc_users.txt","w")
    udf = pd.read_csv("users_all.csv")
    # Keep only student accounts with a known date of birth.
    students = udf[ (udf["role"]=="STUDENT") & (udf["date_of_birth"].notnull()) ]
    print(students["date_of_birth"])
    # Age at registration in years (365.2425 days per Gregorian year).
    ages = ( pd.to_datetime(students['registration_date']) - pd.to_datetime(students['date_of_birth'])).dt.total_seconds() / (86400*365.2425)
    print(ages)
    # Histogram of ages in quarter-year bins, plus summary statistics.
    binwidth=0.25
    binz = np.arange(min(ages), max(ages) + binwidth, binwidth)
    ages.plot.hist(alpha=0.5, bins=binz)
    print(np.median(ages))
    print(np.mean(ages))
    print(np.std(ages))
pyplot.show() | rjm49/isaacdata | student_profiling/StudentProfiling.py | StudentProfiling.py | py | 1,235 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "backfit.BackfitUtils.init_objects",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "backfit.utils.utils.load_new_diffs",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 17,
"usage_type": "call"
},
{
... |
74352481063 | # -*- coding: utf-8 -*-
"""
-------------------------------------------------------------------------------
GUFY - Copyright (c) 2019, Fabian Balzer
Distributed under the terms of the GNU General Public License v3.0.
The full license is in the file LICENSE.txt, distributed with this software.
-------------------------------------------------------------------------------
@author: Fabian Balzer (fabian.balzer@studium.uni-hamburg.de)
A script for writing plot scripts.
The module is structured as follows:
- The WriteToScriptDialog class for the write-to-script options
- Functions for saving the file and writing it
- Functions for constructing the slice and projection plot strings
- Functions for constructing the line and phase plot strings
- Function for constructing the profile plot strings
(All variants including time series)
- Function for constructing the plot-making string
- Functions to construct time series strings
- Functions to construct miscellaneous strings like derived fields, file
loading and annotations (alphabetical order)
"""
import PyQt5.QtWidgets as QW
import PyQt5.QtCore as QC
import PyQt5.QtGui as QG
from datetime import datetime
import math
import yt
from simgui_modules.utils import getCalcQuanName, getCalcQuanString
from simgui_modules.additionalWidgets import GUILogger
from simgui_modules.checkBoxes import coolCheckBox
# %% The WriteToScriptDialog class for the write-to-script options
class WriteToScriptDialog(QW.QDialog):
    """A dialog that pops up if the user wants to write the settings of the GUI
    or the plot to a reproducible script.
    Params:
        Param_Dict: For receiving information that has to be written to script
        PlotWindow: Whether the instance belongs to a plot window or the
                    general GUI
    """
    def __init__(self, Param_Dict, PlotWindow=True):
        """Build the modal dialog, wire its signals and show it."""
        super().__init__()
        # Wording of title/explanation depends on whether we describe a
        # finished plot or the current GUI settings.
        if PlotWindow:
            GUILogger.info("Options for writing the plot displayed as a reproducible skript")
            self.setWindowTitle("Options for writing the plot to script")
            self.text = "that were used to create the plot"
        else:
            GUILogger.info("Options for writing settings of the GUI as a reproducible skript")
            self.setWindowTitle("Options for writing the settings to script")
            self.text = "of the GUI as they are specified in the <b>Plot options</b>"
        GUILogger.info("Detailed information for making plots using yt can "
                       "also be found "
                       '<a href="https://yt-project.org/doc/visualizing/plots.html">here</a>, '
                       '<a href="https://yt-project.org/doc/cookbook/simple_plots.html">here</a> and '
                       '<a href="https://yt-project.org/doc/cookbook/complex_plots.html">here</a>.')
        # User choices, updated by the checkbox handlers below.
        self.noComments = False
        self.plotAll = False
        self.Param_Dict = Param_Dict
        self.setModal(True)
        self.initUi(PlotWindow)
        self.signalsConnection()
        self.resize(400, 300)
        self.setWindowIcon(QG.QIcon('simgui_registry/CoverIcon.png'))
        self.show()
    def initUi(self, PlotWindow):
        """Initialize all of the ingredients for this UI."""
        # Buttons for closing the window:
        self.buttonBox = QW.QDialogButtonBox(self)
        self.buttonBox.addButton("Create script", QW.QDialogButtonBox.AcceptRole)
        self.buttonBox.addButton("Cancel", QW.QDialogButtonBox.RejectRole)
        self.textBrowser = QW.QTextBrowser()
        text = ""
        if not PlotWindow:
            text += ("""<p><b><font color="red">Warning:</font></b> Due to the way
                     the script writing works, the script might be
                     incomplete concerning some of the extrema or other
                     settings. </p>""")
        text += f"""<p>If you want, I can now produce a script with the
            settings {self.text} for you. </p>
            <p>Pressing the <b>Create script</b>-button will prompt you
            to enter a filename of where the script is to be created.</p>
            <p>If you don't want comments in the script - which is understandable
            if you're experienced, for beginners they might be useful - just
            check the checkbox below.</p>"""
        self.commentCheckBox = coolCheckBox(text="Disable comments", width=None)
        layout = QW.QVBoxLayout()
        layout.addWidget(self.textBrowser)
        layout.addWidget(self.commentCheckBox)
        # Offer the "plot all" option only for a loaded time series that is
        # not already a multi-time profile and not plotted against time.
        series = self.Param_Dict["isValidSeries"]
        noMultiple = not self.Param_Dict["TimeSeriesProf"]
        notTime = not self.Param_Dict["XAxis"] == "time"
        if series and noMultiple and notTime:
            text += """<p>Since you have loaded a time series, you may transfer
            the <b>Plot all</b> functionality to the script as well. Just tick
            the corresponding CheckBox"""
            self.makeSeriesCheckBox = coolCheckBox(text="Write script to plot all", width=None)
            self.makeSeriesCheckBox.toggled.connect(self.getSeriesInput)
            layout.addWidget(self.makeSeriesCheckBox)
        self.textBrowser.setHtml(text)
        layout.addWidget(self.buttonBox)
        self.setLayout(layout)
    def signalsConnection(self):
        """Connects all the signals and emits some of them for a proper start
        """
        self.buttonBox.accepted.connect(self.applyPressed)
        self.buttonBox.rejected.connect(self.cancelPressed)
        self.commentCheckBox.toggled.connect(self.getCommentInput)
        # Only a confirmed dialog triggers the actual file writing.
        self.accepted.connect(lambda: saveFile(self))
    def applyPressed(self):
        """Handles the Button Press of 'Create n plots'"""
        self.accept()
    def cancelPressed(self):
        """Handles the Button press of 'Cancel'"""
        self.reject()
    def getCommentInput(self, state):
        """Saves the state of the checkbox"""
        self.noComments = state
    def getSeriesInput(self, state):
        """Saves whether the script should iterate over the whole series."""
        self.plotAll = state
# %% Functions for saving the file and writing it
def saveFile(dialog):
    """Ask the user to give a name for the file to be written out and save
    the plot with its current settings as a script.

    Params:
        dialog: the WriteToScriptDialog holding Param_Dict and the user's
                comment/series choices.
    """
    filename = QW.QFileDialog.getSaveFileName(dialog, 'Write Plot to file',
                                              dialog.Param_Dict["Directory"],
                                              "Python files (*.py);; "
                                              "All files (*)")[0]
    if filename != "":
        writePlotToScript(dialog.Param_Dict, filename, dialog.noComments,
                          dialog.plotAll)
        # Fix: interpolate the chosen filename into the success message (the
        # previous f-string contained no placeholder).
        GUILogger.log(29, "A script to reproduce the plot has been writt"
                      f"en to <b>{filename}</b>.")
    else:
        GUILogger.info("No filename selected.")
def writePlotToScript(Param_Dict, filename, noComments, plotAll):
    """Write a script reproducing the current plot to ``filename``.

    Params:
        Param_Dict: Parameter dictionary containing all of the settings
        filename: Name of the file to be written (including directory)
        noComments: bool - strip comments from the generated script
        plotAll: for time series - also iterate over every dataset
    """
    if not filename.endswith(".py"):
        filename += ".py"
    text = constructCompleteString(Param_Dict, plotAll, filename)
    if noComments:
        # Drop whole-line comments, then restore the coding cookie on top.
        kept = [ln for ln in text.split("\n") if not ln.strip().startswith("#")]
        kept.insert(0, "# -*- coding: utf-8 -*-")
        rebuilt = []
        for ln in kept:
            if "#" in ln:
                # Keep only the code part before an inline " # " comment.
                rebuilt.append(ln.split(" # ")[0])
            elif not ln.isspace():
                rebuilt.append(ln)
            else:
                # Whitespace-only lines collapse to empty lines.
                rebuilt.append("")
        text = "\n".join(rebuilt) + "\n"
        # Remove every '"""\nNote:' documentation block as well.
        chunks = text.split('"""\nNote:')
        text = chunks[0]
        for chunk in chunks[1:]:
            text += "".join(chunk.split('"""')[1:])
    with open(filename, "w") as file:
        file.write(text)
# %% Function for writing the introduction
def constructCompleteString(Param_Dict, plotAll, filename):
    """Construct a long string that can be used to replot the plot with its
    current settings.
    Params:
        Param_Dict: Parameter dictionary containing all of the plot data
        plotAll: bool - for time series, iterate over every dataset
        filename: target path; only its basename is embedded in the header
    Returns:
        text: String that can be used to replot the plot
    """
    # Header docstring plus the imports every generated script needs.
    introString = f'''# -*- coding: utf-8 -*-
"""
{filename.split("/")[-1]}
Script containing the plot produced using GUFY - GUI for FLASH Code simulations
based on yt.
Created on {datetime.now().strftime("%a, %b %d %X %Y")}.
Contains a suggestion to reproduce the plot(s).
Since it is dynamically created, it may not be the prettiest but I hope it can
help to get started.\n
If you detect any bugs or have questions, please contact me via email through
fabian.balzer@studium.uni-hamburg.de.
"""
import yt
import matplotlib.pyplot as plt
'''
    # 2D plots additionally need a colorbar helper from mpl_toolkits.
    if Param_Dict["DimMode"] == "2D":
        introString += "from mpl_toolkits.axes_grid1 import make_axes_locatable\n"
    if plotAll:
        introString += "import os"
    introString += "\n\n"
    introString += constructDerFieldString(Param_Dict)
    # Delegate to the series- or single-file builder as appropriate.
    if checkTimeSeriesPlot(Param_Dict, plotAll):
        plotString = constructSeriesString(Param_Dict, plotAll)
    elif Param_Dict["isValidFile"]:
        plotString = constructFileString(Param_Dict)
    else:
        return "# This shouldn't happen. Something went wrong writing the file"
    text = introString + plotString
    return text
def constructFileString(Param_Dict):
    """Construct the script text for a single data file.

    Builds the ``yt.load`` preamble and then delegates to the module-level
    ``construct<PlotMode>Plot`` builder matching the chosen plot mode.
    """
    loadingString = "# Load the file through yt and save the dataset as ds:\n"
    loadingString += f'ds = yt.load("{Param_Dict["Directory"]}/{str(Param_Dict["CurrentDataSet"])}")\n\n'
    # Dispatch by name via globals() instead of eval(): same lookup, but no
    # arbitrary-code evaluation and clearer errors on an unknown plot mode.
    builder = globals()["construct" + Param_Dict["PlotMode"] + "Plot"]
    plotString = builder(Param_Dict)
    return loadingString + plotString
# %% Functions for constructing the slice and projection plot strings
def constructSlicePlot(Param_Dict):
    """Construct the script text for a slice plot.

    Builds the plot-creation call (axis-aligned or off-axis), the
    unit/limit/colormap modifications and the requested annotations.
    Fix: removed the unused local ``ds`` (the generated text only mentions
    ``ds`` literally inside the emitted script).
    """
    gridUnit = Param_Dict["GridUnit"]
    c0, c1, c2 = Param_Dict["XCenter"], Param_Dict["YCenter"], Param_Dict["ZCenter"]
    width = f'(({Param_Dict["HorWidth"]}, "{gridUnit}"), ({Param_Dict["VerWidth"]}, "{gridUnit}"))'
    field = Param_Dict["ZAxis"]
    if Param_Dict["NormVecMode"] == "Axis-Aligned":
        plotString = (
f'''
# Initialize a yt axis-aligned slice plot with the dataset ds, normal vector {Param_Dict["NAxis"]},
# field {field} and the optional parameters axes_unit, center, width and fontsize:
c0 = yt.YTQuantity({c0}, "{gridUnit}") # Get the center coordinates.
c1 = yt.YTQuantity({c1}, "{gridUnit}") # Unfortunately, using a YTArray
c2 = yt.YTQuantity({c2}, "{gridUnit}") # does not work properly.
slc = yt.SlicePlot(ds, "{Param_Dict["NAxis"]}", "{field}", axes_unit="{gridUnit}",
                   center=[c0, c1, c2], width={width},
                   fontsize=14)
''')
    else:
        normVec = [Param_Dict[axis + "NormDir"] for axis in ["X", "Y", "Z"]]
        northVec = [Param_Dict[axis + "NormNorth"] for axis in ["X", "Y", "Z"]]
        plotString = (
f'''# Initialize a yt off-axis slice plot with the dataset ds, normal vector,
# field {field} and the optional parameters north_vector, axes_unit, center and
# fontsize. The north vector is the vector that defines the 'up'-direction:
c0 = yt.YTQuantity({c0}, "{gridUnit}") # Get the center coordinates.
c1 = yt.YTQuantity({c1}, "{gridUnit}") # Unfortunately, using a YTArray
c2 = yt.YTQuantity({c2}, "{gridUnit}") # does not work properly.
slc = yt.OffAxisSlicePlot(ds, {normVec}, "{field}", north_vector={northVec},
                          axes_unit="{gridUnit}", fontsize=14, center=[c0, c1, c2])
''')
    plotString += f'# Hint: You can access the generated data using slc.frb["{field}"]\n\n\n'
    modString = ""
    fieldMin = Param_Dict["ZMin"]  # Float
    fieldMax = Param_Dict["ZMax"]
    # If no extrema were calculated, tell the user how to get them.
    if field not in Param_Dict["FieldMins"].keys():
        modString = (
f'''# It seems that you have not calculated extrema for {field}.
# You can do this by using
# minMaxArray = ds.all_data().quantities.extrema({field})
# which will return a YTArray with two entries plus the units.
''')
    unit = Param_Dict["ZUnit"]
    cmap = Param_Dict["ColorScheme"]
    modString += ("# Set unit, minimum, maximum and color scheme:\n"
                  'slc.set_unit("{0}", "{1}")\nslc.set_zlim("{0}", {2}, {3}) '
                  '# These are given in the same unit, {1}.\n'
                  'slc.set_cmap("{0}", "{4}")\n'.format(field, unit, fieldMin, fieldMax, cmap))
    log = Param_Dict["ZLog"]  # Boolean
    if fieldMin != "":
        modString += "# Set our field scaling logarithmic if wanted:\n"
        if min(fieldMin, fieldMax) <= 0 and log:
            # symlog: linear around zero, logarithmic elsewhere.
            modString += ('slc.set_log("{0}", True, linthresh=(({1}-{2})/1000)) '
                          "# linthresh sets a linear scale for a small portion and then a symbolic one "
                          "for negative values\n".format(field, fieldMax, fieldMin))
        else:
            modString += f'slc.set_log("{field}", {log}) # This may be redundant in some cases\n'
    zoom = Param_Dict["Zoom"]
    modString += f"slc.zoom({zoom})\n\n\n"
    # Do the annotations:
    annoString = ""
    title = Param_Dict["PlotTitle"]
    if title != "":
        annoString += f'slc.annotate_title("{title}") # Give the plot the title it deserves.\n'
    if Param_Dict["Timestamp"]:
        annoString += ("slc.annotate_timestamp(corner='upper_left', draw_inset_box=Tr"
                       "ue) # There are many more modifications for the timestamp.\n")
    if Param_Dict["Geometry"] == "cartesian":
        if Param_Dict["Scale"]:
            annoString += "slc.annotate_scale(corner='upper_right')\n"
        if Param_Dict["Grid"]:
            annoString += ('WARNING = "There is a yt-internal bug where grid '
                           'annotation doesn\'t work if a center coordinate is '
                           'set to 0!"\nslc.annotate_grids()\n')
        if Param_Dict["ParticleAnno"]:
            # A slab width of 0/"" falls back to the full domain height.
            if Param_Dict["PSlabWidth"] == "" or float(Param_Dict["PSlabWidth"]) == 0:
                Param_Dict["PSlabWidth"] = 1
            height = abs(Param_Dict["FieldMins"]["DomainHeight"] - Param_Dict["FieldMaxs"]["DomainHeight"])
            width = float(Param_Dict["PSlabWidth"])*height
            annoString += (f"slc.annotate_particles({width})\n")
        if Param_Dict["VelVectors"]:
            annoString += "slc.annotate_velocity(normalize=True)\n"
        if Param_Dict["VelStreamlines"]:
            annoString += ('WARNING = "There is a yt-internal bug where streamline '
                           'annotation doesn\'t work if a center coordinate is '
                           'set to 0!"\n')
            # Streamline components lie in the plane perpendicular to NAxis.
            if Param_Dict["NAxis"] == "x":
                annoString += "slc.annotate_streamlines('velocity_y', 'velocity_z')\n"
            elif Param_Dict["NAxis"] == "y":
                annoString += "slc.annotate_streamlines('velocity_x', 'velocity_z')\n"
            elif Param_Dict["NAxis"] == "z":
                annoString += "slc.annotate_streamlines('velocity_x', 'velocity_y')\n"
        if Param_Dict["MagVectors"]:
            annoString += "slc.annotate_magnetic_field(normalize=True)\n"
        if Param_Dict["MagStreamlines"]:
            annoString += ('WARNING = "There is a yt-internal bug where streamline '
                           'annotation doesn\'t work if a center coordinate is '
                           'set to 0!"\n')
            if Param_Dict["NAxis"] == "x":
                annoString += "slc.annotate_streamlines('magy', 'magz')\n"
            elif Param_Dict["NAxis"] == "y":
                annoString += "slc.annotate_streamlines('magx', 'magz')\n"
            elif Param_Dict["NAxis"] == "z":
                annoString += "slc.annotate_streamlines('magx', 'magy')\n"
        if Param_Dict["Contour"]:
            annoString += "slc.annotate_contour('{}')\n".format(field)
    elif Param_Dict["Geometry"] == "cylindrical":
        if Param_Dict["Grid"]:
            annoString += "slc.annotate_grids()\n"
    if len(annoString) > 0:  # If annotations are made, declare them:
        annoString = "# Annotations for the plot:\n" + annoString + "\n\n"
    figureString = constructUsingMPL(Param_Dict, "slc")
    text = plotString + modString + annoString + figureString
    return text
def constructProjectionPlot(Param_Dict):
    """Construct the script text for a projection plot.

    Fixes relative to the previous version:
    - the DomainDiv branch now interpolates the field name into the
      ``ds.add_field`` line (it previously emitted the literal ``{field}``)
      and terminates the ``unit = ...`` line with a newline;
    - the log-scale block is written when extrema ARE available
      (``fieldMin != ""``), matching constructSlicePlot - the inverted
      condition would have compared ``min("", float)`` and crashed;
    - the generated symlog line gets its missing closing parenthesis;
    - the particle annotation uses ``proj.`` instead of ``slc.``;
    - removed the unused local ``ds``.
    """
    field = Param_Dict["ZAxis"]
    if Param_Dict["WeightField"] is None:
        weightField = "None"
    else:
        weightField = '"{}"'.format(Param_Dict["WeightField"])
    plotString = ""
    if Param_Dict["DomainDiv"]:
        # in case the user wants to divide everything by the domain_height,
        # we define a new field which is just the old field divided by height
        # and then do a projectionPlot for that.
        height = Param_Dict["FieldMaxs"]["DomainHeight"] - Param_Dict["FieldMins"]["DomainHeight"]
        plotString += (
f"""# We want to norm our projection by the domain height:
domainHeight = {height} # You can obtain this by calculating
# ds.domain_right_edge - ds.domain_left_edge for all dimensions
# To do this, we can define a new field for the dataset:
def _NormField(field, data):
    return data["{field}"]/yt.units.YTQuantity(ds.arr(domainHeight, "code_length")) # This way, yt will understand the units
""")
        field = "Normed " + field
        plotString += f'unit = yt.units.unit_object.Unit("{Param_Dict["ZUnit"]}/cm")\n'
        plotString += ("# Unfortunately add_field doesn't understand lambda functions.\n"
                       f'ds.add_field(("gas", "{field}"), function=_NormField,\n'
                       " units='auto', dimensions=unit.dimensions)\n\n\n")
    NVector, gridUnit = Param_Dict["NAxis"], Param_Dict["GridUnit"]
    c0, c1, c2 = Param_Dict["XCenter"], Param_Dict["YCenter"], Param_Dict["ZCenter"]
    width = f'(({Param_Dict["HorWidth"]}, "{gridUnit}"), ({Param_Dict["VerWidth"]}, "{gridUnit}"))'
    # NOTE(review): this reassignment discards the "Normed " prefix set in the
    # DomainDiv branch, so the generated plot uses the original field - kept
    # as-is to preserve behavior; verify against the GUI's intent.
    field = Param_Dict["ZAxis"]
    if Param_Dict["ParticlePlot"]:
        plotString += (
f'''
# Initialize a yt Particle Projection plot with the dataSet ds, Normal
# Vector {NVector}, field {field} and the optional parameters axes_unit,
# weight_field, center and fontsize:
c0 = yt.YTQuantity({c0}, "{gridUnit}") # Get the center coordinates.
c1 = yt.YTQuantity({c1}, "{gridUnit}") # Unfortunately, using a YTArray
c2 = yt.YTQuantity({c2}, "{gridUnit}") # does not work properly.
proj = yt.ParticleProjectionPlot(ds, "{Param_Dict["NAxis"]}", "{field}",
                                 axes_unit="{gridUnit}", center=[c0, c1, c2],
                                 weight_field={weightField}, width={width},
                                 fontsize=14)
# Warning: Particle plots are still an experimental feature of the GUI and may
# produce errors
''')
    elif Param_Dict["NormVecMode"] == "Axis-Aligned":
        plotString += (
f'''
# Initialize a yt Projection plot with the dataSet ds, Normal Vector {NVector},
# field {field} and the optional parameters axes_unit, weight_field,
# center and fontsize:
c0 = yt.YTQuantity({c0}, "{gridUnit}") # Get the center coordinates.
c1 = yt.YTQuantity({c1}, "{gridUnit}") # Unfortunately, using a YTArray
c2 = yt.YTQuantity({c2}, "{gridUnit}") # does not work properly.
proj = yt.ProjectionPlot(ds, "{Param_Dict["NAxis"]}", "{field}", axes_unit="{gridUnit}",
                         center=[c0, c1, c2], weight_field={weightField},
                         width={width}, fontsize=14)
''')
    else:
        normVec = [Param_Dict[axis + "NormDir"] for axis in ["X", "Y", "Z"]]
        northVec = [Param_Dict[axis + "NormNorth"] for axis in ["X", "Y", "Z"]]
        plotString += (
f'''
# Initialize a yt Projection plot with the dataSet ds, Normal Vector {NVector},
# field {field} and the optional parameters axes_unit, weight_field,
# center and fontsize:
c0 = yt.YTQuantity({c0}, "{gridUnit}") # Get the center coordinates.
c1 = yt.YTQuantity({c1}, "{gridUnit}") # Unfortunately, using a YTArray
c2 = yt.YTQuantity({c2}, "{gridUnit}") # does not work properly.
proj = yt.OffAxisProjectionPlot(ds, {normVec}, "{field}", north_vector={northVec},
                                axes_unit="{gridUnit}", center=[c0, c1, c2],
                                weight_field={weightField}, width={width},
                                fontsize=14)
''')
    plotString += f'# Hint: You can access the generated data using proj.frb["{field}"]\n\n\n'
    modString = ""
    fieldMin = Param_Dict["ZMin"]  # Float
    fieldMax = Param_Dict["ZMax"]
    # If no extrema were calculated, tell the user how to get them.
    if field not in Param_Dict["FieldMins"].keys():
        modString = (
f'''# It seems that you have not calculated extrema for {field}.
# You can do this by using
# minMaxArray = ds.all_data().quantities.extrema({field})
# which will return a YTArray with two entries plus the units.
''')
    unit = Param_Dict["ZUnit"]
    cmap = Param_Dict["ColorScheme"]
    modString += ("# Set unit, minimum, maximum and color scheme:\n"
                  'proj.set_unit("{0}", "{1}")\nproj.set_zlim("{0}", {2}, {3}) '
                  '# These are given in the same unit, {1}.\n'
                  'proj.set_cmap("{0}", "{4}")\n'.format(field, unit, fieldMin, fieldMax, cmap))
    if fieldMin != "":
        log = Param_Dict["ZLog"]  # Boolean
        modString += "# Set our field scaling logarithmic if wanted:\n"
        if min(fieldMin, fieldMax) <= 0 and log:
            # symlog: linear around zero, logarithmic elsewhere.
            modString += ("proj.set_log('{0}', True, linthresh=(({1}-{2})/1000)) "
                          "# linthresh sets a linear scale for a small portion and then a symbolic one "
                          "for negative values\n".format(field, fieldMax, fieldMin))
        else:
            modString += f'proj.set_log("{field}", {log}) # This may be redundant in some cases.\n'
    zoom = Param_Dict["Zoom"]
    modString += f"proj.zoom({zoom})\n\n"
    # Do the annotations:
    annoString = ""
    title = Param_Dict["PlotTitle"]
    if title != "":
        annoString += f'proj.annotate_title("{title}") # Give the plot the title it deserves.\n'
    if Param_Dict["Timestamp"]:
        annoString += ("proj.annotate_timestamp(corner='upper_left', draw_inset_box=Tr"
                       "ue) # There are many more modifications for the timestamp.\n")
    if Param_Dict["Geometry"] == "cartesian":
        if Param_Dict["Scale"]:
            annoString += "proj.annotate_scale(corner='upper_right')\n"
        if Param_Dict["Grid"]:
            annoString += ('WARNING = "There is a yt-internal bug where grid '
                           'annotation doesn\'t work if a center coordinate is '
                           'set to 0!"\n')
            annoString += "proj.annotate_grids()\n"
        if Param_Dict["ParticleAnno"]:
            # A slab width of 0/"" falls back to the full domain height.
            if Param_Dict["PSlabWidth"] == "" or float(Param_Dict["PSlabWidth"]) == 0:
                Param_Dict["PSlabWidth"] = 1
            height = abs(Param_Dict["FieldMins"]["DomainHeight"] - Param_Dict["FieldMaxs"]["DomainHeight"])
            width = float(Param_Dict["PSlabWidth"])*height
            annoString += (f"proj.annotate_particles({width})\n")
        if Param_Dict["VelVectors"]:
            annoString += "proj.annotate_velocity(normalize=True)\n"
        if Param_Dict["VelStreamlines"]:
            annoString += ('WARNING = "There is a yt-internal bug where streamline '
                           'annotation doesn\'t work if a center coordinate is '
                           'set to 0!"\n')
            # Streamline components lie in the plane perpendicular to NAxis.
            if Param_Dict["NAxis"] == "x":
                annoString += "proj.annotate_streamlines('velocity_y', 'velocity_z')\n"
            elif Param_Dict["NAxis"] == "y":
                annoString += "proj.annotate_streamlines('velocity_x', 'velocity_z')\n"
            elif Param_Dict["NAxis"] == "z":
                annoString += "proj.annotate_streamlines('velocity_x', 'velocity_y')\n"
        if Param_Dict["MagVectors"]:
            annoString += "proj.annotate_magnetic_field(normalize=True)\n"
        if Param_Dict["MagStreamlines"]:
            annoString += ('WARNING = "There is a yt-internal bug where streamline '
                           'annotation doesn\'t work if a center coordinate is '
                           'set to 0!"\n')
            if Param_Dict["NAxis"] == "x":
                annoString += "proj.annotate_streamlines('magy', 'magz')\n"
            elif Param_Dict["NAxis"] == "y":
                annoString += "proj.annotate_streamlines('magx', 'magz')\n"
            elif Param_Dict["NAxis"] == "z":
                annoString += "proj.annotate_streamlines('magx', 'magy')\n"
        if Param_Dict["Contour"]:
            annoString += "proj.annotate_contour('{}')\n".format(field)
    elif Param_Dict["Geometry"] == "cylindrical":
        if Param_Dict["Grid"]:
            annoString += "proj.annotate_grids()\n"
    annoString += "\n"
    figureString = constructUsingMPL(Param_Dict, "proj")
    text = plotString + modString + annoString + figureString
    return text
# %% Functions for constructing the line and phase plot strings
def constructLinePlot(Param_Dict):
    """Construct the script text for a line plot.

    Converts the start/end coordinates to code_length, emits the
    ``yt.LinePlot`` call and the unit/log modifications.
    Fix: the generated symlog ``set_log`` line was missing its closing
    parenthesis (compare the slice-plot builder, which emits ``/1000))``).
    """
    ds = Param_Dict["CurrentDataSet"]
    startends = ["XLStart", "YLStart", "ZLStart", "XLEnd", "YLEnd", "ZLEnd"]
    valueList = []
    for key in startends:
        # Convert every coordinate from the previous grid unit to code_length.
        value = float(yt.units.YTQuantity(Param_Dict[key], Param_Dict["oldGridUnit"]).to(ds.quan(1, 'code_length').units).value)
        valueList.append(value)
    field = Param_Dict["YAxis"]
    plotString = (
f"""# Initialize a yt Line plot with the dataSet ds, field {field}, the
# given start- and end points in code_length and the number of sampling points:
lplot = yt.LinePlot(ds, "{field}", {valueList[:3]}, {valueList[3:]},
                    npoints=512, fontsize=14)
""")
    plotString += ("# Note that you can also add more than one field for the "
                   "line plot and that you\n# can label them independently "
                   'using field_labels={"field":label}.\n\n')
    modString = (f'lplot.annotate_legend("{field}") # Optional, but looks nice\n')
    fieldMin = Param_Dict["YMin"]  # Float
    fieldMax = Param_Dict["YMax"]
    unit = Param_Dict["ZUnit"]
    modString += f'lplot.set_x_unit("{Param_Dict["LineUnit"]}")\n'
    modString += f'lplot.set_unit("{field}", "{unit}")\n'
    modString += ("# Unfortunately, yt line-plots don't have built in min and "
                  "max settings, so we use pyplot later.\n")
    log = Param_Dict["ZLog"]  # Boolean
    modString += "# Set our field scaling logarithmic if wanted:\n"
    if min(fieldMin, fieldMax) <= 0 and log:
        # symlog: linear around zero, logarithmic elsewhere.
        modString += (f'lplot.set_log("{field}", True, linthresh=(({fieldMax}-{fieldMin})/1000)) '
                      "# linthresh sets a linear scale for a small portion and then a symbolic one "
                      "for negative values\n")
    else:
        modString += (f'lplot.set_log("{field}", {log}) # This may be redundant '
                      "in some cases.\n")
    annoString = ""
    title = Param_Dict["PlotTitle"]
    if title != "":
        annoString += (f'lplot.annotate_title("{Param_Dict["YAxis"]}", "{title}")'
                       "# Give the plot the title it deserves.\n")
    figureString = constructUsingMPL(Param_Dict, "lplot")
    text = plotString + modString + annoString + figureString
    return text
def constructPhasePlot(Param_Dict):
    """Constructs the phase plot script.

    Emits the data-selection preamble, the (Particle)PhasePlot call and the
    unit/limit/log/colormap modifications for all three axes.
    """
    XField, YField, ZField = Param_Dict["XAxis"], Param_Dict["YAxis"], Param_Dict["ZAxis"]
    plotString = ("ad = ds.all_data() # through e.g. ad = ds.sphere('c', (50, 'kpc"
                  "')) you could also only select a region of the dataset.\n\n")
    # Quote the weight field name for the generated call, or pass None.
    if Param_Dict["WeightField"] is None:
        weightField = "None"
    else:
        weightField = f'"{Param_Dict["WeightField"]}"'
    plotString += (
f"""# Initialize a yt phase plot with the data ad, XField {XField},
# YField {YField}, ZField {ZField} and the optional parameters
# weight_field, fractional, the number of bins and fontsize:
""")
    if Param_Dict["ParticlePlot"]:
        plotString += (
f'''phas = yt.ParticlePhasePlot(ad, "{XField}", "{YField}", "{ZField}",
                            weight_field={weightField}, x_bins=128, y_bins=128,
                            fontsize=14)
# Warning: Particle plots are still an experimental feature of the GUI and may
# produce errors
''')
    else:
        plotString += (
f'''phas = yt.PhasePlot(ad, "{XField}", "{YField}", "{ZField}",
                    weight_field={weightField}, fontsize=14)
''')
    cmap = Param_Dict["ColorScheme"]
    modString = ("# Set our field scaling logarithmic if wanted. "
                 "Phase plots don't support symlog scales.\n")
    # One set_log line per axis.
    for axis in ["X", "Y", "Z"]:
        log = Param_Dict[axis + "Log"]  # Boolean
        field = Param_Dict[axis + "Axis"]
        modString += f'phas.set_log("{field}", {log}) # This may be redundant in some cases.\n'
    modString += "# Set unit, minimum, maximum and color scheme:\n"
    for axis in ["X", "Y", "Z"]:
        modString += (f'phas.set_unit("{Param_Dict[axis +"Axis"]}", "{Param_Dict[axis + "Unit"]}")\n')
    # For each axis without calculated extrema, tell the user how to get them.
    if XField not in Param_Dict["FieldMins"].keys():
        modString += (
f'''# It seems that you have not calculated extrema for {XField}.
# You can do this by using
# minMaxArray = ds.all_data().quantities.extrema({XField})
# which will return a YTArray with two entries plus the units.
''')
    if YField not in Param_Dict["FieldMins"].keys():
        modString += (
f'''# It seems that you have not calculated extrema for {YField}.
# You can do this by using
# minMaxArray = ds.all_data().quantities.extrema({YField})
# which will return a YTArray with two entries plus the units.
''')
    if ZField not in Param_Dict["FieldMins"].keys():
        modString += (
f'''# It seems that you have not calculated extrema for {ZField}.
# You can do this by using
# minMaxArray = ds.all_data().quantities.extrema({ZField})
# which will return a YTArray with two entries plus the units.
''')
    XMin = Param_Dict["XMin"]
    XMax = Param_Dict["XMax"]
    YMin = Param_Dict["YMin"]
    YMax = Param_Dict["YMax"]
    ZMin = Param_Dict["ZMin"]
    ZMax = Param_Dict["ZMax"]
    modString += (f'phas.set_xlim({XMin}, {XMax})\n'
                  f'phas.set_ylim({YMin}, {YMax})\n'
                  f'phas.set_zlim("{ZField}", {ZMin}, {ZMax}) # These are given in the same unit, {Param_Dict["ZUnit"]}.\n')
    modString += f'phas.set_cmap("{ZField}", "{cmap}")\n'
    annoString = ""
    title = Param_Dict["PlotTitle"]
    if title != "":
        # NOTE(review): this emits annotate_title with two arguments (the
        # Y-field and the title) - confirm against yt's PhasePlot API.
        annoString += (f'phas.annotate_title("{Param_Dict["YAxis"]}", "{title}")'
                       "# Give the plot the title it deserves.\n")
    figureString = constructUsingMPL(Param_Dict, "phas")
    text = plotString + modString + annoString + figureString
    return text
# %% Function for constructing the profile plot strings
# (All variants including time series)
def constructProfilePlot(Param_Dict, time=False, multiPlot=False):
    """Constructs the Profile plot script.

    Params:
        Param_Dict: GUI parameter dict; also supplies "CurrentDataSet",
            which the eval()-based axis display-name lookups below rely on
        time: bool to indicate whether 'time' has been used for the x-axis
        multiPlot: bool to indicate whether the user wants to do plots at
                   multiple times
    Returns:
        text: the assembled script (data creation + figure setup +
              plotting + saving) as one string
    """
    if Param_Dict["WeightField"] is None:
        weightField = "None"
    else:
        weightField = "'{}'".format(Param_Dict["WeightField"])
    ds = Param_Dict["CurrentDataSet"]  # referenced by the eval() calls below
    if time:
        if weightField == "None":
            weightField = "'ones'"
        calcQuanName = getCalcQuanName(Param_Dict)
        calcQuanString = getCalcQuanString(Param_Dict).replace("'", '"')
        dataString = (
            f'''# Loop over the datasets of the series to get the datapoints at each time:
# This does NOT reflect the extrema you chose in the GUI, please
# add them by hand by only using ts[start:(end+1)]
i = 0
time_data = []
y_data = []
length = len(ts)
for ds in ts:
    time_data.append(ds.current_time.to_value("{Param_Dict["XUnit"]}"))
    ad = ds.all_data() # You could select an arbitrary yt region here
    # for available quantities to calculate see
    # https://yt-project.org/doc/analyzing/objects.html as well.
    yResult = {calcQuanString}
    y_data.append(yResult)
    i += 1
    print(f"Progress: {{i}}/{{length}} data points calculated.")
arr_x = yt.YTArray(time_data, "{Param_Dict["XUnit"]}") # In a YTArray, the units
arr_y = yt.YTArray(y_data, "{Param_Dict["YUnit"]}") # are stored as well.
"""
Note: Another way to do this is loading the series through yt using
ts = yt.load("{Param_Dict["Directory"]}/{Param_Dict["Seriesname"]}")
and then using the built-in parallel iteration tool, piter, like this:
yt.enable_parallelism(suppress_logging=True)
storage = {{}} # A storage dictionary to store the data during parallel iteration
for store, ds in ts.piter(storage=storage):
    ad = ds.all_data()
    yResult = {calcQuanString}
    store.result = yResult
    i += 1
    print(f"Progress: {{i}}/{{length}} data points calculated.")
y_arr = yt.YTArray(list(storage.values()), "{Param_Dict["YUnit"]}")
"""
''')
        # If possible, pass the values that have already been calculated to the user
        times, values = [], []
        field = Param_Dict["YAxis"]
        for ds in Param_Dict["DataSeries"]:
            try:
                # BUGFIX: this local used to be called 'time', shadowing the
                # boolean parameter and breaking the 'if time:' branch below.
                dsTime = Param_Dict["DataSetDict"][str(ds) + "Time"].to_value(Param_Dict["XUnit"])
                value = Param_Dict["DataSetDict"][str(ds) + field + calcQuanName]
                value = yt.YTQuantity(value, Param_Dict["FieldUnits"][field]).to_value(Param_Dict["YUnit"])
                times.append(dsTime)
                values.append(value)
            except KeyError:
                # Not every dataset has cached values; skip those silently.
                pass
        if len(times) > 0:
            dataString += (
                f'''# It seems that you have already calculated some plot points using the GUI.
# I have stored them for you, and you can reuse them if wanted:
calcTimes = yt.YTArray({times}, "{Param_Dict["XUnit"]}")
calcValues = yt.YTArray({values}, "{Param_Dict["YUnit"]}")
''')
    elif multiPlot:
        onlyEvery = Param_Dict["ProfOfEvery"]
        # suf(3) -> "3rd " etc.; purely for a human-readable emitted comment
        suf = lambda n: "%d%s "%(n,{1:"st",2:"nd",3:"rd"}.get(n if n<20 else n%10,"th"))
        numString = suf(onlyEvery)
        dataString = (
            f'''# Loop over the datasets of the series to make a profile at each time:
i = 0
onlyEvery = {onlyEvery} # If you only want to plot every {numString} plot
length = {math.ceil(len(Param_Dict["DataSeries"])/onlyEvery)} # You can use math.ceil(len(ts)/onlyEvery) to calculate this.
labels = []
profiles = [] # The data for the y-axis will be stored in YTArrays
for ds in ts:
    if i % onlyEvery == 0:
        # Create a data container to hold the whole dataset.
        ad = ds.all_data() # you can use an arbitrary yt region here.
        # Create a 1d profile of xfield vs. yfield. Through n_bins the number
        # of bins may be modified as well:
        prof = yt.create_profile(ad, "{Param_Dict["XAxis"]}",
                                 fields=["{Param_Dict["YAxis"]}"],
                                 weight_field={weightField}, n_bins=64)
        # Add labels
        time = ds.current_time.to_value("kyr")
        label = f"{Param_Dict["YAxis"]} at {{time:.2e}} kyr"
        labels.append(label)
        profiles.append(prof["{Param_Dict["YAxis"]}"])
        print(f"Progress: {{int(i/onlyEvery)+1}}/{{length}} profiles done.")
    i += 1
arr_x = prof.x # get the data for the x-axis
"""
Note: Another way to do this is loading the series through yt using
ts = yt.load("{Param_Dict["Directory"]}/{Param_Dict["Seriesname"]}")
and then using the built-in parallel iteration tool, piter, like this:
yt.enable_parallelism(suppress_logging=True)
storage = {{}} # A storage dictionary to store the data during parallel iteration
for store, ds in ts.piter(storage=storage):
    if i % onlyEvery == 0:
        ad = ds.all_data()
        prof = yt.create_profile(ad, "{Param_Dict["XAxis"]}",
                                 fields=["{Param_Dict["YAxis"]}"],
                                 weight_field={weightField}, n_bins=64)
        # Add labels
        time = ds.current_time.to_value("kyr")
        label = f"{Param_Dict["YAxis"]} at {{time:.2e}} kyr"
        labels.append(label)
        store.result = prof["{Param_Dict["YAxis"]}"]
        print(f"Progress: {{int(i/onlyEvery)+1}}/{{length}} profiles done.")
    i += 1
arr_y = list(storage.values())
"""
''')
    else:
        # BUGFIX: "First reate" -> "First create" in the emitted comment.
        dataString = (
            f'''# First create a data container to hold the whole dataset.
ad = ds.all_data() # you can use an arbitrary yt region here.
# Create a 1d profile of the x-field vs. the y-field. You could also pass
# multiple fields for the vertical axis.
prof = yt.create_profile(ad, "{Param_Dict["XAxis"]}",
                         fields=["{Param_Dict["YAxis"]}"],
                         weight_field={weightField})
label = "{Param_Dict["YAxis"]}"
arr_x = prof.x
arr_y = prof["{Param_Dict["YAxis"]}"]
''')
    figureString = (
        f'''# Now that we have created the data, we can set up a figure and plot it.
# More information: https://matplotlib.org/3.1.0/api/_as_gen/matplotlib.pyplot.figure.html
fig, axes = plt.subplots(**{{'figsize': (10, 7), 'dpi': 100}})
# Add the plot:
x_values = arr_x.to_value("{Param_Dict["XUnit"]}")
''')
    if multiPlot:
        plotString = (
            f'''for i, arr_y in enumerate(profiles):
    label = labels[i]
    y_values = arr_y.to_value("{Param_Dict["YUnit"]}")
    axes.plot(x_values, y_values, "-", linewidth=3, label=label)
''')
    else:
        plotString = (f'y_values = arr_y.to_value("{Param_Dict["YUnit"]}")\n'
                      f'axes.plot(x_values, y_values, "-", linewidth=3, label="{Param_Dict["YAxis"]}")\n')
    # Build LaTeX axis labels: unit brackets plus the yt display name; the
    # eval() calls need the 'ds' bound at the top of this function.
    field = Param_Dict['XAxis']
    xUnit = yt.YTQuantity(1, Param_Dict["XUnit"]).units.latex_repr # get latex repr for unit
    if xUnit != "": # do not add empty brackets
        xUnit = r"$\:\left[" + xUnit + r"\right]$"
    if field == "dens" or field == "temp":
        xName = eval(f"ds.fields.flash.{field}.get_latex_display_name()")
    elif field == "time":
        xName = r"$\rm{Time}$"
    else:
        xName = eval(f"ds.fields.gas.{field}.get_latex_display_name()")
    field = Param_Dict["YAxis"]
    yUnit = yt.YTQuantity(1, Param_Dict["YUnit"]).units.latex_repr # get latex repr for unit
    if yUnit != "": # do not add empty brackets
        yUnit = r"$\:\left[" + yUnit + r"\right]$"
    if field == "dens" or field == "temp":
        yName = eval(f"ds.fields.flash.{field}.get_latex_display_name()")
    elif field == "time":
        yName = r"$\rm{Time}$"
    else:
        yName = eval(f"ds.fields.gas.{field}.get_latex_display_name()")
    if time:
        plotString += (
            f'''
"""
Note:
Since you might not want to recalculate all the data, you can save the values
in a csv-file the following way:
import csv
with open("time_profile_{calcQuanName}_{field}_data.csv", "w") as writefile:
    # configure writer to write standard csv file
    writer = csv.writer(writefile, delimiter=',', lineterminator='\\n')
    writer.writerow(["time", "{field}"])
    writer.writerow(["{Param_Dict["XUnit"]}", "{Param_Dict["YUnit"]}"])
    for time, y in zip(time_data, y_data):
        writer.writerow([time, y])
print("The data points have been saved to\\n"
      "time_profile_{calcQuanName}_{field}_data.csv.")
"""
''')
    elif multiPlot:
        pass
    else:
        # BUGFIX: this example used to zip the nonexistent time_data/y_data
        # variables; the single-profile script defines x_values/y_values.
        plotString += (
            f'''
"""
Note:
Since you might not want to recalculate all the data, you can save the values
in a csv-file the following way:
import csv
with open("profile_{Param_Dict["XAxis"]}_{Param_Dict["YAxis"]}_data.csv", "w") as writefile:
    # configure writer to write standard csv file
    writer = csv.writer(writefile, delimiter=',', lineterminator='\\n')
    writer.writerow(["{Param_Dict["XAxis"]}", "{Param_Dict["YAxis"]}"])
    writer.writerow(["{Param_Dict["XUnit"]}", "{Param_Dict["YUnit"]}"])
    for x, y in zip(x_values, y_values):
        writer.writerow([x, y])
print("The data points have been saved to\\n"
      "profile_{Param_Dict["XAxis"]}_{Param_Dict["YAxis"]}_data.csv.")
"""
''')
    plotString += (
        '''
"""
Note:
We could also use yt's built-in yt.ProfilePlot.from_profiles(profiles, labels),
but we're using matplotlib.pyplot since it offers more control.
"""
# Modify the settings for x- and y-axis:
# with axes.set_xscale you can set it log(arithmic) or lin(ear).
''')
    if Param_Dict["XLog"]:
        plotString += 'axes.set_xscale("log")\n'
    if Param_Dict["YLog"]:
        plotString += 'axes.set_yscale("log")\n'
    plotString += (
        f'''# We can freely customize the axes' names. You can get a units' LaTeX rep
# like this: yt.YTQuantity(1, "yourUnit").units.latex_repr
# and the display name with ds.fields.gas.YOURFIELD.get_latex_display_name()
axes.set_xlabel(r"{xName + xUnit}")
axes.set_ylabel(r"{yName + yUnit}")
axes.set_xlim({Param_Dict["XMin"]}, {Param_Dict["XMax"]}) # be careful about the units here
axes.set_ylim({Param_Dict["YMin"]}, {Param_Dict["YMax"]})
axes.legend() # Display the labels we have given
axes.grid() # You can customize the grid even further if wanted
axes.set_title(r"{Param_Dict["PlotTitle"]}")
''')
    if Param_Dict["AddProfile"]:
        plotString += (
            '''
"WARNING: The second profile functionality is not a feature yet."
"""
Note: I have not implemented adding a second plot to this script writer yet.
It's actually pretty simple. Just use
twinaxes = axes.twinx()
twinaxes.tick_params(axis='y')
axes.tick_params(axis='y')
and plot the desired data on the twinaxes, which is going to use the same
x-axis.
"""
''')
    if Param_Dict["Timestamp"] and Param_Dict["XAxis"] != "time" and not multiPlot:
        plotString += ("# Use a custom function to annotate the timestamp (see above):\n"
                       "drawTimestampBox(axes, ds)\n")
    plotString += "# fig.show() # Works best in iPython console or jupyter\n\n\n"
    saveString = '# example of how you could name the file:\n'
    saveString += f'plotfilename = "{str(Param_Dict["CurrentDataSet"])}_'
    if multiPlot:
        saveString += 'Multi'
    saveString += f'{Param_Dict["PlotMode"]}plot_{Param_Dict["YAxis"]}.png"\n'
    saveString += "fig.savefig(plotfilename) # Takes the name of the file as an argument.\n"
    saveString += 'print(f"The file has been saved as {plotfilename}")\n'
    text = dataString + figureString + plotString + saveString
    if Param_Dict["Timestamp"] and Param_Dict["XAxis"] != "time" and not multiPlot:
        text = createCustomTimestampString() + text
    return text
# %% Function for constructing the plot-making string
def constructUsingMPL(Param_Dict, plotName):
    """Build the script fragment that renders the yt plot on a matplotlib
    figure and saves it to disk.

    params:
        Param_Dict: for retrieving information about what to plot
        plotName: the name the plot has been given, e.g. 'slc' for slice
    returns:
        text: constructed script text
    """
    mode = Param_Dict["PlotMode"]
    parts = [
        "# Everything that's necessary for setting up the figure.\n",
        "# More information: https://matplotlib.org/3.1.0/api/_as_gen/matplotlib.pyplot.figure.html\n",
        'fig, axes = plt.subplots(**{"figsize": (10, 7), "dpi": 100})\n',
    ]
    is2D = Param_Dict["DimMode"] == "2D"
    if is2D:
        # A 2D plot carries a color axis, so a cax has to be prepared.
        parts.append("# Since we're using the color axis, we need the following:\n"
                     "divider = make_axes_locatable(axes)\n"
                     'cax = divider.append_axes("right", size="5%", pad=0.05)\n')
    parts.append("\n\n# Now we need to pass our axes to yt so it can setup the"
                 " plots for us.\n")
    if mode == "Line":
        field = Param_Dict["YAxis"]
        parts.append(f'{plotName}.plots["{field}"].axes = axes\n')
    else:
        field = Param_Dict["ZAxis"]
        if Param_Dict["DomainDiv"]:
            # Normalized fields are registered under a prefixed name.
            field = "Normed " + field
        parts.append(f'{plotName}.plots["{field}"].axes = axes\n')
        if is2D:
            parts.append(f'{plotName}.plots["{field}"].cax = cax\n')
    parts.append(f"{plotName}._setup_plots() # This runs the yt-internal command for"
                 " plotting. It's different for each plot type.\n")
    if mode == "Line":
        parts.append(f'axes.set_ylim({Param_Dict["YMin"]}, {Param_Dict["YMax"]})\n')
        if Param_Dict["LineAnno"]:
            parts.append("annotateStartEnd(axes, ds) # annotate custom start and end points\n")
    timestamped = Param_Dict["Timestamp"] and mode in ["Line", "Phase"]
    if timestamped:
        parts.append("# Use a custom function to annotate the timestamp (see below):\n"
                     "drawTimestampBox(axes, ds)\n")
    if mode in ["Slice", "Projection"] and Param_Dict["SetAspect"]:
        parts.append('# Set the aspect ratio. If "1", equal distances will be equally long.\n')
        parts.append('axes.set_aspect("auto") # For "auto", the figure is filled.\n')
    parts.append("# fig.show() # Works best in iPython console or jupyter\n\n")
    parts.append(f'\nplotfilename = "{str(Param_Dict["CurrentDataSet"])}_{mode}plot_{field}.png"'
                 " # example of how you could name the file\n")
    parts.append("fig.savefig(plotfilename) # Takes the name of the file as an argument.\n")
    parts.append('print("The file has been saved as {0}".format(plotfilename))\n')
    text = "".join(parts)
    # Helper definitions have to come before the code that calls them.
    if timestamped:
        text = createCustomTimestampString() + text
    if Param_Dict["LineAnno"] and mode == "Line":
        text = createLinePointAnnoString(Param_Dict) + text
    return text
# %% Functions to construct time series strings
def checkTimeSeriesPlot(Param_Dict, plotAll):
    """Return True iff the plot request actually uses the loaded time series.

    That is the case when time is on the x-axis, when profiles at multiple
    times are requested, or when every dataset of the series is plotted.

    params:
        Param_Dict: GUI parameter dict ("XAxis" and "TimeSeriesProf" are read)
        plotAll: bool, True when each file of the series gets its own plot
    """
    # Return the condition directly instead of an if/else True/False ladder.
    return bool(Param_Dict["XAxis"] == "time" or Param_Dict["TimeSeriesProf"] or
                plotAll)
def constructSeriesString(Param_Dict, plotAll):
    """Build the script text for a time series: a yt.load() header followed
    by the plot section that matches the requested mode."""
    header = ("# Load the series through yt and save it as ts:\n"
              f'ts = yt.load("{Param_Dict["Directory"]}/{Param_Dict["Seriesname"]}")\n\n\n')
    if Param_Dict["XAxis"] == "time":
        body = constructProfilePlot(Param_Dict, time=True)
    elif Param_Dict["TimeSeriesProf"]:
        body = constructProfilePlot(Param_Dict, multiPlot=True)
    elif plotAll:
        body = constructTimeSeriesPlots(Param_Dict)
    # NOTE: checkTimeSeriesPlot() guarantees one branch above matched;
    # otherwise this raises NameError, matching the original behavior.
    return header + body
def constructTimeSeriesPlots(Param_Dict):
    """Constructs a string to reproduce the loop used to plot pictures of a
    whole series.

    The emitted script defines make<PlotMode>Plot(ds) and then iterates over
    the series 'ts', saving every onlyEvery-th frame into a timestamped
    directory (the generated code needs 'os', 'plt' and 'yt' in scope).
    """
    plotMode = Param_Dict["PlotMode"]
    onlyEvery = Param_Dict["OnlyEvery"]
    loopString = "\n# We are now performing a loop over each file of the dataset:\n"
    loopString += f"directory = '{Param_Dict['Directory']}/{plotMode}plot_{datetime.now().strftime('%d_%m_%Y_%H_%M_%S')}'\n"
    loopString += "os.mkdir(directory) # Create a directory to save the frames in\n"
    # BUGFIX: this assignment was missing its trailing newline, which fused
    # 'onlyEvery = N' and 'i = 0' into one invalid line of generated code.
    loopString += f"onlyEvery = {onlyEvery}\n"
    loopString += "i = 0 # for convenient progress updates\n"
    loopString += "for ds in ts:\n"
    suf = lambda n: "%d%s "%(n,{1:"st",2:"nd",3:"rd"}.get(n if n<20 else n%10,"th"))
    numString = suf(onlyEvery)
    length = len(Param_Dict["DataSeries"])
    # BUGFIX: 'i += 1' is now emitted at loop level; it used to sit inside
    # the if-block, so i never advanced and later frames were skipped.
    loopString += (
        f""" if i % onlyEvery == 0: # if you only want every {numString} file
    make{plotMode}Plot(ds)
    print(f"Progress: {{(i/{onlyEvery}+1)}}/{math.ceil(length/onlyEvery)} {plotMode}plots done.")
    saveName = f"{{directory}}/{plotMode}plot_{{i+1}}"
    plt.savefig(saveName)
 i += 1
""")
    plotString = "# Here we define the actual plot function that is performed for each frame:\n"
    plotString += f"def make{plotMode}Plot(ds):\n"
    # BUGFIX: the emitted docstring is indented to the 4-space level of the
    # generated function body (it used to be mis-indented by a single space).
    plotString += (
        f'''    """A function to produce a {plotMode}plot of the dataset ds using the
    desired parameters.
    Parameters:
        ds: (FLASH)-Dataset loaded using yt
    """
''')
    # eval() is contained here: PlotMode only ever holds one of the GUI's
    # fixed mode names, dispatching to a construct<Mode>Plot sibling.
    funcString = eval("construct"+Param_Dict["PlotMode"]+"Plot(Param_Dict)")
    # We need to insert the four spaces because we use this inside of a function
    funcString = funcString.split("\n")
    for line in funcString:
        if line != "":
            plotString += f"    {line}\n"
        else:
            plotString += "\n"
    text = plotString + loopString # this way the function is defined before it is called
    return text
# %% Functions to construct miscellaneous strings like derived fields, file
# loading and annotations (alphabetical order)
def constructDerFieldString(Param_Dict):
    """Build the script part that re-adds any custom derived fields in use.

    Returns:
        text: String that is empty in case no custom fields are used
    """
    # Collect each custom field that appears on one of the three axes,
    # keeping first-seen order and skipping duplicates.
    usedFields = []
    for axis in ("X", "Y", "Z"):
        name = Param_Dict[axis + "Axis"]
        if name in Param_Dict["NewDerFieldDict"].keys() and name not in usedFields:
            usedFields.append(name)
    pieces = []
    if usedFields:
        pieces.append("# Adding the custom derived fields that are used:\n")
    for name in usedFields:
        info = Param_Dict["NewDerFieldDict"][name]
        displayName = info["DisplayName"]
        func = info["FunctionText"]
        unit = info["Unit"]
        dim = info["Dimensions"]
        override = info["Override"]
        if "np." in func:
            # The field function relies on numpy, so the script needs it too.
            pieces.append("import numpy as np\n")
        pieces.append(
            f'''# First, define the function that is used for the field:
{func}
# Now we can just add this new field to yt. Another way would be loading the
# dataset(s) first and then add it using ds.add_field(...)
yt.add_field(("gas", "{name}"), function=_{name},
             units="{unit}", dimensions="{dim}", force_override={override},
             display_name=r"{displayName}", take_log=False)
''')
    return "".join(pieces)
def createLinePointAnnoString(Param_Dict):
    """Return a script snippet that re-creates the start/end point
    annotation used for line plots."""
    start_coords = [Param_Dict[axis + "LStart"] for axis in "XYZ"]
    end_coords = [Param_Dict[axis + "LEnd"] for axis in "XYZ"]
    return (
        f'''
# The following functions are used for custom annotation of the start and end
# points used for line plotting
def createLineText(numbers, mode, ds):
    """Creates and returns the text for the line edit.
    Parameters:
        numbers: list of coordinates
        mode: 'Start' or 'End'
        ds: dataset object
    """
    # It is advisable to turn them into a easy-to-read numbers. This is of
    # course optional.
    numbers = yt.YTArray(numbers, "{Param_Dict["LineUnit"]}")
    lengthUnit = ds.get_smallest_appropriate_unit(max(numbers),
                                                  quantity="distance")
    lengths = numbers.to_value(lengthUnit)
    return f"{{mode}}: ({{lengths[0]:.1f}}, {{lengths[1]:.1f}}, {{lengths[2]:.1f}}) {{lengthUnit}}"
def annotateStartEnd(axes, ds):
    """Annotates two boxes for start and end coordinates.
    Parameters:
        axes: mpl axes object for plotting
        ds: yt dataset object for receiving nice units"""
    bboxArgs = {{'boxstyle': 'square,pad=0.3', 'facecolor': 'white',
                 'linewidth': 2, 'edgecolor': 'black', 'alpha': 0.5}}
    startText = createLineText({start_coords}, "Start", ds)
    endText = createLineText({end_coords}, "End", ds)
    axes.text(x=0.0, y=-0.14, s=startText, size=10, bbox=bboxArgs,
              ha="left", va="bottom", transform=axes.transAxes,
              **{{"color": "black"}})
    axes.text(1.0, -0.14, endText, size=10, bbox=bboxArgs, ha="right",
              va="bottom", transform=axes.transAxes, **{{"color": "black"}})
print("Annotating line points. Sometimes the positions x and y have "
      "to be adjusted.")
''')
def createCustomTimestampString():
    """Return the helper-function snippet for a custom timestamp box.

    The drawTimestampBox(...) call itself has to be added by the caller."""
    return (
        '''
# The following two functions are used for a custom timestamp annotation
# because they are not supported for line, phase and profile plots by yt:
def createTimestampText(ds):
    """Creates the text for a timestamp annotation.
    Parameters:
        ds: yt dataset object: To extract the time information from
    """
    timeUnit = ds.get_smallest_appropriate_unit(ds.current_time,
                                                quantity="time")
    time = ds.current_time.to_value(timeUnit)
    return f"t = {time:.1f} {timeUnit}" # give it a nice look.
def drawTimestampBox(axes, ds):
    """Draw a custom timestamp annotation box like the one
    used by yt on a given axes axes using the dataset ds for information.
    Parameters:
        axes: mpl ax object: The ax to draw the timestamp on
        ds: yt dataset object: To extract the time information from
    """
    bboxArgs = {'boxstyle': 'square,pad=0.3', 'facecolor': 'black',
                'linewidth': 3, 'edgecolor': 'white', 'alpha': 0.5}
    text = createTimestampText(ds)
    axes.text(x=0.03, y=0.96, s=text, size=15, bbox=bboxArgs,
              ha="left", va="top", transform=axes.transAxes, **{"color": "w"})
    # if you leave out the "transform"-keyword, xpos and ypos will be
    # interpreted as data positions.
''')
| Fabian-Balzer/GUFY | GUFY/simgui_modules/scriptWriter.py | scriptWriter.py | py | 55,424 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PyQt5.QtWidgets.QDialog",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "simgui_modules.additionalWidgets.GUILogger.info",
"line_number": 50,
"usage_type": "call"
... |
11130591414 | import azure.functions as func
import logging
import json
from azure.data.tables import TableServiceClient
# send to congratiulation message queue
from azure.servicebus import ServiceBusClient, ServiceBusMessage
# Service Bus connection for the congratulation queue.
# SECURITY NOTE(review): the shared-access key is checked into source; it
# should be moved to application settings / Key Vault and rotated.
CONNECTION_STR = "Endpoint=sb://testbus-sk11.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=m0DutAhjDyytP+yxsmTfvevz4jtakRCfIbGsDq3fcQE="
# NOTE: the spelling ("congratiulaiton") must match the deployed queue name.
QUEUE_NAME = "congratiulaiton_message_queue"
def main(msg: func.ServiceBusMessage):
    """Service Bus queue trigger: persist one coffee-order message.

    The message body is JSON with at least 'cm_id' (coffee machine id),
    'cmc' (cumulative coffee count as a string), 'type' and 'amount'.
    Each message becomes one row in the 'coffeeData' Azure table, then
    every 50th coffee triggers a congratulation message
    (see check_every_50_coffee).
    """
    logging.info('Python ServiceBus queue trigger processed message.')
    result = msg.get_body().decode('utf-8')
    logging.info(result)
    resultDict = json.loads(result)
    logging.info(resultDict)
    cmid = resultDict["cm_id"]
    logging.info(type(resultDict))
    # One partition per machine, one row per cumulative coffee count.
    my_entity = {
        u'PartitionKey': "CM_"+cmid,
        u'RowKey': "CNT_"+resultDict["cmc"],
        u'type': resultDict["type"],
        u'amount': resultDict["amount"],
    }
    # SECURITY NOTE(review): the storage account key is hard-coded below;
    # move it to application settings / Key Vault and rotate the key.
    table_service_client = TableServiceClient.from_connection_string(conn_str="DefaultEndpointsProtocol=https;AccountName=orderstoragecoffee;AccountKey=/HlYJsI+3aFvQijcIYYfvoX3wVZD81RaXS9xW56xKJWPE/QbgHmDKy6mNwTfBrmtcmYw9Y1nxw6A2yM/yxnM3Q==;EndpointSuffix=core.windows.net")
    table_client = table_service_client.get_table_client(table_name="coffeeData")
    # Raises ResourceExistsError if the same (PartitionKey, RowKey) already
    # exists, which aborts the function run.
    table_client.create_entity(entity=my_entity)
    logging.info("before check every 50 coffee")
    check_every_50_coffee(cmid, resultDict["cmc"])
    logging.info("message queue succesfully executed")
def check_every_50_coffee(coffee_id, coffee_count):
    """Send *coffee_id* to the congratulation queue on every 50th coffee.

    Parameters:
        coffee_id: machine id; sent as the message body (stringified)
        coffee_count: cumulative coffee count (string or int)
    """
    logging.info(int(coffee_count) % 50 == 0)
    if int(coffee_count) % 50 == 0:
        servicebus_client = ServiceBusClient.from_connection_string(conn_str=CONNECTION_STR, logging_enable=True)
        # Context managers close the AMQP connection and the sender link;
        # the original leaked both.
        with servicebus_client:
            sender = servicebus_client.get_queue_sender(queue_name=QUEUE_NAME)
            with sender:
                message = ServiceBusMessage(str(coffee_id))
                sender.send_messages(message)
        logging.info("Sent a single message")
    return
{
"api_name": "azure.functions.ServiceBusMessage",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "azure.functions",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "logging.info",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "l... |
26614306732 | import operator
import functools
import logging
import elasticsearch_dsl
from elasticsearch import Elasticsearch
from jam import settings
from jam import exceptions
from jam.backends import query as queries
from jam.backends.base import Backend
logging.getLogger('elasticsearch').setLevel(logging.WARNING)
class ElasticsearchBackend(Backend):
    """Backend that stores documents in an Elasticsearch index.

    Keys map to document ids and document bodies are stored verbatim as the
    ES ``_source``. Query objects from ``jam.backends.query`` are translated
    into elasticsearch-dsl filters.
    """

    # One shared client so every backend instance reuses the same pool.
    DEFAULT_CONNECTION = Elasticsearch(settings.ELASTICSEARCH['URI'], request_timeout=settings.ELASTICSEARCH['TIMEOUT'])

    # Mapping template: every string under data.* also gets an unanalyzed
    # '.raw' sub-field (used for exact matching in _translate_query); all
    # other top-level strings are stored unanalyzed and kept out of _all.
    ES_MAPPING = {'dynamic_templates': [{
        'inner_data': {
            'path_match': 'data.*',
            'match_mapping_type': 'string',
            'mapping': {
                'type': 'string',
                'fields': {
                    'raw': {'type': 'string', 'index': 'not_analyzed'}
                }
            }
        }
    }, {
        'top_level': {
            'match': '*',
            'match_mapping_type': 'string',
            'mapping': {'type': 'string', 'index': 'not_analyzed', 'include_in_all': False}
        },
    }]}

    # TODO (maybe) store as dates rather than timestamps
    # }, {
    #     'dates': {
    #         'match': '*',
    #         'match_mapping_type': 'double',
    #         'mapping': {'type': 'date', 'include_in_all': False}
    #     }

    @classmethod
    def is_connected(cls):
        """Return True when ES is enabled in settings and reachable."""
        if not settings.ELASTICSEARCH['USE']:
            return False
        try:
            cls.DEFAULT_CONNECTION.cluster.health()
            return True
        except Exception:
            # Any connection/transport failure just means "not available".
            return False

    @classmethod
    def settings_for(cls, namespace_id, collection_id, type_):
        """Map a (namespace, collection, type) triple onto constructor kwargs."""
        return {
            'index': namespace_id,
            'doc_type': '{}-{}'.format(type_, collection_id),
        }

    def __init__(self, index, doc_type, connection=None):
        """Ensure the index exists (ignoring 'already exists' = HTTP 400)
        and install the dynamic mapping for this doc_type."""
        self._connection = connection or ElasticsearchBackend.DEFAULT_CONNECTION
        self._index = index
        self._doc_type = doc_type
        self._connection.indices.create(self._index, ignore=400)
        self._connection.indices.put_mapping(body={doc_type: self.ES_MAPPING}, index=index, doc_type=doc_type)
        self.search = elasticsearch_dsl.Search(self._connection, index=index, doc_type=doc_type)

    def get(self, key):
        """Return the stored document for *key* or raise exceptions.NotFound."""
        assert not key.startswith('_'), 'Elasticsearch keys may not being with _'
        res = self._connection.get(index=self._index, doc_type=self._doc_type, id=key, ignore=404)
        if res.get('status') == 404 or not res['found']:
            raise exceptions.NotFound(key)
        return res['_source']

    def keys(self):
        """Yield every document id; fields([]) avoids fetching the bodies."""
        return (x.meta['id'] for x in self.search.fields([]).execute().hits)

    def list(self, order=None):
        """Yield every document, optionally sorted, paging through all hits."""
        search = self.search
        if order:
            search = search.sort({
                order.key: {
                    'order': 'asc' if order.order > 0 else 'desc',
                    'unmapped_type': 'string'
                }
            })
        resp = search.execute()
        from_, size, total = 0, 10, resp.hits.total
        # 'from_' is an absolute offset into the result set. BUGFIX: the
        # loop previously tested 'from_ * size < total' and therefore
        # stopped after roughly total/size documents instead of all of them.
        while from_ < total:
            for hit in resp.hits.hits:
                yield hit['_source']
            from_ += len(resp.hits)
            resp = search[from_:from_ + size].execute()

    def set(self, key, data):
        """Insert or overwrite the document stored under *key*."""
        self._connection.index(index=self._index, doc_type=self._doc_type, id=key, body=data)

    def unset(self, key):
        """Delete the document stored under *key*."""
        self._connection.delete(index=self._index, doc_type=self._doc_type, id=key)

    def query(self, query, order=None, limit=None, skip=None):
        """Yield documents matching *query* with optional sort/limit/skip.

        Note: when no limit is given, at most 100 documents are returned.
        """
        search = self.search
        if order:
            search = search.sort({
                order.key: {
                    'order': 'asc' if order.order > 0 else 'desc',
                    'unmapped_type': 'string'
                }
            })
        search = search[skip or 0:(limit or 100) + (skip or 0)]
        if query:
            search = search.filter(self._translate_query(query))
        return (hit['_source'] for hit in search.execute().hits.hits)

    def count(self, query):
        """Return the number of documents matching *query* (all when falsy)."""
        search = self.search
        if query:
            search = search.filter(self._translate_query(query))
        return search.execute().hits.total

    def unset_all(self):
        """Delete every document of this backend's doc_type."""
        self._connection.delete_by_query(index=self._index, doc_type=self._doc_type, body={
            'query': {
                'match_all': {}
            }
        })

    def _translate_query(self, query):
        """Recursively translate a jam query object into an ES DSL filter."""
        if isinstance(query, queries.CompoundQuery):
            # Fold the sub-filters together: | for Or, & for And.
            return functools.reduce({
                queries.Or: operator.or_,
                queries.And: operator.and_
            }[query.__class__], [
                self._translate_query(q)
                for q in query.queries
            ])
        key = query.key
        if key.startswith('data.') and isinstance(query.value, str):
            # Exact matches on data fields must hit the unanalyzed sub-field.
            key += '.raw'
        return elasticsearch_dsl.F({
            queries.Equal: 'term'
        }[query.__class__], **{key: query.value})
| CenterForOpenScience/jamdb | jam/backends/elasticsearch.py | elasticsearch.py | py | 4,968 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "logging.WARNING",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "jam.backends.base.Backend",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "elas... |
20214118948 | from django.shortcuts import render
from django import forms
from django.core.files.storage import default_storage
from django.http import HttpResponse
from django.core.files.base import File, ContentFile
from django.http import HttpResponse
import markdown2
import random
import copy
from . import util
class NewTitleForm(forms.Form):
    """Single-field form holding the title of a new encyclopedia entry."""
    # strip=False preserves any leading/trailing whitespace the user typed.
    newt = forms.CharField(label="New Title", strip=False)
class NewArticleForm(forms.Form):
    """Single-field form holding the markdown body of an entry."""
    # strip=False preserves the article's leading/trailing whitespace.
    newa = forms.CharField(label="New Article", strip=False)
class NewSearchForm(forms.Form):
    """Single-field form holding the search query string."""
    q = forms.CharField(label="Search", strip=False)
def index(request):
    """Render the home page listing all encyclopedia entries."""
    return render(request, "encyclopedia/index.html", {
        "entries": util.list_entries()
    })
def entry(request, page):
    """Render a single entry, converting its markdown source to HTML."""
    return render(request, "encyclopedia/entry.html", {
        "page" : page,
        "content" : markdown2.markdown(util.get_entry(page))
    })
def create(request):
    """Render the form for creating a new entry."""
    return render(request, "encyclopedia/create.html")
def preview(request):
    """Render a preview of a new entry; warn when the title already exists
    (case-insensitively). Anything but a valid POST yields 403."""
    if request.method != "POST":
        return HttpResponse(status=403)
    title_form = NewTitleForm(request.POST)
    article_form = NewArticleForm(request.POST)
    if not (title_form.is_valid() and article_form.is_valid()):
        return HttpResponse(status=403)
    title = title_form.cleaned_data["newt"]
    article = article_form.cleaned_data["newa"]
    duplicate = any(title.lower() == str(name).lower()
                    for name in util.list_entries())
    # preview2.html carries the "title already exists" warning.
    template = ("encyclopedia/preview2.html" if duplicate
                else "encyclopedia/preview.html")
    return render(request, template, {
        "newtitle": title,
        "newarticle": article,
        "newarticlemd": markdown2.markdown(article)
    })
def save(request):
    """Persist a new entry unless a same-named (case-insensitive) entry
    already exists; in that case show the warning preview instead."""
    if request.method != "POST":
        return HttpResponse(status=403)
    title_form = NewTitleForm(request.POST)
    article_form = NewArticleForm(request.POST)
    if not (title_form.is_valid() and article_form.is_valid()):
        return HttpResponse(status=403)
    title = title_form.cleaned_data["newt"]
    article = article_form.cleaned_data["newa"]
    if any(title.lower() == str(name).lower() for name in util.list_entries()):
        # Duplicate title: do not overwrite, show the warning preview.
        return render(request, "encyclopedia/preview2.html", {
            "newtitle": title,
            "newarticle": article,
            "newarticlemd": markdown2.markdown(article)
        })
    # Entries are stored with a '#title' header followed by a blank line.
    util.save_entry(title, f"#{title} \n\n" + article)
    return render(request, "encyclopedia/saved.html", {
        "message": "added",
        "page": title
    })
def edit(request, page):
    """Show the edit form for *page*, pre-filled with its markdown body.

    The first two lines of the stored file (the '#title' header and the
    blank separator written by save()) are skipped so only the body is
    presented for editing.
    """
    # Context manager guarantees the storage file handle is closed even if
    # reading raises (the original closed it manually and could leak it).
    with default_storage.open(f"entries/{page}.md") as f:
        f.readline()  # '#<title>' header line
        f.readline()  # blank separator line
        article = f.read().decode("utf-8")
    return render(request, "encyclopedia/edit.html", {
        "page": page,
        "content": article,
        "contentmd": markdown2.markdown(util.get_entry(page))
    })
def prevedit(request, page):
    """Preview an edited article without saving it; 403 otherwise."""
    if request.method == "POST":
        form = NewArticleForm(request.POST)
        if form.is_valid():
            body = form.cleaned_data["newa"]
            return render(request, "encyclopedia/prevedit.html", {
                "page": page,
                "content": body,
                "contentmd": markdown2.markdown(body)
            })
    return HttpResponse(status=403)
def saveedit(request, page):
    """Save an edited article, re-adding the '#title' header; 403 otherwise."""
    if request.method == "POST":
        form = NewArticleForm(request.POST)
        if form.is_valid():
            body = form.cleaned_data["newa"]
            util.save_entry(page, f"#{page} \n\n" + body)
            return render(request, "encyclopedia/saved.html", {
                "message": "edited",
                "page": page
            })
    return HttpResponse(status=403)
def rand(request):
    """Render a randomly chosen encyclopedia entry."""
    return entry(request, page=random.choice(util.list_entries()))
def search(request):
    """Search entries: an exact (case-insensitive) match opens the entry
    directly, substring matches are listed, otherwise report no matches."""
    if request.method == "POST":
        form = NewSearchForm(request.POST)
        if form.is_valid():
            query = form.cleaned_data["q"].lower()
            partial = []
            for name in util.list_entries():
                name = str(name)
                if query == name.lower():
                    return entry(request, name)
                if query in name.lower():
                    partial.append(name)
            if partial:
                return render(request, "encyclopedia/results.html", {
                    "message": f"{len(partial)} match(es) found:",
                    "results": partial
                })
            return render(request, "encyclopedia/results.html", {
                "message": "No matches found"
            })
    return HttpResponse(status=403)
| ksondjaja/wiki | encyclopedia/views.py | views.py | py | 5,050 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.forms.Form",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.forms.CharField",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.for... |
12292682173 | #数据处理
import sys, pickle, os, random
import numpy as np
import gensim #add by wjn
## tags, BIO
# Mapping from BIO tag strings to the integer class labels used by the
# model: O marks tokens outside any entity; B-/I- mark the beginning and
# inside of a KNOW, PRIN or OTHER entity span.
tag2label = {"O": 0,
             "B-KNOW": 1, "I-KNOW": 2,
             "B-PRIN": 3, "I-PRIN": 4,
             "B-OTHER": 5, "I-OTHER": 6
             }
# Given the path of a train_data file, read the training corpus and return it as train_data.
def read_corpus(corpus_path):
    """Read a space-separated BIO corpus and return the list of samples.

    Each non-blank line must be '<char> <tag>'; blank lines separate
    sentences.

    :param corpus_path: path of the utf-8 corpus file
    :return: list of (chars, tags) tuples, one per sentence
    """
    data = []
    with open(corpus_path, encoding='utf-8') as fr:
        lines = fr.readlines()
    sent_, tag_ = [], []
    for line in lines:
        if line != '\n':
            [char, label] = line.replace('\n', '').split(' ')
            sent_.append(char)
            tag_.append(label)
        else:
            data.append((sent_, tag_))
            sent_, tag_ = [], []
    # BUGFIX: flush the final sentence when the file does not end with a
    # blank line (the original silently dropped it).
    if sent_:
        data.append((sent_, tag_))
    return data
#生成word2id序列化文件
def vocab_build(vocab_path, corpus_path, min_count):
    """
    Build a word->id vocabulary from the corpus and pickle it.

    Digits collapse to <NUM> and ASCII letters to <ENG>; words rarer than
    min_count (other than those sentinels) are dropped before contiguous
    ids are assigned. <UNK> gets the next id and <PAD> is always 0.

    :param vocab_path: destination pickle file
    :param corpus_path: training/test corpus path
    :param min_count: minimum frequency for a word to be kept
    :return: None
    """
    samples = read_corpus(corpus_path)
    # word -> [provisional id, frequency]
    counts = {}
    for chars, _tags in samples:
        for token in chars:
            if token.isdigit():
                token = '<NUM>'
            elif ('\u0041' <= token <= '\u005a') or ('\u0061' <= token <= '\u007a'):
                token = '<ENG>'
            if token in counts:
                counts[token][1] += 1
            else:
                counts[token] = [len(counts) + 1, 1]
    # Drop rare words, keeping the sentinels regardless of frequency.
    rare = [w for w, (_wid, freq) in counts.items()
            if freq < min_count and w != '<NUM>' and w != '<ENG>']
    for w in rare:
        del counts[w]
    # Reassign compact ids starting at 1 (insertion order is preserved).
    word2id = {}
    for new_id, w in enumerate(counts, start=1):
        word2id[w] = new_id
    word2id['<UNK>'] = len(word2id) + 1
    word2id['<PAD>'] = 0
    print(len(word2id))
    print('word2id:\n', word2id)
    # Serialize the mapping for later lookups.
    with open(vocab_path, 'wb') as fw:
        pickle.dump(word2id, fw)
#将句子中每一个字转换为id编号,例如['我','爱','中','国'] ==> ['453','7','3204','550']
def sentence2id(sent, word2id):
    """
    Map every character of a sentence to its vocabulary id, e.g.
    ['我','爱','中','国'] -> [453, 7, 3204, 550].

    Digits collapse to <NUM>, ASCII letters to <ENG>, and tokens missing
    from the vocabulary fall back to <UNK>.

    :param sent: iterable of single-character tokens
    :param word2id: token -> id mapping
    :return: list of ids
    """
    ids = []
    for token in sent:
        if token.isdigit():
            token = '<NUM>'
        elif ('\u0041' <= token <= '\u005a') or ('\u0061' <= token <= '\u007a'):
            token = '<ENG>'
        if token not in word2id:
            token = '<UNK>'
        ids.append(word2id[token])
    return ids
#读取word2id文件
def read_dictionary(vocab_path):
    """
    Load a pickled word->id mapping and report its size.

    :param vocab_path: path of the pickle produced by vocab_build
    :return: the word2id dict
    """
    with open(os.path.join(vocab_path), 'rb') as fr:
        # Deserialize the mapping written by vocab_build.
        word2id = pickle.load(fr)
    print('vocab_size:', len(word2id))
    return word2id
#随机嵌入
def random_embedding(vocab, embedding_dim):
    """
    Create a uniformly random embedding matrix in [-0.25, 0.25).

    :param vocab: vocabulary (only its length is used)
    :param embedding_dim: embedding vector size
    :return: float32 ndarray of shape (len(vocab), embedding_dim)
    """
    shape = (len(vocab), embedding_dim)
    return np.float32(np.random.uniform(-0.25, 0.25, shape))
def pad_sequences(sequences, pad_mark=0):
    """
    Right-pad every sequence to the length of the longest one.

    :param sequences: iterable of id sequences
    :param pad_mark: filler value for padding positions
    :return: (padded_lists, original_lengths)
    """
    max_len = max(len(s) for s in sequences)
    padded, lengths = [], []
    for s in sequences:
        s = list(s)
        padded.append(s[:max_len] + [pad_mark] * max(max_len - len(s), 0))
        lengths.append(min(len(s), max_len))
    return padded, lengths
def batch_yield(data, batch_size, vocab, tag2label, shuffle=False):
    """
    Yield (seqs, labels) mini-batches of id-encoded sentences.

    :param data: list of (chars, tags) samples
    :param batch_size: number of sentences per batch
    :param vocab: word -> id mapping
    :param tag2label: tag -> label-id mapping
    :param shuffle: when True, shuffle the sample list in place first
    """
    if shuffle:
        random.shuffle(data)
    batch_seqs, batch_labels = [], []
    for chars, tags in data:
        # Emit a full batch before adding the next sample to a fresh one.
        if len(batch_seqs) == batch_size:
            yield batch_seqs, batch_labels
            batch_seqs, batch_labels = [], []
        batch_seqs.append(sentence2id(chars, vocab))
        batch_labels.append([tag2label[t] for t in tags])
    # Final, possibly short, batch.
    if batch_seqs:
        yield batch_seqs, batch_labels
#add by 王嘉宁
#加载训练好的模型词向量,包括三类词向量w2v,glove,GWE
#add by wjn
def load_embeddings(embedding_dim, vocab, embedding_type):
    """Load pretrained word vectors (glove / word2vec / gwe) into an
    embedding matrix indexed by the ids in *vocab*.

    Rows for words absent from the pretrained model keep their random
    initialization; row 0 (the <PAD> id) is never overwritten.
    """
    # initial matrix with random uniform
    # Random normal scaled by 1/sqrt(vocab size) as the fallback embedding.
    initW = np.random.randn(len(vocab), embedding_dim).astype(np.float32) / np.sqrt(len(vocab))
    # load any vectors from the word2vec
    # print("Load glove file {0}".format(embedding_path))
    # NOTE(review): model files are expected under this fixed relative path;
    # the function raises if they are missing — confirm deployment layout.
    embedding_dir = './embeddings/'
    if embedding_type == "glove":
        # Text format: "<word> <v1> <v2> ..." per line.
        f = open(embedding_dir + 'wiki.zh.glove.Mode', 'r', encoding='utf8')
        for line in f:
            splitLine = line.split(' ')
            word = splitLine[0]
            embedding = np.asarray(splitLine[1:], dtype='float32')
            if word in vocab:
                idx = vocab[word]
                if idx != 0:  # keep <PAD> (id 0) untouched
                    initW[idx] = embedding
    elif embedding_type == "word2vec":
        model = gensim.models.Word2Vec.load(embedding_dir + 'wiki.zh.w2v.Mode')
        # NOTE(review): model.wv.vocab and model[word] are deprecated in
        # gensim 4.x (use model.wv.key_to_index / model.wv[word]) — confirm
        # the pinned gensim version.
        allwords = model.wv.vocab
        for word in allwords:
            embedding = np.asarray(model[word], dtype='float32')
            if word in vocab:
                idx = vocab[word]
                if idx != 0:
                    initW[idx] = embedding
    elif embedding_type == "gwe":
        # First line is a header; vectors are truncated/read as 300-dim.
        with open(embedding_dir + 'wiki.zh.GWE.mode','r',encoding="utf-8") as f:
            for line in f.readlines()[1:]:
                splitLine = line.split(' ')
                word = splitLine[0]
                embedding = np.asarray(splitLine[1:301], dtype='float32')
                if word in vocab:
                    idx = vocab[word]
                    if idx != 0:
                        initW[idx] = embedding
    return initW
{
"api_name": "pickle.dump",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "pickle.load",
"line_numbe... |
23070148342 | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 22 08:07:59 2016
@author: vijverbe
"""
from ecmwfapi import ECMWFDataServer
# Opens a connection to the ECMWF data server; credentials come from the
# user's ~/.ecmwfapirc (standard ecmwfapi behaviour).
server = ECMWFDataServer()
# Target lat/lon resolution in degrees.
grid = "1.125/1.125"
#server.retrieve({
#    'dataset' : "era5_test",
#    'stream' : "oper/enda", # 'oper' specifies the high resolution daily data, as opposed to monthly means, wave, eda edmm, etc.
#    'type' : "an", # We want instantaneous parameters, which are archived as type Analysis ('an') as opposed to forecast (fc)
#    'levtype' : "sfc", # Surface level, as opposed to pressure level (pl) or model level (ml)
#    'param' : "26", # For parameter codes see the ECMWF parameter database at http://apps.ecmwf.int/codes/grib/param-db
#    'grid' : grid, # The spatial resolution in ERA5 is 31 km globally on a Gaussian grid. Here we us lat/long with 0.25 degrees, which is approximately the equivalent of 31km.
#    'time' : "12:00:00", # ERA5 provides hourly analysis
#    'date' : "2016-01-01/to/2016-01-05",
#    'target' : "era5_test_2016-01-01to2016-01-05_hourly.grib" # Default output format is GRIB
#    })
#
# Download parameter 26 (lake cover) for a single analysis time as NetCDF.
server.retrieve({
    "class": "ei",
    "dataset": "era5_test",
    "date": "2016-01-01",
    "expver": "1",
    "grid": grid,
    "levtype": "sfc",
    "param": "26",
    "step": "0",
    "stream": "oper",
    "time": "12:00:00",
    "type": "an",
    "target": "lakemask.nc",
    'format' : "netcdf"
    })
#%%
from netCDF4 import Dataset
import matplotlib.pyplot as plt
# NOTE(review): 'cm' aliases the matplotlib *package*, so colormaps are
# reached below as cm.cm.coolwarm — works, but `from matplotlib import cm`
# would be clearer.
import matplotlib as cm
import numpy as np
path = '/nobackup/users/vijverbe/Download_scripts/lakemask.nc'
# Drop the singleton time dimension from the 'cl' (lake cover) variable.
lakemask = np.squeeze(Dataset(path, mode='r')['cl'][:,:,:])
latitude = Dataset(path, mode='r')['latitude'][:]
longitude = Dataset(path, mode='r')['longitude'][:]
#%% ################### GRIDCELL PLOT
from mpl_toolkits.basemap import Basemap
plt.figure() # nice projections are 'cea', 'cyl', 'eck4' and 'robin', however only 'cea' and 'cyl' allows for not showing the entire world
map = Basemap(projection='cyl',lat_0=0,lon_0=0,
              llcrnrlon=-180, llcrnrlat=-78,
              urcrnrlon=180, urcrnrlat=78)
map.drawcoastlines(linewidth=0.25)
map.drawcountries(linewidth=0.25)
x_shift,y_shift = np.meshgrid(longitude,latitude)
# Shift cell corners so pcolormesh draws cells centred on the grid points
# (presumably half a 1.125-degree cell plus a small offset — TODO confirm).
x = x_shift - 0.75
y = y_shift + 0.75
lol = map.pcolormesh(x,y,lakemask, cmap=cm.cm.coolwarm, latlon=True, vmin=0,vmax=1)
map.colorbar(lol,location='right',pad="5%", label = '(-)')
plt.savefig('lakemask_ERA5.PNG', format='PNG', dpi=200)
| ruudvdent/WAM2layersPython | dowload_scripts/download_lm.py | download_lm.py | py | 2,494 | python | en | code | 23 | github-code | 36 | [
{
"api_name": "ecmwfapi.ECMWFDataServer",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.squeeze",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "netCDF4.Dataset",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "netCDF4.Datase... |
28368810745 | import json
import os
import time
import logging
import glob
import csv
import os
import stat
import psycopg2
import sqlite3 as sql
delimiter='/'
environment = ''
bucket = ''
models = {}
def _write_pem(env_var, path):
    """Materialize PEM data from env var *env_var* into *path*, owner-only perms.

    The env value stores '=' as '@' (to survive environment plumbing), so it
    is translated back before writing.
    """
    contents = os.environ.get(env_var).replace('@', '=')
    with open(path, "w") as pem_file:
        pem_file.write(contents)
    # Bug fix: the original called os.chmod twice (S_IRUSR then S_IWUSR).
    # os.chmod sets the mode absolutely, so the second call left the file
    # write-only for the owner. OR the bits to get read+write.
    os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)


def getDBString_PROD():
    """Build the libpq connection string for the production Postgres instance.

    Writes the SSL root cert, client cert and client key from environment
    variables to files in '/', then returns a space-separated keyword string
    suitable for psycopg2.connect.
    """
    _write_pem('PG_SSLROOTCERT', "/server-ca.pem")
    _write_pem('PG_SSLCERT', "/client-cert.pem")
    _write_pem('PG_SSLKEY', "/client-key.pem")
    # Construct database connect string
    return " ".join([
        "sslmode=verify-ca",
        "sslrootcert=/server-ca.pem",
        "sslcert=/client-cert.pem",
        "sslkey=/client-key.pem",
        "hostaddr={}".format(os.environ.get('PG_HOST')),
        "user=postgres",
        "password={}".format(os.environ.get('PG_PASSWORD')),
        "dbname=postgres",
    ])
def init_db(environment):
    """Open a DB connection for *environment* and ensure the answers table.

    'PROD' connects to Cloud SQL Postgres via psycopg2; 'TEST' and 'LOCAL'
    use local sqlite files. Returns the open connection.
    """
    print('Inside init_db', environment)
    if environment == 'PROD':
        con = psycopg2.connect(getDBString_PROD())
    elif environment == 'TEST':
        con = sql.connect("questionAnswerTest.db")
    elif environment == 'LOCAL':
        con = sql.connect("questionAnswer.db")
    cursor = con.cursor()
    # Idempotent schema setup.
    cursor.execute('''CREATE TABLE IF NOT EXISTS answers
             (question text, context text, model text, answer text, timestamp int)''')
    return con
def saveData(data,environment):
    """Insert every item of data['data'] into the answers table.

    Each item must carry 'question', 'context' and 'answer' keys; the model
    name is hard-coded and all rows of one call share a single timestamp.
    Re-raises any exception after logging it.
    """
    print('inside saveData')
    try:
        con = init_db(environment)
        cur = con.cursor()
        # One ingestion timestamp for the whole batch.
        timestamp = int(time.time())
        for item in data['data']:
            print(item['question'])
            # NOTE(review): the ':name' parameter style is sqlite3-specific;
            # psycopg2 (the PROD branch) expects %(name)s placeholders —
            # confirm this path has been exercised against Postgres.
            cur.execute(
                "INSERT INTO answers VALUES (:question,:context,:model,:answer,:timestamp)",
                {'question': item['question']
                    , 'context': item['context']
                    , 'model': 'distilled-bert'
                    , 'answer': item['answer']
                    , 'timestamp': timestamp})
        # Redundant after init_db (which already creates the table), but
        # harmless because of IF NOT EXISTS.
        cur.execute('''CREATE TABLE IF NOT EXISTS answers
                 (question text, context text, model text, answer text, timestamp int)''')
        con.commit()
        con.close()
    except Exception as ex:
        print('Exception in saveData ' , ex)
        raise ex
def main():
    """Read answer JSON files emitted by the previous pipeline stage and
    persist them to the PROD database.

    Looks for *.json files under <cwd>/pfs/out (the Pachyderm output mount);
    logs and swallows any exception so the pipeline container exits cleanly.
    """
    print('Inside dataProcessor')
    environment = 'PROD'
    print('Inside dataProcessor --> environment', environment)
    try:
        # Bug fix: the original built the path with a literal backslash
        # ('\pfs\out'), which only resolves on Windows even though the
        # pipeline containers run Linux. os.path.join is portable.
        folder_name = os.path.join(os.getcwd(), 'pfs', 'out')
        if not os.path.exists(folder_name):
            print('Output folder from first pipeline not found')
        else:
            print('Output folder found')
            for file_name in [f for f in os.listdir(folder_name) if f.endswith('.json')]:
                with open(os.path.join(folder_name, file_name)) as json_file:
                    data = json.load(json_file)
                # Save to database
                saveData(data, environment)
    except Exception as ex:
        print('Exception Occurred in pipeline 02 --> ', ex)
if __name__ == "__main__":
logging.info('Inside main() --> Pipeline 02')
main()
| Diksha-cmd/Containerized-batch-pipeline-using-DockerHub-Pachyderm-Google-Cloud-Storage-Postgres-Cloud-Database | pipeline2/Pipeline_2.py | Pipeline_2.py | py | 4,047 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.environ.get",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "os.chmod",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "stat.S_IRUSR",
"line_numb... |
42779565833 | import argparse
import fastexcel
def get_args() -> argparse.Namespace:
    """Parse the command line: one positional argument, the Excel file path."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("file")
    return arg_parser.parse_args()
def main():
    """Smoke test: load every sheet of the given workbook into pandas."""
    parsed = get_args()
    workbook = fastexcel.read_excel(parsed.file)
    for name in workbook.sheet_names:
        workbook.load_sheet_by_name(name).to_pandas()
if __name__ == "__main__":
main()
| ToucanToco/fastexcel | test.py | test.py | py | 409 | python | en | code | 16 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "argparse.Namespace",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "fastexcel.read_excel",
"line_number": 14,
"usage_type": "call"
}
] |
24519178649 | import urllib
from urllib.request import urlopen
import requests
import cv2
import numpy as np
from PIL import Image as im
class CaptchaSolver:
    """Solve slider captchas by template-matching the puzzle-piece image
    against the background captcha image with OpenCV.

    Both inputs are image URLs; intermediate images are written to
    test1.png / test2.png in the working directory.
    """

    def __init__(self, captcha, captcha_key):
        # URL of the background captcha image.
        self.captcha = captcha
        # URL of the puzzle-piece (key) image.
        self.captcha_key = captcha_key

    def url_to_image(self):
        """Download both images and save them for OpenCV to read back."""
        captcha = requests.get(self.captcha)
        captcha_key = requests.get(self.captcha_key)
        with open("test1.png", "wb") as captcha_image:
            captcha_image.write(captcha.content)
        with open("test2.png", "wb") as captcha_key_image:
            captcha_key_image.write(captcha_key.content)

    def find_coordinates(self):
        """Return the x coordinate of the puzzle piece inside the captcha.

        Uses cv2.TM_CCOEFF template matching; the offset of the best match
        plus a fixed measurement-uncertainty correction is returned.
        """
        # run func and gen 2 images
        self.url_to_image()
        captcha = cv2.imread('test1.png', 0)  # 0 -> load as grayscale
        key = cv2.imread('test2.png', 0)
        # Bug fix: use the constant directly instead of eval('cv2.TM_CCOEFF');
        # eval on a string constant is needless and an unsafe pattern.
        # (Other methods such as cv2.TM_CCORR_NORMED could be tried here.)
        method = cv2.TM_CCOEFF
        # apply template matching
        match = cv2.matchTemplate(captcha, key, method)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(match)
        # result is a x-coordinate + measurement uncertainty
        return int(max_loc[0]) + 24
| claimclone/TikTokBot | captcha.py | captcha.py | py | 2,698 | python | en | code | null | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": ... |
14761703992 | '''
Description:
Author: caobin
Date: 2021-06-22
Github: https://github.com/bcao19
LastEditors: caobin
LastEditTime: 2021-06-28 23:20:20
'''
#!/home/ASIPP/caobin/anaconda3/bin/python
# -*-coding: UTF-8 -*-
"""
this module get the rho from efit or efitrt
type=1 rho from psi, type=2 rho from sqrt(rho), type=3 rho from r/a
caobin@ipp.ac.cn 2021/6/22
"""
import numpy as np
from east_mds import get_data as get
def read(shot, time, efit='efit_east'):
    """Fetch the EFIT psi grid for *shot* at the time slice closest to
    *time* and normalise it so rho = (psi - psi_axis) / (psi_bdry - psi_axis).

    :param shot: shot number
    :param time: requested time (s); nearest EFIT time slice is used
    :param efit: MDSplus tree name ('efit_east' or 'efitrt_east')
    :return: (r, z, rho) grids
    """
    efit_times = get.data1('gtime', shot, efit)
    # Index of the EFIT slice closest to the requested time.
    idx = np.argmin(abs(efit_times - time))
    r_grid = get.data1('r', shot, efit)
    z_grid = get.data1('z', shot, efit)
    psi_axis = get.data1('ssimag', shot, efit)[idx]
    psi = get.data1('psirz', shot, efit)[idx, :, :] - psi_axis
    psi_bdry = get.data1('ssibry', shot, efit)[idx] - psi_axis
    rho = psi / psi_bdry
    return r_grid, z_grid, rho
def getrho(shot, time, efit='efit_east'):
    """Return a cubic 2-D interpolator rho(r, z) built from the EFIT grid."""
    from scipy import interpolate
    r_grid, z_grid, rho = read(shot, time, efit)
    return interpolate.interp2d(r_grid, z_grid, rho, kind='cubic')
if __name__ == '__main__':
    # Interactive driver: prompt for shot/time/tree and plot labelled
    # contours of normalised rho in the (R, Z) plane.
    import matplotlib.pyplot as plt
    shot = input("input shot: ")
    shot = int(shot)
    time = input("input the time: ")
    time = float(time)
    efit = input("input efit tree: ")
    if efit=="":
        efit = 'efit_east'  # default to the offline EFIT tree
    [r, z, rho] = read(shot, time, efit)
    C = plt.contour(r, z, rho, 20)
    plt.clabel(C, inline=True, fontsize=12)
    ax = plt.gca()
    ax.set_aspect(1)  # equal aspect so the flux surfaces are not distorted
    plt.show()
| bcao19/my-python-code | east_mds/get_rho.py | get_rho.py | py | 1,588 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "east_mds.get_data.data1",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "east_mds.get_data",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "numpy.argmin",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "east_mds.get_d... |
28890119301 | #! /usr/bin/python
"""A script to push a new Pytype release to PyPI.
This script assumes that you have twine installed. The easiest way to run this
script is to run from inside of a virtualenv after "pip" installing "twine".
Also, this virtualenv should not have pytype installed already.
USAGE:
$> python release.py --mode=<TEST|RELEASE>
If mode is "TEST", then the release will be pushed to testpypi. If the mode is
"RELEASE", then the release is pushed to pypi.
"""
import argparse
import os
import shutil
import sys
import build_utils
TEST_MODE = "TEST"
RELEASE_MODE = "RELEASE"
class ReleaseError(Exception):
  """Raised when a step of the release process fails."""

  def __init__(self, msg):
    # Pass msg to the base class so str(exc) carries the failure detail; the
    # original called super().__init__() with no args, making str(exc) empty.
    super().__init__(msg)
    self.msg = msg
def parse_args():
  """Parse and return the command line args."""
  allowed_modes = (TEST_MODE, RELEASE_MODE)
  help_text = ("Specify if the release should be uploaded to pypi or testpyi. "
               "Can take a value of %s or %s" % allowed_modes)
  parser = argparse.ArgumentParser()
  parser.add_argument("-m", "--mode", type=str, default=f"{TEST_MODE}",
                      help=help_text)
  args = parser.parse_args()
  if args.mode not in allowed_modes:
    sys.exit(f"Invalid --mode option. Should be one of {allowed_modes}")
  return args
def verify_no_pytype_installation_exists():
  """Exit with an error when pytype is importable in this environment."""
  try:
    import pytype as _  # pylint: disable=g-import-not-at-top
  except ImportError:
    # No pytype found - exactly what we want for a clean release build.
    return
  sys.exit("ERROR: Pytype installation detected; Run this script from inside "
           "a virtualenv without a pytype installation.")
def verify_pypirc_exists():
  """Abort unless the user has a ~/.pypirc credentials file for twine."""
  home = os.path.expanduser("~")
  if not os.path.exists(os.path.join(home, ".pypirc")):
    sys.exit("ERROR: '.pypirc' file not found.")
def check_if_version_is_ok():
  """Prompt the user to confirm that the version in __version__.py is OK."""
  sys.path.append(build_utils.PYTYPE_SRC_ROOT)
  version_mod = __import__("pytype.__version__", fromlist=["pytype"])
  version = getattr(version_mod, "__version__")
  answer = input("Making a release with version %s; Continue? " % version)
  if answer not in ["y", "Y", "yes", "YES"]:
    sys.exit("Aborting release.")
def upload_package(package_path, test=False):
  """Upload everything under *package_path* with twine.

  Targets testpypi when *test* is true; raises ReleaseError on failure.
  """
  twine_cmd = ["twine", "upload"]
  if test:
    twine_cmd += ["--repository", "testpypi"]
  twine_cmd.append(os.path.join(package_path, "*"))
  print(f"Uploading: {twine_cmd}")
  returncode, stdout = build_utils.run_cmd(twine_cmd)
  if returncode != 0:
    raise ReleaseError(f"Package upload failed:\n{stdout}")
class DistributionPackage:
  """Context manager to build the pytype distribution package."""

  def __enter__(self):
    """Run `setup.py sdist` and return the path of the dist directory."""
    sdist_cmd = ["python", "setup.py", "sdist"]
    print(f"Creating distribution package: {sdist_cmd}\n")
    returncode, stdout = build_utils.run_cmd(sdist_cmd)
    if returncode != 0:
      raise ReleaseError(f"Running {sdist_cmd} failed:\n{stdout}")
    # sdist writes the package into "dist" under the source root.
    self.dist_path = os.path.join(build_utils.PYTYPE_SRC_ROOT, "dist")
    return self.dist_path

  def __exit__(self, exc_type, exc_val, exc_tb):
    """Clean up the build artifacts; never swallow exceptions."""
    print("Deleting the distribution directory ...\n")
    shutil.rmtree(self.dist_path)
    print("Deleting the metadata directory ...\n")
    shutil.rmtree(os.path.join(build_utils.PYTYPE_SRC_ROOT, "pytype.egg-info"))
    return False
def main():
  """Run all pre-release checks, then build and upload the package."""
  args = parse_args()
  verify_no_pytype_installation_exists()
  check_if_version_is_ok()
  verify_pypirc_exists()
  try:
    with DistributionPackage() as pkg_path:
      upload_package(pkg_path, args.mode == TEST_MODE)
  except ReleaseError as err:
    sys.exit(f">>> Release Failed <<<\n{err.msg}")
  print("!!! Release Successful !!!\n")
if __name__ == "__main__":
main()
| google/pytype | build_scripts/release.py | release.py | py | 3,784 | python | en | code | 4,405 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_nu... |
25259786916 | ### Setup
import pandas as pd
from splinter import Browser
from bs4 import BeautifulSoup as bsp
from webdriver_manager.chrome import ChromeDriverManager
import time
# Setup splinter
# NOTE: this runs at import time — ChromeDriverManager().install() downloads
# chromedriver (network access) and a visible Chrome window is opened
# (headless=False) before scrape() is ever called.
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)
def scrape():
    """Scrape Mars news, the featured JPL image, Mars facts and hemisphere
    images, returning them in a single dict (used by the Flask/Mongo app).

    Relies on the module-level `browser`; each site visit sleeps 2 s to let
    the pages render before parsing.
    """
    ### NASA Mars News
    # Get to website
    url = 'https://mars.nasa.gov/news/'
    browser.visit(url)
    time.sleep(2)
    # Soup and find content
    html = browser.html
    soup = bsp(html, 'html.parser')
    content = soup.find("div", class_="image_and_description_container")
    news_title = content.find("div", class_ = "content_title").text
    news_p = content.find("div", class_ = "article_teaser_body").text
    # NOTE(review): bare expression below is a no-op leftover (news_title is
    # also scraped but not returned) — confirm intended payload.
    news_p
    ### JPL Mars Space Images - Featured Image
    # Get to website
    url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
    browser.visit(url)
    time.sleep(2)
    # Click through to arrive at the correct destination
    browser.find_by_id("full_image").click()
    time.sleep(2)
    browser.links.find_by_partial_text("more info").click()
    time.sleep(2)
    # Soup and find content
    html = browser.html
    soup = bsp(html, 'html.parser')
    base_url = "https://www.jpl.nasa.gov"
    img = soup.find("img", class_ ="main_image")["src"]
    featured_image_url = base_url + img
    print(featured_image_url)
    ### Mars Facts
    # Get to website
    url = 'https://space-facts.com/mars/'
    browser.visit(url)
    # Read all HTML tables; the first one holds the planet profile.
    tables = pd.read_html(url)
    tables[0]
    description_df = tables[0].rename(columns = {0:'Labels', 1:'Measurement'})
    mars_facts = description_df.to_html(classes = "table table-striped")
    ### Mars Hemispheres
    # Get to website
    url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
    browser.visit(url)
    hemisphere_image_urls = []
    # Click through each of the four hemisphere pages, scrape, then go back.
    for i in range(4):
        browser.find_by_tag("h3")[i].click()
        html = browser.html
        soup = bsp(html, 'html.parser')
        content = soup.find("div", id ="wide-image")
        data = {
            "img_url":content.find("a")["href"],
            "title":soup.find("h2", class_ = "title").text
        }
        hemisphere_image_urls.append(data)
        browser.back()
    hemisphere_image_urls
    # Store data into a dictionary
    mars_data = {
        "news_p": news_p,
        "featured_image_url": featured_image_url,
        "mars_facts": mars_facts,
        "hemisphere_image_urls": hemisphere_image_urls
    }
    #Return results
    return mars_data
| Robert-A-Norris/web-scraping-challenge | Mission_to_Mars/scrape_mars.py | scrape_mars.py | py | 2,718 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "webdriver_manager.chrome.ChromeDriverManager",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "splinter.Browser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 24,
"usage_type": "call"
},
{
"api_name... |
29370556151 | import discord
from redbot.core import commands, Config, app_commands
import asyncio
import aiohttp
from steam.steamid import SteamID
from datetime import datetime
class SteamAPI(commands.Cog):
    """Search for games and player profiles.
    Grab your Steam [API Key](https://steamcommunity.com/dev/apikey).
    Use the command `[p]setsteamapikey <key here>` to set it."""
    __version__ = "1.0.1"
    def __init__(self, bot):
        # Red-DiscordBot config: one global key shared by the whole bot.
        self.bot = bot
        self.config = Config.get_conf(self, identifier=67485765, force_registration=True)
        default_global = {"steam_api_key": None}
        self.config.register_global(**default_global)
    async def resolve_vanity_url(self, ctx, custom_url):
        """Resolve a Steam custom/vanity URL name to a SteamID64 string.

        Raises ValueError on a non-200 API response; returns None when the
        API reports no match.
        """
        steam_api_key = await self.config.steam_api_key()
        url = f"http://api.steampowered.com/ISteamUser/ResolveVanityURL/v0001/?key={steam_api_key}&vanityurl={custom_url}"
        async with aiohttp.ClientSession() as session:
            async with session.get(url) as response:
                if response.status != 200:
                    raise ValueError("Failed to resolve Steam vanity URL.")
                data = await response.json()
                return data.get('response', {}).get('steamid')
    async def get_steamid64(self, ctx, identifier):
        """Normalise an identifier (SteamID64, STEAM_x:y:z, or vanity name)
        to a SteamID64 string."""
        if identifier.isdigit():
            # Already a numeric SteamID64.
            return identifier
        elif identifier.startswith('STEAM_'):
            steam_id = SteamID(identifier)
            return str(steam_id.as_64)
        else:
            return await self.resolve_vanity_url(ctx, identifier)
    @commands.command()
    @commands.guild_only()
    @commands.has_permissions(administrator=True)
    async def setsteamapikey(self, ctx, key: str):
        """Set the Steam API key for this guild (Server Owner Only)"""
        # The key is deleted from chat right away so it is not left visible.
        if not ctx.channel.permissions_for(ctx.guild.me).manage_messages:
            await ctx.send("I do not have permissions to delete messages in this channel.")
            return
        await self.config.steam_api_key.set(key)
        confirmation_message = await ctx.send("Steam API key has been set for this bot.")
        await ctx.message.delete()
        await asyncio.sleep(5)
        await confirmation_message.delete()
    @app_commands.command(description="Search for user profiles on the Steam database.")
    async def steamprofile(self, interaction: discord.Interaction, identifier: str):
        """Search for user profiles on the steam database."""
        STEAM_API_KEY = await self.config.steam_api_key()
        if not STEAM_API_KEY:
            await interaction.response.send_message("The Steam API key has not been set. Please set it using the `[p]setsteamapikey` command.", ephemeral=True)
            return
        try:
            steam_id64 = await self.get_steamid64(interaction, identifier)
        except ValueError as e:
            await interaction.response.send_message(str(e), ephemeral=True)
            return
        if steam_id64 is None:
            await interaction.response.send_message("Invalid identifier. Please provide a valid SteamID64, SteamID, or custom URL.", ephemeral=True)
            return
        # Two Web API calls on one session: profile summary, then ban record.
        async with aiohttp.ClientSession() as session:
            async with session.get(f"http://api.steampowered.com/ISteamUser/GetPlayerSummaries/v0002/?key={STEAM_API_KEY}&steamids={steam_id64}") as response:
                if response.status != 200:
                    await interaction.response.send_message("Failed to get player summaries.", ephemeral=True)
                    return
                data = await response.json()
            async with session.get(f"http://api.steampowered.com/ISteamUser/GetPlayerBans/v1/?key={STEAM_API_KEY}&steamids={steam_id64}") as response:
                if response.status != 200:
                    await interaction.response.send_message("Failed to get player bans.", ephemeral=True)
                    return
                ban_data = await response.json()
        if data and "response" in data and "players" in data["response"] and len(data["response"]["players"]) > 0:
            player = data["response"]["players"][0]
            ban_info = ban_data["players"][0] if "players" in ban_data else None
            if 'timecreated' in player:
                # NOTE(review): datetime.utcfromtimestamp/utcnow are
                # deprecated since Python 3.12 — consider tz-aware versions.
                account_creation_date = datetime.utcfromtimestamp(player['timecreated'])
                account_age = (datetime.utcnow() - account_creation_date).days // 365
            else:
                account_creation_date = None
                account_age = "Unknown"
            steam_id = SteamID(int(steam_id64))
            embed = discord.Embed(
                title=player['personaname'],
                url=f"https://steamcommunity.com/profiles/{steam_id64}",
                color=discord.Color.blue()
            )
            embed.set_thumbnail(url=player['avatarfull'])
            embed.add_field(name="Profile Info", value=f"**Name:** {player.get('realname', 'Unknown')}\n**Country:** {player.get('loccountrycode', 'Unknown')}\n**Account Age:** {account_age} years", inline=True)
            embed.add_field(name="SteamID", value=f"**SteamID:** {steam_id.as_steam2}\n**SteamID3:** [U:1:{steam_id.as_32}]\n**SteamID64:** {steam_id64}", inline=True)
            if ban_info is not None:
                ban_info_str = f"**VAC Banned:** {ban_info['VACBanned']}\n"
                ban_info_str += f"**Bans:** {ban_info['NumberOfVACBans']} (Last: {ban_info['DaysSinceLastBan']} days ago)\n"
                ban_info_str += f"**Trade Banned:** {ban_info['EconomyBan']}"
                embed.add_field(name="Ban Info", value=ban_info_str, inline=True)
            embed.set_footer(text="Powered by Steam")
            await interaction.response.send_message(embed=embed)
        else:
            await interaction.response.send_message("Unable to fetch the player information.", ephemeral=True)
    @app_commands.command(description="Search for games on the Steam database.")
    async def steamgame(self, interaction: discord.Interaction, game_name: str):
        """Search for games on the steam database."""
        # First hit the store search endpoint, then fetch full app details
        # for the top result.
        url = f"https://store.steampowered.com/api/storesearch/?cc=us&l=en&term={game_name}"
        async with aiohttp.ClientSession() as session:
            async with session.get(url) as response:
                data = await response.json()
        if data and data.get('total') > 0:
            appid = data['items'][0]['id']
            async with aiohttp.ClientSession() as session:
                async with session.get(f"https://store.steampowered.com/api/appdetails?appids={appid}&cc=us") as response:
                    data = await response.json()
            if str(appid) in data and data[str(appid)]['success']:
                game_info = data[str(appid)]['data']
                embed = discord.Embed(
                    title=game_info['name'],
                    url=f"https://store.steampowered.com/app/{appid}",
                    color=discord.Color.blue()
                )
                embed.set_image(url=game_info['header_image'])
                # Discord caps embed field values at 1024 characters.
                about_the_game = game_info['short_description']
                if len(about_the_game) > 1024:
                    about_the_game = about_the_game[:1021] + "..."
                embed.add_field(name="About This Game", value=about_the_game, inline=False)
                embed.add_field(name="App ID", value=appid, inline=True)
                embed.add_field(name="Release Date", value=game_info['release_date']['date'], inline=True)
                embed.add_field(name="Price", value=f"{game_info['price_overview']['final_formatted'] if 'price_overview' in game_info else 'Free'}", inline=True)
                # NOTE(review): "Release Date" is added twice — presumably a
                # copy/paste slip; confirm before removing.
                embed.add_field(name="Release Date", value=game_info['release_date']['date'], inline=True)
                embed.add_field(name="Publisher", value=", ".join(game_info['publishers']), inline=True)
                embed.add_field(name="Developer", value=", ".join(game_info['developers']), inline=True)
                # Zero-width-space filler fields keep the embed grid aligned.
                embed.add_field(name="\u200b", value="\u200b", inline=True)
                embed.add_field(name="\u200b", value="\u200b", inline=True)
                embed.add_field(name="\u200b", value="\u200b", inline=True)
                embed.set_footer(text="Powered by Steam")
                await interaction.response.send_message(embed=embed)
            else:
                await interaction.response.send_message("Unable to fetch the game information.", ephemeral=True)
        else:
            await interaction.response.send_message("Game not found.", ephemeral=True)
{
"api_name": "redbot.core.commands.Cog",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "redbot.core.commands",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "redbot.core.Config.get_conf",
"line_number": 18,
"usage_type": "call"
},
{
"api_... |
4165802056 | # ONLY EDIT FUNCTIONS MARKED CLEARLY FOR QUESTIONS 1 AND 2. DO NOT CHANGE ANY METHOD SIGNATURES OR THE RUNALL METHOD
from flask import Flask, request, jsonify
import json
from time import clock
app = Flask(__name__)
# IMPORTANT: DO NOT CHANGE THIS FUNCTION UNDER ANY CIRCUMSTANCES
@app.route('/runall', methods=['POST'])
def runall():
    # Request body is a JSON list of q1 inputs; each is run and timed.
    q1inputs = request.data
    # q2inputs = request.args.get('q2inputs')
    q1inputs = json.loads(q1inputs)
    # Response shape: [[(output, seconds), ...], []] — second list reserved
    # for question 2 results.
    response = jsonify([[runq1(q1input) for q1input in q1inputs], []])
    # Allow browser-based harnesses on any origin to call this endpoint.
    response.headers.add('Access-Control-Allow-Origin', '*')
    return response
def runq1(q1input):
    """Run findbestprofit on one input pair and time it.

    :param q1input: two-element sequence (pricesarray, k)
    :return: (output, elapsed_seconds)
    """
    # Bug fix: time.clock was removed in Python 3.8; perf_counter is the
    # modern high-resolution timer for measuring short durations.
    from time import perf_counter
    start = perf_counter()
    output = findbestprofit(q1input[0], q1input[1])
    end = perf_counter()
    diff = end - start
    return output, diff
# Space for question 1
def findbestprofit(pricesarray, k):
    # Intentional stub: challenge entrants implement "max profit with at
    # most k stock transactions" here. -1 signals "not implemented".
    return -1
if __name__ == '__main__':
app.run(port=8182)
| pythoncodingchallenge/skeleton-repo | vcc-skeleton.py | vcc-skeleton.py | py | 887 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.request.data",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "json.loads",
"li... |
72215136745 | # Utilities to parse alignment files from Schwartz Lab SOMA
# *and* to write alignments in Schwartz lab SOMA format
#
# We use functions and classes defined here both to convert
# from maligner to SOMA and from SOMA to maligner.
#
# This module could be better organized!
# Perhaps separate modules for parsing and writing?
#################################################
import lxml.etree as ET
from lxml.etree import Element, ElementTree
from copy import copy, deepcopy
from ..common import wrap_file_function
from ..core.maligner_dp_alignments import (Alignment as MAlignment, Chunk as MChunk)
##################################################################
# Helper classes and functions for parsing generic XML nodes
def smart_bool(s):
    """Flexibly interpret a string as a boolean.

    Any non-empty string whose first character is 't' or 'T' is True;
    everything else (including None and the empty string) is False.
    """
    return bool(s) and s.upper()[0] == 'T'
class SimpleField(object):
    """Node parser that stores a converted copy of a node's text under its tag."""

    def __init__(self, converter=str):
        # converter maps the node's text to the value stored in the data dict
        self.converter = converter

    def __call__(self, n, data):
        """Parse node n and record the converted text under data[n.tag]."""
        value = self.converter(n.text)
        data[n.tag] = value
class SmartBool(SimpleField):
    # SimpleField specialization whose converter is smart_bool, so node text
    # like "True"/"t"/"TRUE" becomes True and anything else becomes False.
    def __init__(self):
        SimpleField.__init__(self, smart_bool)
class ListNodeField(object):
    """Node parser that accumulates converted nodes into a list.

    Repeated nodes are appended, in document order, to a list stored under
    data_key (or under the node's own tag when data_key is None). Unlike
    SimpleField, the converter receives the node itself, not its text.
    """

    def __init__(self, converter=str, data_key=None):
        self.converter = converter
        self.data_key = data_key

    def __call__(self, n, data):
        key = self.data_key or n.tag
        data.setdefault(key, []).append(self.converter(n))
def MakeTextElement(tag, text=None):
    """Create an lxml Element with the given tag, optionally setting its text."""
    element = Element(tag)
    if text is not None:
        element.text = text
    return element
class Blob(object):
    # Empty attribute holder: callers attach arbitrary fields at runtime.
    pass
################################################################
# Helper functions for parsing alignment nodes
def _parse_reference_map_field(n, data):
    # Store the reference map's <name> child text under the node's tag,
    # e.g. data['reference_map'] = 'chr1'.
    data[n.tag] = n.find('name').text

def _parse_aligned_map(n, data):
    # SOMA encodes orientation as "N" (normal) / "R" (reverse); translate to
    # the maligner "F"/"R" convention and record the aligned map's name.
    orientation = n.find('orientation').text
    orientation = "F" if orientation == "N" else "R"
    data.update({
        n.tag: n.find('name').text,
        'orientation': orientation
    })
def _parse_f(n):
d = dict((c.tag, int(c.text)) for c in n)
return (d['i'], d['l'], d['r'])
################################################################
# Classes representing parsed nodes
class NodeClass(object):
    """Base class for objects built from an XML node.

    Subclasses register per-tag parser callables in _node_parsers; any child
    tag without a registered parser is stored as its raw text. All parsed
    keys become attributes on the instance.
    """

    # Register parsers for child nodes of this node by
    # setting _node_parsers in a subclass of NodeClass.
    _node_parsers = {}

    @classmethod
    def from_node(cls, n, parsers=None):
        """Build a new instance of the class
        from the node n. If supplied, use the parsers
        provided. Otherwise, use the class's _node_parsers.
        """
        ret = cls()
        ret.node = n
        ret._from_node(n, parsers)
        return ret

    def _from_node(self, n, parsers=None):
        """Set data extracted from node n.
        If supplied, use the parsers provided. Otherwise
        use the instance's _node_parsers.
        """
        data = {}
        if parsers is None:
            parsers = self._node_parsers
        for c in n:
            if c.tag in parsers:
                # BUG FIX: previously looked the parser up in
                # self._node_parsers even when an explicit `parsers` mapping
                # was passed, raising KeyError for tags only present there.
                parsers[c.tag](c, data)
            else:
                data[c.tag] = c.text
        # BUG FIX: dict.iteritems() is Python 2 only; items() works everywhere.
        for k, v in data.items():
            setattr(self, k, v)
class RestrictionMap(NodeClass):
    """
    Restriction map parsed from a <restriction_map> node of a SOMA XML file.
    """
    # map_block is a whitespace-separated list of fragment sizes — presumably
    # in kb, given the *1000 conversion below; circular flags a circular map.
    _node_parsers = {
        'map_block' : SimpleField(lambda s: [float(b) for b in s.split()]),
        'circular' : SmartBool()
    }

    @classmethod
    def from_node(cls, n):
        # Other attributes (name, type, ...) are set dynamically by
        # _from_node from the node's remaining child tags.
        self = cls()
        self.node = n
        self._from_node(n) # Use the _node_parsers
        # Add additional attributes: fragment sizes converted from kb to bp.
        self.frags = [int(1000*f) for f in self.map_block]
        return self

    @property
    def nfrags(self):
        # Number of fragments in the map.
        return len(self.frags)

    @property
    def length(self):
        """Length in bp"""
        return sum(self.frags)

    def write_as_maligner(self, f):
        # Emit one tab-delimited maligner map record:
        # name, length (bp), fragment count, then each fragment size.
        s = '{0}\t{1}\t{2}\t{3}'.format(self.name, self.length, self.nfrags,
            '\t'.join(str(frag) for frag in self.frags))
        f.write(s + '\n')
class Alignment(NodeClass):
    """A <map_alignment> record from a SOMA alignments file.

    After from_node(), call build_chunks() and then add_map_data() to derive
    the maligner-style chunk lists and alignment statistics.
    """

    # These fields are populated by cls.from_node():
    uuid = None
    reference_map = None
    aligned_map = None
    soma_score = None
    count = None
    f = []

    _node_parsers = {
        'uuid' : SimpleField(),
        'reference_map': _parse_reference_map_field,
        'aligned_map': _parse_aligned_map,
        'soma_score' : SimpleField(float),
        'count' : SimpleField(int),
        'f' : ListNodeField(_parse_f)
    }

    def build_chunks(self):
        # Convert SOMA chunk indices (self.f) to MatchedChunk objects plus a
        # merged copy following the maligner convention; the first and last
        # chunks of each list are flagged as boundary chunks.
        self.chunks = [MatchedChunk(*i) for i in self.f]
        self._chunks_merged = merge_chunks_list(self.chunks)
        self.chunks[0].is_boundary = True
        self.chunks[-1].is_boundary = True
        self._chunks_merged[0].is_boundary = True
        self._chunks_merged[-1].is_boundary = True

    # Add map data to the alignment so we can compute other
    # alignment characteristics.
    def add_map_data(self, query_map, ref_map):
        """Attach the query/reference RestrictionMaps and derive statistics:
        chunk lengths, miss counts/rates, aligned fractions, the query
        scaling factor, and maligner-oriented chunk copies."""
        qm, rm = query_map, ref_map
        qfrags = qm.map_block
        # Orient query fragments in the reverse direction to
        # assist with determining chunk lengths
        if self.orientation == 'R':
            qfrags = qfrags[::-1]
        # Assign query lengths and ref lengths to the chunks;
        # map_block is in kb, so multiply by 1000 for bp.
        for c in self.chunks:
            c.ql = int(1000*sum(qfrags[c.q_s:c.q_e]))
            c.rl = int(1000*sum(rm.map_block[c.r_s:c.r_e]))
        for c in self._chunks_merged:
            c.ql = int(1000*sum(qfrags[c.q_s:c.q_e]))
            c.rl = int(1000*sum(rm.map_block[c.r_s:c.r_e]))
        self.query_map = self.aligned_map
        self.num_query_frags = len(qfrags)
        # NOTE(review): under Python 3, zip() is a one-shot iterator, so the
        # second all() below would iterate an exhausted sequence and
        # has_ref_gap would always be False — wrap in list() when porting.
        cpairs = zip(self._chunks_merged[:-1], self._chunks_merged[1:])
        self.has_query_gap = not all(cl.q_e == cr.q_s for cl,cr in cpairs)
        self.has_ref_gap = not all(cl.r_e == cr.r_s for cl,cr in cpairs)
        qs = self._chunks_merged[0].q_s
        rs = self._chunks_merged[0].r_s
        qe = self._chunks_merged[-1].q_e
        re = self._chunks_merged[-1].r_e
        self.num_matched_chunks = len(self._chunks_merged)
        self.num_frags_aligned = qe - qs
        self.query_start = qs
        self.query_end = qe
        self.ref_start = rs
        self.ref_end = re
        self.num_query_frags_aligned = qe - qs
        self.trim_left = qs
        self.trim_right = self.num_query_frags - qe
        # NOTE(review): query_length is in map_block units (kb) while
        # query_length_aligned below is in bp — frac_length_aligned mixes the
        # two scales; confirm intended units.
        self.query_length = sum(qfrags)
        interior_chunks = [c for c in self._chunks_merged if not c.is_boundary]
        self.query_length_aligned = sum(c.ql for c in self._chunks_merged)
        self.query_length_aligned_interior = sum(c.ql for c in interior_chunks)
        self.ref_length_aligned = sum(c.rl for c in self._chunks_merged)
        self.ref_length_aligned_interior = sum(c.rl for c in interior_chunks)
        self.query_scaling_factor = float(self.ref_length_aligned_interior)/float(self.query_length_aligned_interior)
        self.frac_length_aligned = float(self.query_length_aligned)/self.query_length
        self.frac_frags_aligned = float(qe - qs)/self.num_query_frags
        self.query_misses = sum(c.query_misses for c in self._chunks_merged)
        self.ref_misses = sum(c.ref_misses for c in self._chunks_merged)
        self.matched_sites = len(self._chunks_merged)+1
        self.query_miss_rate = float(self.query_misses)/(self.query_misses + self.matched_sites)
        self.ref_miss_rate = float(self.ref_misses)/(self.ref_misses + self.matched_sites)
        # Orient chunks for maligner: maligner indexes the query in its
        # original (forward) coordinates, so mirror reverse alignments.
        self.chunks_maligner = deepcopy(self._chunks_merged)
        if self.orientation == "R":
            nqf = self.num_query_frags
            for cm, cs in zip(self.chunks_maligner, self._chunks_merged):
                cm.q_s = nqf - cs.q_e
                cm.q_e = nqf - cs.q_s
        self.chunks_maligner_rescaled = deepcopy(self.chunks_maligner)
        # Rescale the query chunk lengths by the scaling factor; keep integers.
        for c in self.chunks_maligner_rescaled:
            c.rl = int(c.rl)
            c.ql = int(c.ql * self.query_scaling_factor)

    def compute_maligner_sizing_error(self, sd_rate, min_sd ):
        # Chi-squared sizing error per chunk: ((ql - rl)/sd)^2 with
        # sd = max(sd_rate*rl, min_sd); boundary chunks are excluded from the
        # summed scores below.
        for c in self.chunks_maligner_rescaled:
            csd = max(sd_rate * c.rl, min_sd)
            delta = c.ql - c.rl
            chi2 = (delta/csd)**2
            c.chi2 = chi2
            c.sd = csd
        for c in self.chunks_maligner:
            csd = max(sd_rate * c.rl, min_sd)
            delta = c.ql - c.rl
            chi2 = (delta/csd)**2
            c.chi2 = chi2
            c.sd = csd
        self.sizing_score = sum(c.chi2 if not c.is_boundary else 0 for c in self.chunks_maligner)
        self.sizing_score_rescaled = sum(c.chi2 if not c.is_boundary else 0 for c in self.chunks_maligner_rescaled)

    def compute_scores(self, query_miss_penalty, ref_miss_penalty, sd_rate = 0.05, min_sd = 1000.0):
        # Total maligner-style score = sizing error + per-miss penalties.
        miss_score = self.query_misses*query_miss_penalty + self.ref_misses*ref_miss_penalty
        self.compute_maligner_sizing_error(sd_rate, min_sd)
        self.total_score = self.sizing_score + miss_score
        self.total_rescaled_score = self.sizing_score_rescaled + miss_score
        self.m_score = 0.0 # TODO

    @property
    def chunk_string(self):
        # Write maligner chunk string: semicolon-separated
        # "q_s,q_e,ql,r_s,r_e,rl" records for the rescaled chunks.
        def c_to_str(c):
            fields = [c.q_s, c.q_e, c.ql, c.r_s, c.r_e, c.rl]
            return ','.join([str(f) for f in fields])
        return ';'.join(c_to_str(c) for c in self.chunks_maligner_rescaled)
##################################################################
# Methods for converting to a SOMA Alignments File
def make_experiment():
    # Placeholder: presumably intended to build an <experiment> node for the
    # SOMA output file; not implemented yet.
    pass
def restriction_map_to_node(name, length, frags, is_consensus = True, enzymes="None", circular=False):
    """Initialize a <restriction_map> element from name and frags (bp).

    Note: the `length` parameter is accepted but not serialized.
    """
    frags = list(frags)
    node = MakeTextElement("restriction_map")
    node.append(MakeTextElement("name", text=name))
    node.append(MakeTextElement("type", text="consensus" if is_consensus else "opmap"))
    node.append(MakeTextElement("enzymes", text=enzymes))
    node.append(MakeTextElement("circular", text=str(circular).lower()))
    node.append(MakeTextElement("num_frags", text=str(len(frags))))
    # map_block is written in kb with three decimals.
    node.append(MakeTextElement("map_block", text=" ".join("%.3f" % (frag/1000.0) for frag in frags)))
    return node
def alignment_to_node(uuid, aln, query_frags, ref_frags, soma_score = 0.0):
    """Return an ETree Element for writing to an Alignment File.

    aln should be an instance of maligner_dp_alignments.Alignment;
    query_frags/ref_frags are the fragment-size lists used to translate
    maligner chunks into SOMA <f> records. Example output:
    """
    # <map_alignment>
    #   <uuid>3ca38422-b6e7-43e8-a925-ef36d475a2d7</uuid>
    #   <reference_map>
    #     <name>chr1</name>
    #   </reference_map>
    #   <aligned_map>
    #     <name>2248849_0_44</name>
    #     <orientation>R</orientation>
    #   </aligned_map>
    #   <soma_score>3.3207884</soma_score>
    #   <count>18</count>
    #   <f><i>1</i><l>26398</l><r>26398</r></f>
    #   <f><i>2</i><l>26399</l><r>26399</r></f>
    #   ...
    #   <f><i>18</i><l>26420</l><r>26421</r></f>
    # </map_alignment>
    ref_element = MakeTextElement("reference_map")
    ref_element.append(MakeTextElement("name", text = aln.ref_map))
    aligned_map_element = MakeTextElement("aligned_map")
    aligned_map_element.append(MakeTextElement("name", text = aln.query_map))
    # SOMA uses "N"/"R" where maligner uses "F"/"R".
    aligned_map_element.append(MakeTextElement("orientation", text = "N" if aln.is_forward=="F" else "R"))
    # NOTE(review): num_chunks is computed but never used.
    num_chunks = len(aln.chunks)
    num_query_frags = aln.query_end - aln.query_start
    soma_frags = convert_alignment_chunks(aln, query_frags, ref_frags)
    soma_frag_elements = [f.to_element() for f in soma_frags]
    sub_elements = [
        MakeTextElement("uuid", text=uuid),
        ref_element,
        aligned_map_element,
        MakeTextElement("soma_score", text=str(soma_score)),
        MakeTextElement("count", text = str(num_query_frags))
    ]
    sub_elements.extend(soma_frag_elements)
    element = MakeTextElement("map_alignment")
    element.extend(sub_elements)
    return element
###################################################################################
# Functions for working with XML files and ET root nodes.
class ReturnValue(object):
    # Simple namespace object for returning multiple named values.
    pass
def get_root(f):
    """Parse the XML file (path or file-like object) and return its root element."""
    # BUG FIX: the previous ET.parse(open(f)) leaked the file handle; lxml's
    # parse() accepts a filename or file object directly and manages it.
    tree = ET.parse(f)
    return tree.getroot()
def get_maps(tr):
    # Collect every <restriction_map> node under tr as RestrictionMap objects.
    nodes = tr.iter('restriction_map')
    return [RestrictionMap.from_node(n) for n in nodes]
def get_consensus_maps(r):
    """Return the consensus RestrictionMaps found under root node r."""
    # BUG FIX: RestrictionMap must be built via its from_node classmethod;
    # RestrictionMap(m) ignored the node entirely, leaving the instance
    # unpopulated (no .type attribute), so the filter below raised.
    maps = (RestrictionMap.from_node(m) for m in r.iter('restriction_map'))
    return [m for m in maps if m.type == 'consensus']
@wrap_file_function('r')
def get_consensus_maps_from_file(aln_file):
    # wrap_file_function presumably opens aln_file when given a path — confirm
    # against common.wrap_file_function. Returns the file's consensus maps.
    return get_consensus_maps(get_root(aln_file))
# Get consensus maps from multiple files
def get_all_consensus_maps(file_list):
    """Gather the consensus maps from every alignment file in file_list."""
    all_maps = []
    for path in file_list:
        all_maps.extend(get_consensus_maps(get_root(path)))
    return all_maps
def get_query_maps(tr):
    """Return query maps from a SOMA alignments file"""
    # Query maps are every restriction map that is not a consensus map.
    return [m for m in get_maps(tr) if m.type != 'consensus']
def get_all_maps(tr):
    """Return all maps from a SOMA alignments file.

    The result has .reference_maps (consensus maps) and .query_maps
    (everything else), each a dict keyed by map name.
    """
    maps = get_maps(tr)
    ret = ReturnValue()
    ret.reference_maps = {m.name: m for m in maps if m.type == 'consensus'}
    ret.query_maps = {m.name: m for m in maps if m.type != 'consensus'}
    return ret
def make_alignment_from_node(n, maps):
    """Make an alignment from the xml node.

    This means constructing an Alignment object,
    linking it with the restriction maps
    to determine various alignment attributes.
    """
    aln = Alignment.from_node(n)
    # NOTE(review): a missing map name yields qm/rm = None here, which would
    # make add_map_data fail on attribute access — confirm inputs are complete.
    qm = maps.query_maps.get(aln.aligned_map, None)
    rm = maps.reference_maps.get(aln.reference_map, None)
    aln.build_chunks()
    aln.add_map_data(qm, rm)
    return aln
def iter_alignments(tr, maps):
    """tr: root node of the xml cElementTree
    maps: dictionary of reference and query maps, returned by get_all_maps

    Returns a lazy generator of fully-linked Alignment objects.
    """
    alns = tr.iter('map_alignment')
    return (make_alignment_from_node(n, maps) for n in alns)
def read_file(f):
    """
    Read an entire XML file and return the parsed tree.
    Beware of memory usage for large files: the whole document is loaded.
    """
    return ET.parse(f)
##########################################################################
# Classes for dealing with "chunks" (alignment blocks/intervals)
# There are differences between how these are represented in maligned and in Schwartz format.
class MergeException(Exception):
    # Raised when MatchedChunk.merge is called on chunks failing should_merge.
    pass
class MatchedChunk(object):
    """Helper class for translating Schwartz map_alignment indices to the
    maligner format.

    (i, l, r) is a SOMA match: query fragment i matched to reference
    fragments l through r, both inclusive. The q_s/q_e and r_s/r_e
    attributes express the same match as half-open [start, end) intervals,
    python-style.
    """

    def __init__(self, i, l, r):
        self.i = i
        self.l = l
        self.r = r  # inclusive!
        # Half-open interval form of the same match:
        self.q_s = i
        self.q_e = i + 1
        self.r_s = l
        self.r_e = r + 1
        self.ql = None  # query chunk length (bp), assigned later
        self.rl = None  # reference chunk length (bp), assigned later
        self.is_boundary = False

    def overlaps_in_reference(self, other):
        """True when the two chunks share at least one reference fragment."""
        disjoint = (self.r < other.l) or (other.r < self.l)
        return not disjoint

    def should_merge(self, other):
        """Return True if this MatchedChunk should be merged with other on the right"""
        return self.overlaps_in_reference(other) and other.q_s == self.q_e

    def merge(self, other):
        """Merge this MatchedChunk with a right MatchedChunk"""
        if not self.should_merge(other):
            raise MergeException("Chunks should not be merged.")
        # The chunks are adjacent in the query; extend this chunk to cover
        # both query and reference spans.
        self.q_e = other.q_e
        self.r_s = min(self.r_s, other.r_s)
        self.r_e = max(self.r_e, other.r_e)

    def __repr__(self):
        return '({0},{1},{ql})-({2},{3},{rl})'.format(self.q_s, self.q_e,
            self.r_s, self.r_e, ql=self.ql, rl=self.rl)

    @property
    def query_misses(self):
        # Interior unmatched query sites spanned by this chunk.
        return self.q_e - self.q_s - 1

    @property
    def ref_misses(self):
        # Interior unmatched reference sites spanned by this chunk.
        return self.r_e - self.r_s - 1

    @property
    def rel_error(self):
        # Relative sizing error of the query chunk against the reference.
        d = self.ql - self.rl
        return d/self.rl
def merge_chunks_list(cl):
    """Merge overlapping chunks in effort to convert from SOMA format to the
    maligner format.

    Works on copies, so the input list and its chunks are left untouched.
    Merging passes repeat until a full pass makes no change.
    """
    if not cl:
        return []
    chunks = [copy(c) for c in cl]
    changed = True
    while changed:
        changed = False
        merged = []
        pending = chunks[0]
        for nxt in chunks[1:]:
            if pending.should_merge(nxt):
                pending.merge(nxt)
                changed = True
            else:
                merged.append(pending)
                pending = nxt
        merged.append(pending)
        chunks = merged
    return chunks
class SomaFrag(object):
    """One <f> record of a SOMA alignment: query fragment i matched to
    reference fragments l through r, both inclusive."""

    def __init__(self, i, l, r):
        self.i = i
        self.l = l
        self.r = r
        assert self.l <= self.r

    def is_sane(self):
        """Basic consistency check on the stored indices."""
        return self.i >= 0 and self.l <= self.r

    def to_element(self):
        """Serialize as an <f><i/><l/><r/></f> lxml element."""
        node = Element('f')
        for tag, value in (('i', self.i), ('l', self.l), ('r', self.r)):
            node.append(MakeTextElement(tag, text=str(value)))
        return node
def maligner_chunk_to_soma_frags(chunk, query_frags, ref_frags):
    # Wrapper around _maligner_chunk_to_soma_frags using the maligner chunk's
    # qs/qe/rs/re index attributes; sanity-checks the result before returning.
    frags = _maligner_chunk_to_soma_frags(chunk.qs, chunk.qe,
        chunk.rs, chunk.re, query_frags, ref_frags)
    assert(_check_soma_frags_sane(frags))
    return frags
def _maligner_chunk_to_soma_frags(qs, qe, rs, re, query_frags, ref_frags):
    """Convert the indices of a maligner chunk to one or more soma frags.

    This is tricky because there are multiple cases.
    Throughout, [qs,qe,rs,re] is a maligner chunk given by indices of query
    start, end & ref start, end (half-open intervals).
    <i, l, r> is a soma chunk showing that fragment i is matched to reference
    map fragments l inclusive through r inclusive.

    A. one query frag to one ref. frag
        [1,2,10,11] -> <1, 10, 10>
    B. one query frag to multiple ref frags.
        [1,2,10,12] -> <1, 10, 11>
    C. multiple query frags to multiple ref frags.
    In this case there are two possible translations:
        i)  [1, 3, 10, 12] -> <1, 10, 10>, <2, 10, 11>  OR
        ii) [1, 3, 10, 12] -> <1, 10, 11>, <2, 11, 11>
    We will pick based on where the restriction cuts are.
    Case i):
            1      2
        |-----|----------------|  query
        |-------------|--------|  reference
            10           11
    Case ii):
            1            2
        |--------------|--------|  query
        |------|----------------|  reference
           10          11
    """
    nq = qe-qs
    nr = re-rs
    assert(qe <= len(query_frags))
    assert(re <= len(ref_frags))
    assert(nq > 0)
    assert(nr > 0)
    if nq == nr == 1:
        # one to one
        return [SomaFrag(qs, rs, rs)]
    elif nq == 1 and nr >1:
        # one to many
        return [SomaFrag(qs, rs, re-1)]
    elif nq > 1 and nr ==1:
        # many to one: every query frag maps to the single reference frag
        return [SomaFrag(qs+i, rs, rs) for i in range(nq)]
    elif nq > 1 and nr > 1:
        # many to many. Pretty tricky.
        # Go from left to right and decide on the ref. indices based on
        # fragment size, recursing on the remaining query fragments.
        q = query_frags[qs]
        r = ref_frags[rs]
        if q <= r:
            return [SomaFrag(qs, rs, rs)] + _maligner_chunk_to_soma_frags(qs+1, qe, rs, re, query_frags, ref_frags)
        else:
            # Keep adding reference chunks until reference grows too large:
            rr = rs
            last_r = re - 1
            while rr < last_r and r < q:
                rr += 1
                r += ref_frags[rr]
            return [SomaFrag(qs, rs, rr)] + _maligner_chunk_to_soma_frags(qs+1, qe, rr, re, query_frags, ref_frags)
    else:
        raise RuntimeError("how'd you get here?")
def _check_soma_frags_sane(frags):
is_sane = all(f.is_sane() for f in frags)
if len(frags) == 1:
return is_sane
for fl, fr in zip(frags[:-1], frags[1:]):
is_sane = is_sane and (fr.i == fl.i + 1)
is_sane = is_sane and (fl.l <= fr.l)
is_sane = is_sane and ((fl.r <= fr.r))
if not is_sane:
import pdb; pdb.set_trace()
return is_sane
def convert_alignment_chunks(aln, query_frags, ref_frags):
    """Convert maligner chunks to soma frags"""
    # If the alignment is reverse, we need to flip the query_frags AND
    # flip the indices in the chunks. This is so we match soma's
    # output of reverse alignments.
    chunks = aln.chunks
    num_query_frags = len(query_frags)
    if aln.is_forward == "R":
        query_frags = query_frags[::-1]
        chunks = [copy(c) for c in chunks] # Make a copy
        for c in chunks:
            c.flip_query_coords(num_query_frags) # Flip query coords
    # Check that things are sane: chunks must be contiguous in the query.
    cqe_prev = chunks[0].qe
    for c in chunks[1:]:
        assert(c.qs == cqe_prev)
        cqe_prev = c.qe
    frag_lists = [maligner_chunk_to_soma_frags(c, query_frags, ref_frags) for c in chunks]
    # Flatten the per-chunk frag lists and sanity-check the whole alignment.
    soma_frags = [f for fl in frag_lists for f in fl]
    assert(_check_soma_frags_sane(soma_frags))
    return soma_frags
class AlignmentWriter(object):
    """Write alignment records in the maligner format"""

    # (maligner column name, Alignment attribute) pairs, in output order.
    maligner_fields_mapping = [
        ('query_map', 'query_map'),
        ('ref_map', 'reference_map'),
        ('is_forward', 'orientation'),
        ('num_matched_chunks', 'num_matched_chunks' ),
        ('query_misses', 'query_misses'),
        ('ref_misses', 'ref_misses'),
        ('query_miss_rate', 'query_miss_rate' ),
        ('ref_miss_rate', 'ref_miss_rate' ),
        ('total_score', 'total_score'),
        ('total_rescaled_score', 'total_rescaled_score'),
        ('m_score', 'm_score'),
        ('sizing_score', "sizing_score"),
        ('sizing_score_rescaled', "sizing_score_rescaled" ),
        ('query_scaling_factor', 'query_scaling_factor'),
        ('chunk_string', "chunk_string")
    ]
    maligner_fields = [column for column, _ in maligner_fields_mapping]

    def __init__(self):
        pass

    def _formatter(self, val):
        # Floats get fixed six-decimal formatting; everything else is str().
        return '%.6f' % val if isinstance(val, float) else str(val)

    def _gen_maligner_fields(self, aln):
        # Yield each output value in column order; attributes that are
        # missing (or mapped to None) come out as the literal string "NA".
        for _, attr in self.maligner_fields_mapping:
            if attr is None:
                yield "NA"
                continue
            yield getattr(aln, attr, "NA")

    def to_maligner_str(self, aln):
        """Output a string for maligner output file"""
        cells = [self._formatter(v) for v in self._gen_maligner_fields(aln)]
        return '\t'.join(cells)

    def write_maligner_alignment(self, f, aln):
        """Output a line to the maligner output file"""
        f.write(self.to_maligner_str(aln) + '\n')

    def write_maligner_header(self, f):
        """Write maligner header"""
        f.write('\t'.join(self.maligner_fields) + '\n')
| LeeMendelowitz/maligner | lib/malignpy/schwartz/parse_soma_alignments.py | parse_soma_alignments.py | py | 22,652 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "lxml.etree.Element",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "lxml.etree.parse",
... |
5548747359 | import os
from pprint import pprint
from tqdm import tqdm
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
import numpy as np
import utils
from lt_data import train_loader,val_loader
from model import resnet32
import argparse
def get_arguments():
    """Build the argparse parser for the LPL long-tail training script."""
    parser = argparse.ArgumentParser(
        description='PyTorch implementation of the paper: Long-tail Learning via Logit Adjustment')
    parser.add_argument('--dataset', default="cifar10-lt", type=str, help='Dataset to use.',
        choices=["cifar10", "cifar100", "cifar10-lt", "cifar100-lt"])
    # NOTE(review): despite the name, class_names is the number of classes.
    parser.add_argument('--class_names', type=int, default=100)
    # NOTE(review): 1420 epochs vs LR milestones at 80/120/160 — confirm.
    parser.add_argument('--epochs', type=int, default=1420)
    parser.add_argument('--data_home', default="data", type=str,
        help='Directory where data files are stored.')
    parser.add_argument('--num_workers', default=2, type=int, metavar='N',
        help='number of workers at dataloader')
    parser.add_argument('--batch_size', default=128, type=int,
        help='mini-batch size (default: 128)')
    parser.add_argument('--lr', default=0.1, type=float,
        help='initial learning rate')
    parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
    parser.add_argument('--weight-decay', default=1e-4,
        type=float, help='weight decay (default: 1e-4)')
    parser.add_argument('--log_val', help='compute val acc',
        type=int, default=10)
    parser.add_argument('--tro_train', default=1.0,
        type=float, help='tro for logit adj train')
    # PGD / logit-perturbation hyperparameters:
    parser.add_argument('--sign', default=True, type=bool, help='PGD split.')
    parser.add_argument('--thr', default="LA", type=str, help='PGD thr.')
    parser.add_argument('--num_classes', default=100,
        type=int, help='num_classes')
    parser.add_argument('--pgd_nums', default=40, type=int, help='PGD nums')
    parser.add_argument('--alpha', default=0.01, type=float, help='PGD alpha.')
    parser.add_argument('--split', default=40, type=int, help='PGD split.')
    return parser
def get_step(split: int, classes_num: int, step_size: int, classes_freq: list):
    """Compute, per class, the number of perturbation steps to use.

    Head classes (index < split) get steps proportional to their frequency
    relative to the most frequent class, scaled down by 0.2; tail classes
    get steps proportional to the rarest class's frequency relative to their
    own. Each value is rounded to the nearest integer.
    """
    steps = []
    for cls_idx in range(classes_num):
        if cls_idx < split:
            raw = (classes_freq[cls_idx] / classes_freq[0]) * step_size * 0.2 - 1
        else:
            raw = (classes_freq[-1] / classes_freq[cls_idx]) * step_size - 1
        steps.append(round(raw))
    return steps
class LPLLoss(nn.Module):
    """Logit-perturbation (PGD-style) loss for long-tailed classification.

    Perturbs the logits for pgd_nums steps and evaluates cross-entropy on
    the step selected per class via args.label_freq.
    """

    def __init__(self, sign=True, thr="LA", num_classes=100, pgd_nums=50, alpha=0.1, split=27):
        super().__init__()
        self.num_classes = num_classes
        self.pgd_nums = pgd_nums  # number of perturbation steps recorded
        self.alpha = alpha        # perturbation step size
        self.split = split        # head/tail class boundary index
        self.sign = sign          # whether to apply a per-class sign
        self.thr = thr            # sign strategy: "MEAN" or "LA"
        self.criterion = nn.CrossEntropyLoss()
        self.CE = nn.CrossEntropyLoss(reduction = 'none')

    def compute_adv_sign(self, logit, y):
        # Compute the per-sample logit perturbation (adv_logit) and its
        # per-class form (grad).
        logit_softmax = F.softmax(logit, dim=-1)
        y_onehot = F.one_hot(y, num_classes=self.num_classes)
        # Per-class sums of softmax logits (num_classes x num_classes).
        sum_class_logit = torch.matmul(y_onehot.permute(1, 0)*1.0, logit_softmax)
        sum_class_num = torch.sum(y_onehot, dim=0)
        # Guard against classes absent from this batch (avoid division by 0).
        sum_class_num = torch.where(sum_class_num == 0, 100, sum_class_num)
        mean_class_logit = torch.div(
            sum_class_logit, sum_class_num.reshape(-1, 1))
        # Direction: distance of class-mean logits from the one-hot targets,
        # L2-normalized.
        grad = mean_class_logit-torch.eye(self.num_classes).cuda()
        grad = torch.div(grad, torch.norm(grad, p=2, dim=0).reshape(-1, 1))
        if self.sign:
            if self.thr == "MEAN":
                mean_class_p = torch.diag(mean_class_logit)
                # Mask out classes absent from the batch.
                # NOTE(review): after the where() above, sum_class_num is
                # never 0, so this mask is always all-True — confirm intent.
                mean_mask = sum_class_num > 0
                mean_class_thr = torch.mean(mean_class_p[mean_mask])
                sub = mean_class_thr - mean_class_p
                sign = sub.sign()
            elif self.thr == "LA":
                # Fixed sign: -1 for head classes (< split), +1 for tail.
                sign = torch.tensor([-1]*self.split+[1] *
                    (self.num_classes-self.split)).cuda()
            grad = self.alpha * grad * sign.reshape(-1, 1)
        else:
            grad = self.alpha * grad
        # Select each sample's perturbation row by its label.
        adv_logit = torch.index_select(grad, 0, y)
        return adv_logit, grad

    def forward(self, models, x, y, args):
        """Return (loss, perturbed logits, original logits,
        per-sample initial CE, per-sample final CE)."""
        logit = models(x)
        init_l = self.CE(logit, y)
        logit_copy = logit.clone()
        # Record the logits after each perturbation step.
        logit_steps = torch.zeros(
            [self.pgd_nums, logit.shape[0], self.num_classes]).cuda()
        logit_news = torch.zeros([logit.shape[0], self.num_classes]).cuda()
        for i in range(self.pgd_nums):
            adv_logit, grad = self.compute_adv_sign(logit, y)
            logit = logit + adv_logit
            logit_steps[i] = logit
        # For each class i, take the logits at the step index given by
        # args.label_freq[i] for the samples of that class.
        for i, freq in enumerate(args.label_freq):
            logit_news += logit_steps[freq] * \
                torch.where(y == i, 1, 0).unsqueeze(-1)
        finl_l = self.CE(logit_news, y)
        loss_adv = self.criterion(logit_news, y)
        return loss_adv, logit_news, logit_copy, init_l, finl_l
def main(ord_d):
    """Main training script: build the model, train with the LPL loss, log
    metrics to TensorBoard, and checkpoint the model.

    ord_d accumulates per-epoch {labels, init_loss, finl_loss} arrays and is
    periodically dumped to .npy files.
    """
    num_class = args.class_names
    model = torch.nn.DataParallel(resnet32(num_classes=num_class))
    model = model.to(device)
    cudnn.benchmark = True
    criterion = nn.CrossEntropyLoss().to(device)
    # BUG FIX: pgd_nums was passed args.num_classes instead of args.pgd_nums,
    # silently running num_classes perturbation steps per batch.
    criterion_lpl = LPLLoss(sign=args.sign, thr=args.thr, num_classes=args.num_classes,
        pgd_nums=args.pgd_nums, alpha=args.alpha, split=args.split)
    _, label_freqs = utils.compute_adjustment(train_loader)
    # label_freqs = sorted(label_freqs.items(), key=lambda e: e[1], reverse=True)
    label_freq_list = [x[1] for x in label_freqs]
    # Generalized: use args.num_classes rather than a hard-coded 100.
    label_freq = get_step(split=args.split, classes_num=args.num_classes,
        step_size=args.pgd_nums, classes_freq=label_freq_list)
    args.label_freq = label_freq
    print(label_freq)
    optimizer = torch.optim.SGD(model.parameters(),
        args.lr,
        momentum=args.momentum,
        weight_decay=args.weight_decay,
        nesterov=True)
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[80, 120, 160])
    loop = tqdm(range(0, args.epochs), total=args.epochs, leave=False)
    val_loss, val_acc, best_acc = 0, 0, 0
    for epoch in loop:
        # Train for one epoch, then step the LR schedule.
        train_loss, train_acc = train(
            train_loader, model, criterion_lpl, optimizer, ord_d, epoch)
        writer.add_scalar("train/acc", train_acc, epoch)
        writer.add_scalar("train/loss", train_loss, epoch)
        lr_scheduler.step()
        # Evaluate on the validation set and track the best accuracy seen.
        val_loss, val_acc = validate(val_loader, model, criterion)
        if val_acc > best_acc:
            best_acc = val_acc
        writer.add_scalar("val/acc", val_acc, epoch)
        writer.add_scalar("best/acc", best_acc, epoch)
        writer.add_scalar("val/loss", val_loss, epoch)
        # NOTE(review): progress string is missing a closing ']' — cosmetic.
        loop.set_description(f"Epoch [{epoch}/{args.epochs}")
        loop.set_postfix(train_loss=f"{train_loss:.2f}", val_loss=f"{val_loss:.2f}",
            train_acc=f"{train_acc:.2f}",
            val_acc=f"{val_acc:.2f}",
            best_acc=f"{best_acc:.2f}")
        if (epoch + 1) % 100 == 0:
            # Periodic snapshot of the per-epoch loss records.
            np.save('res100_110.npy', ord_d)
    # Final dump, checkpoint, and per-class accuracy report.
    np.save('res.npy', ord_d)
    file_name = 'model.th'
    mdel_data = {"state_dict": model.state_dict()}
    torch.save(mdel_data, os.path.join(model_loc, file_name))
    results = utils.class_accuracy(val_loader, model, args)
    results["OA"] = val_acc
    hyper_param = utils.log_hyperparameter(args, args.tro_train)
    pprint(results)
    writer.add_hparams(hparam_dict=hyper_param, metric_dict=results)
    writer.close()
def train(train_loader, model, criterion, optimizer, ord_d, epoch):
    """ Run one train epoch.

    criterion is the LPLLoss module (called with the model itself); the
    per-sample initial/final CE losses plus labels are stashed into
    ord_d[epoch] for later analysis. Returns (average loss, average accuracy).
    """
    losses = utils.AverageMeter()
    accuracies = utils.AverageMeter()
    model.train()
    labels = []
    init_loss = []
    finl_loss = []
    for _, (inputs, target) in enumerate(train_loader):
        target = target.to(device)
        labels.append(target)
        input_var = inputs.to(device)
        target_var = target
        # output = model(input_var)
        # loss, output
        loss, output, _, init_l, finl_l= criterion(model, input_var, target_var,args)
        acc = utils.accuracy(output.data, target)
        init_loss.append(init_l)
        finl_loss.append(finl_l)
        # loss = criterion(output, target_var)
        # Manual L2 regularization over all parameters.
        # NOTE(review): the optimizer already applies weight_decay, so the
        # penalty is effectively applied twice — confirm this is intended.
        loss_r = 0
        for parameter in model.parameters():
            loss_r += torch.sum(parameter ** 2)
        loss = loss + args.weight_decay * loss_r
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        losses.update(loss.item(), inputs.size(0))
        accuracies.update(acc, inputs.size(0))
    # Collect the per-sample loss traces for this epoch as numpy arrays.
    labels = torch.cat(labels).cpu().detach().numpy()
    init_loss = torch.cat(init_loss).cpu().detach().numpy()
    finl_loss = torch.cat(finl_loss).cpu().detach().numpy()
    d = {'labels':labels,
        'init_loss':init_loss,
        'finl_loss':finl_loss}
    ord_d[epoch] = d
    return losses.avg, accuracies.avg
def validate(val_loader, model, criterion):
    """ Run evaluation; returns (average loss, average accuracy). """
    loss_meter = utils.AverageMeter()
    acc_meter = utils.AverageMeter()
    model.eval()
    with torch.no_grad():
        for batch, labels in val_loader:
            labels = labels.to(device)
            batch = batch.to(device)
            logits = model(batch)
            loss = criterion(logits, labels)
            acc = utils.accuracy(logits.data, labels)
            loss_meter.update(loss.item(), batch.size(0))
            acc_meter.update(acc, batch.size(0))
    return loss_meter.avg, acc_meter.avg
if __name__ == '__main__':
    # Parse CLI arguments, pick the device, set up TensorBoard logging, and
    # launch training. ord_d collects per-epoch loss statistics.
    parser = get_arguments()
    args = parser.parse_args()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    args.device = device
    exp_loc, model_loc = utils.log_folders(args)
    writer = SummaryWriter(log_dir=exp_loc)
    ord_d = {}
    main(ord_d)
| limengyang1992/lpl | lpl-longtail-other/train_lpl_cifar100.py | train_lpl_cifar100.py | py | 10,563 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "torch.nn.CrossE... |
8385911882 | from django import template, forms
from collections import OrderedDict
from ..models import FormLabel
register = template.Library()
# get the form field corresponding to the given block
@register.simple_tag
def block_field(form, block):
    # Returns None when the block has no matching field on the form.
    if block.name in form.fields: return form[block.name]
    return None

# get the inline formset corresponding to the given collection block
@register.simple_tag
def block_formset(formsets, block):
    # formsets is keyed by the block's primary key.
    if block.pk in formsets: return formsets[block.pk]
    return None

@register.simple_tag
def block_labels(labels, block):
    # Collection blocks are keyed as "<name><id>_"; other blocks by name.
    # Returns an empty dict when no labels are registered for the block.
    if block.block_type() == 'collection':
        key = f'{block.name}{block.id}_'
        if key in labels: return labels[key]
    if block.name in labels: return labels[block.name]
    return {}

@register.simple_tag
def collection_items(items, block):
    # Items for a collection block, keyed by the block's primary key.
    if block.pk in items: return items[block.pk]
    return None

@register.simple_tag
def item_columns(item, block, uploading):
    # Decide which columns to render for a collection item: with no item,
    # only the fields column; otherwise the item's error state and the
    # uploading flag select between fields, progress bar and message.
    if not item: return { 'fields': True }
    return {
        'fields': not item._error and not uploading,
        'progress': not item._error and uploading,
        'message': item._error,
        'upload': item._error or block.file_optional
    }
@register.simple_tag
def item_form(formset, item):
    """Find the form in the formset bound to the given item.

    item may be an int (1-based position in the formset — presumably used
    before a saved instance exists; TODO confirm) or a model instance, in
    which case the form whose .instance matches is returned. None otherwise.
    """
    # Idiom fix: isinstance instead of type(...) == int.
    if isinstance(item, int):
        return formset[item - 1]
    for form in formset:
        if form.instance == item:
            return form
    return None
@register.simple_tag
def form_hidden(form, name):
    # Hidden fields are stored on the form with a leading underscore.
    if form: return form['_' + name]
    return None

@register.filter
def get_by_field(errors, name):
    # First error message recorded for the named field, or '' if none.
    for form_errors in errors:
        if name in form_errors: return form_errors[name][0]
    return ''

@register.filter
def get_by_style(labels, style):
    # Label registered for the given FormLabel.LabelStyle, or None.
    if labels and style in labels: return labels[style]
    return None

@register.filter
def get_by_choice(labels, choice):
    # Label registered for the given choice value, or None.
    if choice in labels: return labels[choice]
    return None

@register.filter
def closest_label(labels):
    # Prefer the most specific label style available, in this fixed order.
    names = ('WIDGET', 'HORIZONTAL', 'VERTICAL')
    styles = [ FormLabel.LabelStyle[n] for n in names ]
    for style in styles:
        label = get_by_style(labels, style)
        if label: return label
    return None

@register.filter
def for_choice_value(labels, value):
    # Closest label for the labels registered under a given choice value.
    if value in labels: return closest_label(labels[value])
    return None

@register.filter
def for_item_field(labels, field):
    # Labels registered for a collection item's field, or None.
    if field in labels: return labels[field]
    return None
@register.simple_tag
def unbound_checkbox_field(form_block, name, radio=False):
anon_form = forms.Form()
if radio: field = forms.ChoiceField(choices=[(name, name)],
widget=forms.RadioSelect)
else: field = forms.BooleanField(widget=forms.CheckboxInput)
anon_form.fields['_' + form_block.stock.field_name(name)] = field
field.required = False
return anon_form
@register.simple_tag
def unbound_radio_field(form_block, name):
    # Convenience wrapper: same as unbound_checkbox_field, rendered as radio.
    return unbound_checkbox_field(form_block, name, radio=True)
@register.simple_tag(takes_context=True)
def include_stock(context, block, labels, review=False):
    """Render the stock widget's template for *block* in the current context.

    Returns '' when the form doesn't carry one of the widget's fields.
    """
    stock = block.stock
    name = review and stock.review_template_name or stock.template_name
    template = context.template.engine.get_template('formative/stock/' + name)
    form = context['form']
    block_fields = OrderedDict()
    for widget_name in stock.widget_names():
        field_name = stock.field_name(widget_name)
        if field_name not in form.fields:
            return ''
        block_fields[widget_name] = form[field_name]
    return template.render(context.new({
        'form_block': block,
        'block_fields': block_fields,
        'labels': labels
    }))
@register.simple_tag(takes_context=True)
def include_stock_review(context, block, labels):
    # Review-mode variant of include_stock (uses the stock's review template).
    return include_stock(context, block, labels, review=True)
| johncronan/formative | formative/templatetags/form_block.py | form_block.py | py | 3,803 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "django.template.Library",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.template",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "models.FormLabel.LabelStyle",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name":... |
5657081633 | #!/usr/bin/env python3
import os
import json
import requests
import sys
def TriggerPipeline(token, commit, ci_ref):
    """Trigger the mobile-metrics CircleCI pipeline for *commit*.

    token: CircleCI API token (used as HTTP basic-auth user).
    commit: git ref/sha to benchmark.
    ci_ref: this build's number; forwarded as an int parameter.
    Exits the process with status 1 when CircleCI rejects the request.
    """
    url = "https://circleci.com/api/v2/project/github/mapbox/mobile-metrics/pipeline"
    headers = {
        "Content-Type": "application/json",
        "Accept": "application/json",
    }
    data = {
        "parameters": {
            "run_ios_navigation_benchmark": True,
            "target_branch": commit,
            "ci_ref": int(ci_ref),
        }
    }
    # Branch in mobile-metrics repo if you want to trigger a custom pipeline
    # data["branch"] = "test"
    response = requests.post(url, auth=(token, ""), headers=headers, json=data)
    print(response.request.url)
    if response.status_code not in (200, 201):
        # Error payloads normally carry "message", but don't crash on a
        # malformed/keyless body while reporting the failure.
        try:
            message = response.json().get("message", response.text)
        except ValueError:
            message = response.text
        print("Error triggering the CircleCI: %s." % message)
        sys.exit(1)
    else:
        print("Started run_ios_navigation_benchmark: %s" % response.json())
def main():
    """Read the CircleCI environment and trigger the metrics pipeline."""
    token = os.getenv("MOBILE_METRICS_TOKEN")
    commit = os.getenv("CIRCLE_SHA1")
    ci_ref = os.getenv("CIRCLE_BUILD_NUM")
    if token is None:
        print("Error triggering because MOBILE_METRICS_TOKEN is not set")
        sys.exit(1)
    # TriggerPipeline calls int(ci_ref); fail fast with a clear message
    # instead of a TypeError when the build env didn't provide these.
    if commit is None or ci_ref is None:
        print("Error triggering because CIRCLE_SHA1 or CIRCLE_BUILD_NUM is not set")
        sys.exit(1)
    TriggerPipeline(token, commit, ci_ref)
    return 0
# Script entry point: only runs when executed directly, not on import.
if __name__ == "__main__":
    main()
| mapbox/mapbox-navigation-ios | scripts/trigger-metrics.py | trigger-metrics.py | py | 1,361 | python | en | code | 821 | github-code | 36 | [
{
"api_name": "requests.post",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 39,
... |
36445019270 | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support import expected_conditions as EC
import sys
def execute():
    """Run one login attempt: wait for the login page, then sign in.

    Uses the module-level driver and credentials.
    """
    if check_page_load('login'):
        login(username, password)
def login(username: str, password: str):
    """Fill in the credentials, submit, and verify the active term.

    After login the home page is awaited; if the active term shown does
    not match the configured one, log out (which restarts the whole flow)
    before filling the IRS form.
    """
    for field_name, value in (('u', username), ('p', password)):
        field = driver.find_element(By.NAME, field_name)
        field.clear()
        field.send_keys(value)
    submit = driver.find_element(
        By.XPATH, './/*[@id="submit"]/input[@type="submit"]')
    submit.click()
    check_page_load('home')
    print('succesfully logged in...')
    active_term = driver.find_element(
        By.XPATH, '//*[@id="m_b1"]/div[1]').get_attribute('innerText')
    print(f'Current status:\n{active_term}')
    if f'Term {term}' not in active_term:
        print('term incorrect. retrying...')
        logout()
    isi_irs(irs)
def isi_irs(irs: dict):
    """Open the IRS page and enroll in every configured class, then submit.

    *irs* maps a class form-field code to its ordered priority list
    (0 = the topmost class offering on the page).
    """
    driver.get(irs_url)
    check_page_load('irs')
    print('filling in classes...')
    print('selected:')
    for class_code, priorities in irs.items():
        offerings = driver.find_elements(By.NAME, class_code)
        pick_class(offerings, priorities)
    # Submit the completed IRS form.
    driver.find_element(
        By.XPATH, '//*[@id="ti_m1"]/div/table/tbody/tr/td/form/div[2]/input[2]'
    ).click()
    print('done.')
def logout():
    """Log out via the logout URL, then restart the login flow.

    Navigating to the logout URL redirects back to the login page, and
    execute() is invoked again to re-login from the beginning.
    """
    driver.get(logout_url)
    execute()
def pick_class(selection: list, priority: list):
    """Enroll in the first class from *priority* that still has free seats.

    selection: radio/checkbox elements for one course, in page order.
    priority: indices into *selection*, ordered by preference.
    Clicks the first offering whose capacity exceeds its enrollment and
    stops; does nothing when every preferred offering is full.
    """
    for p in priority:
        selected_class = selection[p]
        # Relative XPath walk from the input to sibling table cells.
        # NOTE(review): assumes the class-name text sits in the first
        # descendant chain of the next sibling <td> — confirm against
        # the live IRS markup before changing.
        class_name = selected_class.find_element(
            By.XPATH, '..').find_element(
            By.XPATH, 'following-sibling::*[1]').find_element(
            By.XPATH, './/*[not(preceding-sibling::*)]').find_element(
            By.XPATH, './/*[not(preceding-sibling::*)]').get_attribute('innerText')
        # Capacity cell is 3 siblings over; enrolled count is the cell after it.
        capacity = selected_class.find_element(
            By.XPATH, '..').find_element(
            By.XPATH, 'following-sibling::*[3]')
        enrolled = capacity.find_element(By.XPATH, 'following-sibling::*[1]')
        if (int(capacity.get_attribute("innerText")) > int(enrolled.get_attribute("innerText"))):
            selected_class.click()
            print(f'{class_name} -> capacity: {capacity.get_attribute("innerText")}, current: {enrolled.get_attribute("innerText")}')
            break
def check_page_load(page: str) -> bool:
    """Wait until the page identified by *page* is loaded.

    *page* is 'login', 'home' or 'irs'; each is recognized by the presence
    of a characteristic element.  On timeout the page is refreshed (any
    alert dismissed) and the wait retried until it succeeds.  Returns True
    once the marker element is present; unknown page names return True
    immediately without waiting.
    """
    locators = {
        'login': (By.XPATH, '//*[@id="u"]'),
        'home': (By.XPATH, '//*[@id="m_b1"]/div[1]'),
        'irs': (By.XPATH, '//*[@id="ti_h"]'),
    }
    locator = locators.get(page)
    if locator is None:
        return True
    # Iterative retry instead of the previous unbounded recursion (whose
    # result was discarded anyway): refresh and wait again until the
    # marker shows up — heavy-load pages can take several attempts.
    while True:
        try:
            WebDriverWait(driver, 1).until(
                EC.presence_of_element_located(locator))
            return True
        except TimeoutException:
            print('heavy load. refreshing...')
            driver.refresh()
            handle_alerts()
def handle_alerts():
    """Dismiss a pending browser alert, if any, by accepting it."""
    try:
        # alert_is_present must be CALLED to build the wait condition;
        # passing the bare function made the wait fail on every poll, so
        # alerts were silently never dismissed.
        WebDriverWait(driver, 1).until(EC.alert_is_present())
        driver.switch_to.alert.accept()
    except Exception:
        # Best-effort: no alert appeared (or it vanished) — carry on.
        pass
if __name__ == "__main__":
    # SIAK credentials — fill these in before running.
    username = "<username>"
    password = "<password>"
    term = 2  # 1: odd semester (ganjil), 2: even semester (genap), 3: short semester (pendek)
    """
    valid format:
    'c[<class_code>_<curriculum_code>]': [<priority>]
    with 0 begin the topmost class
    e.g. A class = 0, B class = 1, etc.
    """
    irs = {
        'c[CSGE602012_01.00.12.01-2020]': [1,2,0,3,4],
        'c[CSGE602091_01.00.12.01-2020]': [0,3,2,1,4,5],
        'c[CSGE602022_01.00.12.01-2020]': [3,4,0,1,2,5],
        'c[CSCM602055_01.00.12.01-2020]': [1,2,0],
        'c[CSGE602040_01.00.12.01-2020]': [5,1,0,3,4,2],
    }
    login_url = "https://academic.ui.ac.id/main/Authentication/"
    logout_url = "https://academic.ui.ac.id/main/Authentication/Logout"
    home_url = "https://academic.ui.ac.id/main/Welcome/"
    irs_url = "https://academic.ui.ac.id/main/CoursePlan/CoursePlanEdit"
    options = webdriver.ChromeOptions()
    options.add_argument('--ignore-certificate-errors')
    options.add_argument('--ignore-ssl-errors')
    options.add_argument('log-level=3')
    options.add_experimental_option('detach', True) # keep browser open
    # open browser; short page-load timeout so slow loads raise quickly
    driver = webdriver.Chrome(options=options)
    driver.set_page_load_timeout(1.5)
    # NOTE(review): the loop body always reaches sys.exit(0) after one pass,
    # so the while only re-runs if execute() returns abnormally — confirm
    # whether retry-on-exception was intended here.
    while True:
        driver.get(login_url)
        execute()
        sys.exit(0) # ends instance if ran successfully
| rayhanrandi/siakwar | driver.py | driver.py | py | 6,467 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "selenium.webdriver.common.by.By.NAME",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.NAME",
"line_number": 29,
"usag... |
30188582457 | #!/usr/bin/python2
# -*- coding: utf-8 -*-
import copy
import numpy as np
import sys
import time
from control_msgs.msg import GripperCommandGoal, GripperCommandAction
from geometry_msgs.msg import Quaternion, PoseStamped
from grasping_msgs.msg import FindGraspableObjectsAction, FindGraspableObjectsGoal
from moveit_commander import Constraints
from moveit_msgs.msg import JointConstraint
from moveit_msgs.msg import MoveItErrorCodes
from std_srvs.srv import Empty
import actionlib
import moveit_commander
import rospy
import tf
import tf2_geometry_msgs
import tf2_ros
from gazebo_utils.pose_utils import GazeboPoseMaster
from rviz_utils.rviz_utils import RvizMarkerPublisher
class GraspingClient(object):
    """
    This class is used to plan and move the arm.

    Wraps MoveIt's commander API plus a gripper action client; poses are
    taken from Gazebo and visualized in RViz via the project helpers.
    """
    def __init__(self, group="arm"):
        # TF setup: short 1s buffer, cleared on start.
        self.tf_buffer = tf2_ros.Buffer(rospy.Duration(1)) #tf buffer length
        self.tf_buffer.clear()
        self.tf_listener = tf2_ros.TransformListener(self.tf_buffer)
        moveit_commander.roscpp_initialize(sys.argv)
        self.robot = moveit_commander.RobotCommander()
        self.scene = moveit_commander.PlanningSceneInterface("base_link")
        self.group = moveit_commander.MoveGroupCommander(group)
        self.tolerance = rospy.get_param('move_group/arm/tolerance', default = 0.005)
        self.group.set_goal_tolerance(self.tolerance)
        #gripper params
        self.gripper_height_above = 0.2  # pre-grasp height offset above the object (m)
        self.gripper_closed_pos = 0  # The position for a fully-closed gripper (meters).
        self.gripper_open_pos = 0.10  # The position for a fully-open gripper (meters).
        self.MIN_EFFORT = 35  # Min grasp force, in Newtons
        self.MAX_EFFORT = 100  # Max grasp force, in Newtons
        self.gripper_action_server_name = 'gripper_controller/gripper_action'
        self.gripper_client = actionlib.SimpleActionClient(self.gripper_action_server_name, GripperCommandAction)
        self.gripper_client.wait_for_server(rospy.Duration(10))
        self.gazebo_client = GazeboPoseMaster()
        self.pose_publisher = RvizMarkerPublisher()
    def pick(self, obj_gazebo_pose):
        """Plan to a top-down pre-grasp above the object, descend, and grasp.

        obj_gazebo_pose: PoseStamped of the target object (map frame assumed).
        """
        self.pose_publisher.publish(obj_gazebo_pose)
        # Top-down grasp: gripper pitched pi/2 so it points straight down.
        grasp_pose = PoseStamped()
        grasp_pose.pose.position = obj_gazebo_pose.pose.position
        quat = tf.transformations.quaternion_from_euler(0, np.pi/2, 0) #rotation about z axis
        grasp_pose.pose.orientation = Quaternion(*quat)
        grasp_pose.header.stamp = rospy.Time.now()
        grasp_pose.header.frame_id = 'map'
        grasp_pose.pose.position.z+= self.gripper_height_above
        self.pose_publisher.publish(grasp_pose)
        self.group.set_pose_target(grasp_pose)
        # NOTE(review): plan() returns a tuple in newer moveit_commander
        # releases; this code assumes the old single-RobotTrajectory return.
        plan = self.group.plan()
        arm_execution_success = self.group.execute(plan)
        #log execution time end
        time.sleep(1)
        if not plan.joint_trajectory.points:
            return
        # Zero-delta "servo" move, then descend 10cm onto the object.
        cartesian_servoing_success = self.move_gripper_linearly(grasp_pose, delta_x = 0, delta_y = 0, delta_z = 0, avoid_collisions = False)
        time.sleep(0.25)
        cartesian_moving_down_success = self.move_gripper_linearly(grasp_pose, delta_x = 0, delta_y = 0, delta_z = -0.1, avoid_collisions = False)
        time.sleep(0.5)
        self.gripper_open()
        self.gripper_close()
        time.sleep(0.5)
        self.move_gripper_linearly(grasp_pose, delta_z = 0.2) #lift up
    def gripper_open(self):
        """Opens the gripper.
        """
        goal = GripperCommandGoal()
        goal.command.position = self.gripper_open_pos
        goal.command.max_effort = self.MAX_EFFORT
        self.gripper_client.send_goal(goal)
        self.gripper_client.wait_for_result()
    def gripper_close(self, max_effort= None):
        """Closes the gripper.
        Args:
            max_effort: The maximum effort, in Newtons, to use. Note that this
                should not be less than 35N, or else the gripper may not close.

        NOTE(review): max_effort is accepted but ignored — MAX_EFFORT is
        always used below; confirm whether the parameter should be honored.
        """
        goal = GripperCommandGoal()
        goal.command.position = self.gripper_closed_pos
        goal.command.max_effort = self.MAX_EFFORT
        self.gripper_client.send_goal(goal)
        self.gripper_client.wait_for_result()
    def fix_joint(self, joint_name): #by default last joint remains at default pos
        """Constrain *joint_name* to (approximately) its current position
        for all subsequent plans.  Raises ValueError for unknown joints."""
        try:
            joint_idx = self.group.get_active_joints().index(joint_name)
        except ValueError:
            raise
        c = Constraints()
        jc = JointConstraint()
        jc.joint_name = self.group.get_active_joints()[joint_idx]
        jc.position = self.group.get_current_joint_values()[joint_idx]
        jc.weight = 1.0
        jc.tolerance_above = 0.025
        jc.tolerance_below = 0.025
        c.joint_constraints.append(jc)
        self.group.set_path_constraints(c)
    def updateScene(self):
        """Insert the table as a collision box into the planning scene."""
        # insert table to scene
        table_height = 0.47
        table_width = 0.8
        table_length = 0.4
        # NOTE(review): the "table" pose is fetched from the 'coke_can'
        # Gazebo model — looks like a leftover; confirm the model name.
        table_pose_in_world = self.gazebo_client.get_pose('coke_can')
        self.scene.attach_box("map", "table",
                table_pose_in_world, (table_length, table_width, table_height)
        )
    def tuck(self):
        """Move the arm to its tucked pose.

        NOTE(review): self.move_group is never assigned (only self.group
        exists), and moveToJointPosition is not a moveit_commander API —
        calling this method will raise AttributeError as written; confirm
        whether it was ported from a fetch MoveGroupInterface client.
        """
        joints = ["shoulder_pan_joint", "shoulder_lift_joint", "upperarm_roll_joint",
              "elbow_flex_joint", "forearm_roll_joint", "wrist_flex_joint", "wrist_roll_joint"]
        pose = [1.32, 1.40, -0.2, 1.72, 0.0, 1.66, 0.0]
        while not rospy.is_shutdown():
            result = self.move_group.moveToJointPosition(joints, pose, 0.02)
            if result.error_code.val == MoveItErrorCodes.SUCCESS:
                return
    def move_gripper_linearly(self, current_pose, delta_x = None, delta_y = None, delta_z = None, avoid_collisions = False, eef_step = 0.005): #computes cartesian path and goes down by depth m
        """Execute a straight-line Cartesian move offset by the given deltas.

        current_pose: PoseStamped start pose (its frame becomes the
            planning reference frame for the trajectory).
        Returns the execute() success flag.
        """
        waypoints = []
        old_frame = self.group.get_pose_reference_frame() #make backup of the original frame
        self.group.set_pose_reference_frame(current_pose.header.frame_id) #cartesian trajectory plans in this frame
        waypoints.append(current_pose.pose) #our current pose
        target_pose = copy.deepcopy(current_pose)
        if delta_x:
            target_pose.pose.position.x += delta_x
        if delta_y:
            target_pose.pose.position.y += delta_y
        if delta_z:
            target_pose.pose.position.z += delta_z
        waypoints.append(target_pose.pose)
        trajectory, fraction = self.group.compute_cartesian_path(waypoints, eef_step, 0, avoid_collisions)
        cartesian_execute_success = self.group.execute(trajectory) #execute previously planned trajectory
        self.group.set_pose_reference_frame(old_frame) #reset back to old planning frame
        return cartesian_execute_success
| osuprg/fetch_mobile_manipulation | mobile_manipulation/src/grasping/grasping.py | grasping.py | py | 7,066 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "tf2_ros.Buffer",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "rospy.Duration",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "tf2_ros.TransformListener",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "moveit_comman... |
37334494671 | from importlib.resources import contents
import logging
import json
from aiogram import Bot, Dispatcher, types
from aiogram.types import Message
from data.config import BOT_TOKEN
from aiogram import types
# from aiogram.dispatcher.filters.builtin import CommandStart
from aiogram.types import InlineKeyboardMarkup, InlineKeyboardButton
from aiogram.types.web_app_info import WebAppInfo
from aiohttp.web_app import Application
#
from aiogram.dispatcher.webhook.aiohttp_server import SimpleRequestHandler, setup_application
from aiogram.utils.web_app import safe_parse_webapp_init_data
from aiohttp.web_request import Request
from aiohttp.web_response import json_response
from aiogram.dispatcher.router import Router
from aiogram.dispatcher.filters.content_types import ContentTypesFilter, ContentType
from aiogram import Bot
from aiogram.types import (
InlineKeyboardButton,
InlineKeyboardMarkup,
InlineQueryResultArticle,
InputTextMessageContent,
ReplyKeyboardMarkup,
KeyboardButton,
WebAppInfo,
)
from aiogram.utils.web_app import check_webapp_signature, safe_parse_webapp_init_data
from aiogram.types import MenuButtonWebApp, WebAppInfo
from aiohttp.web import run_app
from routes import check_data_handler, send_message_handler
from aiogram.dispatcher.filters import Command
TOKEN = BOT_TOKEN
bot = Bot(token=TOKEN, parse_mode="HTML")
dp = Dispatcher()
my_router = Router()
logger = logging.getLogger(__name__)
@my_router.message(Command(commands=["start"]))
async def command_start_handler(message: types.Message):
    """Greet the user and show the web-app reply keyboard.

    (An unused inline keyboard pointing at the same web app was removed.)
    """
    reply_keyboard = ReplyKeyboardMarkup(
        keyboard = [[
            KeyboardButton(text = "Подати заявку", web_app = WebAppInfo(url="https://cumpbotweb.dobbikov.com/"))
        ]]
    )
    await message.answer(f"Привіт, {message.from_user.full_name}!", reply_markup=reply_keyboard)
@my_router.message(ContentTypesFilter(content_types=[ContentType.WEB_APP_DATA]))
async def web_app_handler(message: types.Message) -> None:
    """Receive the web-app application payload and forward it for review.

    The payload is JSON with name/email/discord/nickname/age; it is posted
    to the moderators' group chat, then the applicant gets a confirmation.
    (Leftover debug prints were removed.)
    """
    data = json.loads(message.web_app_data.data)
    name = data["name"]
    email = data["email"]
    discord = data["discord"]
    nickname = data["nickname"]
    age = data["age"]
    # Moderators' group chat id.
    await bot.send_message(
        -1001768046932,
        f"<a href='tg://user?id={message.from_user.id}'>{name}</a> надіслав заявку:\n\n\
Email: <b>{email}</b>\n\
Discord: <b>{discord}</b>\n\
Nickname: <b>{nickname}</b>\n\
Вік: <b>{age}</b>\n\
", parse_mode="HTML")
    await bot.send_message(message.from_user.id, "Дякуємо за надіслану заявку!\nМи її отримали та відповімо вам, коли її перевіримо.\nГарного дня!")
@my_router.message()
async def echo_handler(message: types.Message) -> None:
    """
    Handler will forward received message back to the sender

    By default message handler will handle all message types (like text, photo, sticker and etc.)
    """
    try:
        # Send copy of the received message
        await message.send_copy(chat_id=message.chat.id)
    except TypeError:
        # But not all the types is supported to be copied so need to handle it
        await message.answer("Nice try!")
async def on_startup(bot: Bot, base_url: str):
    """Ensure the Telegram webhook points at our public endpoint."""
    webhook = await bot.get_webhook_info()
    WEBHOOK_URL = f"https://cumpbot.dobbikov.com/webhook"
    if webhook.url != WEBHOOK_URL:
        # Drop the stale webhook before registering the new URL.  (The old
        # code deleted only when NO webhook was set, which is a no-op.)
        if webhook.url:
            await bot.delete_webhook()
        await bot.set_webhook(WEBHOOK_URL)
    # If you want to use free certificate signed by LetsEncrypt you need to
    # set only URL without sending certificate.
def main() -> None:
    """Wire up a dispatcher and serve Telegram webhook updates over aiohttp."""
    dispatcher = Dispatcher()
    dispatcher["base_url"] = f"https://cumpbot.dobbikov.com"
    dispatcher.startup.register(on_startup)
    dispatcher.include_router(my_router)

    app = Application()
    app["bot"] = bot
    # Route Telegram's webhook POSTs into the dispatcher.
    SimpleRequestHandler(dispatcher=dispatcher, bot=bot).register(app, path="/webhook")
    setup_application(app, dispatcher, bot=bot)
    run_app(app, host="127.0.0.1", port=3003)
if __name__ == "__main__":
main() | DobbiKov/telegram-bot-web-app | bot/main.py | main.py | py | 5,445 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "data.config.BOT_TOKEN",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "aiogram.Bot",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "aiogram.Dispatcher",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "aiogram.dispatch... |
23420929820 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Create the Pedido and PedidoDetalle tables for the sales app.

    Auto-generated by Django; the trailing AddField operations were
    deliberately commented out by the author.
    """

    dependencies = [
        ('bar', '0300_auto_20161002_1441'),
        ('clientes', '0276_auto_20160827_2010'),
        ('personal', '0272_auto_20160827_2010'),
        ('stock', '0204_productoventa'),
        ('ventas', '0050_auto_20161002_1429'),
    ]

    operations = [
        migrations.CreateModel(
            name='Pedido',
            fields=[
                ('numero_pedido', models.AutoField(help_text=b'Este dato se genera automaticamente cada vez que se va crear un Pedido.', serialize=False, verbose_name=b'Nro. Pedido', primary_key=True)),
                ('fecha_pedido', models.DateTimeField(help_text=b'La fecha y hora del Pedido se asignara automaticamente una vez que sea guardado.', verbose_name=b'Fecha/Hora del Pedido', auto_now_add=True)),
                ('total_pedido', models.DecimalField(default=0, verbose_name=b'Total del Pedido', max_digits=18, decimal_places=0)),
                ('estado_pedido', models.ForeignKey(default=1, verbose_name=b'Estado del Pedido', to='bar.PedidoEstado', help_text=b'El estado del Pedido se establece automaticamente.')),
                ('mesa_pedido', models.ManyToManyField(help_text=b'Indique la/s mesa/s que sera/n ocupada/s por el/los Cliente/s.', to='bar.Mesa', verbose_name=b'Mesas disponibles')),
                ('mozo_pedido', models.ForeignKey(verbose_name=b'Atendido por?', to_field=b'usuario', to='personal.Empleado', help_text=b'Este dato se completara automaticamente cuando el Pedido sea guardado.')),
                ('reserva', models.ForeignKey(blank=True, to='clientes.Reserva', help_text=b'Seleccione una Reserva en caso de que el Cliente haya realizado una.', null=True)),
            ],
            options={
                'verbose_name': 'Pedido',
                'verbose_name_plural': 'Pedidos',
            },
        ),
        migrations.CreateModel(
            name='PedidoDetalle',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('precio_producto_pedido', models.DecimalField(help_text=b'El Precio de Venta del Producto se define en la pantalla de Productos.', verbose_name=b'Precio Venta Producto', max_digits=18, decimal_places=0)),
                ('cantidad_producto_pedido', models.DecimalField(default=1, help_text=b'Ingrese la cantidad del producto solicitado por el Cliente.', verbose_name=b'Cantidad del Producto', max_digits=10, decimal_places=3)),
                ('total_producto_pedido', models.DecimalField(default=0, help_text=b'Este valor se calcula automaticamente tomando el Precio Venta del Producto por la Cantidad del Producto.', verbose_name=b'Costo Total del Producto', max_digits=18, decimal_places=0)),
                ('fecha_pedido_detalle', models.DateTimeField(help_text=b'Registra la fecha y hora en que se realizo el detalle del Pedido, util cuando el cliente pide mas productos.', verbose_name=b'Fecha/hora del detalle del Pedido', auto_now_add=True)),
                ('pedido', models.ForeignKey(to='ventas.Pedido')),
                ('producto_pedido', models.ForeignKey(verbose_name=b'Producto a ordenar', to='stock.ProductoVenta', help_text=b'Seleccione el Producto ordenado por el Cliente.')),
            ],
            options={
                'verbose_name': 'Pedido - Detalle',
                'verbose_name_plural': 'Pedidos - Detalles',
            },
        ),
        # migrations.AddField(
        #     model_name='comanda',
        #     name='numero_pedido',
        #     field=models.ForeignKey(default=0, to='ventas.Pedido'),
        # ),
        # migrations.AddField(
        #     model_name='venta',
        #     name='numero_pedido',
        #     field=models.OneToOneField(default=0, to='ventas.Pedido', help_text=b'Seleccione el Numero de Pedido para el cual se registrara la Venta.', verbose_name=b'Numero de Pedido'),
        # ),
    ]
| pmmrpy/SIGB | ventas/migrations/0051_auto_20161002_1441.py | 0051_auto_20161002_1441.py | py | 4,059 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.CreateModel",
"line_number": 18,
"usage_type": "call"
},
... |
39274547040 | from fastapi import HTTPException, Request
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
from sentry_sdk import capture_message
from ..auth.auth_handler import verify_jwt
class JWTBearer(HTTPBearer):
    """HTTPBearer scheme that additionally validates the token as a JWT.

    Every rejection is reported to Sentry and raised as HTTP 403.
    """

    def __init__(self, auto_error: bool = True):
        # auto_error=True lets FastAPI raise automatically on a missing header
        super(JWTBearer, self).__init__(auto_error=auto_error)

    @staticmethod
    def _reject(detail: str) -> HTTPException:
        """Report *detail* to Sentry and build the 403 error to raise."""
        capture_message(detail)
        return HTTPException(status_code=403, detail=detail)

    async def __call__(self, request: Request):
        credentials: HTTPAuthorizationCredentials | None = await super(
            JWTBearer, self
        ).__call__(request)
        if not credentials:
            raise self._reject("Invalid authorization code.")
        if not credentials.scheme == "Bearer":
            raise self._reject("Invalid authentication scheme.")
        if not verify_jwt(credentials.credentials):
            raise self._reject("Invalid token or expired token.")
        return credentials.credentials
| LosAltosHacks/api | app/auth/auth_bearer.py | auth_bearer.py | py | 1,344 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "fastapi.security.HTTPBearer",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "fastapi.Request",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "fastapi.security.HTTPAuthorizationCredentials",
"line_number": 13,
"usage_type": "name"
},
... |
71894780585 | from django.shortcuts import render, redirect
from .models import Story, TagsModel
from .forms import StoryForm, ProductFilter
from django.http import HttpResponse
from django.views.generic import DetailView, UpdateView, DeleteView
def product_list(request):
    """Render the index page with a filterable queryset of all stories."""
    story_filter = ProductFilter(request.GET, queryset=Story.objects.all())
    context = {'filter': story_filter}
    return render(request, 'main/index.html', context)
def index(request):
    """Main page: list all stories, optionally filtered by POSTed category.

    filter_by == 1 selects animal tales; any other value selects magic
    tales.  A POST without filter_by (previously an int(None) crash) now
    simply shows everything.  Dead locals (answer, kateg) were removed.
    """
    tasks = Story.objects.all()
    choices = TagsModel.objects.all()
    if request.method == 'POST':
        raw_choice = request.POST.get('filter_by')
        if raw_choice is not None:
            if int(raw_choice) == 1:
                tasks = Story.objects.filter(classif = 'Сказки о животных')
            else:
                tasks = Story.objects.filter(classif = 'Волшебные сказки')
    context = {
        'title': 'Главная страница сайта',
        'tasks': tasks,
        'choices': choices,
        'count': tasks.count(),
    }
    return render(request, 'main/index.html', context)
def about(request):
    # Static "about" page.
    return render(request, 'main/about.html')
def create(request):
    """Create a new Story; redirect home on success.

    On an invalid POST the BOUND form is re-rendered (previously it was
    unconditionally replaced by a fresh one, discarding field errors),
    and the error message is now actually passed to the template.
    """
    error = ''
    if request.method == 'POST':
        form = StoryForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('home')
        error = "Форма была неверной"
    else:
        form = StoryForm()
    context = {
        'form': form,
        'error': error,
    }
    return render(request, 'main/create.html', context)
def story(request):
    """List the magic-tale stories."""
    magic_tales = Story.objects.filter(classif = 'Волшебные сказки')
    return render(request, 'main/story.html', {'tasks': magic_tales})
def story_2(request):
    """List the animal-tale stories."""
    animal_tales = Story.objects.filter(classif='Сказки о животных')
    return render(request, 'main/story_2.html', {'tasks': animal_tales})
from django.shortcuts import render
from django.http import HttpResponseRedirect
from .forms import ResumeForm
# Создайте здесь представления.
def upload_resume(request):
    """Handle the resume upload form (POST data + uploaded file).

    The error message is now passed to the template (it was previously
    computed but never rendered).
    """
    error = ''
    if request.method == 'POST':
        form = ResumeForm(request.POST, request.FILES)
        if form.is_valid():
            form.save()
            return redirect('/resume/')
        error = "Форма была неверной"
    else:
        form = ResumeForm()
    return render(request, 'files/resume.html', {'form': form, 'error': error})
from .classificator import MainClassify as MC
from .forms import NewForm, TextsFormSecond
def classify(request):
    """Classify a submitted news text with the MainClassify model.

    On POST the bound TextsFormSecond is kept so the submitted text and
    its validation errors survive the round-trip (previously it was
    unconditionally replaced by a fresh NewForm); the unused secondForm
    local was removed.
    """
    error = ''
    classText = ''
    if request.method == 'POST':
        form = TextsFormSecond(request.POST)
        if form.is_valid():
            text = form.cleaned_data.get("text_news")
            classText = MC.choose_class(text)
        else:
            error = 'Попробуйте ввести другие данные'
    else:
        form = NewForm()
    context = {
        'form': form,
        'error': error,
        'predict': classText
    }
    return render(request, 'main/classify.html', context)
def my_test_500_view(request):
    # Return an "Internal Server Error" 500 response code.
    # Used to preview the custom 500 error page in development.
    return HttpResponse(status=500)
def showAll(request):
    """Show all stories by delegating to index().

    The previous version ran an extra Story query whose results were
    discarded before delegating — dropped as dead work.
    """
    return index(request)
| youngnastyas/appProject | classifier/main/views.py | views.py | py | 3,724 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "forms.ProductFilter",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "models.Story.objects.all",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "models.Story.objects",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "m... |
2769385558 | from discord.ext import commands
from lib.mysqlwrapper import mysql
from typing import Optional
import discord
import lib.embedder
import logging
class Checklist(commands.Cog):
def __init__(self, client):
self.client = client
# Set up the logger
self.logger = logging.getLogger(__name__)
self.logger.addHandler(logging.NullHandler())
self.logger.info("Loading checklist cog")
    def cog_unload(self):
        # Called by discord.py when the cog is removed; log for traceability.
        self.logger.info("Unloading checklist cog")
    @commands.group(
        name = "shiny",
        aliases = ["s"],
        brief = "Shiny checklist system",
        description = "Cherubi Bot - Shiny Checklist System",
        usage = "<add | remove | list>"
    )
    async def shiny_group(self, ctx):
        # Group entry point: with no subcommand, show the group's help text.
        if ctx.invoked_subcommand is None:
            await ctx.send_help(str(ctx.command))
    @shiny_group.command(
        name = "add",
        aliases = ["a"],
        brief = "Adds a shiny Pokémon to your list",
        description = "Cherubi Bot - Shiny Checklist System (Add)",
        usage = "<name or dex #> [number]",
        help = "You can give either the name or the dex number of the Pokémon to add it to your list.\n\nYou also can give an amount, if you don't it'll add a single one."
    )
    async def add_subcommand(self, ctx, pokemon, count = 1):
        # Check that the user has their home guild set. If not, then set it.
        # (Only checked when invoked from a guild, not from a DM.)
        if not isinstance(ctx.channel, discord.DMChannel):
            db = mysql()
            query = """
                SELECT
                    user_id,
                    home_guild
                FROM user_preferences
                WHERE user_id = %s;
            """
            results = db.query(query, [ctx.author.id])
            db.close()

            # If nothing was returned, then invoke the sethome command
            if not results or not results[0]['home_guild']:
                await ctx.invoke(self.client.get_command("sethome"))

        # Just a couple of sanity checks, since I know someone will test this at some point
        if count == 0:
            await ctx.send(embed = lib.embedder.make_embed(
                type = "error",
                title = "Shiny Checklist",
                content = "You can't add 0 of something.",
            ))
            return
        elif count < 0:
            await ctx.send(embed = lib.embedder.make_embed(
                type = "error",
                title = "Shiny Checklist",
                content = "There is no such thing as negative Pokémon. At least... not yet.",
            ))
            return

        # Grab the list of Pokemon for the given name or dex number
        pokemon_data = self.get_pokemon_data(pokemon)

        # If no results are returned, tell the user that that Pokemon doesn't
        # exist.
        # If there is more than one returned value... right now it's a WIP and needs to be dealt with
        # If there is only one response returned than work for it
        if not pokemon_data:
            await ctx.send(embed = lib.embedder.make_embed(
                type = "warning",
                title = "Shiny Checklist",
                content = f"Pokémon `{pokemon}` doesn't exist",
            ))
            return
        elif len(pokemon_data) > 1: # WIP Not implemented right now
            await ctx.send(embed = lib.embedder.make_embed(
                type = "warning",
                title = "Shiny Checklist",
                content = "Pokémon with multiple forms, costumes, or variants aren't supported right now.",
            ))
            return
        else:
            # Upsert: an existing row for this (user, dex, type, isotope)
            # just has its count incremented.
            db = mysql()
            query = """
                INSERT INTO user_shinies (user_id, dex, type, isotope, count)
                VALUES (%s, %s, %s, %s, %s)
                ON DUPLICATE KEY UPDATE count = count + VALUES(count);
            """
            db.execute(query, [
                ctx.message.author.id,
                pokemon_data[0]['dex'],
                pokemon_data[0]['type'],
                pokemon_data[0]['isotope'],
                count,
            ])
            db.close()

        # Tell the user that they added the Pokemon successfully to their list
        await ctx.send(embed = lib.embedder.make_embed(
            type = "success",
            title = "Shiny Checklist",
            content = f"Added {'a' if count == 1 else count} shiny {pokemon_data[0]['name']} to your list",
            thumbnail = self.generate_image_link(pokemon_data[0], shiny = True)
        ))
    @shiny_group.command(
        name = "remove",
        aliases = ["delete", "r", "d"],
        brief = "Removes a shiny Pokémon from your list",
        description = "Cherubi Bot - Shiny Checklist System (Remove)",
        usage = "<name or dex #> [number]",
        help = "You can give either the name or the dex number of the Pokémon to remove from your list.\n\nYou also can give an amount, if you don't it'll remove a single one."
    )
    async def remove_subcommand(self, ctx, pokemon, count = 1):
        """Remove `count` shiny entries of a Pokémon from the caller's checklist.

        `pokemon` may be a localized name or a dex number; `count` defaults
        to 1 and a negative amount is treated as its absolute value.  The
        removal is clamped so the stored count never goes below zero.
        """
        # Just a couple of sanity checks, since I know someone will test this at some point
        if count == 0:
            await ctx.send(embed = lib.embedder.make_embed(
                type = "error",
                title = "Shiny Checklist",
                content = "You can't remove 0 of something.",
            ))
            return
        elif count < 0:
            # Removing a negative amount means removing the absolute value.
            count = count * -1

        # Resolve the name / dex number to Pokémon rows; bail out on zero or
        # multiple matches (forms/costumes/variants are not supported yet).
        pokemon_data = self.get_pokemon_data(pokemon)
        if not pokemon_data:
            await ctx.send(embed = lib.embedder.make_embed(
                type = "warning",
                title = "Shiny Checklist",
                content = f"Pokémon `{pokemon}` doesn't exist",
            ))
            return
        elif len(pokemon_data) > 1: # WIP Not implemented right now
            await ctx.send(embed = lib.embedder.make_embed(
                type = "warning",
                title = "Shiny Checklist",
                content = "Pokémon with multiple forms, costumes, or variants aren't supported right now.",
            ))
            return
        else:
            db = mysql()
            query = """
                SELECT count
                FROM user_shinies
                WHERE
                    user_id = %s
                    AND dex = %s
                    AND type = %s
                    AND isotope = %s;
            """
            db.execute(query, [
                ctx.message.author.id,
                pokemon_data[0]['dex'],
                pokemon_data[0]['type'],
                pokemon_data[0]['isotope'],
            ])

            # Check if the user has any of the Pokemon in their list. If they
            # don't, tell them, close the DB and then return.
            #
            # Also check if they have the amount they want to remove, if not,
            # set it to what they have
            result = db.fetchone()
            if (not result) or (result['count'] == 0):
                db.close()
                await ctx.send(embed = lib.embedder.make_embed(
                    type = "warning",
                    title = "Shiny Checklist",
                    content = f"You don't have any shiny {pokemon_data[0]['name']} in your list to remove",
                    thumbnail = self.generate_image_link(pokemon_data[0], shiny = True)
                ))
                return
            elif result['count'] < count:
                # Clamp: never remove more than the user actually has.
                count = result['count']

            # If they do however, update the count
            query = """
                UPDATE user_shinies
                SET count = count - %s
                WHERE
                    user_id = %s
                    AND dex = %s
                    AND type = %s
                    AND isotope = %s;
            """
            db.execute(query, [
                count,
                ctx.message.author.id,
                pokemon_data[0]['dex'],
                pokemon_data[0]['type'],
                pokemon_data[0]['isotope'],
            ])
            db.close()

            await ctx.send(embed = lib.embedder.make_embed(
                type = "success",
                title = "Shiny Checklist",
                content = f"Removed {count} shiny {pokemon_data[0]['name']} from your list",
                thumbnail = self.generate_image_link(pokemon_data[0], shiny = True)
            ))
@shiny_group.command(
name="list",
aliases=["l"],
brief="Lists the shiny Pokémon that you have",
description="Cherubi Bot - Shiny Checklist System (List)",
help="This lists off all of the shiny Pokémon in your collection."
)
@commands.cooldown(1, 10, commands.BucketType.user)
async def list_subcommand(self, ctx, target: Optional[discord.Member]):
# If no target is given, use the user who wrote the command
target = target or ctx.author
db = mysql()
query = """
SELECT
name.english AS name,
user_shiny.dex AS dex,
user_shiny.type AS type,
user_shiny.isotope AS isotope,
user_shiny.count AS count
FROM user_shinies user_shiny
LEFT JOIN pokemon_names name on name.dex = user_shiny.dex
WHERE
user_shiny.user_id = %s
AND user_shiny.count > 0
ORDER BY name.english;
"""
results = db.query(query, [target.id])
db.close()
# If the user doesn't have any shiny Pokemon in their list, tell them
# that
if not results:
await ctx.send(embed=lib.embedder.make_embed(
type="error",
title="Shiny Checklist",
content=f"Unfortunately {target.display_name} doesn't have \
any Pokémon in your shiny list...",
))
else:
total_count = 0
output = ""
for result in results:
output += f"{result['name']}: {result['count']}\n"
total_count += result['count']
fields = [
("Total:", total_count, False),
]
await ctx.send(embed=lib.embedder.make_embed(
type="info",
title=f"{target.display_name}'s Shiny List:",
content=output,
fields=fields,
))
def get_pokemon_data(self, input):
db = mysql()
query = """
SELECT
pkmn.dex AS 'dex',
name.english AS 'name',
pkmn.type AS 'type',
pkmn.isotope AS 'isotope',
pkmn.filename AS 'filename',
pkmn.shiny AS 'shiny'
FROM pokemon pkmn
LEFT JOIN pokemon_names name on name.dex = pkmn.dex
WHERE (
pkmn.dex = %s OR
name.chinese = %s OR
name.english = %s OR
name.french = %s OR
name.german = %s OR
name.italian = %s OR
name.japanese = %s OR
name.korean = %s OR
name.portuguese = %s OR
name.spanish = %s OR
name.thai = %s
);
"""
db.execute(query, [input, input, input, input, input, input, input, input, input, input, input])
results = db.fetchall()
db.close()
return results
def generate_image_link(self, result, shiny = False):
# Base url for the repo, plus an image cacher link, if we are using it
base_url = "https://raw.githubusercontent.com/PokeMiners/pogo_assets/master/Images/Pokemon/"
url = ""
url += base_url
url += "pokemon_icon_"
# Checks if a unique file name exists for the Pokemon
if result['filename'] == None: # If no specific file name is given
# Give it some leading zeroes
dex = str(result['dex']).zfill(3)
# base_url + pokemon_icon_{dex}{type}{isotope or ''}_shiny.png
url += f"{dex}_{result['type']}"
# If there's an isotope value, add it
if result['isotope']:
url += f"_{result['isotope']}"
else:
# base_url + pokemon_icon_{fn}_shiny.png
url += result['filename']
# If it's shiny, add in that little bit
if shiny:
url += "_shiny"
# Finally, add in the file extension
url += ".png"
return url
def setup(client):
    # Standard discord.py extension entry point: register this file's cog.
    client.add_cog(Checklist(client))
| guitaristtom/Cherubi | bot/cogs/checklist.py | checklist.py | py | 12,761 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "discord.ext.commands.Cog",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "lo... |
5580225642 | import logging
import os
import math
import logging
import sys
import glob
from natsort import natsorted
import torchvision
import numpy as np
import cv2
from PIL import Image
import torch
from torch.utils.data import Dataset
from torchvision import transforms
from torch.nn import functional as F
from PIL import ImageFile
# Let PIL load images with truncated data instead of raising an IOError.
ImageFile.LOAD_TRUNCATED_IMAGES = True

# Log line format: "[LEVEL: file: line]: message", emitted to stdout.
FORMAT = '[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout)
logger = logging.getLogger(__name__)
def cv2pil(image):
    """Convert an OpenCV ndarray (grayscale, BGR or BGRA) into a PIL Image."""
    converted = image.copy()
    if converted.ndim != 2:
        channels = converted.shape[2]
        if channels == 3:
            converted = cv2.cvtColor(converted, cv2.COLOR_BGR2RGB)
        elif channels == 4:
            converted = cv2.cvtColor(converted, cv2.COLOR_BGRA2RGBA)
    return Image.fromarray(converted)
def datadir_to_videodata(data_dir):
    """Map an annotation directory path to the matching video file path."""
    video_dir = data_dir.replace('annotations', 'videos')
    return '{}/video.mov'.format(video_dir)
def save_obs_img(movie, img_num, img_list, img_dir):
    """Save the frames listed in `img_num` from `movie` as PNGs in `img_dir`.

    Files are named frame_num_0000.png, frame_num_0001.png, ... in the order
    the selected frames appear in the video.
    NOTE(review): `img_list` is accepted but never used here — confirm it can
    be dropped at the call sites.
    """
    Fs = int(movie.get(cv2.CAP_PROP_FRAME_COUNT))  # total number of frames
    frame_number = 0
    for i in range(Fs - 1):
        flag, frame = movie.read()
        if i in img_num:
            image = cv2pil(frame)
            image.save('{}/frame_num_'.format(img_dir)+'{0:04d}'.format(frame_number)+'.png', quality=95)
            frame_number += 1
def save_obs_crop_img(movie, img_num, img_list, img_dir, seq_list, img_size, obs_len, norm):
    """Save one `img_size` x `img_size` crop per pedestrian, centred on each
    pedestrian's position at the last observed time step.

    `seq_list[frame_number]` holds the (num_peds, 2, seq_len) normalized
    trajectories for the sequence whose reference frame is being processed;
    coordinates are de-normalized with the frame size divided by `norm`.
    Files are named frame_num_000000.png, ... with a running per-pedestrian
    index across all sequences.
    NOTE(review): `img_list` is accepted but never used here.
    """
    Fs = int(movie.get(cv2.CAP_PROP_FRAME_COUNT))  # total number of frames
    frame_number = 0
    per_sum = 0  # running count of crops written so far (file-name offset)
    for i in range(Fs - 1):
        flag, frame = movie.read()
        if i in img_num:
            image = cv2pil(frame)
            # Scale factors to map normalized coords back to pixels.
            w = image.size[0]/norm
            h = image.size[1]/norm
            k = 0
            for person in seq_list[frame_number]:
                # Position at the last observed time step.
                pos = person[:, obs_len-1]
                x_1 = pos[0].item()*w-(img_size//2)
                y_1 = pos[1].item()*h-(img_size//2)
                x_2 = pos[0].item()*w+(img_size//2)
                y_2 = pos[1].item()*h+(img_size//2)
                img_crop = image.crop((x_1, y_1, x_2, y_2))
                img_crop.save('{}/frame_num_'.format(img_dir)+'{0:06d}'.format(per_sum+k)+'.png', quality=95)
                k += 1
            frame_number += 1
            per_sum += k
def seq_collate(data):
    """Collate trajectory samples into time-major tensors for the model.

    Each element of `data` is a 9-tuple of per-sequence tensors plus the
    scene image; tensors are stacked along the pedestrian axis and the
    trajectory tensors are permuted from (batch, input_size, seq_len) to
    the LSTM layout (seq_len, batch, input_size).  Images are passed
    through as a plain tuple.
    """
    (obs_seq_list, pred_seq_list, obs_seq_rel_list, pred_seq_rel_list,
     non_linear_ped_list, loss_mask_list, sa_data, pa_data, img) = zip(*data)

    def time_major(seqs):
        # (batch, input_size, seq_len) -> (seq_len, batch, input_size)
        return torch.cat(seqs, dim=0).permute(2, 0, 1)

    # (start, end) pedestrian index ranges of each sequence in the batch.
    ped_counts = [len(seq) for seq in obs_seq_list]
    boundaries = [0] + np.cumsum(ped_counts).tolist()
    seq_start_end = torch.LongTensor(
        [[begin, end] for begin, end in zip(boundaries, boundaries[1:])])

    return (
        time_major(obs_seq_list),
        time_major(pred_seq_list),
        time_major(obs_seq_rel_list),
        time_major(pred_seq_rel_list),
        torch.cat(non_linear_ped_list),
        torch.cat(loss_mask_list, dim=0),
        seq_start_end,
        torch.cat(sa_data, dim=0),
        torch.cat(pa_data, dim=0),
        img,
    )
def test_seq_collate(data):
    """Test-time collate: same tensor packing as `seq_collate`, plus the raw
    per-sequence test images passed through untouched at the end."""
    (obs_seq_list, pred_seq_list, obs_seq_rel_list, pred_seq_rel_list, non_linear_ped_list, loss_mask_list, sa_data, pa_data, img, test_img) = zip(*data)
    _len = [len(seq) for seq in obs_seq_list]
    cum_start_idx = [0] + np.cumsum(_len).tolist()
    # (start, end) pedestrian index ranges for each sequence in the batch.
    seq_start_end = [[start, end] for start, end in zip(cum_start_idx, cum_start_idx[1:])]

    # Data format: batch, input_size, seq_len
    # LSTM input format: seq_len, batch, input_size
    obs_traj = torch.cat(obs_seq_list, dim=0).permute(2, 0, 1)
    pred_traj = torch.cat(pred_seq_list, dim=0).permute(2, 0, 1)
    obs_traj_rel = torch.cat(obs_seq_rel_list, dim=0).permute(2, 0, 1)
    pred_traj_rel = torch.cat(pred_seq_rel_list, dim=0).permute(2, 0, 1)
    non_linear_ped = torch.cat(non_linear_ped_list)
    loss_mask = torch.cat(loss_mask_list, dim=0)
    seq_start_end = torch.LongTensor(seq_start_end)
    sa_data = torch.cat(sa_data, dim=0)
    pa_data = torch.cat(pa_data, dim=0)
    # img = torch.cat(img, dim=0)
    out = [
        obs_traj, pred_traj, obs_traj_rel, pred_traj_rel, non_linear_ped,
        loss_mask, seq_start_end, sa_data, pa_data, img, test_img
    ]

    return tuple(out)
def ph_seq_collate(data):
    """Collate variant for models whose physical attention consumes image
    tensors: identical to `seq_collate` except the per-sample images are
    concatenated into one tensor instead of passed through as a tuple."""
    (obs_seq_list, pred_seq_list, obs_seq_rel_list, pred_seq_rel_list, non_linear_ped_list, loss_mask_list, sa_data, pa_data, img) = zip(*data)
    _len = [len(seq) for seq in obs_seq_list]
    cum_start_idx = [0] + np.cumsum(_len).tolist()
    # (start, end) pedestrian index ranges for each sequence in the batch.
    seq_start_end = [[start, end] for start, end in zip(cum_start_idx, cum_start_idx[1:])]

    # Data format: batch, input_size, seq_len
    # LSTM input format: seq_len, batch, input_size
    obs_traj = torch.cat(obs_seq_list, dim=0).permute(2, 0, 1)
    pred_traj = torch.cat(pred_seq_list, dim=0).permute(2, 0, 1)
    obs_traj_rel = torch.cat(obs_seq_rel_list, dim=0).permute(2, 0, 1)
    pred_traj_rel = torch.cat(pred_seq_rel_list, dim=0).permute(2, 0, 1)
    non_linear_ped = torch.cat(non_linear_ped_list)
    sa_data = torch.cat(sa_data, dim=0)
    pa_data = torch.cat(pa_data, dim=0)
    loss_mask = torch.cat(loss_mask_list, dim=0)
    seq_start_end = torch.LongTensor(seq_start_end)
    img = torch.cat(img, dim=0)
    out = [
        obs_traj, pred_traj, obs_traj_rel, pred_traj_rel, non_linear_ped,
        loss_mask, seq_start_end, sa_data, pa_data, img
    ]

    return tuple(out)
def ph_test_seq_collate(data):
    """Test-time version of `ph_seq_collate`: images are concatenated into a
    tensor and the raw test images are additionally passed through."""
    (obs_seq_list, pred_seq_list, obs_seq_rel_list, pred_seq_rel_list, non_linear_ped_list, loss_mask_list, sa_data, pa_data, img, test_img) = zip(*data)
    _len = [len(seq) for seq in obs_seq_list]
    cum_start_idx = [0] + np.cumsum(_len).tolist()
    # (start, end) pedestrian index ranges for each sequence in the batch.
    seq_start_end = [[start, end] for start, end in zip(cum_start_idx, cum_start_idx[1:])]

    # Data format: batch, input_size, seq_len
    # LSTM input format: seq_len, batch, input_size
    obs_traj = torch.cat(obs_seq_list, dim=0).permute(2, 0, 1)
    pred_traj = torch.cat(pred_seq_list, dim=0).permute(2, 0, 1)
    obs_traj_rel = torch.cat(obs_seq_rel_list, dim=0).permute(2, 0, 1)
    pred_traj_rel = torch.cat(pred_seq_rel_list, dim=0).permute(2, 0, 1)
    non_linear_ped = torch.cat(non_linear_ped_list)
    loss_mask = torch.cat(loss_mask_list, dim=0)
    seq_start_end = torch.LongTensor(seq_start_end)
    sa_data = torch.cat(sa_data, dim=0)
    pa_data = torch.cat(pa_data, dim=0)
    img = torch.cat(img, dim=0)
    out = [
        obs_traj, pred_traj, obs_traj_rel, pred_traj_rel, non_linear_ped,
        loss_mask, seq_start_end, sa_data, pa_data, img, test_img
    ]

    return tuple(out)
def sophie_seq_collate(data):
    """Collate variant for SOPHIE-style inputs: the observed trajectories
    carry an extra neighbour axis, so they are permuted from
    (batch, n_max, input_size, seq_len) to (seq_len, batch, n_max,
    input_size); predicted trajectories stay 3-D as in `seq_collate`."""
    (obs_seq_list, pred_seq_list, obs_seq_rel_list, pred_seq_rel_list, non_linear_ped_list, loss_mask_list, sa_data, pa_data, img) = zip(*data)
    _len = [len(seq) for seq in obs_seq_list]
    cum_start_idx = [0] + np.cumsum(_len).tolist()
    # (start, end) pedestrian index ranges for each sequence in the batch.
    seq_start_end = [[start, end] for start, end in zip(cum_start_idx, cum_start_idx[1:])]

    # Data format: batch, input_size, seq_len
    # LSTM input format: seq_len, batch, input_size
    obs_traj = torch.cat(obs_seq_list, dim=0).permute(3, 0, 1, 2)
    pred_traj = torch.cat(pred_seq_list, dim=0).permute(2, 0, 1)
    obs_traj_rel = torch.cat(obs_seq_rel_list, dim=0).permute(3, 0, 1, 2)
    pred_traj_rel = torch.cat(pred_seq_rel_list, dim=0).permute(2, 0, 1)
    non_linear_ped = torch.cat(non_linear_ped_list)
    sa_data = torch.cat(sa_data, dim=0)
    pa_data = torch.cat(pa_data, dim=0)
    loss_mask = torch.cat(loss_mask_list, dim=0)
    seq_start_end = torch.LongTensor(seq_start_end)
    # img = torch.cat(img, dim=0)
    out = [
        obs_traj, pred_traj, obs_traj_rel, pred_traj_rel, non_linear_ped,
        loss_mask, seq_start_end, sa_data, pa_data, img
    ]

    return tuple(out)
def sophie_test_seq_collate(data):
    """Test-time version of `sophie_seq_collate`: same 4-D handling of the
    observed trajectories, plus the raw test images passed through."""
    (obs_seq_list, pred_seq_list, obs_seq_rel_list, pred_seq_rel_list, non_linear_ped_list, loss_mask_list, sa_data, pa_data, img, test_img) = zip(*data)
    _len = [len(seq) for seq in obs_seq_list]
    cum_start_idx = [0] + np.cumsum(_len).tolist()
    # (start, end) pedestrian index ranges for each sequence in the batch.
    seq_start_end = [[start, end] for start, end in zip(cum_start_idx, cum_start_idx[1:])]

    # Data format: batch, input_size, seq_len
    # LSTM input format: seq_len, batch, input_size
    obs_traj = torch.cat(obs_seq_list, dim=0).permute(3, 0, 1, 2)
    pred_traj = torch.cat(pred_seq_list, dim=0).permute(2, 0, 1)
    obs_traj_rel = torch.cat(obs_seq_rel_list, dim=0).permute(3, 0, 1, 2)
    pred_traj_rel = torch.cat(pred_seq_rel_list, dim=0).permute(2, 0, 1)
    non_linear_ped = torch.cat(non_linear_ped_list)
    loss_mask = torch.cat(loss_mask_list, dim=0)
    seq_start_end = torch.LongTensor(seq_start_end)
    sa_data = torch.cat(sa_data, dim=0)
    pa_data = torch.cat(pa_data, dim=0)
    # img = torch.cat(img, dim=0)
    out = [
        obs_traj, pred_traj, obs_traj_rel, pred_traj_rel, non_linear_ped,
        loss_mask, seq_start_end, sa_data, pa_data, img, test_img
    ]

    return tuple(out)
def read_file(_path, img_path, norm=0, skip=1, delim='\t'):
    """Parse a Stanford-drone-style annotation file into (frame, ped, x, y) rows.

    Columns per line (see TrajectoryDataset docstring): <ped_id> <x_min>
    <y_min> <x_max> <y_max> <frame_id> <lost> <occluded> <generated> <label>.
    Only rows whose `lost` flag is '0' are kept, frames are subsampled by
    `skip`, and the bounding-box centre is divided by (image size / norm)
    when `norm` is non-zero.

    Returns:
        (data, cord2imcord): `data` is an (N, 4) array of
        [frame_idx, ped_id, cx, cy]; `cord2imcord` is currently a constant
        [1., 1.] scale factor.
    """
    data = []
    img = Image.open(img_path)
    cord2imcord = np.array([1.,1.])
    if norm != 0:
        # print(max(img.size[0], img.size[1]))
        w = img.size[0]/norm
        h = img.size[1]/norm
        # w = k
        # h = k
    if delim == 'tab':
        delim = '\t'
    elif delim == 'space':
        delim = ' '
    with open(_path, 'r') as f:
        for line in f:
            line = line.strip().split(delim)
            if str(line[6]) == '0':  # keep only annotations that are not "lost"
                # Drop lost/occluded/generated/label; cast the rest to float.
                line = [float(i) for i in line[:-4]]
                if line[5] % skip == 0:
                    if norm != 0:
                        # [subsampled frame idx, ped_id, normalized bbox centre]
                        line = [line[5]//skip, line[0], (line[3]+line[1])/2/w, (line[4]+line[2])/2/h]
                        # NOTE(review): this re-divides already-normalized
                        # values by 2/w and 2/h — looks like leftover debug
                        # code rather than a meaningful bounds check; confirm.
                        if (line[2])/2/w > norm or (line[3])/2/h > norm:
                            print('error')
                    else:
                        line = [line[5]//skip, line[0], (line[3]+line[1])/2, (line[4]+line[2])/2]
                    data.append(line)
    return np.asarray(data), cord2imcord
def poly_fit(traj, traj_len, threshold):
    """
    Input:
    - traj: Numpy array of shape (2, traj_len)
    - traj_len: Len of trajectory
    - threshold: Minimum error to be considered for non linear traj
    Output:
    - int: 1 -> Non Linear 0-> Linear
    """
    timesteps = np.linspace(0, traj_len - 1, traj_len)
    # Residual of a degree-2 least-squares fit, summed over the x and y axes.
    residual = sum(
        np.polyfit(timesteps, traj[axis, -traj_len:], 2, full=True)[1]
        for axis in (0, 1)
    )
    return 1.0 if residual >= threshold else 0.0
class TrajectoryDataset(Dataset):
    """Dataloder for the Trajectory datasets"""
    def __init__(
        self, args, data_dir, obs_len=8, pred_len=12, center_crop=False, crop_img_size=512, skip=1, skip_frame=2, threshold=0.002,
        min_ped=1, max_ped=1000, delim='\t', norm=1, large_image=False, remake_data=False, test=False, check_so_at=False
    ):
        """
        Args:
        - data_dir: List of dataset paths. [path1, path2, ...]
            <frame_id> <ped_id> <x> <y>
            **stanford dataset : <ped_id> <x_min> <y_min> <x_max> <y_max> <frame_id> <lost> <occluded> <generate> <label>
                lost: If 1, the annotation is outside of the view screen.
                occluded: If 1, the annotation is occluded.
                generated: If 1, the annotation was automatically interpolated.
                label: The label for this annotation, enclosed in quotation marks.
        - obs_len: Number of time-steps in input trajectories
        - pred_len: Number of time-steps in output trajectories
        - center_crop
        - skip: Number of frames to skip while making the dataset
        - threshold: Minimum error to be considered for non linear traj
        - min_ped: Minimum number of pedestrians that should be in a seqeunce
        - max_ped
        - delim: Delimiter in the dataset files
        - norm
        - remake_data
        - test
        """
        super(TrajectoryDataset, self).__init__()

        self.obs_len = obs_len
        self.pred_len = pred_len
        self.skip = skip
        self.seq_len = self.obs_len + self.pred_len
        self.delim = delim
        self.norm = norm
        self.center_crop = center_crop
        self.test = test
        self.check_so_at = check_so_at
        self.ph_type = args.physical_attention_type
        self.large_image = large_image
        # Reference images are resized to a fixed square before ToTensor.
        if self.large_image:
            self.large_image_size = 640
            self.transform = transforms.Compose(
                [transforms.Resize((self.large_image_size, self.large_image_size)),
                 transforms.ToTensor(),
                 ])
        else:
            size = 224
            self.transform = transforms.Compose(
                [transforms.Resize((size,size)),
                 transforms.ToTensor(),
                 # transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
                 ])
        # Accumulators filled per-scene, then concatenated after the loop.
        num_peds_in_seq = []
        seq_list = []
        s_seq_list = []
        seq_list_rel = []
        s_seq_list_rel = []
        loss_mask_list = []
        non_linear_ped = []
        img_list = []
        test_img_list = []
        # Every f_skip-th video frame corresponds to one trajectory step.
        self.f_skip = 12
        first = True
        sa = False  # becomes True when pseudo-social-attention data is loaded
        pa = False  # becomes True when pseudo-physical-attention data is loaded
        for num in range(len(data_dir)):
            self.remake_data = remake_data
            dn = 0  # number of accepted sequences in this scene (for logging)
            self.data_dir = data_dir[num]
            self.img_path = self.data_dir + '/reference.jpg'
            # img = Image.open(self.img_path)
            self.video = cv2.VideoCapture(datadir_to_videodata(self.data_dir))
            img_num = []       # video frame index of each accepted sequence
            pednum_list = []   # pedestrians per accepted sequence
            all_files = os.listdir(self.data_dir)
            all_files = [os.path.join(self.data_dir, 'annotations.txt')]
            for path in all_files:
                data, cord2imcord = read_file(path, self.img_path, self.norm, self.f_skip, self.delim)
                frames = np.unique(data[:, 0]).tolist()
                frame_data = []
                for frame in frames:
                    frame_data.append(data[frame == data[:, 0], :])
                num_sequences = int(math.ceil((len(frames) - self.seq_len + 1) / self.skip))
                # print(len(frame_data))
                # print(num_sequences)
                # Slide a seq_len window over the frames with stride `skip`.
                for idx in range(0, num_sequences * self.skip + 1, self.skip):
                    if len(frame_data[idx:idx + self.seq_len]) < 1:
                        continue
                    # curr_seq_data = np.concatenate(f_data[::self.f_skip], axis=0)
                    curr_seq_data = np.concatenate(frame_data[idx:idx + self.seq_len], axis=0)
                    peds_in_curr_seq = np.unique(curr_seq_data[:, 1])
                    curr_seq_rel = np.zeros((len(peds_in_curr_seq), 2, self.seq_len))
                    curr_seq = np.zeros((len(peds_in_curr_seq), 2, self.seq_len))
                    curr_loss_mask = np.zeros((len(peds_in_curr_seq),self.seq_len))
                    num_peds_considered = 0
                    _non_linear_ped = []
                    for _, ped_id in enumerate(peds_in_curr_seq):
                        curr_ped_seq = curr_seq_data[curr_seq_data[:, 1] == ped_id, :]
                        pad_front = frames.index(curr_ped_seq[0, 0]) - idx
                        pad_end = frames.index(curr_ped_seq[-1, 0]) - idx + 1
                        # print(pad_end,pad_front)
                        # Keep only pedestrians present for the whole window.
                        if pad_end - pad_front != self.seq_len:
                            continue
                        if curr_ped_seq.shape[0] != self.seq_len:
                            continue
                        curr_ped_seq = np.transpose(curr_ped_seq[:, 2:])
                        # print(curr_ped_seq)
                        # Make coordinates relative
                        rel_curr_ped_seq = np.zeros(curr_ped_seq.shape)
                        rel_curr_ped_seq[:, 1:] = curr_ped_seq[:, 1:] - curr_ped_seq[:, :-1]
                        _idx = num_peds_considered
                        # print(pad_front,pad_end)
                        # print(curr_ped_seq.shape)
                        curr_seq[_idx, :, pad_front:pad_end] = curr_ped_seq
                        curr_seq_rel[_idx, :, pad_front:pad_end] = rel_curr_ped_seq
                        # Linear vs Non-Linear Trajectory
                        _non_linear_ped.append(poly_fit(curr_ped_seq, pred_len, threshold))
                        curr_loss_mask[_idx, pad_front:pad_end] = 1
                        num_peds_considered += 1
                        # print(num_peds_considered)
                        if num_peds_considered == max_ped:
                            break
                    if num_peds_considered > min_ped:
                        dn += 1
                        non_linear_ped += _non_linear_ped
                        num_peds_in_seq.append(num_peds_considered)
                        loss_mask_list.append(curr_loss_mask[:num_peds_considered])
                        seq_list.append(curr_seq[:num_peds_considered])
                        # print(seq_list)
                        seq_list_rel.append(curr_seq_rel[:num_peds_considered])
                        # img_list.append(self.transform(img))
                        # Reference frame for this sequence: last observed step.
                        img_num.append((idx+self.obs_len)*self.f_skip)
                        pednum_list.append(num_peds_considered)
                        if args.social_attention_type == 'sophie' or args.physical_attention_type == 'sophie':
                            # NOTE(review): num_peds_in_seq was already
                            # appended above, so sophie mode appends twice —
                            # confirm this is intentional.
                            num_peds_in_seq.append(num_peds_considered)
                            # Expand each pedestrian with its neighbours,
                            # sorted by squared distance to the pedestrian.
                            curr_seq_exp = np.zeros((num_peds_considered, args.n_max, 2, self.seq_len))
                            curr_seq_rel_exp = np.zeros((num_peds_considered, args.n_max, 2, self.seq_len))
                            for i in range(num_peds_considered):
                                curr_seq_exp[i, 0, :, :] = curr_seq[i]
                                curr_seq_exp[i, 1:i+1, :, :] = curr_seq[0:i]
                                curr_seq_exp[i, i+1:num_peds_considered, :, :] = curr_seq[i+1:num_peds_considered]
                                dists = (curr_seq_exp[i, :] - curr_seq_exp[i, 0]) ** 2
                                dists = np.sum(np.sum(dists, axis=2), axis=1)
                                idxs = np.argsort(dists)
                                curr_seq_exp[i, :] = curr_seq_exp[i, :][idxs]
                                curr_seq_rel_exp[i, 0, :, :] = curr_seq_rel[i]
                                curr_seq_rel_exp[i, 1:i+1, :, :] = curr_seq_rel[0:i]
                                curr_seq_rel_exp[i, i+1:num_peds_considered, :, :] = curr_seq_rel[i+1:num_peds_considered]
                                curr_seq_rel_exp[i, :] = curr_seq_rel_exp[i, :][idxs]
                            s_seq_list.append(curr_seq_exp[:num_peds_considered])
                            s_seq_list_rel.append(curr_seq_rel_exp[:num_peds_considered])
            # if self.center_crop:
            #     img_dir = "{}/crop_True_cropsize_{}_skip_{}_obs_{}_pre_{}_minped_{}".format(self.data_dir, crop_img_size, skip, obs_len, pred_len, min_ped)
            # else:
            img_dir = "{}/crop_False_skip_{}_obs_{}_pre_{}_minped_{}_fskip_{}".format(self.data_dir, self.skip, obs_len, pred_len, min_ped, self.f_skip)
            if not os.path.exists(img_dir):
                os.mkdir(img_dir)
                self.remake_data = True
            if self.remake_data:
                logger.info('Remake image data: {}_{}'.format(self.data_dir.split('/')[-2], self.data_dir.split('/')[-1]))
                # if not self.center_crop:
                save_obs_img(self.video, img_num, img_list, img_dir)
                # else:
                #     save_obs_crop_img(self.video, img_num, img_list, img_dir, seq_list, crop_img_size, self.obs_len, norm)
            if self.test or args.physical_attention_dim != 0:
                files = glob.glob(os.path.join(img_dir, '*.png'))
                if self.ph_type == 'prior3' or self.ph_type == 'prior4' or self.ph_type == 'prior5' or self.ph_type == 'prior6' or self.ph_type == 'self_attention':
                    n = 0
                    # path = self.data_dir + '/image_2.csv'
                    # img_save = []
                    # These attention types want one image entry per
                    # pedestrian, so duplicate each sequence image ped_num times.
                    for path in natsorted(files):
                        ped_num = pednum_list[n]
                        img = Image.open(path)
                        img2 = self.transform(img)
                        if self.test:
                            test_img_list.append(img)
                        for _ in range(ped_num):
                            img_list.append(img2)
                        img.load()
                        n += 1
                else:
                    # One transformed image per sequence.
                    for path in natsorted(files):
                        img = Image.open(path)
                        img_list.append(self.transform(img))
                        if self.test:
                            test_img_list.append(img)
                        img.load()
            # if self.test and self.center_crop:
            #     test_img_dir = "{}/crop_False_skip_{}_obs_{}_pre_{}_minped_{}".format(self.data_dir, skip, obs_len, pred_len, min_ped)
            #     test_files = glob.glob(os.path.join(test_img_dir, '*.png'))
            #     for path in natsorted(test_files):
            #         img = Image.open(path)
            #         test_img_list.append(img)
            #         img.load()
            if args.social_attention_dim != 0 and args.social_attention_type =='prior':
                sa = True
                # f_path = self.data_dir + '/pseudo-social_attention_.csv'
                if args.so_ver == 1:
                    f_path = self.data_dir + '/pseudo-social_attention.csv'
                elif args.so_ver == 2:
                    f_path = self.data_dir + '/pseudo-social_attention_gauss.csv'
                else:
                    print("pseudo social attention version ERROR")
                if first:
                    self.sa_data = np.loadtxt(f_path)
                else:
                    self.sa_data = np.concatenate([self.sa_data , np.loadtxt(f_path)])
            if args.physical_attention_dim != 0 and (args.physical_attention_type !='one_head_attention' and args.physical_attention_type !='simple' and args.physical_attention_type !='one_head_attention2' and args.physical_attention_type !='sat' and args.physical_attention_type !="NICG_sat" and args.physical_attention_type != 'sophie') and args.ph_prior_type != 'one_head_attention' and args.ph_prior_type != 'not_prior':
                pa = True
                if args.ph_ver == 1:
                    g_path = self.data_dir + '/pseudo-physical_attention_mask.csv'
                elif args.ph_ver == 2:
                    g_path = self.data_dir + '/pseudo-physical_attention_fan_mask.csv'
                elif args.ph_ver == 3:
                    g_path = self.data_dir + '/pseudo-physical_attention_leaky_mask.csv'
                elif args.ph_ver == 4:
                    g_path = self.data_dir + '/pseudo-physical_attention_gauss1.csv'
                elif args.ph_ver == 5:
                    g_path = self.data_dir + '/pseudo-physical_attention_gauss2.csv'
                elif args.ph_ver == 6:
                    g_path = self.data_dir + '/pseudo-physical_attention_gauss3.csv'
                elif args.ph_ver == 7:
                    g_path = self.data_dir + '/pseudo-physical_attention_gauss4.csv'
                else:
                    print("pseudo physical attention version ERROR")
                if first:
                    self.pa_data = np.loadtxt(g_path)
                else:
                    self.pa_data = np.concatenate([self.pa_data , np.loadtxt(g_path)])
            logger.info('{}/{} is done. {}'.format(self.data_dir.split('/')[-2], self.data_dir.split('/')[-1], dn))
            first = False
        # print(self.a_data.shape)
        if args.social_attention_type == 'sophie' or args.physical_attention_type == 'sophie':
            s_seq_list = np.concatenate(s_seq_list, axis=0)
            s_seq_list_rel = np.concatenate(s_seq_list_rel, axis=0)
        self.num_seq = len(seq_list)
        seq_list = np.concatenate(seq_list, axis=0)
        seq_list_rel = np.concatenate(seq_list_rel, axis=0)
        loss_mask_list = np.concatenate(loss_mask_list, axis=0)
        non_linear_ped = np.asarray(non_linear_ped)

        # Convert numpy -> Torch Tensor
        if args.social_attention_type == 'sophie' or args.physical_attention_type == 'sophie':
            self.obs_traj = torch.from_numpy(s_seq_list[:, :, :, :self.obs_len]).type(torch.float)
            self.obs_traj_rel = torch.from_numpy(s_seq_list_rel[:, :, :, :self.obs_len]).type(torch.float)
        else:
            self.obs_traj = torch.from_numpy(seq_list[:, :, :self.obs_len]).type(torch.float)
            self.obs_traj_rel = torch.from_numpy(seq_list_rel[:, :, :self.obs_len]).type(torch.float)
        self.pred_traj = torch.from_numpy(seq_list[:, :, self.obs_len:]).type(torch.float)
        self.pred_traj_rel = torch.from_numpy(seq_list_rel[:, :, self.obs_len:]).type(torch.float)
        self.loss_mask = torch.from_numpy(loss_mask_list).type(torch.float)
        self.non_linear_ped = torch.from_numpy(non_linear_ped).type(torch.float)
        cum_start_idx = [0] + np.cumsum(num_peds_in_seq).tolist()
        self.seq_start_end = [
            (start, end) for start, end in zip(cum_start_idx, cum_start_idx[1:])
        ]
        # When images are unused, stand-in tensors keep __getitem__ uniform.
        if args.physical_attention_dim != 0:
            self.img_data = torch.stack(img_list, dim=0)
        else:
            self.img_data = self.pred_traj_rel
        if self.test:
            self.test_img_data = test_img_list
        if sa:
            self.sa_data = torch.from_numpy(self.sa_data).type(torch.float)
        else:
            self.sa_data = self.pred_traj_rel
        if pa:
            self.pa_data = torch.from_numpy(self.pa_data).type(torch.float)
        else:
            self.pa_data = self.pred_traj_rel
        print(self.img_data.shape, self.pa_data.shape, self.sa_data.shape)

    def __len__(self):
        """Number of sequences (sliding windows) in the dataset."""
        return self.num_seq

    def __getitem__(self, index):
        """Return one sequence's tensors, sliced by its pedestrian range."""
        start, end = self.seq_start_end[index]
        # print(self.obs_traj.shape)
        if self.check_so_at:
            # Social-attention sanity mode: pick a pedestrian that has a
            # neighbour within radius r (growing r until one is found) and
            # return a fixed triple: [anchor, nearest, farthest].
            pd_num = end - start
            b = True
            # obs_traj: batch, xy, t
            obs_traj = self.obs_traj[start:end, :]
            last_pos = self.obs_traj[start:end, :, -1]
            _obs_traj = torch.zeros(3,2,8)
            _pred_traj = torch.zeros(3,2,12)
            _obs_traj_rel = torch.zeros(3,2,8)
            _pred_traj_rel = torch.zeros(3,2,12)
            _non_linear_ped = torch.zeros(3)
            _loss_mask = torch.zeros(3, 20)
            list_pd = np.arange(pd_num)
            a = 0
            r = 0.005
            while b:
                q = list_pd[a]
                # Squared distances from candidate q to all pedestrians.
                d = (last_pos[:,0] - last_pos[q,0].item())**2 + (last_pos[:,1] - last_pos[q,1].item())**2
                # print(d.shape)
                dl = d.clone()
                dl = torch.sort(dl)
                # print(dl[1])
                # dl[0][1] is the smallest non-self distance.
                if dl[0][1] <= r:
                    b = False
                else:
                    a += 1
                    if a == pd_num:
                        a = 0
                        r += 0.005
            # NOTE(review): `min`/`max` shadow the builtins here; they are the
            # indices of the nearest and farthest pedestrians.
            min = dl[1][1]
            max = dl[1][-1]
            _obs_traj[0] = self.obs_traj[start+a, :].unsqueeze(0)
            _obs_traj[1] = self.obs_traj[start+min, :].unsqueeze(0)
            _obs_traj[2] = self.obs_traj[start+max, :].unsqueeze(0)
            _pred_traj[0] = self.pred_traj[start+a, :].unsqueeze(0)
            _pred_traj[1] = self.pred_traj[start+min, :].unsqueeze(0)
            _pred_traj[2] = self.pred_traj[start+max, :].unsqueeze(0)
            _obs_traj_rel[0] = self.obs_traj_rel[start+a, :].unsqueeze(0)
            _obs_traj_rel[1] = self.obs_traj_rel[start+min, :].unsqueeze(0)
            _obs_traj_rel[2] = self.obs_traj_rel[start+max, :].unsqueeze(0)
            _pred_traj_rel[0] = self.pred_traj_rel[start+a, :].unsqueeze(0)
            _pred_traj_rel[1] = self.pred_traj_rel[start+min, :].unsqueeze(0)
            _pred_traj_rel[2] = self.pred_traj_rel[start+max, :].unsqueeze(0)
            _non_linear_ped[0] = self.non_linear_ped[start+a]
            _non_linear_ped[1] = self.non_linear_ped[start+min]
            _non_linear_ped[2] = self.non_linear_ped[start+max]
            _loss_mask[0] = self.loss_mask[start+a, :].unsqueeze(0)
            _loss_mask[1] = self.loss_mask[start+min, :].unsqueeze(0)
            _loss_mask[2] = self.loss_mask[start+max, :].unsqueeze(0)
            if self.test:
                out = [
                    _obs_traj, _pred_traj, _obs_traj_rel, _pred_traj_rel, _non_linear_ped, _loss_mask,
                    self.sa_data[3*index:3*index+3], self.pa_data[start:start+3, :],
                    self.img_data[index], self.test_img_data[index]
                ]
            else:
                out = [
                    _obs_traj, _pred_traj, _obs_traj_rel, _pred_traj_rel, _non_linear_ped, _loss_mask,
                    self.sa_data[3*index:3*index+3], self.pa_data[start:start+3, :], self.img_data[index]
                ]
        elif self.ph_type == 'prior3' or self.ph_type == 'prior4' or self.ph_type == 'prior5' or self.ph_type == 'prior6' or self.ph_type == 'self_attention':
            # Per-pedestrian image entries: slice img_data by pedestrian range.
            if self.test:
                out = [
                    self.obs_traj[start:end, :], self.pred_traj[start:end, :],
                    self.obs_traj_rel[start:end, :], self.pred_traj_rel[start:end, :],
                    self.non_linear_ped[start:end], self.loss_mask[start:end, :],
                    self.sa_data[start:end, :], self.pa_data[start:end],
                    self.img_data[start:end], self.test_img_data[index]
                ]
            else:
                out = [
                    self.obs_traj[start:end, :], self.pred_traj[start:end, :],
                    self.obs_traj_rel[start:end, :], self.pred_traj_rel[start:end, :],
                    self.non_linear_ped[start:end], self.loss_mask[start:end, :],
                    self.sa_data[start:end, :], self.pa_data[start:end], self.img_data[start:end]
                ]
        else:
            # One image entry per sequence: index img_data by sequence index.
            if self.test:
                out = [
                    self.obs_traj[start:end, :], self.pred_traj[start:end, :],
                    self.obs_traj_rel[start:end, :], self.pred_traj_rel[start:end, :],
                    self.non_linear_ped[start:end], self.loss_mask[start:end, :],
                    self.sa_data[start:end, :], self.pa_data[start:end],
                    self.img_data[index], self.test_img_data[index]
                ]
            else:
                out = [
                    self.obs_traj[start:end, :], self.pred_traj[start:end, :],
                    self.obs_traj_rel[start:end, :], self.pred_traj_rel[start:end, :],
                    self.non_linear_ped[start:end], self.loss_mask[start:end, :],
                    self.sa_data[start:end, :], self.pa_data[start:end], self.img_data[index]
                ]
        return out
{
"api_name": "PIL.ImageFile.LOAD_TRUNCATED_IMAGES",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "PIL.ImageFile",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "logging.basicConfig",
"line_number": 22,
"usage_type": "call"
},
{
"api_na... |
33445056109 | from docutils import nodes
from docutils.parsers.rst import Directive, directives
import requests
class Contributor:
    """A single GitHub contributor and their contribution count."""

    def __init__(self, login, url, contributions=0):
        """Store the contributor's login, profile URL and contribution count.

        The original assigned `self.contributions` twice; the redundant
        first assignment has been removed.
        """
        self.login = login
        self.url = url
        self.contributions = contributions

    def build(self):
        """Render this contributor as a docutils paragraph node:
        a link to the profile followed by the contribution count."""
        node_contributor = nodes.paragraph()
        node_contributor += nodes.reference(text=self.login, refuri=self.url)
        node_contributor += nodes.Text(' - ' + str(self.contributions) + ' ' +
                                       ('contributions' if self.contributions != 1 else 'contribution'))
        return node_contributor
class ContributorsRepository:
    """A sorted, filtered view of a repository's contributors."""

    def __init__(self, contributors, reverse=True, limit=10, exclude=None):
        """Sort contributors by contribution count and apply exclusions.

        Args:
            contributors: Iterable of objects with `login` and
                `contributions` attributes.
            reverse: Sort descending when True (default).
            limit: Keep at most this many contributors.
            exclude: Optional iterable of logins to drop.  The original
                crashed with a TypeError when this was left as None.
        """
        excluded = exclude or []
        self.contributors = sorted(
            (c for c in contributors if c.login not in excluded),
            key=lambda c: c.contributions,
            reverse=reverse)[:limit]

    def build(self):
        """Render the contributors as a docutils bullet list."""
        node_list = nodes.bullet_list()
        for contributor in self.contributors:
            node_contributor = nodes.list_item()
            node_contributor += contributor.build()
            node_list += node_contributor
        return node_list
class ContributorsDirective(Directive):
    """Sphinx directive rendering the contributors of a GitHub repository.

    Usage: ``.. ghcontributors:: owner/repo`` with optional ``:limit:``,
    ``:order:`` (DESC/ASC) and ``:exclude:`` (comma-separated logins).
    """
    has_content = True
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {
        'limit': directives.positive_int,
        'order': directives.unchanged,
        'exclude': directives.unchanged,
    }

    def run(self):
        """Fetch the contributor list from the GitHub API and build the nodes."""
        limit = self.options.get('limit', 10)
        order = self.options.get('order', 'DESC') == 'DESC'
        exclude = self.options.get('exclude', '').split(",")
        # Timeout so a stalled connection cannot hang the whole docs build.
        r = requests.get('https://api.github.com/repos/' + self.arguments[0] + '/contributors?per_page=100',
                         timeout=30)
        # Parse the body once (the original called r.json() three times).
        # On error GitHub returns a dict instead of a list of contributors.
        payload = r.json()
        if isinstance(payload, dict):
            raise ValueError('The repository ' + self.arguments[0] + ' does not exist.')
        contributors = [Contributor(c.get('login'), c.get('html_url'), c.get('contributions'))
                        for c in payload]
        return [ContributorsRepository(contributors, reverse=order, limit=limit, exclude=exclude).build()]
def setup(app):
    """Configure the plugin."""
    # Register the directive under the "ghcontributors" name and report the
    # extension version back to Sphinx.
    app.add_directive('ghcontributors', ContributorsDirective)
    return {'version': '0.1'}
| Kubeinit/kubeinit | docs/src/_exts/ghcontributors.py | ghcontributors.py | py | 2,820 | python | en | code | 208 | github-code | 36 | [
{
"api_name": "docutils.nodes.paragraph",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "docutils.nodes",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "docutils.nodes.reference",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "docu... |
70356004905 | # /home/jay/anaconda3/bin/python
import numpy as np
from sklearn.metrics import log_loss
from scipy.special import expit
# sig = lambda x: 1.0/(1.0+np.exp(-x))
sig = expit  # numerically stable sigmoid from scipy
sig_d = lambda x: sig(x) * (1 - sig(x))  # sigmoid derivative, given the pre-activation x
sig_to_d = lambda x: x * (1 - x)  # sigmoid derivative, given the sigmoid *output* x
# log_loss = lambda y,yhat: np.sum(-(y*np.log(yhat) + (1 - y)*np.log(1-yhat)))
def print_shape(f):
    """Print the list of .shape attributes of every array in *f*."""
    print([item.shape for item in f])
def get_shape(f):
    """Return a string listing the .shape attribute of every array in *f*."""
    return str([item.shape for item in f])
class mlp:
    """A minimal multi-layer perceptron with sigmoid activations on every
    layer, trained by mini-batch gradient descent. Optionally applies
    inverted dropout to hidden layers during training.
    """
    def __init__ (self, layer_sizes):
        '''
        Parameters
        ----------
        layer_sizes : list of integers
            Sizes of the layers in the network: the first number is the
            input layer, the last number is the output layer.
        '''
        # Weight matrix i maps layer i (size x) to layer i+1 (size y);
        # one bias vector per non-input layer. All start as N(0, 1) noise.
        self.weights = [np.random.randn(x, y)
                        for x, y in zip(layer_sizes[:-1], layer_sizes[1:])]
        self.biases = [np.random.randn(x) for x in layer_sizes[1:]]
    def __str__(self):
        """Summarize the network by the shapes of its weights and biases."""
        out = 'weights:' + get_shape(self.weights)
        out += ', biases:' + get_shape(self.biases)
        return out
    def feed_forward(self, X, dropout=False, dropout_percent=0.5):
        '''
        Run a forward pass, caching pre-activations (self.z) and
        activations (self.a) for later use by back-propagation.

        Parameters
        ----------
        X : numpy.array shape(records, features)
        dropout : bool
            when True, randomly zero hidden units (inverted dropout).
        dropout_percent : float
            probability of dropping each hidden unit.

        Returns
        -------
        a : numpy.array
            output of neural network
        '''
        assert X.shape[1] == self.weights[0].shape[0], 'input X is wrong shape'
        a = X
        self.z = []
        self.a = [a]
        l = len(self.weights)
        i = 0
        for w, b in zip(self.weights, self.biases):
            z = np.dot(a, w) + b
            a = sig(z)
            # Dropout is skipped for the first weight layer (i == 0) and the
            # output layer (i == l-1); scaling by 1/(1-p) keeps the expected
            # activation unchanged (inverted dropout).
            if dropout and i > 0 and i != l-1:
                drop_mat = np.random.binomial(
                    [np.ones(z.shape)], 1-dropout_percent)[0] * (
                    1.0/(1-dropout_percent)
                )
                z *= drop_mat
                a *= drop_mat
            self.z.append(z)
            self.a.append(a)
            i += 1
        return a
    def back_propigation(self, X, Y, dropout=False, dropout_percent=0.5):
        """Compute loss gradients w.r.t. weights and biases via backprop.
        (Name kept as-is, including the spelling, for API compatibility.)

        Returns
        -------
        (ddw, ddb) : lists of numpy.array ordered from first to last layer;
        the bias gradients are summed over the batch axis.
        """
        self.feed_forward(X, dropout, dropout_percent)
        ddw = []
        ddb = []
        # Output-layer error; sig_to_d works on the cached activations.
        delta = -(Y - self.a[-1]) * sig_to_d(self.a[-1])
        for i in reversed(range(len(self.weights))):
            ddw.append(np.dot(self.a[i].T, delta))
            ddb.append(delta)
            if i != 0:
                # Propagate the error one layer back.
                delta = np.dot(delta, self.weights[i].T) * sig_to_d(self.a[i])
        return list(reversed(ddw)), [b.sum(axis = 0) for b in reversed(ddb)]
    def gradient_descent(self, X, Y, itterations, batch_size, a = 1e-2,
                         dropout=False, dropout_percent=0.5):
        '''
        Train with mini-batch gradient descent.

        Parameters
        ----------
        X, Y : numpy.array
            training inputs and targets, aligned by row.
        itterations : int
            number of full passes over all batches.
        batch_size : int
            rows per mini-batch.
        a : float
            learning rate.

        Returns
        -------
        None; weights and biases are updated in place.
        '''
        assert batch_size <= X.shape[0]
        assert X.shape[0] == Y.shape[0], 'X and Y different lengths'
        n_batches = X.shape[0] // batch_size
        batches = np.array_split(range(X.shape[0]), n_batches)
        for i in range(itterations):
            for b in batches:
                ddw, ddb = self.back_propigation(X[b], Y[b],
                                                dropout, dropout_percent)
                for j in range(len(self.weights)):
                    self.weights[j] -= ddw[j] * a
                    self.biases[j] -= ddb[j] * a
    def predict(self, X):
        '''
        feed_forward without recording activations or using dropout

        Parameters
        ----------
        X : numpy.array shape(records, features)

        Returns
        -------
        a : numpy.array
            output of neural network
        '''
        assert X.shape[1] == self.weights[0].shape[0], 'input X is wrong shape'
        a = X
        for w, b in zip(self.weights, self.biases):
            z = np.dot(a, w) + b
            a = sig(z)
        return a
    def gradient_checking(self, X, Y):
        '''
        Utility function to check back_propigation: numerically approximate
        the loss gradient by central differences, perturbing one weight at a
        time by +/- epsilon, and return the approximate gradients for
        comparison against the analytic ones.
        '''
        bp = self.back_propigation(X, Y)[0]
        yhat = self.feed_forward(X)
        # print_shape(bp)
        # Shallow copy of the weight list; each perturbation below creates a
        # new array, so the originals stay intact for restoring.
        weights = self.weights[:]
        epsilon = 1e-4
        # epsilon = 1
        grad_approx = [np.zeros(w.shape) for w in self.weights]
        for i in range(len(self.weights)):
            for j, x in np.ndenumerate(self.weights[i]):
                zeros_mat = np.zeros(self.weights[i].shape)
                # theta_plus
                zeros_mat[j] += epsilon
                self.weights[i] = self.weights[i] + zeros_mat
                p1 = np.array_equal(weights[i], self.weights[i])
                theta_plus = self.feed_forward(X)
                self.weights[i] = weights[i][:]
                # theta_minus
                zeros_mat = np.zeros(self.weights[i].shape)
                zeros_mat[j] = epsilon
                self.weights[i] = self.weights[i] - zeros_mat
                p2 = np.array_equal(weights[i], self.weights[i])
                theta_minus = self.feed_forward(X)
                # reset weights
                self.weights[i] = weights[i][:]
                p3 = np.array_equal(weights[i], self.weights[i])
                # Sanity check: perturbed weights must differ from the saved
                # ones (p1, p2 False) and the reset must restore them (p3 True).
                if any([p1, p2, not p3]):
                    print(p1, p2, p3)
                    print(i, j)
                # print((log_loss(Y, theta_plus),
                #     log_loss(Y, theta_minus)), (2 * epsilon))
                grad_approx[i][j] = (log_loss(Y, theta_plus) -
                                     log_loss(Y, theta_minus)) / (2 * epsilon)
        # for i,j in zip(bp, grad_approx):
        #     print(i - j)
        return grad_approx
def main():
    """Train the MLP on a Kaggle-style digits CSV and print a classification
    report on a 30% hold-out split.
    NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20;
    modern versions import train_test_split from sklearn.model_selection.
    """
    from sklearn.datasets import load_digits
    from sklearn.preprocessing import OneHotEncoder
    from sklearn.cross_validation import train_test_split
    from sklearn.metrics import classification_report
    import pandas as pd
    # Hard-coded data location; expects a CSV with a 'label' column followed
    # by pixel columns.
    fp = '/home/jay/projects/mlp/'
    df = pd.read_csv(fp + 'train.csv')
    X, Y = df.drop('label', 1).values, df['label'].values
    # One-hot encode the digit labels for the network's 10 output units.
    onehot = OneHotEncoder(sparse = False)
    Y = onehot.fit_transform(np.atleast_2d(Y).T)
    train_x, test_x, train_y, test_y = train_test_split( X, Y,
        test_size = 0.3)
    test_y = np.argmax(test_y, 1)
    train_y2 = np.argmax(train_y, 1)
    # One hidden layer of 100 units; 10 output classes.
    nn = mlp([X.shape[1], 100, 10])
    # nn = mlp([X.shape[1], 100, 10])
    yhat = np.argmax(nn.feed_forward(train_x), 1)
    # nn.gradient_descent(train_x, train_y, 10, 100, a = 1e-1)
    # nn.gradient_descent(train_x, train_y, 9000, 100, a = 1e-2)
    # nn.gradient_descent(train_x, train_y, 50, 10, a=1e-1,
    #                     dropout=False, dropout_percent=0.5)
    # nn.gradient_descent(train_x, train_y, 50, 10, a=1e-2,
    #                     dropout=False, dropout_percent=0.5)
    # Two training phases with dropout, annealing the learning rate.
    nn.gradient_descent(train_x, train_y, 500, 10, a=1e-3,
                        dropout=True, dropout_percent=0.5)
    nn.gradient_descent(train_x, train_y, 500, 10, a=1e-4,
                        dropout=True, dropout_percent=0.5)
    yhat = np.argmax(nn.predict(test_x), 1)
    print(nn)
    print(classification_report(test_y, yhat))
if __name__ == '__main__':
main()
# nn = mlp([2, 2, 1])
#
# X = np.array([[0,0],
# [0,1],
# [1,0],
# [1,1]])
#
# Y = np.array([[0, 1, 1, 0]]).T
#
# nn.gradient_descent(X, Y, 1000, 4, dropout=True)
# for x in [(i[0],j[0]) for i,j in zip(nn.predict(X), Y)]:
# print(x)
| jayswinney/mlp | mlp.py | mlp.py | py | 7,754 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scipy.special.expit",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "numpy.random.randn",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.... |
33539146773 | #!../python35/python.exe
print ("Content-type: text/html\n")
import cgi
import cgitb; cgitb.enable()
form = cgi.FieldStorage()
import pymysql
conn = pymysql.connect(host='localhost', port=3306, user='root', passwd='',db='soccer',autocommit=True)
cur = conn.cursor()
nombre = form.getfirst("nombre");
posicion = form.getfirst("posicion");
equipo = form.getfirst("equipo");
numero = form.getfirst("numero");
import os
fileitem = form['img']
if fileitem.filename:
fn = os.path.basename(fileitem.filename)
open("images/player/" +fn, 'wb').write(fileitem.file.read())
imagen = fn
else:
imagen = "None"
print(nombre,posicion,equipo,numero)
# Security fix: the form values come straight from the client, so building
# the SQL by string concatenation (as before) was vulnerable to SQL
# injection. Use a parameterized query and let the driver do the escaping.
sql = "INSERT INTO `jugadores`(`nombre_jugador`, `id_equipo`, `posicion`, `numero`,foto) VALUES (%s, %s, %s, %s, %s)"
cur.execute(sql, (str(nombre), str(equipo), str(posicion), str(numero), str(imagen)))
print('''<body><script type="text/javascript">window.location="admin_jugadores.py";</script></body>''') | cecilianogranados96/Proyecto-Soccer | nuevo_jugador.py | nuevo_jugador.py | py | 977 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "cgitb.enable",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cgi.FieldStorage",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pymysql.connect",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"li... |
26850908312 | import nextcord
from nextcord.ext import commands
from nextcord import Interaction, SlashOption
from config import settings
class ExampleSlash(commands.Cog):
    """Cog exposing a /clear slash command that bulk-deletes messages in the
    current channel (user-facing strings are in Russian)."""
    def __init__(self, bot):
        self.bot = bot
    # Slash command registered only for the guild configured in settings.
    @nextcord.slash_command(guild_ids=[settings.GUILD_ID], description="Чистка текущего канала от сообщений")
    async def clear(self, interaction: Interaction, amount: int=SlashOption(description="Количество сообщений для удаления", required=True)):
        # Acknowledge immediately (ephemeral); the purge may take a while.
        await interaction.response.send_message(f"Ваш запрос выполняется. Пожалуйста, подождите...", ephemeral=True)
        messages = await interaction.channel.purge(limit=amount)
        # Report how many messages were actually deleted.
        await interaction.edit_original_message(content=f"Удалено **{len(messages)}** сообщений в этом канале")
def setup(bot):
    """Entry point used by nextcord's extension loader: register the cog."""
    bot.add_cog(ExampleSlash(bot))
print("Module ExampleSlash successfully loaded") | Maxim-2005/Kurushi-DiscordBot- | cogs/example_slash.py | example_slash.py | py | 989 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "nextcord.ext.commands.Cog",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "nextcord.ext.commands",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "nextcord.Interaction",
"line_number": 11,
"usage_type": "name"
},
{
"api_name"... |
23785755593 | from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from .views import index, PostDetail, news_all, ContactView, TeamDetail, TeamAll, club, CategoryNews, Galery, subscription, \
locations, SubscriptionView
# URL routes for the main app: public pages, team/news detail views and
# subscription endpoints. Function views and class-based views are mixed.
urlpatterns = [
    path('', index, name='index'),
    path('subscription/buy/', SubscriptionView.as_view(), name='subscription'),
    path('locations/', locations, name='locations'),
    path('subscription/', subscription, name='sub'),
    path('galery/', Galery.as_view(), name='galery'),
    # Slug-based lookups for news categories and posts.
    path('category/<str:slug>/', CategoryNews.as_view(), name='category'),
    path('club/', club, name='club'),
    path('team/', TeamAll.as_view(), name='team_all'),
    path('team_detail/<int:pk>/', TeamDetail.as_view(), name='team'),
    path('post/<str:slug>/', PostDetail.as_view(), name='detail'),
    path('news_all/', news_all, name='news_all'),
    path('contact/', ContactView.as_view(), name='contact'),
]
| dimaProtas/Footballab | main/urls.py | urls.py | py | 983 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "views.index",
"line_number": 9,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "views.SubscriptionVie... |
1353676121 | import cv2
import os
# Source and destination directories (hard-coded Windows paths).
src_dir = 'H:\\document\\ocr\\src\\shixin2'
dst_dir = 'H:\\document\\ocr\\dst-test'
files = os.listdir(src_dir)
# Binarize every image for OCR: grayscale -> 5x5 Gaussian blur -> fixed
# binary threshold at 220, then save under the same name in dst_dir.
for name in files:
    im = cv2.imread(os.path.join(src_dir, name))
    im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(im_gray, (5, 5), 0)
    ret, thresh = cv2.threshold(blurred, 220, 255, cv2.THRESH_BINARY)
    cv2.imwrite(os.path.join(dst_dir, name), thresh)
print('done') | magicnian/court | train/pic_func.py | pic_func.py | py | 442 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.listdir",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
... |
27005467704 | import os
import json
import logging
from slacker.logger import Logger
from slacker.session import Session
# Default REPL prompt template string. Override with "repl_prompt" field in
# .slacker config file.
#
# Supported identifers:
# ${ro} - display DEFAULT_READ_ONLY_STR when Slacker is running in read-only mode
# ${w} - current workspace Slacker is working against
DEFAULT_REPL_PROMPT = "${ro}${w}> "
# Default string used when Slacker is running in read-only mode. Override with
# read_only_str in .slacker config file.
DEFAULT_READ_ONLY_STR = "(read-only) "
class Config:
  """Singleton configuration store for Slacker: REPL prompt template,
  workspaces and their API tokens, log level and read-only mode.
  Persisted as JSON in ~/.slacker."""
  __instance = None
  def __init__(self):
    """Do not call directly; obtain the singleton via Config.get()."""
    if not Config.__instance:
      self.__logger = Logger(self.__class__.__name__).get()
      # Set default values.
      self.reset()
      # Load from file if it exists.
      try:
        self.load()
      except Exception as ex:
        self.__logger.debug("Config does not exist: {}\n{}".format(self.file_path(), ex))
      # Assign singleton instance.
      Config.__instance = self
  @staticmethod
  def get():
    """Return the singleton instance, creating it on first use."""
    if not Config.__instance:
      Config()
    return Config.__instance
  def repl_prompt(self):
    """Template string for the REPL prompt (supports ${ro} and ${w})."""
    return self.__repl_prompt
  def set_repl_prompt(self, repl_prompt):
    """Set the REPL prompt template string."""
    self.__repl_prompt = repl_prompt
  def active_workspace(self):
    """Active workspace name, if defined."""
    return self.__active_workspace
  def set_active_workspace(self, name):
    """Mark a known workspace as active; raises ValueError if unknown."""
    if name not in self.__workspaces:
      raise ValueError("Cannot set unknown workspace active: '{}'".format(name))
    self.__active_workspace = name
  def add_workspace(self, name, token):
    """Register a workspace and its API token; raises ValueError on duplicates."""
    if name in self.__workspaces:
      raise ValueError("Cannot add workspace '{}' because it already exists!".format(name))
    self.__workspaces[name] = token
  def remove_workspace(self, name):
    """Remove a workspace; the active workspace cannot be removed."""
    if name not in self.__workspaces:
      raise ValueError("Cannot remove unknown workspace: '{}'".format(name))
    if self.active_workspace() == name:
      raise ValueError("Cannot remove active workspace: '{}'".format(name))
    del(self.__workspaces[name])
  def workspaces(self):
    """List of known workspace names."""
    return list(self.__workspaces.keys())
  def workspace_token(self, name):
    """API token for a known workspace; raises ValueError if unknown."""
    if name not in self.__workspaces:
      raise ValueError("Cannot get token for unknown workspace: '{}'".format(name))
    return self.__workspaces[name]
  def active_workspace_token(self):
    """API token for the active workspace; raises ValueError if none is active."""
    if not self.active_workspace():
      raise ValueError("No workspace is active!")
    return self.workspace_token(self.active_workspace())
  def set_log_level(self, level):
    """Set the log level and propagate it to the Session and Logger."""
    if level not in Logger.levels():
      raise ValueError("Invalid log level: {}".format(level))
    self.__log_level = level
    Session.get().set_log_level(level)
    Logger.set_level(level)
  def log_level(self):
    """Current logging level (a `logging` module constant)."""
    return self.__log_level
  def read_only(self):
    """Whether Slacker runs in read-only mode."""
    return self.__read_only
  def set_read_only(self, enable):
    """Enable or disable read-only mode."""
    self.__read_only = enable
  def read_only_str(self):
    """Prompt fragment displayed while in read-only mode."""
    return self.__read_only_str
  def set_read_only_str(self, str):
    """Set the read-only prompt fragment.
    NOTE(review): parameter name shadows the built-in `str`."""
    self.__read_only_str = str
  def file_path(self):
    """Absolute path of the config file in the user's home directory."""
    return os.path.expanduser("~/.slacker")
  def safe_dict(self):
    """Returns a safe dictionary of current values excluding any tokens."""
    return {"repl_prompt": self.repl_prompt(),
            "workspaces": self.workspaces(),
            "active_workspace": self.active_workspace(),
            "log_level": self.log_level(),
            "read_only": self.read_only(),
            "read_only_str": self.read_only_str()}
  def save(self):
    """Serialize the full config (including tokens) to the config file."""
    data = {"repl_prompt": self.repl_prompt(),
            "workspaces": self.__workspaces,
            "active_workspace": self.active_workspace(),
            "log_level": self.log_level(),
            "read_only": self.read_only(),
            "read_only_str": self.read_only_str()}
    with open(self.file_path(), "w") as fp:
      json.dump(data, fp, indent=2)
    self.__logger.debug("Saved config to: {}".format(self.file_path()))
  def load(self):
    """Load values from the config file; keys absent there keep defaults."""
    with open(self.file_path(), "r") as fp:
      data = json.load(fp)
      if "repl_prompt" in data:
        self.set_repl_prompt(data["repl_prompt"])
      if "workspaces" in data:
        self.__workspaces = data["workspaces"]
      if "active_workspace" in data:
        self.__active_workspace = data["active_workspace"]
      if "log_level" in data:
        self.set_log_level(data["log_level"])
      if "read_only" in data:
        self.set_read_only(data["read_only"])
      if "read_only_str" in data:
        self.set_read_only_str(data["read_only_str"])
      self.__logger.debug("Loaded config from: {}".format(self.file_path()))
  def reset(self):
    """Resets all values to default."""
    self.set_repl_prompt(DEFAULT_REPL_PROMPT)
    self.__workspaces = {} # Workspace name -> API token
    self.__active_workspace = None
    self.__log_level = logging.INFO
    self.__read_only = False
    self.__read_only_str = DEFAULT_READ_ONLY_STR
| netromdk/slacker | slacker/environment/config.py | config.py | py | 4,977 | python | en | code | 14 | github-code | 36 | [
{
"api_name": "slacker.logger.Logger",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "slacker.logger.Logger.levels",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "slacker.logger.Logger",
"line_number": 87,
"usage_type": "name"
},
{
"api_name... |
42591857727 | from dataclasses import dataclass, field
from dataclasses_json import dataclass_json
from typing import Tuple, List, Set, Optional, Dict, Iterable
from math import sqrt, asin, sin, cos, pi
from pathlib import Path
Coordinate = Tuple[float, float]

def distance(p1: Coordinate, p2: Coordinate):
    """Euclidean distance between two planar points."""
    dx = p1[0] - p2[0]
    dy = p1[1] - p2[1]
    return sqrt(dx ** 2 + dy ** 2)

R_EARTH = 3959.0

def gc_distance(geo1: Coordinate, geo2: Coordinate, R: float = R_EARTH):
    """Great-circle distance between two (lat, lon) pairs given in degrees,
    computed with the haversine formula; R defaults to Earth's radius in miles."""
    lat1, lon1 = geo1[0] * pi/180, geo1[1] * pi/180
    lat2, lon2 = geo2[0] * pi/180, geo2[1] * pi/180
    haversine = (sin((lat2 - lat1)/2) ** 2) + cos(lat1) * cos(lat2) * (sin((lon2 - lon1)/2) ** 2)
    return 2 * R * asin(sqrt(haversine))
@dataclass_json
@dataclass
class MapPoint:
    """One dot on the game map: its raw DXF position, optional city
    metadata, resolved geographic coordinates and per-railroad connections."""
    index: int # Numbered ordering of points
    dxf_coords: Coordinate # Coordinates taken from raw DXF file
    city_names: List[str] = field(default_factory=list)
    # Use a list for pairs like San Francisco-Oakland
    # Will only be populated for named destination cities
    geonames_lookup: Optional[str] = field(default=None)
    # Name to lookup in geonames dump data
    place_name: Optional[str] = field(default=None)
    # Single place name for displays
    state: Optional[str] = field(default=None)
    # May be unknown before lat,lon are calculated
    geo_coords: Optional[Coordinate] = field(default=None)
    # Latitude/longitude (need to be calculated)
    connections: Dict[str, Set[int]] = field(default_factory=dict)
    # Indexes of connected MapPoints, grouped by railroad name
    final_svg_coords: Optional[Coordinate] = field(default=None)
    # Final coordinates in the .svg
    region: Optional[str] = None
    @property
    def display_name(self) -> str:
        """Human-readable '<place>, <state>' label with underscores stripped."""
        return f'{(self.place_name or "").replace("_","")}, {self.state}'
    def _connect_to(self, other: 'MapPoint', rr: str):
        # One-directional edge; use connect_to() for the symmetric link.
        if rr not in self.connections:
            self.connections[rr] = set()
        self.connections[rr].add(other.index)
    def connect_to(self, other: 'MapPoint', rr: str):
        """Record a bidirectional connection to *other* along railroad *rr*."""
        self._connect_to(other, rr)
        other._connect_to(self, rr)
    @property
    def pts_connected_to(self) -> List[int]:
        """Sorted indices of all points reachable in one hop over any railroad."""
        return list(sorted(set(
            pt_j for pts in self.connections.values() for pt_j in pts)))
@dataclass_json
@dataclass
class Railroad:
    """Static data for one railroad: short/long display names, purchase
    cost, and an optional list of triangle index triples."""
    shortName: str
    longName: str
    cost: int
    triangles: List[Tuple[int, int, int]] | None = None
@dataclass_json
@dataclass
class Map:
    """The full game map: all points, an affine transform (A, b) applied by
    map_transform, and railroad definitions keyed by name."""
    points: List[MapPoint] = field(default_factory=list)
    map_transform_A: List[List[float]] = field(default_factory=list)
    map_transform_b: List[float] = field(default_factory=list)
    railroads: Dict[str, Railroad] = field(default_factory=dict)
    def map_transform(self, c: Coordinate) -> Coordinate:
        """Apply the affine transform A @ c + b to a coordinate pair."""
        x, y = c
        A, b = self.map_transform_A, self.map_transform_b
        return A[0][0] * x + A[0][1] * y + b[0], \
               A[1][0] * x + A[1][1] * y + b[1]
    def lookup_city(self, city: str) -> Tuple[str, int]:
        """Find a city by name (ignoring case, periods and spaces) and return
        the canonical city name plus its point index.
        NOTE(review): raises StopIteration when not found; inside a generator
        this surfaces as RuntimeError (PEP 479). Left unchanged because
        callers may rely on catching StopIteration.
        """
        def canon(s: str) -> str:
            return s.upper().replace('.','').replace(' ','')
        for pt in self.points:
            for c in pt.city_names:
                if canon(c) == canon(city):
                    return c, pt.index
        raise StopIteration
    def gc_distance(self, pt_i: int, pt_j: int, R: float = R_EARTH) -> float:
        """Great-circle distance between two map points (miles by default).
        Both points must already have geo_coords resolved."""
        geo1 = self.points[pt_i].geo_coords
        geo2 = self.points[pt_j].geo_coords
        assert geo1, "Must have geo coords"
        assert geo2, "Must have geo coords"
        return gc_distance(geo1, geo2, R)
def read_map(json_path: Path | None = None) -> Map:
    """Load a Map from a JSON file, defaulting to the repository's
    data/map.json resolved relative to this source file."""
    if not json_path:
        json_path = (Path(__file__) / '../../../../../data/map.json').resolve()
    with json_path.open('r') as json_file:
        return Map.from_json(json_file.read()) # type: ignore
Waypoint = Tuple[str, int] # Railroad name, dot
RailSegment = Tuple[str, int, int] # Railroad name + 2 dots (in order)

def make_rail_seg(rr: str, pt_i: int, pt_j: int) -> RailSegment:
    """Return the canonical (rr, lo, hi) segment with endpoints ascending."""
    assert pt_i != pt_j, "Rail segment must connect two different points"
    lo, hi = (pt_i, pt_j) if pt_i < pt_j else (pt_j, pt_i)
    return (rr, lo, hi)
def rail_segs_from_wps(start_pt: int, wp: List[Waypoint]) -> List[RailSegment]:
    """Expand a waypoint history starting at start_pt into the ordered list
    of canonical rail segments it traverses."""
    segments: List[RailSegment] = []
    prev_pt = start_pt
    for rr, next_pt in wp:
        assert prev_pt != next_pt, f"Invalid history {wp} from {start_pt} -> duplicate {prev_pt}"
        segments.append(make_rail_seg(rr, prev_pt, next_pt))
        prev_pt = next_pt
    return segments
def get_valid_waypoints(m: Map, pt_i: int,
        exclude_rs: List[RailSegment] = [],
        exclude_pts: Iterable[int] = []) -> List[Waypoint]:
    # Collect every (railroad, point) step leaving pt_i that does not revisit
    # an excluded point or reuse an excluded rail segment.
    # NOTE(review): mutable default arguments ([]) are shared across calls;
    # harmless here since they are only read, never mutated.
    wps: List[Waypoint] = []
    for rr, conn_pts in m.points[pt_i].connections.items():
        for pt_j in conn_pts:
            rs = make_rail_seg(rr, pt_i, pt_j)
            if pt_j not in exclude_pts and rs not in exclude_rs:
                wps.append((rr, pt_j))
return wps | bmboucher/rail_baron | python/src/pyrailbaron/map/datamodel.py | datamodel.py | py | 4,986 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.Tuple",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "math.sqrt",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "math.pi",
"line_number": 15,
"u... |
17888105443 | from django.shortcuts import render
from django.shortcuts import Http404
from django.shortcuts import HttpResponse
from crawler.tools import crawler,indexJson,search
from django.views.decorators.csrf import csrf_protect
from json import dumps,loads
# Create your views here.
@csrf_protect
def indexPage(request):
    """Render the indexing landing page."""
    context = {}
    return render(request, 'indexePage.html', context)
@csrf_protect
def searchPage(request):
    """Render the search landing page."""
    context = {}
    return render(request, 'searchPage.html', context)
@csrf_protect
def searchIt(request):
    """POST-only endpoint: run a search for the submitted term and return
    the results as JSON. Non-POST requests get a 404."""
    if request.method != 'POST':
        raise Http404
    searchTerm = request.POST['searchTerm']
    result = search(searchTerm)
    result = dumps(result)
    print("result",result)
    return HttpResponse(result, content_type='application/json')
@csrf_protect
def indexIt(request):
    """POST-only endpoint: crawl `count` pages starting from `url`, rebuild
    the index, and return the crawled URLs with their ranks as JSON.
    NOTE(review): int(count) raises ValueError (HTTP 500) on non-numeric input.
    """
    if request.method != 'POST':
        raise Http404
    url = request.POST['url']
    count = request.POST['count']
    count = int(count)
    print("crawling on pages count " , count, " starting form page: ",url )
    result = crawler(url , count)
    indexJson()
    # Keep only the fields the client needs from the crawl result.
    result = [{'url':item['url'], 'rank':item['rank']} for item in result['content']]
    result = dumps(result)
    return HttpResponse(result, content_type='application/json')
| roohy/search_engine_server | crawler/views.py | views.py | py | 1,270 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.shortcuts.render",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.views.decorators.csrf.csrf_protect",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 17,
"usage_type": "call"
},
... |
19247601785 | """ Contains a pipeline for generating a data set for use with the LightTag
platform
see: https://www.lighttag.io/
"""
import json
from ucla_topic_analysis import get_file_list
from ucla_topic_analysis.data import get_training_file_path
from ucla_topic_analysis.data.coroutines import create_file
from ucla_topic_analysis.data.coroutines import insert
from ucla_topic_analysis.data.coroutines import print_progress
from ucla_topic_analysis.data.pipeline import Pipeline
from ucla_topic_analysis.data.coroutines.read import ReadFilePipeline
class LightTagDataSetPipeline(Pipeline):
    """Pipeline for generating a dataset for the LightTag platform.
    NOTE: This pipeline is a data sink. It does not return any new data.
    """
    # The split schema for the corpus files
    SCHEMA = {
        "training": 0.8,
        "validation": 0.1,
        "testing": 0.1
    }
    @staticmethod
    def get_input_stream(schema=None):
        """This function is used to get an input stream for the
        LightTagDataSetPipeline
        Args:
            schema(:obj:`dict`): The schema for the file pipeline
        Returns:
            An iterable containing the dataset.
        """
        # Build the input stream pipeline
        files = ReadFilePipeline.get_input_stream()
        return ReadFilePipeline(
            input_stream=files,
            schema=schema
        ).output_stream()
    @classmethod
    async def generate_dataset(cls):
        """Create the LightTag dataset: stream every corpus file through the
        pipeline, printing progress per input file.
        """
        # build the pipeline
        data_stream = cls.get_input_stream(cls.SCHEMA)
        pipeline = cls()
        # create the dataset
        count = 1
        total = len(get_file_list())
        async for data in data_stream:
            await pipeline.run(data)
            print_progress(count, total)
            count += 1
        print("")
    async def coroutine(self, data):
        """Append one record to the LightTag JSON dataset file.
        Args:
            data (:obj:`dict`): A dictionary containing data that needs to be
                tagged
        """
        file_path = get_training_file_path("LightTag-dataset.json")
        # create_file is expected to report whether a fresh "[\n]" skeleton
        # was created (first record needs no comma separator).
        is_new_file = create_file(file_path, "[\n]")
        data_string = json.dumps(data)
        prefix = "\n" if is_new_file else ",\n"
        insertion_string = "{0}{1}".format(prefix, data_string)
        with open(file_path, "r+") as json_file:
            json_file.seek(0, 2)
            # Insert just before the closing "\n]" (the last two characters).
            position = json_file.tell() - 2
            insert(insertion_string, json_file, position)
| swang666/applied-finance-project | UCLA-Topic-Analysis/ucla_topic_analysis/data/coroutines/light_tag.py | light_tag.py | py | 2,695 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "ucla_topic_analysis.data.pipeline.Pipeline",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "ucla_topic_analysis.data.coroutines.read.ReadFilePipeline.get_input_stream",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "ucla_topic_analysis.data.co... |
1450744710 | from dash import Dash, dcc, Output, Input # pip install dash
import dash_bootstrap_components as dbc # pip install dash-bootstrap-components
import plotly.express as px
import pandas as pd # pip install pandas
import geopandas as gpd
df = pd.read_csv("rainfall.csv")
print(df.head())
gdf = gpd.read_file('Indian_States.txt')
gdf["geometry"] = gdf.to_crs(gdf.estimate_utm_crs()).simplify(1000).to_crs(gdf.crs)
india_states = gdf.rename(columns={"NAME_1": "ST_NM"}).__geo_interface__
# Build your components
app = Dash(__name__, external_stylesheets=[dbc.themes.LUX])
mytitle = dcc.Markdown(children='')
mygraph = dcc.Graph(figure={})
dropdown = dcc.Dropdown(options=df.columns.values[1:],
value='Rainfall(mm)', # initial value displayed when page first loads
clearable=False)
# Customize your own Layout
app.layout = dbc.Container([
dbc.Row([
dbc.Col([mytitle], width=6)
], justify='center'),
dbc.Row([
dbc.Col([mygraph], width=12)
]),
dbc.Row([
dbc.Col([dropdown], width=6)
], justify='center'),
], fluid=True)
# Callback allows components to interact
@app.callback(
    Output(mygraph, 'figure'),
    Output(mytitle, 'children'),
    Input(dropdown, 'value')
)
def update_graph(column_name): # function arguments come from the component property of the Input
    """Rebuild the India choropleth for the selected data column and return
    the figure plus a markdown title for the page header."""
    print(column_name)
    print(type(column_name))
    # Base layer: draw every state in light grey so states missing from the
    # rainfall CSV still appear on the map.
    fig = px.choropleth(
        pd.json_normalize(india_states["features"])["properties.ST_NM"],
        locations="properties.ST_NM",
        geojson=india_states,
        featureidkey="properties.ST_NM",
        color_discrete_sequence=["lightgrey"],
    )
    # Data layer: color the states present in the CSV by the chosen column.
    fig.add_traces(
        px.choropleth(
            df,
            locations="State",
            geojson=india_states,
            featureidkey="properties.ST_NM",
            locationmode="geojson-id",
            color=column_name,
            scope="asia",
        ).data
    )
    fig.update_geos(fitbounds="locations", visible=False)
    return fig, '# '+column_name # returned objects are assigned to the component property of the Output
# Run app
if __name__=='__main__':
app.run_server(debug=True, port=8054)
| Aakanksha-Geo/Average_rainfall_in_India_Dash_Plotly | dash_rainfall_India.py | dash_rainfall_India.py | py | 2,336 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "geopandas.read_file",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "dash.Dash",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_component... |
28223166240 | import pykka
from site_storage.sсhema import RegularCheck, WatchStatus
from site_storage.messages import UpdateSiteRequest
from site_storage.messages import SiteDeleteResponse, SiteResponse, SubscribeOnSiteUpdates
import os
import urllib.request
from urllib.parse import urldefrag
import time
import threading
from datetime import datetime
import logging
logger = logging.getLogger(__name__)
class Handler(threading.Thread):
    """Background worker: periodically re-prioritizes the downloader's queue
    and drains it one site at a time."""
    def __init__(self, downloader):
        # Daemon thread so it never blocks interpreter shutdown.
        super().__init__(daemon=True)
        self.running = True
        self.downloader = downloader
    def run(self):
        last_update = datetime.now()
        while self.running:
            current_time = datetime.now()
            difference = current_time - last_update
            # Recalculate priorities roughly once a minute.
            # NOTE(review): .seconds is only the seconds *component* of the
            # delta, not total_seconds(); equivalent here while the gap < 1 day.
            if difference.seconds >= 60:
                self.downloader._recalculate_queue()
                last_update = datetime.now()
            # Queue entries are (points, site) sorted ascending by points, so
            # pop() takes the highest-priority site.
            if len(self.downloader.queue) > 0:
                site = self.downloader.queue.pop()
                self.downloader._download_site(site[1])
            time.sleep(5)
    def stop(self):
        """Ask the worker loop to exit after its current iteration."""
        self.running = False
        print('Site downloader stopped')
class SiteDownloaderActor(pykka.ThreadingActor):
point_dict = {
RegularCheck.TWICE_HOUR: 1000,
RegularCheck.ONCE_HOUR: 500,
RegularCheck.FOUR_TIMES_DAY: 250,
RegularCheck.TWICE_DAY: 125,
RegularCheck.ONCE_DAY: 100
}
regular_check_duration = {
RegularCheck.TWICE_HOUR: 30,
RegularCheck.ONCE_HOUR: 60,
RegularCheck.FOUR_TIMES_DAY: 360,
RegularCheck.TWICE_DAY: 720,
RegularCheck.ONCE_DAY: 1440
}
def __init__(self, storage_proxy, analytic_proxy):
try:
super().__init__()
self.storage_proxy = storage_proxy
self.analytic_proxy = analytic_proxy
self.sites = dict()
self.queue = list()
self._create_queue()
self.storage_proxy.subscribe_on_site_update(
SubscribeOnSiteUpdates(self.actor_ref)
)
self.handle_process = Handler(downloader=self)
except Exception as e:
logger.error("Error occurred: {0}".format(e))
def on_start(self):
self.handle_process.start()
def on_stop(self):
self.handle_process.stop()
def _create_queue(self):
for site in self.storage_proxy.get_sites().get():
self.sites[site.id] = site
points = self._calculate_priority(site)
if points > 0:
self.queue.append((points, site))
self.queue.sort(key=lambda x: x[0])
def _recalculate_queue(self):
for site in self.sites.values():
points = self._calculate_priority(site)
if points > 0:
if site.last_watch is not None:
self.storage_proxy.update_site(UpdateSiteRequest(id=site.id, status=WatchStatus.NEED_TO_WATCH))
self.queue.append((points, site))
self.queue.sort(key=lambda x: x[0])
    def _calculate_priority(self, site):
        """Score a site for the download queue (higher means sooner).
        Returns -1 when the site is not yet due for a re-check."""
        points = 0
        # Base score from how frequently the site should be checked.
        points = points + self.point_dict[site.regular_check]
        if site.last_watch is None:
            # Never downloaded yet: jump to the front of the queue.
            points = points + 10000
        else:
            difference = datetime.now() - datetime.fromisoformat(site.last_watch)
            # Minutes overdue relative to the configured check interval.
            # NOTE(review): .seconds ignores full days of the delta, so a site
            # overdue by more than 24h wraps around -- confirm intended.
            fine = (difference.seconds // 60) - self.regular_check_duration[site.regular_check]
            if fine < 0:
                points = -1
            else:
                points = points + fine * 100
        return points
def on_receive(self, message):
if isinstance(message, SiteResponse):
self.on_site_record(message)
if isinstance(message, SiteDeleteResponse):
self.on_delete_site(message)
def on_site_record(self, site):
self.sites[site.id] = site
def on_delete_site(self, message):
del self.sites[message.id]
def _download_site(self, site):
try:
self.storage_proxy.update_site(UpdateSiteRequest(id=site.id, status=WatchStatus.IN_PROGRESS))
with urllib.request.urlopen(site.url) as f:
html = f.read().decode('utf-8')
self.analytic_proxy.analyze_site(site, html)
except Exception as e:
logger.error("Error occurred: {0}".format(e))
self.storage_proxy.update_site(UpdateSiteRequest(id=site.id, status=WatchStatus.ERROR)) | map82top/site_watcher | site_downloader/actor.py | actor.py | py | 4,456 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "datetim... |
13092077642 | import sympy as sy
from itertools import permutations
import dill
import pickle
import time
total_start = time.time()
#
#
#The Antimatter Reactor Diagram
#
# /---------- Q1
#P1---------/
# S (K1)
#P2---------|
# | (K2)
#P3---------|
# S (K3)
#P4---------\
# \----------- Q2
#
#
#
sy.var('identity4 overlap F a epsilon g m holder1x holder1y holder1z holder1u holder1d holder2x holder2y holder2z holder2u holder2d holder3x holder3y holder3z holder3u holder3d holder4x holder4y holder4z holder4u holder4d P1 p1t p1x p1y p1z P2 p2t p2x p2y p2z P3 p3t p3x p3y p3z P4 p4t p4x p4y p4z K1 k1t k1x k1y k1z K2 k2t k2x k2y k2z K3 k3t k3x k3y k3z Q1 q1t q1x q1y q1z Q2 q2t q2x q2y q2z s1 s1u s1d s2 s2u s2d s3 s3u s3d s4 s4u s4d z1 z1u z1d z2 z2u z2d UP1 UP2 VbarP3 UP4 UbarQ1 UbarQ2 Gamma Pauli y y24')
if(True): #Creates new overlap |integral(<q1,q2|F(a)|p1,p2,p3,p4>)|^2
s1 = sy.Matrix([s1u, s1d]) #change for different spin values
s2 = sy.Matrix([s2u, s2d])
s3 = sy.Matrix([s3u, s3d])
s4 = sy.Matrix([s4u, s4d])
z1 = sy.Matrix([z1u, z1d])
z2 = sy.Matrix([z2u, z2d])
identity4 = sy.Matrix([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]
])
g = sy.Matrix([[
[-1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]
]])
Gamma = [
sy.Matrix(
[[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, -1, 0],
[0, 0, 0, -1]]),
sy.Matrix(
[[0, 0, 0, 1],
[0, 0, 1, 0],
[0, -1, 0, 0],
[-1, 0, 0, 0]]),
sy.Matrix(
[[0, 0, 0, -sy.I],
[0, 0, sy.I, 0],
[0, sy.I, 0, 0],
[-sy.I, 0, 0, 0]]),
sy.Matrix(
[[0, 0, 1, 0],
[0, 0, 0, -1],
[-1, 0, 0, 0],
[0, 1, 0, 0]])
]
Pauli = sy.Matrix([
[[1, 0],
[0, 1]],
[[0, 1],
[1, 0]],
[[0, -sy.I],
[sy.I, 0]],
[[1, 0],
[0, -1]]
])
P1 = sy.Matrix([p1t, p1x, p1y, p1z])
P2 = sy.Matrix([p2t, p2x, p2y, p2z])
P3 = sy.Matrix([p3t, p3x, p3y, p3z])
P4 = sy.Matrix([p4t, p4x, p4y, p4z])
Q1 = sy.Matrix([q1t, q1x, q1y, q1z])
Q2 = sy.Matrix([q2t, q2x, q2y, q2z])
K1 = P1 - Q1
K2 = P1 + P2 - Q1
K3 = Q2 - P4
#K1 = sy.Matrix([k1t, k1x, k1y, k1z])
#K2 = sy.Matrix([k2t, k2x, k2y, k2z])
#K3 = sy.Matrix([k3t, k3x, k3y, k3z])
P1Pauli=sy.Matrix([
[p1t+p1z, p1x-sy.I*p1y],
[p1x+sy.I*p1y, p1t-p1z]
])
P2Pauli=sy.Matrix([
[p2t+p2z, p2x-sy.I*p2y],
[p2x+sy.I*p2y, p2t-p2z]
])
P3Pauli=sy.Matrix([
[p3t+p3z, p3x-sy.I*p3y],
[p3x+sy.I*p3y, p3t-p3z]
])
P4Pauli=sy.Matrix([
[p4t+p4z, p4x-sy.I*p4y],
[p4x+sy.I*p4y, p4t-p4z]
])
Q1Pauli=sy.Matrix([
[q1t+q1z, q1x-sy.I*q1y],
[q1x+sy.I*q1y, q1t-q1z]
])
Q2Pauli=sy.Matrix([
[q2t+q2z, q2x-sy.I*q2y],
[q2x+sy.I*q2y, q2t-q2z]
])
UP1 = sy.sqrt((P1[0]+m)/(2*m))*sy.Matrix(s1).col_join((P1Pauli*s1))
UP2 = sy.sqrt((P2[0]+m)/(2*m))*sy.Matrix(s2).col_join((P2Pauli*s2))
VbarP3 = sy.sqrt((P3[0]+m)/(2*m))*sy.Matrix((P3Pauli*s3)).col_join(s3).H*Gamma[0]
UP4 = sy.sqrt((P4[0]+m)/(2*m))*sy.Matrix(s4).col_join((P4Pauli*s4))
UbarQ1 = sy.sqrt((Q1[0]+m)/(2*m))*sy.Matrix(z1).col_join((Q1Pauli*z1)).H*Gamma[0]
UbarQ2 = sy.sqrt((Q2[0]+m)/(2*m))*sy.Matrix(z2).col_join((Q2Pauli*z2)).H*Gamma[0]
k2slash = K2[0]*Gamma[0]+K2[1]*Gamma[1]+K2[2]*Gamma[2]+K2[3]*Gamma[3]+m*identity4
print("Creating Simple Momentum State Amplitude (y)...")
start = time.time()
y=0
myi = 0
for mu in range(4):#Summing over all einstein indicies
for nu in range(4):
for sigma in range(4):
for tau in range(4):
#y = y+sy.simplify((UbarQ1*Gamma[mu]*UP1)[0]*(VbarP3*Gamma[nu]*k2slash*Gamma[sigma]*UP2)[0]*(UbarQ2*Gamma[tau]*UP4)[0]*g[mu][nu]*g[sigma][tau])
y = y+(UbarQ1*Gamma[mu]*UP1)[0]*(VbarP3*Gamma[nu]*k2slash*Gamma[sigma]*UP2)[0]*(UbarQ2*Gamma[tau]*UP4)[0]*g[mu][nu]*g[sigma][tau]
myi += 1
#print("Finished simplification "+str(myi))
print(str(time.time()-start)+" seconds")
print("Simplifying Base Amplitude...")
start = time.time()
y = sy.simplify(y)
print(str(time.time()-start)+" seconds")
y = y/(K1.T*K1)[0]/(K3.T*K3)[0]/((K2.T*K2)[0]-m*m+sy.I*epsilon)
y = y.subs([(p1t, sy.sqrt(p1x*p1x + p1y*p1y + p1z*p1z + m*m)), (p2t, sy.sqrt(p2x*p2x + p2y*p2y + p2z*p2z + m*m)), (p3t, sy.sqrt(p3x*p3x + p3y*p3y + p3z*p3z + m*m)), (p4t, sy.sqrt(p4x*p4x + p4y*p4y + p4z*p4z + m*m)), (q1t, sy.sqrt(q1x*q1x + q1y*q1y + q1z*q1z + m*m)), (q2t, sy.sqrt(q2x*q2x + q2y*q2y + q2z*q2z + m*m))])
#substitutes actual values for the variables
#spin of electron 1 down, (up+down)^2 !must! = 1
#now for all permutations of the particles
y24 = 0
for p in permutations([[p1x,p1y,p1z,s1u,s1d], [p2x,p2y,p2z,s2u,s2d], [p3x,p3y,p3z,s3u,s3d], [p4x,p4y,p4z,s4u,s4d]]):
print("\nAdding permutation: "+str(p))
start = time.time()
y24 = y24+y.subs([(p1x, holder1x), (p1y, holder1y), (p1z, holder1z), (s1u, holder1u), (s1d, holder1d), (p2x, holder2x), (p2y, holder2y), (p2z, holder2z), (s2u, holder2u), (s2d, holder2d), (p3x, holder3x), (p3y, holder3y), (p3z, holder3z), (s3u, holder3u), (s3d, holder3d), (p4x, holder4x), (p4y, holder4y), (p4z, holder4z), (s4u, holder4u), (s4d, holder4d)]).subs([(holder1x, p[0][0]), (holder1y, p[0][1]), (holder1z, p[0][2]), (holder1u, p[0][3]), (holder1d, p[0][4]), (holder2x, p[1][0]), (holder2y, p[1][1]), (holder2z, p[1][2]), (holder2u, p[1][3]), (holder2d, p[1][4]), (holder3x, p[2][0]), (holder3y, p[2][1]), (holder3z, p[2][2]), (holder3u, p[2][3]), (holder3d, p[2][4]), (holder4x, p[3][0]), (holder4y, p[3][1]), (holder4z, p[3][2]), (holder4u, p[3][3]), (holder4d, p[3][4])])
print(str(time.time()-start)+" seconds")
print("Adding Permutation of Outgoing Particles...")
start = time.time()
y24 = y24 + y24.subs([(q1x, holder1x), (q1y, holder1y), (q1z, holder1z), (s1u, holder1u), (s1d, holder1d), (q2x, holder2x), (q2y, holder2y), (q2z, holder2z), (s2u, holder2u), (s2d, holder2d)]).subs([(holder1x, q2x), (holder1y, q2y), (holder1z, q2z), (holder1u, s2u), (holder1d, s2d), (holder2x, q1x), (holder2y, q1y), (holder2z, q1z), (holder2u, s1u), (holder2d, s1d)])
print(str(time.time()-start)+" seconds")
print("\nConstructing Arbitrary Wavefunction F(a)...")
#NEED TO ADD ARBITRARY WAVEFUNCTION F(a,p) => integral(F*y24)
start = time.time()
F=sy.exp(-a*P1.T*P1)*sy.exp(-a*P2.T*P2)*sy.exp(-a*P3.T*P3)*sy.exp(-a*P4.T*P4)#Input States
F= F*sy.exp(-a*Q1.T*Q1)*sy.exp(-a*Q2.T*Q2)#Output States, NEED TO FIND A BETTER OUTPUT THAN A GAUSSIAN!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
print(str(time.time()-start)+" seconds")
print("\nJoining Functions, (F*y24)")
start = time.time()
y24 = F*y24
print(str(time.time()-start)+" seconds")
print("\nEnforcing Momentum Conservation...")
start = time.time()
y24 = y24.subs([(q2x, p1x+p2x+p3x+p4x-q1x), (q2y, p1y+p2y+p3y+p4y-q1y), (q2z, p1z+p2z+p3z+p4z-q1z)])#MOMENTUM CONSERVATION
print(str(time.time()-start)+" seconds")
"""
print("\nSimplifying Amplitude...")
start = time.time()
y24 = sy.simplify(y24)
print(str(time.time()-start)+" seconds")
#"""
print("\nIntegrating over Momentum...")
start = time.time()
overlap = sy.integrate(y24, (p1x, -sy.oo, sy.oo), (p1y, -sy.oo, sy.oo), (p1z, -sy.oo, sy.oo), (p2x, -sy.oo, sy.oo), (p2y, -sy.oo, sy.oo), (p2z, -sy.oo, sy.oo), (p3x, -sy.oo, sy.oo), (p3y, -sy.oo, sy.oo), (p3z, -sy.oo, sy.oo), (p4x, -sy.oo, sy.oo), (p4y, -sy.oo, sy.oo), (p4z, -sy.oo, sy.oo), (q1x, -sy.oo, sy.oo), (q1y, -sy.oo, sy.oo), (q1z, -sy.oo, sy.oo))
#Still dependent on spins, m, epsilon, and input/output state parameter a
print(str(time.time()-start)+" seconds")
print(overlap)
#Lambdafication and Saving
print("\nLambdifying Expression for Overlap...")
start = time.time()
lambda_overlap = sy.lambdify((a, s1u, s1d, s2u, s2d, s3u, s3d, s4u, s4d, z1u, z1d, z2u, z2d, m, epsilon), overlap, modules="numpy")
print(str(time.time()-start)+" seconds")
print("\nSAVING...")
start = time.time()
dill.settings['recurse'] = True
dill.dumps(lambda_overlap)
print(str(time.time()-start)+" seconds")
"""#For Momentum Eigenstate y24, (without F). Allows you to investigate momentum eigenstate scattering
print("Inputting Desired Particle Momentum...")
result = result.subs([(p1x, 1), (p1y, 1), (p1z, 0)])#momentum vector of electron 1
result = result.subs([(p2x, 1), (p2y, 0), (p2z, 2)])
result = result.subs([(p3x, 1), (p3y, 0), (p3z, 1)])#incoming electron 3 is positron
result = result.subs([(p4x, 1), (p4y, 3), (p4z, 0)])
result = result.subs([(q1x, 0), (q1y, 1), (q1z, 0)])
#result = result.subs([(q2x, 0), (q2y, 0), (q2z, 0)])#q2z is automatically determined due to momentum conservation
"""
print("Inputting Desired Electron Spins...")
result = y24.subs([(s1u, 1),(s1d, 0)])#spin of electron 1 up
result = result.subs([(s2u, 1),(s2d, 0)])
result = result.subs([(s3u, 1),(s3d, 0)])
result = result.subs([(s4u, 1),(s4d, 0)])
result = result.subs([(z1u, 1),(z1d, 0)])
result = result.subs([(z2u, 1),(z2d, 0)])
print("Inputting Electron Mass...")
result = result.subs(m, 1)
print("Approximating Epsilon...")
result = result.subs(epsilon, 0.0001)
print("Inputting Test a...")
result = result.subs(a, 1)
print("\n")
print("TEST RESULT IS...")
print(result) #zoo = complex infinity
print("\n")
print("OR APPROXIMATELY:")
result = result.evalf()
print(result)
print("\n")
print("|<q1,q2|F(a)>|^2:")#NEED TO FIND A BETTER OUTPUT STATE
result = (sy.Pow(sy.Abs(result), 2))
print(result)
print("ENTIRE PROCESS TOOK "+str(total_start - time.time()))
#Now the Fun Begins
# time to Test over values of F(a), and use gradient descent to find the local max efficiency
| Northerneye/antimatterReactor | antimatterReactor.py | antimatterReactor.py | py | 10,272 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "time.time",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sympy.var",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sympy.Matrix",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "sympy.Matrix",
"line_number": 27,... |
5185672583 | import pymysql
from .config import m_config
from dbutils.persistent_db import PersistentDB
class DBconnect:
    """Thin MySQL helper backed by a DBUtils PersistentDB connection pool."""

    def __init__(self, db_config):
        # db_config keys: HOSTNAME, PORT, USERNAME, PASSWORD, DATABASE.
        self.config = db_config
        self.POOL = self.initPool()
        self.conn = self.createConnection()

    def initPool(self):
        """Create a thread-local (persistent) pymysql connection pool."""
        POOL = PersistentDB(
            creator = pymysql,      # DB-API module used to open connections
            maxusage = None,        # unlimited reuses per connection
            setsession = [],        # no per-session setup SQL
            ping = 0,               # never ping the server to validate
            closeable = False,      # .close() keeps the connection alive
            threadlocal = None,
            host = self.config['HOSTNAME'],
            port = int(self.config['PORT']),
            user = self.config['USERNAME'],
            password = self.config['PASSWORD'],
            database = self.config['DATABASE'],
            charset = 'utf8',
        )
        return POOL

    def createConnection(self):
        # shareable=False: give this thread its own dedicated connection.
        return self.POOL.connection(shareable=False)

    def createSession(self):
        # Open a cursor on the cached connection.
        return self.conn.cursor()

    def closeSession(self, cursor):
        # Drain the result set (empty tuple for non-SELECTs), then close.
        result = cursor.fetchall()
        cursor.close()
        return result

    def commit(self):
        # closeable=False above makes .close() a soft close, so the pooled
        # connection survives and can be handed out again on the next call.
        self.conn.commit()
        self.conn.close()

    def execute(self, whole_sql):
        """Run one SQL statement and return the fetched rows."""
        cursor = self.createSession()
        cursor.execute(whole_sql)
        result = self.closeSession(cursor)
        self.commit()
        return result
# Module-level singleton connector built from the MySQL section of config.
db_config = m_config.get('MySQL')
m_DBconnector = DBconnect(db_config)
# if __name__ == '__main__':
# sql2 = "insert into anime_list (mikan_id,img_url) values (3,'ok')"
# sql = 'select * from anime_list'
# print(DBconnect().execute(sql))
| FortyWinters/autoAnime | src/lib/connect.py | connect.py | py | 1,572 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "dbutils.persistent_db.PersistentDB",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "config.m_config.get",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "config.m_config",
"line_number": 50,
"usage_type": "name"
}
] |
71346257703 | from flask import Flask
from pickView import PickView, EventAdd, EventRetrieve
app = Flask(__name__)
# Route table: all endpoints are class-based views from pickView.
app.add_url_rule('/', view_func=PickView.as_view('pick_view'),
                 methods=['GET'])
app.add_url_rule('/eventAdd', view_func=EventAdd.as_view('event_add'),
                 methods=['POST'])
app.add_url_rule('/eventRetrieve', view_func=EventRetrieve.as_view('event_retrieve'),
                 methods=['POST'])
# Development server; runs only when executed directly.
if __name__ == '__main__':
    app.run(debug=True)
| hethune/impicky | picky.py | picky.py | py | 437 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pickView.PickView.as_view",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pickView.PickView",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "pickView.EventAd... |
6209537625 | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 31 12:39:09 2023
@author: Mouhamad Ali Elamine
"""
import argparse
import json
# CLI: input/output paths plus partition count and review-count threshold.
parser = argparse.ArgumentParser(description='A1T3')
parser.add_argument('--input_file', type=str, default='./review.json', help='the input file')
parser.add_argument('--output_file', type=str, default= './a1t3_customized.json', help='the output file contains your answer')
parser.add_argument('--n_partitions', type=int, default=10, help='the number of partitions')
parser.add_argument('--n', type=int, default=10, help='the threshold of the number of reviews')
args = parser.parse_args()
from pyspark import SparkConf, SparkContext
if __name__ == '__main__':
sc_conf = SparkConf() \
.setAppName('task2') \
.setMaster('local[*]') \
.set('spark.driver.memory','8g') \
.set('spark.executor.memory','4g')
sc = SparkContext.getOrCreate(conf=sc_conf)
sc.setLogLevel('OFF')
def partitioner(x):
return hash(x)
review_file = 'review.json'
r_lines = sc.textFile(review_file).persist()
review_rdd = r_lines.map(lambda x: json.loads(x)).persist()
review_rdd1 = review_rdd.map(lambda x: (x["business_id"],1)).persist()
partitioned_rdd = review_rdd1.partitionBy(args.n_partitions, partitioner)
item_num = partitioned_rdd.glom().map(lambda x: len(x)).collect()
partitioned_rdd1 = partitioned_rdd.groupByKey().mapValues(lambda x: sum(x))
partitioned_rdd2 = partitioned_rdd1.filter(lambda x: x[1]>args.n).collect()
task3_customized = {
"n_partitions":args.n_partitions,
"n_items":item_num,
"result": partitioned_rdd2
}
json_task3_customized = json.dumps(task3_customized, indent=4)
with open(args.output_file, "w") as outfile:
outfile.write(json_task3_customized) | elami018/CSCI_5523 | HW1/task3_customized.py | task3_customized.py | py | 1,815 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pyspark.SparkConf",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pyspark.SparkContext.getOrCreate",
"line_number": 28,
"usage_type": "call"
},
{
"api_na... |
41086969415 | # Импортировал библиотеки
from tkinter import *
from tkinter import messagebox
import pyperclip
import pyshorteners
# Create the main window with tkinter
root = Tk()
# Window title
root.title('Link_Converter')
# Fixed window size
root.geometry('600x400')
# Background colour of the window
root["bg"] = '#778899'
# Heading labels: text, font, size, background and foreground colours
Label(root, text='Welcome to the \nLink_Converter', font='Colibri 20 bold',
      bg='#778899', fg='#FFFFFF').pack(pady=5)
Label(root, text='Enter link:', font='Colibri 15 bold',
      bg='#778899', fg='#FFFFFF').pack(pady=5)
# Input field for the link to shorten
link = Entry(root, width=40)
link.pack()
# Label above the result field
Label(root, text='Shortened link', font='Colibri 15 bold',
      bg='#778899', fg='#FFFFFF').pack(pady=5)
# Field that will hold the shortened link
res = Entry(root, width=40)
res.pack()
# Two functions: copytoclipboard() copies the shortened link to the
# clipboard; short() shortens the entered link via TinyURL.
def copytoclipboard():
    url = res.get()
    pyperclip.copy(url)
def short():
    # If the link is valid, shorten it and show the result...
    try:
        a = link.get()
        s = pyshorteners.Shortener().tinyurl.short(a)
        res.insert(0, s)
    # ...otherwise show an error dialog.
    except:
        messagebox.showerror('Link Shortening', 'Invalid URL')
# Buttons wired to the two functions defined above.
Button(root, text='Cut down', command=short, activebackground='#778899',
       bd=5, font='Colibri 13 bold', fg='#00BFFF').pack(pady=10)
Button(root, text='Copy', command=copytoclipboard, activebackground='#778899',
       bd=5, font='Colibri 13 bold', fg='#00BFFF').pack(pady=5)
Label(root, text='It-Overone </>', font='Colibri 15 bold', bg='#778899', fg='#00FFFF',).pack(pady=35)
# Start the Tk event loop.
root.mainloop()
| Maksim-Lukashyk-1996/Link_Converter | LinkConverter.py | LinkConverter.py | py | 2,512 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "pyperclip.copy",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "pyshorteners.Shortener",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox.showerror",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "tk... |
11483729231 | import re
import glob
import json
import os
def get_enode_info():
    # Collect {'node_id', 'enode_id'} dicts from the temp files dropped by
    # the Ansible play, deleting each file after it has been consumed.
    result = []
    prefix = '/tmp/ansible-eth-node-info-'
    for file_name in glob.glob('{}*'.format(prefix)):
        node_id = file_name.replace(prefix, '')
        with open(file_name) as f:
            content = f.read().replace('\n', '')
        # enode URI format: enode://<hex public key>@host:port
        enode_id = re.search(r'enode://([0-9a-f]{40,})@', content).group(1)
        result.append({'node_id': node_id, 'enode_id': enode_id})
        os.remove(file_name)
    return result
def get_ip_address():
    # Map node id -> IPv4 address from the latest bcli deploy session file.
    result = {}
    with open(os.path.expanduser('~/.bcli/sessions/latest/deploy.json')) as f:
        nodes = json.load(f)['nodes']
    for node_list in nodes.values():
        for item in node_list:
            result[item['id']] = item['ipv4']
    return result
def main():
    # Emit /tmp/add-peers.sh: one geth `admin.addPeer` call per known node.
    ip_mapping = get_ip_address()
    node_list = get_enode_info()
    with open('/tmp/add-peers.sh', 'w') as f:
        f.write('cd\n')
        for item in node_list:
            ipv4 = ip_mapping[item['node_id']]
            enode_id = item['enode_id']
            uri = 'enode://{}@{}:30303'.format(enode_id, ipv4)
            cmd = "geth attach ipc:gethDataDir/geth.ipc --exec 'admin.addPeer(\"{}\")'".format(uri)
            f.write('{}\n'.format(cmd))
if __name__ == '__main__':
main()
| mitnk/bcli | playbooks/ethereum/generate_node_list.py | generate_node_list.py | py | 1,319 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "glob.glob",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.expanduser",
"line_number":... |
74469041703 | import requests
import base45
import base64
from typing import Dict, Tuple, Optional
from cose.keys import cosekey, ec2, keyops, curves
from cryptojwt import utils as cjwt_utils
import zlib
from cose.messages import CoseMessage
from pyasn1.codec.ber import decoder as asn1_decoder
from cose.headers import Algorithm, KID
class MysejahteraPrivateAPI:
    """Unofficial MySejahtera client: login, fetch the signed vaccine
    certificate, and verify/decode its COSE payload."""

    # Base64-encoded DER (SubjectPublicKeyInfo) EC public key used to verify
    # the certificate signature.
    public_key = "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEbP5zZhl/Nfvfk9Ocmj4kVz6BnMesxexyPMHW+vbveCzQwCj4MOkQaQcC932W3+f5/FV2081EKlp2zhL9ks0qLQ=="

    def __init__(self, username, password, auth_token=None):
        # NOTE(review): the auth_token argument is ignored -- the attribute
        # is always reset to None until login() succeeds.
        self.auth_token = None
        self.username = username
        self.password = password
        self.api_url = "https://mysejahtera.malaysia.gov.my/"
        # Headers mimic the official iOS app.
        self.headers = {
            'accept': 'application/json',
            'referer': 'https://mysejahtera.malaysia.gov.my/home',
            'user-agent': 'MySejahtera/1.0.45 (iPhone; iOS 14.6; Scale/3.00)',
            'accept-language': 'en-MY;q=1, ms-MY;q=0.9'
        }

    def login(self):
        """Authenticate and cache the X-AUTH-TOKEN; raises on failure."""
        multipart_form_data = {
            'username': (None, self.username),
            'password': (None, self.password)
        }
        response = requests.post(f'{self.api_url}epms/login', files=multipart_form_data)
        if 'X-AUTH-TOKEN' not in response.headers:
            raise Exception("Login Failed!")
        self.auth_token = response.headers['X-AUTH-TOKEN']

    def digital_cert(self):
        """Fetch the signed vaccine-certificate QR payload as parsed JSON."""
        if self.auth_token is None:
            raise Exception("Auth Token is empty! Please run the login first.")
        # NOTE(review): this aliases (and mutates) self.headers rather than
        # copying it -- confirm that is intentional.
        headers = self.headers
        headers['x-auth-token'] = self.auth_token
        response = requests.request("GET", f"{self.api_url}epms/v1/mobileApp/vaccineSignedCertQrCodeUrl",
                                    headers=headers)
        return response.json()

    @staticmethod
    def public_ec_key_points(public_key: bytes) -> Tuple[str, str]:
        """
        Extract the uncompressed EC point from a DER SubjectPublicKeyInfo and
        return its (x, y) coordinates base64url-encoded.
        This code adapted from: https://stackoverflow.com/a/59537764/1548275
        """
        public_key_asn1, _remainder = asn1_decoder.decode(public_key)
        public_key_bytes = public_key_asn1[1].asOctets()
        off = 0
        # 0x04 prefix marks an uncompressed point: x and y follow in full.
        if public_key_bytes[off] != 0x04:
            raise ValueError("EC public key is not an uncompressed point")
        off += 1
        size_bytes = (len(public_key_bytes) - 1) // 2
        x_bin = public_key_bytes[off:off + size_bytes]
        x = int.from_bytes(x_bin, 'big', signed=False)
        off += size_bytes
        y_bin = public_key_bytes[off:off + size_bytes]
        y = int.from_bytes(y_bin, 'big', signed=False)
        off += size_bytes
        # Re-encode each coordinate as base64 with URL-safe altchars.
        bl = (x.bit_length() + 7) // 8
        bytes_val = x.to_bytes(bl, 'big')
        x_str = base64.b64encode(bytes_val, altchars='-_'.encode()).decode()
        bl = (y.bit_length() + 7) // 8
        bytes_val = y.to_bytes(bl, 'big')
        y_str = base64.b64encode(bytes_val, altchars='-_'.encode()).decode()
        return x_str, y_str

    @staticmethod
    def cosekey_from_jwk_dict(jwk_dict: Dict) -> cosekey.CoseKey:
        """
        Create CoseKey from JWK
        Adapted from https://github.com/hannob/vacdec
        """
        # Only EC P-256 keys are supported; anything else is rejected.
        if jwk_dict["kty"] != "EC":
            raise ValueError("Only EC keys supported")
        if jwk_dict["crv"] != "P-256":
            raise ValueError("Only P-256 supported")
        key = ec2.EC2(
            crv=curves.P256,
            x=cjwt_utils.b64d(jwk_dict["x"].encode()),
            y=cjwt_utils.b64d(jwk_dict["y"].encode()),
        )
        key.key_ops = [keyops.VerifyOp]  # verification-only key
        if "kid" in jwk_dict:
            key.kid = bytes(jwk_dict["kid"], "UTF-8")
        return key

    @staticmethod
    def verify_signature(cose_msg: CoseMessage) -> bool:
        """Verify cose_msg's signature against the class-level public key."""
        x, y = MysejahteraPrivateAPI.public_ec_key_points(base64.b64decode(MysejahteraPrivateAPI.public_key))
        # Build a JWK-shaped dict carrying the key id from the COSE header.
        key_dict = {'crv': "P-256",
                    'kid': cose_msg.phdr[KID].hex(),
                    'kty': "EC",
                    'x': x,
                    'y': y,
                    }
        jwt_key = MysejahteraPrivateAPI.cosekey_from_jwk_dict(key_dict)
        cose_msg.key = jwt_key
        if not cose_msg.verify_signature():
            return False
        return True

    @staticmethod
    def decode_vaccine_cert(payload: str) -> CoseMessage:
        """Decode an 'HC1:'-prefixed certificate: base45 -> zlib -> COSE."""
        assert payload[0:3] == "HC1"
        # Strip HC1 since it's the magic value for this vaccine cert
        b45data = payload[4:]
        # Decode the data
        zlibdata = base45.b45decode(b45data)
        # Uncompress the data
        decompressed = zlib.decompress(zlibdata)
        return CoseMessage.decode(decompressed)
| mahadirz/MySejahtera-Private-API | api.py | api.py | py | 4,743 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "requests.post",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "requests.request",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "pyasn1.codec.ber.decoder.decode",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "pyasn1... |
12149097983 | import requests
import json
import os
from dotenv import load_dotenv
# Read .env (if present) into the process environment.
load_dotenv()
# Credentials come from the environment; never commit the .env file.
token = os.environ.get("PERSONAL_TOKEN")
project_id = os.environ.get("PROJECT_ID")
# API documentation:
# https://api.fieldmanager.io/labmanager/api/docs
api_base_url = "https://api.fieldmanager.io/labmanager/api"
test_type = "TRIAXIAL_COMPRESSION"
# ALL TEST TYPES:
# test_type = "WATER_CONTENT"
# test_type = "VISUAL_DESCRIPTION"
# test_type = "BULK_DENSITY_UNIT_WEIGHT"
# test_type = "FALL_CONE"
# test_type = "LIQUID_PLASTIC_LIMIT"
# test_type = "PARTICLE_DENSITY"
# test_type = "PARTICLE_SIZE_DISTRIBUTION"
# test_type = "POCKET_PENETROMETER"
# test_type = "UNIAXIAL_COMPRESSION_STRENGTH"
# test_type = "TRIAXIAL_COMPRESSION"
# test_type = "DIRECT_SIMPLE_SHEAR"
# test_type = "CONSTANT_RATE_OF_STRAIN"
# test_type = "INCREMENTAL_LOADING"
# test_type = "RESSONANT_COLUMN"
# test_type = "RING_SHEAR"
# test_type = "UNDRAINED_UNCONSOLIDATED"
# test_type = "DIRECT_SHEAR"
# test_type = "POINT_LOAD"
# test_type = "TENSILE_STRENGTH"
# Optional call that filters by test type
api_url = f"{api_base_url}/projects/{project_id}/tests?test_type={test_type}"
# Call to get all labtests regardless of type:
# api_url = f"{api_base_url}/projects/{project_id}/tests"
# Authentication and content-type headers.
# NOTE(review): token is None when PERSONAL_TOKEN is unset, which produces
# the literal header "Bearer None" -- confirm the API rejects it clearly.
headers = {
    "Authorization": f"Bearer {token}",
    "Content-Type": "application/json",
}
# Make the actual API request.
api_response = requests.get(api_url, headers=headers)
# Pretty-print the JSON response.
print(json.dumps(api_response.json(), indent=2))
| norwegian-geotechnical-institute/labmanager-api-demo | example.py | example.py | py | 1,691 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
... |
74839486183 |
from lxml import etree
import pandas
def getdata(name, lb):
    """Parse one saved OpenSea listing page and extract item rows.

    name -- path to the saved HTML file.
    lb -- category label; currently unused, kept for caller compatibility.
    Returns a list of [name, price, quantity, url, time] string lists,
    one per listing card, with missing fields as ''.
    """
    # Context manager guarantees the file handle is closed even if parsing
    # raises (the original left it open on failure).
    with open(name, encoding="utf-8") as f:
        text = f.read()
    htmll = etree.HTML(text)
    allarr = []
    for card in htmll.xpath('//div[@role="listitem"]'):
        # Relative XPaths into each listing card; empty matches join to "".
        # Positions assume OpenSea's card layout -- TODO confirm if the
        # markup changes.
        item_name = card.xpath('./button/div/div[2]/div/div/div/div[2]/span[2]/a/div/text()')
        price = card.xpath('./button/div/div[3]/div/div[2]/span/div/div/text()')
        quantity = card.xpath('./button/div/div[4]/div/text()')
        url = card.xpath('./button/div/div[6]/div/a/@href')
        when = card.xpath('./button/div/div[7]/div/a/text()')
        allarr.append([
            ''.join(item_name),
            ''.join(price),
            ''.join(quantity),
            ''.join(url),
            ''.join(when),
        ])
    return allarr
if __name__ == '__main__':
    allarr = []
    filename = "a2list"
    lb = "家居"
    for i in range(0, 2000):
        # Saved pages may be missing or truncated; failures are expected and
        # simply skipped (the original duplicated the progress report in both
        # the try and except branches -- merged here).
        try:
            allarr.extend(getdata('../html/' + filename + str(i) + '.html', lb))
        except Exception:
            pass
        if i % 25 == 24:
            # Progress report every 25 files: index + de-duplicated row count.
            print("---")
            print(i)
            print(len(set(";".join(row) for row in allarr)))
    print(len(allarr))
    # De-duplicate rows by joining fields with ';' (assumes fields contain no
    # ';' themselves -- TODO confirm against the scraped data).
    kk = list(set(";".join(row) for row in allarr))
    print(len(kk))
    bb = [row.split(";") for row in kk]
    name = ['name', "price", "qu", "url", "time"]
    test = pandas.DataFrame(columns=name, data=bb)
    test.to_csv("./csv/aa0518v2" + filename + '2.csv', encoding='utf-8')
| chenqiuying1023/opensea-supergucci | getinfo2.py | getinfo2.py | py | 3,143 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "lxml.etree.HTML",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "lxml.etree",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 111,
"usage_type": "call"
}
] |
12255944849 | import streamlit as st
import pandas as pd
import numpy as np
import base64
import matplotlib.pyplot as plt
import pickle
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from xgboost import XGBRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_absolute_error, r2_score
# Define the app
def app():
# Set the page title and description
st.set_page_config(page_title='ROP Prediction App', page_icon=':chart_with_upwards_trend:', layout='wide')
st.title('ROP Prediction App')
st.markdown('This app predicts the Rate of Penetration (ROP) using real-time drilling data.')
st.write("-----")
st.write("")
st.write("")
if 'df_pred' not in st.session_state:
st.session_state['df_pred'] = pd.DataFrame()
# Upload the drilling data
st.subheader('Make a Machine Learning model to predict the Rate of Penetration (ROP).')
uploaded_file = st.file_uploader('Upload your drilling data (CSV file)', type=['csv'])
if uploaded_file is not None:
# Load the drilling data into a pandas DataFrame
df = pd.read_csv(uploaded_file)
# Display the drilling data
st.write('Drilling Data:')
st.dataframe(df, height=200)
# Select the prediction model
st.subheader('Selecting Prediction Model and Features')
model_name = st.selectbox('Select the prediction model', ['Random Forest Regression', 'Gradient Boosting Regression', 'XGBoost Regression', 'Decision Tree Regression'])
col1, col2 = st.columns(2)
with col1:
# Select the Rate of Penetration column
target_column = st.selectbox('Select the Rate of Penetration column', list(df.columns), key='target_column')
with col2:
# Select the input features for the ROP prediction model
selected_features = st.multiselect('Select the input features', list(df.drop('Rate of Penetration m/h',axis=1).columns))
# Set the model parameters based on the selected model
st.write(f'<h3 style="font-size:16px;">Adjust model parameters for {model_name}</h3>', unsafe_allow_html=True)
if model_name == 'Random Forest Regression':
# Create three columns with equal width
col1, col2, col3 = st.columns(3)
# Add sliders to each column
with col1:
n_estimators = st.slider('Number of Trees', min_value=10, max_value=500, value=100, step=10)
with col2:
max_depth = st.slider('Max Depth', min_value=1, max_value=20, value=10)
with col3:
min_samples_split = st.slider('Min Samples Split', min_value=2, max_value=20, value=5)
# Create a dictionary with the slider values
model_params = {'n_estimators': n_estimators,
'max_depth': max_depth,
'min_samples_split': min_samples_split}
model = RandomForestRegressor(**model_params)
elif model_name == 'Gradient Boosting Regression':
# Create three columns with equal width
col1, col2, col3 = st.columns(3)
# Add sliders to each column
with col1:
n_estimators = st.slider('Number of Trees', min_value=10, max_value=500, value=100, step=10)
with col2:
max_depth = st.slider('Max Depth', min_value=1, max_value=20, value=10)
with col3:
min_samples_split = st.slider('Min Samples Split', min_value=2, max_value=20, value=5)
# Create a dictionary with the slider values
model_params = {'n_estimators': n_estimators,
'max_depth': max_depth,
'min_samples_split': min_samples_split}
model = GradientBoostingRegressor(**model_params)
elif model_name == 'XGBoost Regression':
# Create three columns with equal width
col1, col2, col3 = st.columns(3)
# Add sliders to each column
with col1:
n_estimators = st.slider('Number of Trees', min_value=10, max_value=500, value=100, step=10)
with col2:
max_depth = st.slider('Max Depth', min_value=1, max_value=20, value=10)
with col3:
learning_rate = st.slider('Learning Rate', min_value=0.01, max_value=0.5, value=0.1, step=0.01)
# Create a dictionary with the slider values
model_params = {'n_estimators': n_estimators,
'max_depth': max_depth,
'learning_rate': learning_rate}
model = XGBRegressor(**model_params)
elif model_name == 'Decision Tree Regression':
# Create two columns with equal width
col1, col2 = st.columns(2)
# Add sliders to each column
with col1:
max_depth = st.slider('Max Depth', min_value=1, max_value=20, value=10)
with col2:
min_samples_split = st.slider('Min Samples Split', min_value=2, max_value=20, value=5)
# Create a dictionary with the slider values
model_params = {'max_depth': max_depth,
'min_samples_split': min_samples_split}
model = DecisionTreeRegressor(**model_params)
# else:
# # Create two columns with equal width
# col1, col2 = st.columns(2)
#
# # Add sliders to each column
# with col1:
# generations = st.slider('Number of generations', min_value=5, max_value=50, value=10, step=5)
#
# with col2:
# population_size = st.slider('Population size', min_value=10, max_value=100, value=50, step=10)
#
# # Create a dictionary with the slider values
# model_params = {'generations': generations,
# 'population_size': population_size}
#
# model = TPOTRegressor(generations=model_params['generations'], population_size=model_params['population_size'], verbosity=0, random_state=42)
# Select test size
st.write('<h3 style="font-size:16px;">Train-test split</h3>', unsafe_allow_html=True)
test_size = st.slider('Select Test Size', min_value=0.1, max_value=0.5, value=0.2, step=0.01)
# Make the ROP prediction
button_html = '<button style="background-color: lightgreen; color: white; font-size: 16px; padding: 0.5em 1em; border-radius: 5px; border: none;">Make Prediction</button>'
if st.button('Make Prediction',use_container_width=True):
st.text('Prediction in progress...') # display message while prediction is happening
# Check if the target column exists in the input data
if target_column not in df.columns:
st.warning(f'The input data does not have a column named "{target_column}". Please upload valid drilling data.')
else:
# Preprocess the input data
X = df[selected_features]
y = df[target_column]
# Train-test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=42)
# Fit the model to the data
model.fit(X_train, y_train)
# Predict the ROP
y_pred_train = model.predict(X_train)
y_pred_test = model.predict(X_test)
st.success('Prediction successful!') # display success message after prediction
# Save the model to a file
filename = 'model.pkl'
with open(filename, 'wb') as file:
pickle.dump(model, file)
# Load the saved model
with open(filename, 'rb') as file:
model = pickle.load(file)
# Encode the model file to base64
with open(filename, 'rb') as f:
bytes_data = f.read()
b64 = base64.b64encode(bytes_data).decode()
# Create a download link for the model file
href = f'<a href="data:file/model.pkl;base64,{b64}" download="model.pkl">Download Trained Model (.pkl)</a>'
st.markdown(href, unsafe_allow_html=True)
# Calculate MAE & R2 Score
MAE_train = mean_absolute_error(y_train, y_pred_train)
MAE_test = mean_absolute_error(y_test, y_pred_test)
R2_train = r2_score(y_train, y_pred_train)
R2_test = r2_score(y_test, y_pred_test)
st.subheader('Result')
col1, col2, col3 = st.columns([1,1,2])
with col1:
st.write('for training data\n- R2-score: <span style="color:#007D5C;font-weight:bold">{:.3f}</span>\n- MAE: <span style="color:#007D5C;font-weight:bold">{:.3f}</span>'.format(R2_train, MAE_train), unsafe_allow_html=True)
with col2:
st.write('for testing data\n- R2-score: <span style="color:#007D5C;font-weight:bold">{:.3f}</span>\n- MAE: <span style="color:#007D5C;font-weight:bold">{:.3f}</span>'.format(R2_test, MAE_test), unsafe_allow_html=True)
with col3:
# Display the ROP prediction
df_pred1 = pd.DataFrame({'ROP_actual':y_test,'ROP_pred': y_pred_test})
X_test['ROP_actual'] = df_pred1['ROP_actual']
X_test['ROP_pred'] = df_pred1['ROP_pred']
st.session_state['df_pred'] = pd.concat([st.session_state['df_pred'], X_test], axis=0)
# Add a download button to download the dataframe as a CSV file
csv = st.session_state['df_pred'].to_csv(index=False)
b64 = base64.b64encode(csv.encode()).decode() # some strings
href = f'<a href="data:file/csv;base64,{b64}" download="st.session_state[\'df_pred\'].csv">Download predicted ROP data</a>'
st.markdown(href, unsafe_allow_html=True)
# Display the dataframe in Streamlit
st.write('ROP Predicted:')
st.dataframe(st.session_state['df_pred'][['ROP_actual','ROP_pred']], height=200,width=400)
# Add a blank line between the buttons
st.write("")
st.write("")
st.write("")
st.write("________")
# Create a file uploader widget
st.subheader('Calculate ROP using created machine learning model.')
model_file = st.file_uploader("Upload a saved ML model (pkl)", type=["pkl"])
# If a file has been uploaded, load the model from the file
if model_file is not None:
with model_file:
model = pickle.load(model_file)
# Display the loaded model
if 'model' in locals():
st.write("<span style='color:green; font-weight:bold'>Model loaded successfully!</span>", unsafe_allow_html=True)
st.write(model)
# Get the list of column names
columns = st.session_state['df_pred'].drop(['ROP_actual','ROP_pred'],axis=1).columns.tolist()
# Create a row of input boxes using beta_columns
input_cols = st.columns(min(len(columns), 5))
input_array = np.zeros(len(columns))
for i, input_col in enumerate(input_cols):
input_value = input_col.number_input(label=columns[i], step=0.1, value=0.0, min_value=0.0, max_value=1000000.0, key=columns[i])
input_array[i] = input_value
# Create additional rows of input boxes if there are more than 5 columns
if len(columns) > 5:
for j in range(5, len(columns), 5):
input_cols = st.columns(min(len(columns)-j, 5))
for i, input_col in enumerate(input_cols):
input_value = input_col.number_input(label=columns[j+i], step=0.1, value=0.0, min_value=0.0, max_value=1000000.0, key=columns[j+i])
input_array[j+i] = input_value
# Define colors and font sizes
HIGHLIGHT_COLOR = '#22c1c3'
HEADER_FONT_SIZE = '20px'
RESULT_FONT_SIZE = '36px'
if st.button('Calculate ROP'):
input_array = input_array.reshape(1, -1)
rop = model.predict(input_array)
st.success('Calculated successful!')
# Format the output message
result_text = f"Calculated Rate of Penetration (ROP): {rop[0]:.2f} ft/hr"
result_html = f'<div style="font-size:{RESULT_FONT_SIZE}; color:{HIGHLIGHT_COLOR};">{result_text}</div>'
st.markdown(result_html, unsafe_allow_html=True)
# Add a blank line between the buttons
st.write("")
st.write("")
st.write("")
st.write("")
st.write("__________")
col1, col2 = st.columns(2)
with col1:
if st.button('HELP',use_container_width=True):
st.write('**Welcome to the ROP Prediction App!**')
st.write('This app helps you predict the **Rate of Penetration (ROP)** using **real-time drilling data**. Here are a few guidelines to use this app:')
st.write('1. Upload your **drilling data** in **CSV** format using the file uploader.')
st.write('2. Select the **prediction model** from the sidebar.')
st.write('3. Select the **Rate of Penetration (ROP)** column that you want to predict.')
st.write('4. Select the **input features** that you want to use for the prediction.')
st.write('5. Split your data into **training** and **testing** sets.')
st.write('6. Adjust the **model parameters** as per your requirements.')
st.write('7. Click on the **"Predict"** button to see the **predicted ROP values**.')
st.write('Note: This app uses **Random Forest Regression, Gradient Boosting Regression, XGBoost Regression, Decision Tree Regression,** and **TPOT Regression** to predict ROP value.')
with col2:
# Add a contact us button
email_icon = Image.open('email.png')
if st.button('Contact Us',use_container_width=True):
st.image(email_icon,width=150)
st.write('Please email us at <span style="font-size:20px">sahilvoraa@gmail.com</span>', unsafe_allow_html=True)
# Run the app
if __name__ == '__main__':
app()
| SahilVora55/ROP_prediction_app | ML_app.py | ML_app.py | py | 14,851 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "streamlit.set_page_config",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "streamlit.title",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "streamlit.markdown",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "streamli... |
32345698677 | # this module will serve us to translate the json that is being
# outputted from the GSC to a proper SQL query.
import json
class JsontoSQLConverter():
def __init__(self, data_file, table_name):
self.data_file = data_file
self.table_name = table_name
def convert(self):
# load the data
with open(self.data_file) as f:
data = json.load(f)
# preprocess
# convert date to a single item (instead of a list of one entry)
for item in data['rows']:
item['keys'] = item['keys'][0]
# chunk the data meanignfully
entries = []
tmp = []
for entry in data['rows']:
for item in entry.values():
tmp += [item]
entries.append(tmp)
tmp = []
# print(data)
"""for row in data:
print(row)"""
"""for i in range(len(data['rows'])):
print("----------------------")
for column in data['rows'][i]:
print(column, ": ", data['rows'][i][column])"""
# well let's store these keys somewhere
tablekeys = []
for i in range(len(data['rows'])):
for x in data['rows'][i]:
if x not in tablekeys:
tablekeys.append(x)
sqlquery = """
CREATE TABLE if not exists {1}(
{0} DATE,
{2},
id serial not null primary key
);""".format(tablekeys[0],
self.table_name,
",\n ".join(map(lambda x: "{0} VARCHAR".format(x), tablekeys[1:])))
# create a table with the whole data and populate it with all entries from upstream
# so that later we can use that table in our
# INSERT INTO performance
# SELECT keys FROM dates(keys) WHERE keys=date_in_question
# AND (SELECT keys FROM performance WHERE keys = date_in_question) IS NULL;
# dates list
dates = []
for x in range(len(data['rows'])):
dates.append(data['rows'][x]['keys'][0])
sqlquery += """
CREATE TABLE IF NOT EXISTS tmp(
{3} DATE,
{0}
);
INSERT INTO tmp({2}) VALUES
{1};
""".format(",\n ".join(map(lambda x: "{0} VARCHAR".format(x), tablekeys[1:])),
', '.join(map(lambda t: '({0})'.format(t),
[', '.join(
['\''+str(x)+'\'' if entries[a].index(x) == 0 else str(x) for x in entries[a]])
for a in range(len(entries))
]
)
),
", ".join([item.strip('[]') for item in tablekeys]),
tablekeys[0])
# Add a public key
# sqlquery += """
# alter table {0} add if not exists id serial not null primary key ;
# """.format(self.table_name)
import re
for i in range(len(data['rows'])):
sqlquery += """
INSERT INTO {0}({1})
SELECT {1} FROM tmp
WHERE {3}=\'{4}\'
AND (
SELECT {3} FROM {0}
WHERE {3} = \'{4}\'
)
IS NULL;
""".format(self.table_name,
", ".join([item.strip('[]') for item in tablekeys]),
", ".join([(str(data['rows'][i][key])).strip('[]') for key in tablekeys]),
tablekeys[0],
str(data['rows'][i]['keys']).strip('[]'))
database = open("database.sql", "w")
database.write(sqlquery)
# and delete the tmp table
sqlquery += """
DROP TABLE tmp;
"""
return sqlquery, data['rows'][-1]['keys'][-1]
def get_last_date(self):
pass
def main():
data_file = "./data/data_Available dates.json"
table_name = "performance"
Converter = JsontoSQLConverter(data_file, table_name)
sqlQuery = Converter.convert()
if __name__ == "__main__":
main()
| IliassAymaz/SofterpawIntel | SofterPawInsights/querying/jsontosql.py | jsontosql.py | py | 3,956 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 15,
"usage_type": "call"
}
] |
8460085849 |
#!/usr/bin/python3
# 提取目录下所有图片, 把RGB的图片修改为BGR图片保存
# from PIL import Image
import os.path
import sys, os
import cv2
def convertjpg(inputdir, outdir):
if not os.path.isdir(outdir):
os.makedirs(outdir)
files= os.listdir(inputdir) #得到文件夹下的所有文件名称
sorted_files = sorted(files)
for file in sorted_files:
print(file)
img = cv2.imread(inputdir + '/' + file)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) #cv2默认为bgr顺序
(w,h,c) = img.shape
if w!= 224 or h!=224 or c!=3:
print("error shape ", img.shape)
sys.exit(-1)
save_name = file.split(".")[0] + '.jpg'
save_file = os.path.join(outdir, os.path.basename(save_name))
cv2.imwrite(save_file, img)
if __name__ == "__main__":
if len(sys.argv) !=3:
print("Usage: ", sys.argv[0], " input_path output_path")
sys.exit(-1)
convertjpg(sys.argv[1], sys.argv[2])
| yywbxgl/onnx_tools | python_script/img_rgb_to_bgr.py | img_rgb_to_bgr.py | py | 1,051 | python | zh | code | 2 | github-code | 36 | [
{
"api_name": "os.path.isdir",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number"... |
19404942737 | import os
import json
import fire
import random
import pickle
import math
from tqdm import tqdm
import tensorflow as tf
import numpy as np
from tensorflow.keras import layers
from gated_cnn import GatedCNN
from adaptive_softmax import AdaptiveSoftmax
from data_utils import load_and_process_data, batchify, get_tokenizer, iterate
from config import FW_CONFIG
#python pretrain.py --train_file data/imdb/lm_train.txt --valid_file data/imdb/lm_val.txt
def get_trainbale_params():
return np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
def _sampled_lm_loss(pre_logits, labels,
vocab_size,
vocab_freqs=None,
num_candidate_samples=-1,
weight=None):
"""
Sampled Softmax loss to speedup training.
Importance sampling is performed based on vocab_freqs
:params:
- pre_logits: Output of RNN Layer
- labels: Target tokens
- vocab_size: max vocab size
- vocab_freqs: list of int containing frequency of
words (must of num_candidate_samples > 0)
- num_candidate_samples (-1): Number of samples to sample for Softmax
:output:
- Tf variable loss
"""
# Get the weight and biases
pre_logits_hidden_size = pre_logits.get_shape()[-1]
if weight is None:
lin_w = tf.get_variable(name="lin_w", shape=[pre_logits_hidden_size, vocab_size],\
dtype=tf.float32)
else:
lin_w = weight
lin_b = tf.get_variable(name="lin_b", shape=[vocab_size],\
dtype=tf.float32)
# Reshape Inputs and Lables
inputs_reshaped = tf.reshape(pre_logits, [-1, int(pre_logits.get_shape()[2])])
labels_reshaped = tf.reshape(labels, [-1])
if num_candidate_samples > -1:
# Sampled Softmax Case
assert vocab_freqs is not None
labels_reshaped = tf.expand_dims(labels_reshaped, -1)
sampled = tf.nn.fixed_unigram_candidate_sampler(
true_classes=labels_reshaped,
num_true=1,
num_sampled=num_candidate_samples,
unique=True,
range_max=vocab_size,
unigrams=vocab_freqs)
lm_loss = tf.nn.sampled_softmax_loss(
weights=tf.transpose(lin_w),
biases=lin_b,
labels=labels_reshaped,
inputs=inputs_reshaped,
num_sampled=num_candidate_samples,
num_classes=vocab_size,
sampled_values=sampled)
else:
# Normal Softmax Case
logits = tf.nn.xw_plus_b(x=inputs_reshaped, weights=lin_w, biases=lin_b)
lm_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels_reshaped)
lm_loss = tf.identity(tf.reduce_mean(lm_loss), name='lm_xentropy_loss')
return lm_loss
def _gcnn_block(input):
output = GatedCNN()(input)
return output
def language_model_graph(input_tokens, output_tokens,
initial_state, num_layers,
max_vocab_size, vocab_freqs,
batch_size, embed_size,
hidden_size, dropout,
optimizer,
num_candidate_samples,
maxlen, clip,
type_="rnn"):
"""
This creates language model tensorflow graph. It takes placeholder
for input tokens, output_tokens (target), initial state for LSTM layers.
Lanugage model graph has Embedding Layer followed by LSTM layers. Loss
is calculated using sampled softmax layer of tensorflow.
:params:
- input_tokens: Placeholder for input tokens [shape:(batch_size, None)]
- output_tokens: Placeholder for output tokens (used as target)
[shape:(batch_size, None)]
- initial_state: Initial states placeholder for feeding state in LSTM
Layers [shape:(num_layers, batch_size, hidden_size)]
- num_layers: Number of LSTM Layers
- max_vocab_size: Maximum Vocabulary size
- vocab_freqs: Frequency of words
- batch_size: Batch Size (should not be none)
- embed_size: Embedding Dimensions
- hidden_size: Hidden size of LSTM layers
- dropout: Dropout to keep between Layers, same dropout is applied after
Embedding as well as between and after LSTM Layers
- num_candidate_samples: Candidate Samples to consider for Sampled softmax
-1 to calculate complete softmax
- maxlen: Sequence length of examples (bptt)
- clip: clip gradients by `clip`
:returns:
- train_op: Training Op Tensorflow
- training_flag: Var for training flag
- sampled_loss: Sampled Loss Variable
- loss: Complete Loss Variable
- final_state: Output State of LSTMs
- weights: Dictionay containing weights of Embedding and LSTM layers
- learning_rate: Learning Rate Variable
"""
bptt = tf.shape(input_tokens)[1]
training_flag = tf.Variable(True)
learning_rate = tf.Variable(20.0)
embedding_layer = layers.Embedding(max_vocab_size, embed_size)
rnn_layers = []
for i in range(num_layers):
rnn_layers.append(layers.CuDNNLSTM(units=hidden_size,
return_sequences=True,
return_state=True))
embedded_input = embedding_layer(input_tokens)
embedded_input = tf.layers.dropout(
embedded_input ,
rate=dropout,
training=training_flag,
)
states = []
rnn_input = embedded_input
input_state_cs = initial_state[0]
input_state_hs = initial_state[1]
if type_ == "rnn":
final_state_cs = []
final_state_hs = []
for i in range(num_layers):
state_c, state_h = input_state_cs[i], input_state_hs[i]
rnn_outputs = rnn_layers[i](rnn_input, initial_state=(state_c, state_h))
rnn_output, final_state_c, final_state_h = rnn_outputs
rnn_output = tf.layers.dropout(
rnn_output ,
rate=dropout,
training=training_flag,
noise_shape=[batch_size, 1, hidden_size]
)
final_state_cs.append(final_state_c)
final_state_hs.append(final_state_h)
final_state_c = tf.stack(final_state_cs, 0)
final_state_h = tf.stack(final_state_hs, 0)
final_state = (final_state_c, final_state_h)
elif type_ == "gcnn":
rnn_output = _gcnn_block(rnn_input)
final_state = (input_state_cs, input_state_hs)
rnn_output = layers.Dense(hidden_size, activation='relu')(rnn_output)
# rnn_output = tf.layers.dropout(
# rnn_output ,
# rate=dropout,
# training=training_flag,
# noise_shape=[batch_size, 1, embed_size]
# )
weight = embedding_layer.weights[0]
weight = tf.transpose(weight, [1, 0])
# weight = None
with tf.variable_scope("loss"):
sampled_loss = _sampled_lm_loss(rnn_output, output_tokens,
max_vocab_size,
vocab_freqs=vocab_freqs,
num_candidate_samples=num_candidate_samples,
weight=weight)
with tf.variable_scope("loss", reuse=True):
loss = _sampled_lm_loss(rnn_output, output_tokens,
max_vocab_size,
vocab_freqs=vocab_freqs,
num_candidate_samples=-1,
weight=weight)
# softmax = AdaptiveSoftmax(hidden_size, cutoff=[2800, 20000, 76000])
# loss, _ = sampled_loss, _ = softmax.loss(rnn_output, output_tokens)
with tf.variable_scope("optimizer"):
# sampled_loss = loss
t_vars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(sampled_loss*maxlen, t_vars),
clip)
if optimizer == "adam":
train_op = tf.compat.v1.train.AdamOptimizer(learning_rate).apply_gradients(zip(grads, t_vars))
elif optimizer == "sgd":
train_op = tf.train.GradientDescentOptimizer(learning_rate).apply_gradients(zip(grads, t_vars))
else:
train_op = tf.compat.v1.train.MomentumOptimizer(learning_rate, momentum=0.9).apply_gradients(zip(grads, t_vars))
# Extract Weights
weights = {}
weights["embedding"] = embedding_layer.weights
weights["lstm"] = [rnn_layer.weights for rnn_layer in rnn_layers]
return train_op, training_flag, sampled_loss, loss, final_state, weights,\
learning_rate
def _run_epoch(X, y, epoch, session, sampled_loss, loss,
num_layers,
batch_size,
hidden_size,
input_placeholder,
target_placeholder,
initial_state_c,
initial_state_h,
learning_rate_var,
learning_rate,
train_op,
final_state_c,
final_state_h,
training_flag,
seq_length=45,
train=False,
lr_cosine_decay_params=None,
print_progress=True):
"""
Runs a single epoch of training or validation
:params:
- X: Input
- y: Target
- train: Training Flag (Dropouts are turned off by this)
- print_progress: Print the progress (tqdm progress bar)
- All other params are self expanatory or already described
:outputs:
- mean loss of all batches
"""
data_iterator = iterate((X, y), seq_length=seq_length)
computed_loss = []
# Create a tqdm iterator
max_steps = int(X.shape[1]/seq_length)
if print_progress:
tqdm_range = tqdm(list(range(max_steps)))
else:
tqdm_range = range(max_steps)
batch_size = X.shape[0]
# Intial LSTM State
c = np.zeros((num_layers, batch_size, hidden_size), dtype=np.float32)
h = np.zeros((num_layers, batch_size, hidden_size), dtype=np.float32)
# Iterate over data
for i in tqdm_range:
# if lr_cosine_decay_params:
# learning_rate = lr_cosine_decay_params["learning_rate"]
# t_mul = lr_cosine_decay_params["t_mul"]
# steps = (epoch*max_steps + i + 1)
# cycles_completed = int(math.log(
# steps * (t_mul - 1) /
# lr_cosine_decay_params["first_decay_steps"] + 1
# ) / math.log(t_mul)
# )
# cycle = lr_cosine_decay_params["first_decay_steps"] * \
# ( t_mul**cycles_completed + lr_cosine_decay_params["first_decay_steps"] )
# min_learning_rate = lr_cosine_decay_params["learning_rate"] * lr_cosine_decay_params["alpha"]
# learning_rate = min_learning_rate + 0.5 * (learning_rate - min_learning_rate) * \
# math.cos( steps * math.pi / cycle )
# learning_rate = tf.train.cosine_decay_restarts(
# global_step=epoch*max_steps + i ,
# **lr_cosine_decay_params)
item = next(data_iterator)
feed_dict = {input_placeholder: item[0],
target_placeholder: item[1],
initial_state_c: c,
initial_state_h: h,
training_flag:train,
learning_rate_var: learning_rate}
if train:
ops = [train_op, sampled_loss, final_state_c, final_state_h]
_, loss_, c, h = session.run(ops, feed_dict=feed_dict)
else:
ops = [loss, final_state_c, final_state_h]
loss_, c, h = session.run(ops, feed_dict=feed_dict)
computed_loss.append(loss_)
if print_progress:
tqdm_range.set_description('Loss {}'.format(str(round(np.mean(computed_loss),2))))
return np.mean(computed_loss)
def pretrain_encoder(train_file, valid_file,\
save_folder='saved_model/base', tokenizer=None,
restore=False,
**kwargs):
"""
Module for running the training and validation subroutines.
:params:
- train_file: Training File, File with sentences separated by newline
- valid_file: Validation File, same format as above
- save_folder: Folder to save output files and models
- restore: Whether to restore from the save_folder (can be used
to finetune on a smaller dataset)
- tokenizer: Tokenizer to use for tokenizing sentences into tokens
- **kwargs: other params:
* batch_size
* hidden_size
* num_layers
* epochs
* seq_length
:outputs:
None
"""
config = FW_CONFIG
tokenizer = get_tokenizer(tokenizer) if tokenizer else None
batch_size = kwargs.get("batch_size") or FW_CONFIG["batch_size"]
hidden_size = kwargs.get("hidden_size") or FW_CONFIG["hidden_size"]
num_layers = kwargs.get("num_layers") or FW_CONFIG["num_layers"]
epochs = kwargs.get("epochs") or FW_CONFIG.pop("epochs")
if "epochs" in FW_CONFIG:
FW_CONFIG.pop("epochs")
FW_CONFIG["num_candidate_samples"] = kwargs.get("num_candidate_samples") or FW_CONFIG["num_candidate_samples"]
seq_length = FW_CONFIG.pop("seq_length")
learning_rate = kwargs.get("learning_rate", 0.001)
optimizer = kwargs.get("optimizer", "adam")
print_progress = kwargs.get("print_progress", False)
type_ = kwargs.get("type", "rnn")
learning_rate_decay = 0.1
lr_cosine_decay_params = {
"learning_rate": learning_rate,
"first_decay_steps": 2000,
"t_mul": 2.0,
"alpha": 0.01
}
tokenizer_json_file = os.path.join(save_folder, "tokenizer.json")
# Load data and Batchify
all_data = load_and_process_data(train_file, valid_file,
max_vocab_size=config["max_vocab_size"],
custom_tokenizer_function=tokenizer,
tokenizer_json_file=tokenizer_json_file,
restore_from=tokenizer_json_file if restore else None)
word_freq, word_index, train_data, valid_data = all_data
X_train, y_train = batchify(train_data, batch_size)
X_valid, y_valid = batchify(valid_data, batch_size)
# Save the Vocab and frequency files
if not os.path.exists(save_folder):
os.makedirs(save_folder)
# Saving config and word_index file
json.dump(word_index, open(os.path.join(save_folder, "word_index.json"), "w"))
json.dump(word_freq, open(os.path.join(save_folder, "word_freq.json"), "w"))
json.dump(FW_CONFIG, open(os.path.join(save_folder, "config.json"), "w"))
# Arranging tokens in alist, this will go in vocab file
vocab = [" "] + [i[0] for i in sorted(word_index.items(), key=lambda x: x[1])][:FW_CONFIG["max_vocab_size"]+1]
open(os.path.join(save_folder, "vocab.txt"), "w").write("\n".join(vocab))
open(os.path.join(save_folder, "word_freqs.txt"), "w").write("\n".join(word_index))
# Check max_vocab_size
FW_CONFIG["max_vocab_size"] = min(len(word_index) + 1, FW_CONFIG["max_vocab_size"])
print("Vocabulary Size: {}".format(FW_CONFIG["max_vocab_size"]))
# Define Placeholder and Initial States
inputs = tf.placeholder(dtype=tf.int32, shape=(batch_size,None), name='input')
targets = tf.placeholder(dtype=tf.int64, shape=(batch_size,None), name='target')
initial_state_c = tf.placeholder(dtype=tf.float32, shape=(num_layers, batch_size, hidden_size),\
name='input_state_c')
initial_state_h = tf.placeholder(dtype=tf.float32, shape=(num_layers, batch_size, hidden_size),\
name='input_state_h')
# Create the Graph
train_op, training_flag, sampled_loss,\
loss, rnn_states, weights, learning_rate_var = language_model_graph(inputs, targets,
(initial_state_c, initial_state_h),
vocab_freqs=word_freq,
optimizer=optimizer,
type_=type_,
**config)
final_state_c, final_state_h = rnn_states
sess = tf.Session()
sess.run(tf.global_variables_initializer())
print("total number of trainable params {}".format(get_trainbale_params()))
# Define run epoch function params (passed as kwargs)
run_epoch_params = {"session": sess,
"sampled_loss": sampled_loss,
"loss": loss,
"num_layers": num_layers,
"input_placeholder": inputs,
"target_placeholder": targets,
"initial_state_c": initial_state_c,
"initial_state_h": initial_state_h,
"learning_rate_var":learning_rate_var,
"learning_rate":learning_rate,
"train_op": train_op,
"final_state_c": final_state_c,
"final_state_h": final_state_h,
"seq_length": seq_length,
"batch_size": batch_size,
"hidden_size":hidden_size,
"training_flag": training_flag,
"lr_cosine_decay_params": lr_cosine_decay_params}
valid_losses = [1000]
vars = tf.trainable_variables()
vars = [i for i in vars if 'optimizer' not in i.name]
saver = tf.train.Saver(vars)
if restore:
saver.restore(sess, os.path.join(save_folder, "model.ckpt"))
for epoch in range(epochs):
decay = (learning_rate_decay ** int((max(epoch - 5, 0)/2)))
run_epoch_params['learning_rate'] = learning_rate * decay
# Training Epoch
train_loss = _run_epoch(X_train, y_train,
train=True,
epoch=epoch,
print_progress=print_progress,
**run_epoch_params)
# Valid Epoch
valid_loss = _run_epoch(X_valid, y_valid,
train=False,
print_progress=False,
epoch=epoch,
**run_epoch_params)
format_values = [epoch, train_loss, np.exp(train_loss),\
valid_loss, np.exp(valid_loss)]
print("Epoch {0}, Train Loss {1:.2f}, Train Perplexity {2:.2f},\
Val Loss {3:.2f}, Val Perplexity {4:.2f}".format(*format_values))
if valid_loss < min(valid_losses):
saver.save(sess, os.path.join(save_folder, "model.ckpt"))
numpy_weights = {}
weights_ = weights
for layer in weights:
numpy_weights[layer] = sess.run(weights[layer])
weights = weights_
pickle.dump(numpy_weights, open(os.path.join(save_folder, "weights.pkl"), "wb"))
valid_losses.append(valid_loss)
if __name__ == '__main__':
fire.Fire(pretrain_encoder)
| vivekverma239/lm_pretraining | pretrain.py | pretrain.py | py | 20,647 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "numpy.sum",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.prod",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "tensorflow.trainable_variables",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "tensorflow.get_va... |
73711972905 | #!/usr/bin/python3
# -*- mode: python; Encoding: utf-8; coding: utf-8 -*-
"""
----------------------------------------------------------------
clipweb
Author: ayaya (ayatec)
GitHub: https://github.com/ayatec/clipweb
----------------------------------------------------------------
"""
# ----------------------------------------------------------------
# Import
# ----------------------------------------------------------------
import json
import datetime
import time
from web import cgi
from db import flex_sqlite3
import cw_user
import cw_clip
import cw_list
import cw_code
import cw_nav
TIME = {}
TIME["init"] = datetime.datetime.now()
AUTO_GENERATE = None
# ----------------------------------------------------------------
# Class
# ----------------------------------------------------------------
class Clipweb:
    """Route a CGI request of the form ``type=<area>.<action>`` to the
    matching handler module (user/list/clip/code/nav) and collect a
    JSON-serializable response in ``result``.
    """

    # Response payload serialized by the caller.
    # NOTE(review): declared at class level, so it is shared between
    # instances; harmless here because one CGI process builds one response.
    result = {}

    def __init__(self):
        """Read the ``type`` CGI parameter and dispatch the request."""
        self.cgi = cgi.CGI()
        _type = self.cgi.get("type")
        if _type is not None:
            self.check_type(_type)
        else:
            self._set_error("type is null")
            self.exit()

    def _set_error(self, message):
        """Record an error message in the response payload."""
        self.result["error"] = {"message": message}

    def check_type(self, _type=None):
        """Split ``<area>.<action>`` and dispatch to the area handler."""
        _type = _type.split(".")
        if len(_type) < 2:
            # Malformed request without a "."; the original raised
            # IndexError here instead of reporting an error.
            self._set_error("check_type: {0}".format(_type))
            return
        area, action = _type[0], _type[1]
        handlers = {
            "user": self.check_user,
            "list": self.check_list,
            "clip": self.check_clip,
            "code": self.check_code,
            "nav": self.check_nav,
        }
        if area in handlers:
            handlers[area](action)
        else:
            self._set_error("check_type: {0}".format(_type))

    def check_user(self, _type=None):
        """Dispatch user account actions to cw_user.User."""
        USER = cw_user.User()
        USER.set_cgi(self.cgi)
        actions = {
            "register": USER.register,
            "login": USER.login,
            "logout": USER.logout,
            "setting": USER.setting,
            "info": USER.info,
            "leave": USER.leave,
        }
        if _type in actions:
            self.result["user"] = actions[_type]()
        else:
            self._set_error("check_user: {0}".format(_type))

    def check_list(self, _type=None):
        """Dispatch clip listing actions to cw_list.List."""
        LIST = cw_list.List()
        LIST.set_cgi(self.cgi)
        if _type == "search":
            self.result["list"] = LIST.search()
        else:
            # Fixed: the original reported "check_clip" here (copy-paste).
            self._set_error("check_list: {0}".format(_type))

    def check_clip(self, _type=None):
        """Dispatch clip management actions to cw_clip.Clip."""
        CLIP = cw_clip.Clip()
        CLIP.set_cgi(self.cgi)
        actions = {
            "new": CLIP.new,
            "setting": CLIP.setting,
            "load": CLIP.load,
            "delete": CLIP.delete,
            "share": CLIP.share,
            "privilege": CLIP.privilege,
            "ban": CLIP.ban,
            "history": CLIP.history,
        }
        if _type in actions:
            self.result["clip"] = actions[_type]()
        else:
            self._set_error("check_clip: {0}".format(_type))

    def check_code(self, _type=None):
        """Dispatch code content actions to cw_code.Code."""
        CODE = cw_code.Code()
        CODE.set_cgi(self.cgi)
        actions = {
            "load": CODE.load,
            "save": CODE.save,
            "sync": CODE.sync,
            "chat": CODE.chat,
        }
        if _type in actions:
            self.result["code"] = actions[_type]()
        else:
            # Fixed: the original reported "check_clip" here (copy-paste).
            self._set_error("check_code: {0}".format(_type))

    def check_nav(self, _type=None):
        """Dispatch navigation actions to cw_nav.Nav."""
        NAV = cw_nav.Nav()
        NAV.set_cgi(self.cgi)
        if _type == "feedback":
            self.result["nav"] = NAV.feedback()
        else:
            # Fixed: the original reported "check_clip" here (copy-paste).
            self._set_error("check_nav: {0}".format(_type))

    def exit(self):
        """Record the elapsed wall-clock time since module import."""
        global TIME
        TIME["app"] = datetime.datetime.now()
        self.result["exec_time"] = (TIME["app"] - TIME["init"]).total_seconds()
# ----------------------------------------------------------------
# Ready
# ----------------------------------------------------------------
# Handle the request immediately on execution and emit the JSON response
# body.  NOTE(review): HTTP headers are presumably written by web.cgi.CGI;
# confirm.
app = Clipweb()
print(json.dumps(app.result))
| ayatec/clipweb | src/python/clipweb.py | clipweb.py | py | 5,127 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "web.cgi.CGI",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "web.cgi",
... |
19789115086 | #create tokenized descriptions
import nltk
from nltk.tokenize import word_tokenize
def tokenize(col):
    """Word-tokenize each sentence in *col*.

    Returns a list of token lists, one per input sentence.  Every element
    is coerced with ``str`` first so non-string values (e.g. NaN from a
    pandas column) do not crash the tokenizer.
    """
    return [nltk.word_tokenize(str(sentence)) for sentence in col]
| Valparaiso-Data-Science/general-course-relevance-discovery | tripodscode/analysis copy/tokenizer.py | tokenizer.py | py | 338 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "nltk.word_tokenize",
"line_number": 9,
"usage_type": "call"
}
] |
33201215587 | from matplotlib import pyplot as plt
import random
plt.rcParams['font.sans-serif'] = ['SimHei']  # render CJK axis labels correctly
plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly
# Fake daily temperatures (20-35) for 31 days of March and of October.
y_3 = [random.randint(20, 35) for i in range(31)]
y_10 = [random.randint(20, 35) for i in range(31)]
# March occupies x positions 0-30; October is shifted to 40-70 so there is a
# visible gap between the two series on one axis.
x_3 = range(31)
x_10 = range(40, 71)
plt.figure(figsize=(20, 8), dpi=80)
plt.scatter(x_3, y_3, label='3月份')
plt.scatter(x_10, y_10, label='10月份')
# x_3 = x_3[::5]
# x_10 = x_10[::5]
# Human-readable tick labels, one per day ("March <d>" / "October <d>").
x_label = ['3月{}日'.format(i+1) for i in x_3]
x_label += ['10月{}日'.format(i-39) for i in x_10]
x = list(x_3) + list(x_10)
# Show every third tick, rotated so labels do not overlap.
plt.xticks(x[::3], x_label[::3], rotation=45)
plt.legend(loc='upper right')
plt.show()
| LelouchCcCC/python-practice | python-practice/Matplotlib/案例/scatter.py | scatter.py | py | 709 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 3,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 4,
"usage_type": "attribute"
},
{
"ap... |
10809168696 | import os
import shutil
import random
import urllib.request
import zipfile
# Destination of the extracted benchmarksgame driver scripts.
script_dir = "script/"
# Local benchmark sources copied into the script tree by copy_programs().
programs_dir = "programs/"
def copy_programs():
    """Copy benchmark programs into the script tree.

    Sources whose extension appears in ``add_implementation`` are copied
    twice: once unchanged and once renamed with the alternative runtime's
    extension (e.g. ``x.python3`` is also copied as ``x.pypy3``) so both
    implementations get benchmarked.
    """
    add_implementation = {
        "python3": "pypy3",
        "lua": "luajit",
        "node": "js",
        "php": "hhvm"
    }

    # Start from an empty output directory.
    dir_out = script_dir + "programs/"
    if os.path.isdir(dir_out):
        shutil.rmtree(dir_out)
    os.mkdir(dir_out)

    for d in os.listdir(programs_dir):
        din = programs_dir + d + "/"
        dout = dir_out + d + "/"
        if not os.path.isdir(din):
            continue
        if not os.path.isdir(dout):
            os.mkdir(dout)

        for program in os.listdir(din):
            shutil.copy(din + program, dout + program)
            extension = program.split(".")[-1]
            if extension in add_implementation:
                # Replace only the trailing extension.  The original used
                # str.replace(), which substitutes the FIRST occurrence and
                # corrupts names whose stem contains the extension text
                # (e.g. "lua_test.lua" -> "luajit_test.lua").
                new_name = program[: -len(extension)] + add_implementation[extension]
                shutil.copy(din + program, dout + new_name)
if __name__ == "__main__":
    tmp_zip_file = ".tmp.zip"
    print("Downloading...")
    # Fetch the benchmarksgame driver scripts archive.
    urllib.request.urlretrieve("http://benchmarksgame.alioth.debian.org/download/benchmarksgame-script.zip", tmp_zip_file)
    # Remove any previous extraction: mkdir first so rmtree cannot fail on a
    # missing directory; extractall recreates the tree below.
    if not os.path.isdir(script_dir):
        os.mkdir(script_dir)
    shutil.rmtree(script_dir)
    print("Extracting...")
    with zipfile.ZipFile(tmp_zip_file, "r") as z:
        z.extractall(script_dir)
    os.remove(tmp_zip_file)
    print("Loading programs and ini")
    # Install the local makefile configuration and the benchmark sources.
    shutil.copy("my.linux.ini", "script/makefiles/my.linux.ini")
    copy_programs()
    print("\nDONE\n")
    print("Run programs with $python2 script/bin/bencher.py")
    print("View raw results in script/summary")
    print("generate images with $python3 visualize.py")
| gareins/dynamic_benchmarks | init.py | init.py | py | 1,744 | python | en | code | 81 | github-code | 36 | [
{
"api_name": "os.path.isdir",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "shutil.rmtree",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"line_number"... |
7208980234 | #!/usr/bin/env python
from os import system as s
import json
player = "mpv"  # external media player command used to open a stream
# n_list.json schema (as used below): {"tv": [{"channel": ..., "url": ...}]}
with open("n_list.json") as l:
    ip_tv = json.load(l)
jml_channel = (len(ip_tv["tv"]))
channel = []
url = []
# Flatten the JSON entries into two parallel lists: names and stream URLs.
for i in range(0, len(ip_tv["tv"])):
    list_channel = ip_tv["tv"][i]["channel"]
    list_url = ip_tv["tv"][i]["url"]
    channel.append(list_channel)
    url.append(list_url)
try:
    # Menu loop: redraw the channel list, read a selection, launch the
    # player; runs until the user presses Ctrl+C.
    while True:
        s("clear")
        print(f"""
███╗   ██╗      ████████╗██╗   ██╗
████╗  ██║      ╚══██╔══╝██║   ██║
██╔██╗ ██║█████╗   ██║   ██║   ██║
██║╚██╗██║╚════╝   ██║   ╚██╗ ██╔╝
██║ ╚████║         ██║    ╚████╔╝
╚═╝  ╚═══╝         ╚═╝     ╚═══╝  By Nestero
	Streaming TV menggunakan IPTV.
    """)
        print("List Channel :")
        # Print three channels per row (every third entry ends the line).
        for i, key in enumerate(channel):
            if (i + 1) % 3:
                print(i, "=>", '{:15}'.format(key), end='\t')
            else:
                print(i, "=>", key, end='\n')
        # NOTE(review): a non-numeric or out-of-range selection raises
        # ValueError/IndexError and aborts the program -- confirm intended.
        p_tv = int(input(f"\nPilih Channel 0 - {jml_channel-1} > "))
        c_tv = url[p_tv]
        t_tv = channel[p_tv]
        if player == "mpv":
            # mpv: fullscreen, with the channel name as window title.
            s(f"mpv --fs --title='{t_tv}' {c_tv}")
        else:
            s(f"{player} {c_tv}")
except KeyboardInterrupt:
    print("^")
| mnabila/n-tv | ntv.py | ntv.py | py | 1,446 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 43,
"... |
23565719069 | # vim: set fileencoding=utf-8 :
import pytest
import pyvips
from helpers import JPEG_FILE, assert_almost_equal_objects
class TestGValue:
    """Round-trip tests for pyvips.GValue: write a typed value into a
    GValue and check that reading it back returns the same value."""

    def test_bool(self):
        """gbool round-trips both True and False."""
        gv = pyvips.GValue()
        gv.set_type(pyvips.GValue.gbool_type)
        gv.set(True)
        value = gv.get()
        assert value
        gv.set(False)
        value = gv.get()
        assert not value

    def test_int(self):
        """gint round-trips a small integer."""
        gv = pyvips.GValue()
        gv.set_type(pyvips.GValue.gint_type)
        gv.set(12)
        value = gv.get()
        assert value == 12

    def test_uint64(self):
        """guint64 round-trips G_MAXUINT64 without truncation."""
        gv = pyvips.GValue()
        gv.set_type(pyvips.GValue.guint64_type)
        gv.set(2 ** 64 - 1)  # G_MAXUINT64
        value = gv.get()
        assert value == 2 ** 64 - 1

    def test_double(self):
        """gdouble round-trips a float exactly."""
        gv = pyvips.GValue()
        gv.set_type(pyvips.GValue.gdouble_type)
        gv.set(3.1415)
        value = gv.get()
        assert value == 3.1415

    def test_enum(self):
        """Enums are set and read back as their nickname strings."""
        # the Interpretation enum is created when the first image is made --
        # make it ourselves in case we are run before the first image
        pyvips.vips_lib.vips_interpretation_get_type()
        interpretation_gtype = pyvips.gobject_lib. \
            g_type_from_name(b'VipsInterpretation')
        gv = pyvips.GValue()
        gv.set_type(interpretation_gtype)
        gv.set('xyz')
        value = gv.get()
        assert value == 'xyz'

    def test_flags(self):
        """Flags accept ints and nickname strings, read back as ints."""
        # the OperationFlags enum is created when the first op is made --
        # make it ourselves in case we are run before that
        pyvips.vips_lib.vips_operation_flags_get_type()
        operationflags_gtype = pyvips.gobject_lib. \
            g_type_from_name(b'VipsOperationFlags')
        gv = pyvips.GValue()
        gv.set_type(operationflags_gtype)
        gv.set(12)
        value = gv.get()
        assert value == 12
        # we also support setting flags with strings
        gv.set("deprecated")
        value = gv.get()
        assert value == 8
        # libvips 8.15 allows "deprecated|nocache" as well, not tested here

    def test_string(self):
        """gchararray round-trips a Python str."""
        gv = pyvips.GValue()
        gv.set_type(pyvips.GValue.gstr_type)
        gv.set('banana')
        value = gv.get()
        assert value == 'banana'

    def test_array_int(self):
        """VipsArrayInt round-trips a list of ints."""
        gv = pyvips.GValue()
        gv.set_type(pyvips.GValue.array_int_type)
        gv.set([1, 2, 3])
        value = gv.get()
        assert_almost_equal_objects(value, [1, 2, 3])

    def test_array_double(self):
        """VipsArrayDouble round-trips a list of floats."""
        gv = pyvips.GValue()
        gv.set_type(pyvips.GValue.array_double_type)
        gv.set([1.1, 2.1, 3.1])
        value = gv.get()
        assert_almost_equal_objects(value, [1.1, 2.1, 3.1])

    def test_image(self):
        """VipsImage round-trips an image object."""
        image = pyvips.Image.new_from_file(JPEG_FILE)
        gv = pyvips.GValue()
        gv.set_type(pyvips.GValue.image_type)
        gv.set(image)
        value = gv.get()
        assert value == image

    def test_array_image(self):
        """VipsArrayImage round-trips a list of images."""
        image = pyvips.Image.new_from_file(JPEG_FILE)
        r, g, b = image.bandsplit()
        gv = pyvips.GValue()
        gv.set_type(pyvips.GValue.array_image_type)
        gv.set([r, g, b])
        value = gv.get()
        # Fixed: the original read ``assert value, [r, g == b]`` which only
        # checked that ``value`` was truthy (the list was the assert
        # *message*, not a comparison).
        assert value == [r, g, b]

    def test_blob(self):
        """VipsBlob round-trips raw bytes."""
        with open(JPEG_FILE, 'rb') as f:
            blob = f.read()
        gv = pyvips.GValue()
        gv.set_type(pyvips.GValue.blob_type)
        gv.set(blob)
        value = gv.get()
        assert value == blob
if __name__ == '__main__':
    # Allow running this test module directly without the pytest CLI.
    pytest.main()
| libvips/pyvips | tests/test_gvalue.py | test_gvalue.py | py | 3,584 | python | en | code | 558 | github-code | 36 | [
{
"api_name": "pyvips.GValue",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pyvips.GValue",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "pyvips.GValue",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pyvips.GValue",
"l... |
14263365350 | from django.shortcuts import render
from django.http import HttpResponse
from orders.models import *
import csv
from django.http import HttpResponse, JsonResponse
# Create your views here.
def exportOrders(request):
    """Return all Order rows as a downloadable CSV attachment."""
    columns = ['order_number', 'user', 'address_line_1', 'region', 'city',
               'order_total', 'created_at']

    response = HttpResponse(content_type='text/csv')
    writer = csv.writer(response)
    writer.writerow(columns)
    # Stream the rows straight from the queryset into the CSV writer.
    for row in Order.objects.all().values_list(*columns):
        writer.writerow(list(row))

    response['Content-Disposition'] = "attachment; filename=Orders.csv"
    return response
def exportOrderProducts(request):
    """Return all OrderProduct rows as a downloadable CSV attachment."""
    # Header labels and the ORM lookups they correspond to.
    header = ['order__order_number', 'user', 'product', 'variations',
              'quantity', 'product_price', 'created_at']
    fields = ('order', 'user', 'product__product_name',
              'variations__variation_value', 'quantity', 'product_price',
              'created_at')

    response = HttpResponse(content_type='text/csv')
    writer = csv.writer(response)
    writer.writerow(header)
    for row in OrderProduct.objects.all().values_list(*fields):
        writer.writerow(list(row))

    response['Content-Disposition'] = "attachment; filename=OrderProducts.csv"
    return response
| jeffjcb/southcartel-app | southcartel/reports/views.py | views.py | py | 1,390 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.http.HttpResponse",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "csv.write... |
3473807793 | import os
import math
import itertools
import micp.kernel as micp_kernel
import micp.info as micp_info
import micp.common as micp_common
import micp.params as micp_params
from micp.kernel import raise_parse_error
DEFAULT_SCORE_TAG = 'Computation.Avg'
class xgemm(micp_kernel.Kernel):
    """micperf kernel definition for the xGEMM matrix-multiply benchmark.

    Supplies run parameters and binary paths for each offload method, and
    parses the benchmark's raw output into micperf statistics.
    """

    def __init__(self):
        info = micp_info.Info()

        self.param_validator = micp_params.XGEMM_VALIDATOR
        self._paramDefaults = {'i_num_rep': '3',
                               'n_num_thread': '228',
                               'm_mode': 'NN',
                               'M_size': '-1',
                               'N_size': '-1',
                               'K_size': '-1'}
        self._paramNames = self._paramDefaults.keys()
        self.args = '--n_num_thread {0} --M_size {1} --N_size {2} --K_size {3}'
        # leave 1 GiB of headroom for the OS
        self.maxMemory = info.mic_memory_size() - 1024**3

        # maximal number of cores per MPI rank
        if info.is_in_sub_numa_cluster_mode():
            self.maxCount = info.snc_max_threads_per_quadrant()
        else:
            self.maxCount = info.num_cores()

        # coreConfig: thread counts exercised by the scaling_core category --
        # roughly ten evenly spaced points, always including the maximum.
        step = int(round(self.maxCount / 10.0))
        if step < 4:
            step = 4
        # Fixed: list() is required on Python 3, where range() returns an
        # immutable sequence object with no append() method.
        self.coreConfig = list(range(step, self.maxCount, step))
        self.coreConfig.append(self.maxCount)

        self.units = 'GFlops'

    def _do_unit_test(self):
        return False

    def offload_methods(self):
        """Offload methods supported by this kernel."""
        return ['native', 'pragma', 'auto', 'local']

    def path_host_exec(self, offType):
        """Return the host-side executable path for *offType*, or None."""
        bench_name = self.name
        if offType == 'pragma':
            if micp_common.is_platform_windows():
                return self._path_exec(micp_kernel.LIBEXEC_HOST, bench_name + '_ofl.exe')
            else:
                return self._path_exec(micp_kernel.LIBEXEC_HOST, bench_name + '_ofl.x')
        if offType == 'auto' or offType == 'local':
            # pick the binary variant: MCDRAM-aware or plain-CPU build
            if micp_info.Info().is_processor_mcdram_available():
                xgemm_binary = bench_name + '_mcdram_cpu'
            else:
                xgemm_binary = bench_name + '_cpu'
            # sub-NUMA-cluster (SNC) modes use an MPI-based binary
            # regardless of the kind of memory to be used
            if micp_info.Info().is_in_sub_numa_cluster_mode():
                xgemm_binary = bench_name + '_mpi_snc_cpu'
            # platform-specific executable extension
            if micp_common.is_platform_windows():
                xgemm_binary = '{0}.exe'.format(xgemm_binary)
            else:
                xgemm_binary = '{0}.x'.format(xgemm_binary)
            return self._path_exec(micp_kernel.LIBEXEC_HOST, xgemm_binary)
        return None

    def path_dev_exec(self, offType):
        """Return the device-side executable path for *offType*, or None."""
        bench_name = self.name
        if offType == 'native':
            return self._path_exec(micp_kernel.LIBEXEC_DEV, bench_name + '_mic.x')
        return None

    def path_aux_data(self, offType):
        """Extra files to push to the device (OpenMP runtime for native)."""
        result = []
        if offType == 'native':
            result.append(self.mic_library_find('libiomp5.so'))
        return result

    def param_type(self):
        return 'flag'

    def parse_desc(self, raw, prototype='XGEMM'):
        """Build a one-line run description from the benchmark's raw output.

        Calls raise_parse_error() when an expected key is missing.
        """
        # collect "key : value" lines (exactly one colon) into a dict
        dd = dict([tuple([ll.strip() for ll in line.split(':')])
                   for line in raw.splitlines()
                   if ':' in line and line.find(':') == line.rfind(':')])
        try:
            M = dd['fixed M']
            N = dd['fixed N']
            K = dd['fixed K']
            if 'threads used' in dd:
                # standard (non-MPI) execution reports one thread count
                numThreads = dd['threads used']
            else:
                # MPI execution prints an 'MPI rank <i>' entry per rank;
                # collect the per-rank thread counts until the first gap
                numThreads_t = []
                key_t = 'MPI rank {}'
                for i in itertools.count():
                    key = key_t.format(i)
                    if key not in dd:
                        break
                    numThreads_t.append(dd[key] + " [" + key + "]")
                numThreads = '/'.join(numThreads_t)
            numIt = dd['min_niters']
        except (IndexError, KeyError) as e:
            raise_parse_error(raw, "Key error: " + str(e))
        result = '(M={}, N={}, K={}) MKL {} with {} threads and {} iterations'.format(M, N, K, prototype, numThreads, numIt)
        return result

    def parse_perf(self, raw):
        """Parse xGEMM's raw output and extract performance results.

        Expected line format (in SNC modes there is one '*' line per NUMA
        node; the per-node averages are summed):

            n       min     avg     max  stddev
        *   10240   286.64  290.43  296.65  3.815e+00

        Returns the dictionary required by the micp/kernel.py interface.
        """
        # column 3 of every '*' line is the average GFlops for that node
        scores = [float(line.split()[3])
                  for line in raw.splitlines() if line.startswith('*')]
        speed = str(sum(scores))

        dd = dict([tuple([ll.strip() for ll in line.split(':')])
                   for line in raw.splitlines()
                   if ':' in line and line.find(':') == line.rfind(':')])
        # map the timer the benchmark used onto the rollup tag name
        try:
            if dd['timer'] == 'native':
                self.tag = 'Task.Computation.Avg'
            elif dd['timer'] == 'invoke':
                self.tag = 'Device.Computation.Avg'
            elif dd['timer'] == 'full':
                self.tag = 'Host.Computation.Avg'
        except KeyError:
            self.tag = DEFAULT_SCORE_TAG

        result = {}
        result[self.tag] = {'value': speed, 'units': self.units, 'rollup': True}
        return result

    def environment_dev(self):
        return {'LD_LIBRARY_PATH': '/tmp'}

    def environment_host(self, auxHostVars=None):
        """Return extra environment variables needed to run xgemm on the host."""
        info = micp_info.Info()
        # NOTE(review): numThreads and maxMemory are computed but never used;
        # kept in case the Info getters matter elsewhere -- confirm and drop.
        numThreads = info.num_cores() - 1
        maxMemory = str(int((info.mic_memory_size() - 1024**3)/(1024**3)))
        retvars = {'LD_LIBRARY_PATH': self.ld_library_path()}
        # extra variables applied only on self-boot (Xeon Phi processor) hosts
        mic_sb = {'KMP_AFFINITY': 'compact,1,0',
                  'LD_LIBRARY_PATH': self.ld_library_path(),
                  'USE_2MB_BUFFERS': '16K'}
        if auxHostVars:
            mic_sb.update(auxHostVars)
            retvars.update(auxHostVars)
        # additional tuning for Windows running on Xeon Phi processors
        if micp_common.is_platform_windows() and micp_common.is_selfboot_platform():
            mic_sb['OMP_NUM_THREADS'] = str(info.num_cores())
            mic_sb['MKL_DYNAMIC'] = 'false'
            mic_sb['KMP_BLOCKTIME'] = 'infinite'
            mic_sb['KMP_LIBRARY'] = 'turnaround'
        # MKL_FAST_MEMORY_LIMIT=0 forces MKL buffers into DDR memory
        if not micp_info.Info().is_processor_mcdram_available():
            retvars['MKL_FAST_MEMORY_LIMIT'] = '0'
        if micp_info.Info().is_in_sub_numa_cluster_mode():
            cores = micp_info.Info().snc_max_threads_per_quadrant()
            retvars['KMP_HW_SUBSET'] = '{0}c,1t'.format(cores)
        if micp_common.is_selfboot_platform():
            retvars.update(mic_sb)
        return retvars

    def independent_var(self, category):
        """Name of the parameter swept by *category*."""
        if category == 'scaling_core':
            return 'n_num_thread'
        return 'K_size'

    def get_process_modifiers(self):
        """Return the MPI launch prefix (as a list) for the SNC modes; an
        empty list for the other cluster modes."""
        if micp_info.Info().is_in_sub_numa_cluster_mode():
            subclusters = micp_info.Info().get_number_of_nodes_with_cpus()
            return ['mpirun', '-n', str(subclusters)]
        else:
            return []

    def is_mpi_required(self):
        """MPI is required to run xGEMM when the system is in SNC2/SNC4 mode."""
        return micp_info.Info().is_in_sub_numa_cluster_mode()

    def is_optimized_for_snc_mode(self):
        """micperf provides an optimized version for SNC modes."""
        return True

    def _ordering_key(self, stat):
        """Sort key for run statistics: the score recorded under this
        kernel's result tag (default tag before any output was parsed)."""
        # Fixed: replaces a bare ``except:`` clause around self.tag access.
        tag = getattr(self, 'tag', DEFAULT_SCORE_TAG)
        return float(stat.perf[tag]['value'])
| antoinecarme/xeon-phi-data | intel_software/pkg_contents/micperf/CONTENTS/usr/share/micperf/micp/micp/kernels/_xgemm.py | _xgemm.py | py | 8,876 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "micp.kernel.Kernel",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "micp.kernel",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "micp.info.Info",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "micp.info",
"l... |
42871257105 | import sys
import json
from typing import List
import urllib.request
import os
import glob
from rulekit.experiment import ExperimentRunner
from rulekit import RuleKit
dir_path = os.path.dirname(os.path.realpath(__file__))
def download_rulekit_jar():
    """Download the newest RuleKit ``*-all.jar`` release asset from GitHub
    into ``<dir_path>/jar/``, interactively offering to delete any
    previously downloaded jars first.

    Raises:
        Exception: when the release does not contain exactly one
            ``*-all.jar`` asset.
    """
    release_version = 'latest'
    current_rulekit_jars_files: List[str] = glob.glob(
        f"{dir_path}/jar/*-all.jar")
    url = f"https://api.github.com/repos/adaa-polsl/RuleKit/releases/{release_version}"
    req = urllib.request.Request(url)
    req.add_header('Content-Type', 'application/json; charset=utf-8')
    response = urllib.request.urlopen(req)
    response = json.loads(response.read())
    latest_release_version = response['tag_name']
    print('Fetching latest RuleKit release version: ', latest_release_version)

    # Find the single fat-jar asset of the release.
    assets = response['assets']
    asset = list(
        filter(lambda asset: asset['name'].endswith('-all.jar'), assets))
    if len(asset) != 1:
        raise Exception('Failed to fetch latest RuleKit release jar file.')
    asset = asset[0]
    download_link = asset['browser_download_url']

    # Offer to clean up jars from earlier downloads.
    if len(current_rulekit_jars_files) > 0:
        old_files_names = list(
            map(lambda path: os.path.basename(path), current_rulekit_jars_files))
        tmp = input(
            f'Old RuleKit jar file/files ({old_files_names}) detected, do you want to remove it/them? Type "yes" or "no"\n')
        if tmp == 'yes':
            for old_file_path in current_rulekit_jars_files:
                os.remove(old_file_path)
            print('Old files removed.')
        elif tmp != 'no':
            print('I will treat it as no')
    print(f'Downloading jar file: "{asset["name"]}" from: "{download_link}"')

    def show_progress(block_num, block_size, total_size):
        # urlretrieve passes total_size == -1 when the server sends no
        # Content-Length; guard against a zero/negative divisor (the
        # original divided unconditionally).
        if total_size <= 0:
            return
        downloaded = int((block_num * block_size / total_size) * 100)
        # the final block can overshoot the total; clamp at 100%
        print(f'\r{min(downloaded, 100)}%', end='\r')

    urllib.request.urlretrieve(
        download_link, f'{dir_path}/jar/{asset["name"]}', show_progress)
    print('Download finished!\nPackage is ready to use.')
def main():
    """CLI entry point: either refresh the bundled RuleKit jar or forward
    the remaining arguments to the RuleKit batch experiment runner."""
    args = sys.argv[1:]
    if args and args[0] == 'download_jar':
        download_rulekit_jar()
    else:
        # delegate to the RuleKit batch CLI
        engine = RuleKit()
        engine.init()
        ExperimentRunner.run(args)
if __name__ == "__main__":
    # Module is runnable via ``python -m <package>``.
    main()
| cezary986/complex_conditions | src/utils/rulekit/__main__.py | __main__.py | py | 2,384 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line... |
74566852583 | from typing import List
class Solution:
    """
    Date: 2023-08-04
    Author: ZhongJing (仲景)
    """
    def longestCommonPrefix(self, strs: List[str]) -> str:
        """Return the longest common prefix shared by every string in *strs*.

        Returns "" for an empty input list (the original raised IndexError
        on ``[]``).  Self-contained: no longer depends on the module-level
        pairwise helper.
        """
        if not strs:
            return ""
        prefix = strs[0]
        for s in strs[1:]:
            # Shrink the candidate until it is a prefix of s.
            while not s.startswith(prefix):
                prefix = prefix[:-1]
                if not prefix:
                    return ""
        return prefix
def twoStrLongestCommonPrefix(str1: str, str2: str) -> str:
    """Return the longest common prefix of two strings.

    :param str1: first string
    :param str2: second string
    :return: the characters both strings share from position 0 onward
    """
    common = []
    for a, b in zip(str1, str2):
        if a != b:
            break
        common.append(a)
    return "".join(common)
{
"api_name": "typing.List",
"line_number": 10,
"usage_type": "name"
}
] |
9476101469 | import os
from random import randint
from time import time
import subprocess
from datetime import datetime
from shutil import rmtree
from db.models import UserRegisteredContest,Contest,ContestProblem,Problem,Submission,Admin,User
from bson.objectid import ObjectId
from platform import system
from flask import current_app
# Working directory at import time.
# NOTE(review): never referenced in the visible code -- confirm before removing.
ORIGINAL_DIR=os.getcwd()
# Interpreter/compiler paths and the POSIX-only `resource` module (rlimits)
# are defined only on Linux.
# NOTE(review): on non-Linux hosts `compilers` and `resource` stay undefined,
# so resolveFolder()/runCommand() would raise NameError -- presumably this
# judge only runs on Linux; confirm.
if system()=='Linux':
    py_dir="/usr/local/bin/python"
    go_dir="/usr/bin/go"
    cplus_dir="/usr/bin/g++"
    c_dir="/usr/bin/gcc"
    import resource
    # language key -> executable used to compile/run a submission
    compilers={
        "go":go_dir,
        "py":py_dir,
        "java":"/usr/bin/java",
        "c":c_dir,
        "cpp":cplus_dir,
        "python":py_dir,
        "python2":"/usr/bin/python2",
        "php":"/usr/bin/php",
        "js":"/usr/bin/node"
    }
def runCommand(*popenargs,input=None, capture_output=False, timeout=None, check=False,memorylimit=300,**kwargs):
    #mocking the standard implementation of subprocess.run
    #so we can set memory limit
    # Extra keywords beyond subprocess.run():
    #   memorylimit -- soft cap in MiB applied to the child's data and stack
    #                  segments via resource.prlimit (Linux only).
    #   lang -- submission language; "js" skips the rlimits (see below).
    lang=kwargs.get("lang")
    if lang:kwargs.pop("lang")
    if input is not None:
        if kwargs.get('stdin') is not None:
            raise ValueError('stdin and input arguments may not both be used.')
        kwargs['stdin'] = subprocess.PIPE
    if capture_output:
        if kwargs.get('stdout') is not None or kwargs.get('stderr') is not None:
            raise ValueError('stdout and stderr arguments may not be used '
                    'with capture_output.')
        kwargs['stdout'] = subprocess.PIPE
        kwargs['stderr'] = subprocess.PIPE
    with subprocess.Popen(*popenargs, **kwargs) as process:
        if system()=='Linux':
            memorylimit=memorylimit+128 #extra 128 mb for startup
            memorylimithard=memorylimit*1024**2+10024
            if lang!="js":
                #js is single threaded that means we would be setting stack limit
                #for the entire js interpreter since js relies on callback functions
                #for it operations which essentially uses stack
                resource.prlimit(process.pid,resource.RLIMIT_DATA,(memorylimit*1024**2,memorylimithard))
                resource.prlimit(process.pid,resource.RLIMIT_STACK,(memorylimit*1024**2,memorylimithard))
        try:
            stdout, stderr = process.communicate(input, timeout=timeout)
        except subprocess.TimeoutExpired as exc:
            process.kill()
            # NOTE(review): subprocess._mswindows is a private stdlib
            # attribute and may break across Python versions -- confirm.
            if subprocess._mswindows:
                # Windows accumulates the output in a single blocking
                # read() call run on child threads, with the timeout
                # being done in a join() on those threads. communicate()
                # _after_ kill() is required to collect that and add it
                # to the exception.
                exc.stdout, exc.stderr = process.communicate()
            else:
                # POSIX _communicate already populated the output so
                # far into the TimeoutExpired exception.
                process.wait()
            raise
        except: # Including KeyboardInterrupt, communicate handled that.
            process.kill()
            # We don't call process.wait() as .__exit__ does that for us.
            raise
        retcode = process.poll()
        if check and retcode:
            raise subprocess.CalledProcessError(retcode, process.args,
                    output=stdout, stderr=stderr)
    return subprocess.CompletedProcess(process.args, retcode, stdout, stderr)
class Task:
"""
Handles running of submitted code and updates the submission details in the database
"""
    @classmethod
    def taskObj(cls,d):
        # NOTE(review): this helper looks broken/dead: plain object()
        # instances reject attribute assignment (no __dict__), and
        # ``setattr(obj,"run",)`` passes only TWO arguments to setattr (the
        # trailing comma is not a third argument), so it would raise
        # TypeError if ever reached.  No caller is visible in this file --
        # confirm before fixing or removing.
        if isinstance(d,dict):
            obj=object()
            for classvar in d:
                setattr(obj,classvar,d[classvar])
            setattr(obj,"run",)
            return obj
    # Lifecycle states ("PossibelTasksState" sic): a task moves from
    # "initialize" -> "running" -> "finished"; __lt__ orders by this index.
    PossibelTasksState=["initialize","running","finished"]
    def __init__(self,lang,content,userid,problem,id,stype,codefile,contestid=None,ctype=None):
        """Build a grading task for one submission.

        :param lang: language key ("py", "java", "cpp", ...).
        :param content: submitted code content (ignored when *codefile* is given).
        :param codefile: optional uploaded file object, read as UTF-8.
        :param stype: the type of submission; "test" runs the hidden test
            cases, "sample" the public sample cases.
        :param problem: an instance of :class:`ProblemInstance`.
        :param contestid: set when the submission belongs to a contest.
        :param ctype: contest type, paired with *contestid*.
        """
        self.state=Task.PossibelTasksState[0]
        self.lang=lang
        self.stype = stype
        self.codefile = codefile
        if self.codefile:
            self.content = self.codefile.read().decode("utf-8")
        else:
            self.content = content # get submitted code content
        self.userid=userid
        # the submitter may be a regular user or an admin
        user=User().getBy(_id=ObjectId(userid)) or Admin().getBy(_id=ObjectId(userid))
        self.username = user.get("username")
        self.id=id
        self.state=Task.PossibelTasksState[0]
        # optimistic default; the case runners flip this to "Failed"
        self.verdict = "Passed"
        self.contestid = contestid
        self.ctype = ctype
        self.problem = problem
        # cases/answers are stored on the problem as comma-separated strings
        # NOTE(review): any stype other than "test"/"sample" leaves
        # self.cases/self.answercase unset and crashes just below -- confirm
        # callers validate stype first.
        if self.stype == "test":
            self.cases=self.problem.getTestCases().split(",")
            self.answercase=self.problem.getAnswerForTestCases().split(",")
            self.result=[None]*int(self.problem.getSizeOfTestCases())
        elif self.stype == "sample":
            self.cases=self.problem.getSampleCases().split(",")
            self.answercase=self.problem.getAnswerForSampleCases().split(",")
            self.result=[None]*int(self.problem.getSizeOfSampleCases())
        self.cases=[i.strip() for i in self.cases]
        self.answercase=[i.strip() for i in self.answercase]
        self.timelimit=problem.getTimeLimit()
        self.memlimit=problem.getMemLimit()
        # write the submitted code to a temporary file on disk
        self.enter()
    def toJson(self):
        """Return a JSON-serializable view of the task's current state.

        NOTE(review): for contest submissions this strips 'expectedoutput'
        from self.result IN PLACE, permanently mutating the task -- confirm
        nothing downstream still needs the expected outputs.
        """
        # Don't show expected output for contest submission
        if self.contestid:
            for each in self.result:
                if each:
                    each.pop('expectedoutput', None)
        return {"state":self.state,"lang":self.lang,"_id":self.id,"result":self.result}
    def __str__(self):
        # Debug-friendly dump of every task attribute.
        return str(self.__dict__)
    def __repr__(self):
        # Same attribute dump as __str__.
        return str(self.__dict__)
    def free(self):
        """Placeholder for releasing per-task references after grading; the
        original attribute cleanup is currently disabled (commented out)."""
        #del self.contestid,self.ctype,self.problem,self.stype
        #del self.cases,self.answercase,self.timelimit,self.memlimit
        pass
    def __del___(self):
        # NOTE(review): the name carries THREE trailing underscores, so
        # Python never invokes this as the ``__del__`` destructor -- as
        # written it is dead code (run() performs the same cleanup itself).
        # Renaming it to ``__del__`` would enable destructor-time cleanup,
        # but could raise AttributeError when construction failed before
        # enter() set self.filepath -- confirm intent before fixing.
        #cleaning up
        try:
            os.remove(self.filepath)
            if self.lang.lower()=="java":
                rmtree(self.folder,ignore_errors=True)
        except FileNotFoundError:
            pass
    def __lt__(self,other):
        # Orders tasks by progress with the most advanced state first:
        # ~index == -(index+1), so a LARGER state index compares as smaller.
        return ~self.PossibelTasksState.index(self.state)< ~other.PossibelTasksState.index(other.state)
    def enter(self):
        """Write the submitted code to a per-language temp file and record
        the submission timestamp."""
        self.filename=self.randomFilename()
        if self.lang.lower()=="java":
            # Java gets its own folder per submission so javac's generated
            # class files cannot collide between tasks.
            filename=os.path.join(*self.filename.split(".")[:-1])
            self.folder="/tmp/{}/{}/".format(self.lang,filename)
            os.makedirs(self.folder,exist_ok=True)
        else:
            # NOTE(review): assumes /tmp/<lang>/ already exists for non-Java
            # languages; open() below fails otherwise -- confirm.
            self.folder="/tmp/{}/".format(self.lang)
        self.filepath=self.folder+self.filename
        with open(self.filepath,"w+") as f:
            f.write(self.content)
        self.timeofsubmission=str(datetime.now())
    def resolveFolder(self,lang):
        """Return the argv prefix that runs a source file for *lang*
        (e.g. ["/usr/bin/go", "run"] for Go).

        Raises NotImplementedError for unsupported languages.
        NOTE(review): `compilers` is defined only on Linux (module top), so
        this raises NameError on other platforms -- confirm Linux-only.
        """
        #python is py,java is java e.t.c.This function exist if need be resolve the name later
        if lang not in compilers:
            raise NotImplementedError("Not yet supported")
        if lang.lower()=="go":
            return [compilers[lang],"run"]
        return [compilers[lang]]
    def randomFilename(self):
        # Unique-ish name: <userid><hash(time)><hash(task)>.<lang>
        # NOTE(review): hash() values can collide; consider uuid4 if name
        # collisions ever matter.
        return self.userid+"{}{}.{}".format(hash(time()),hash(self),self.lang)
    def status(self):
        """Return the task's current lifecycle state string."""
        return self.state
def formatRunOutput(self,string):
if not string:return string
#starting by replacing all file name with a generic name
return string.replace(self.filepath,"submited."+self.lang)
    def runPerCase(self,n_cases,binargs,**kwargs):
        """Run the prepared command once per test case, feeding each case on
        stdin and recording per-case pass/fail dicts into self.result.

        Stops early (break) on the first timeout or memory-limit hit; any
        failing case sets self.verdict to "Failed".
        """
        for cc in range(n_cases):
            errput = None
            ans = None
            try:
                ans=runCommand(binargs,input=self.cases[cc],**kwargs)
            except subprocess.TimeoutExpired:
                self.result[cc] ={"passed":False,
                        "output":"Time Limit Exceeded",
                        "errput":"",
                        "expectedoutput":self.answercase[cc]
                        }
                self.verdict = "Failed"
                errput = "TimeOut"
                break
            except MemoryError:
                self.result[cc] ={"passed":False,
                        "output":"Memory Limit Exceeded",
                        "errput":"",
                        "expectedoutput":self.answercase[cc]
                        }
                self.verdict = "Failed"
                errput = "OutOFMemory"
                break
            except Exception as e:
                # any other failure is reported as a runtime error
                errput="RuntimeError"
                # NOTE(review): TimeoutExpired is already caught above, so
                # this string-typed check looks unreachable -- confirm.
                if str(type(e))=="<class 'subprocess.TimeoutExpired'>":
                    errput="TimeOut"
            if ans:
                output=ans.stdout.strip()
                errput=errput or ans.stderr.strip()
                if not output and not errput:
                    errput="No result from Interpreter"
                if ans.returncode > 0:
                    errput="RuntimeError"
                # a case passes only on exact output match AND exit code 0
                self.result[cc] = {
                    "passed":output==self.answercase[cc] and ans.returncode==0,
                    "output":self.formatRunOutput(output),
                    "errput":self.formatRunOutput(errput),
                    "expectedoutput":self.answercase[cc]
                    }
                if not self.answercase[cc] or ans.returncode>0 or output!=self.answercase[cc]:
                    self.verdict = "Failed"
            else:
                # the command never produced a CompletedProcess (exception path)
                self.result[cc] = {
                    "passed":False,
                    "output":"",
                    "errput":self.formatRunOutput(errput),
                    "expectedoutput":self.answercase[cc]
                    }
                self.verdict = "Failed"
def runCompile(self,compiler_name,compile_options,run_options,n_cases,binary):
compileans=runCommand([compiler_name]+compile_options+[self.filepath],
capture_output=True,encoding="utf-8",memorylimit=self.memlimit)
#if while trying to compile and there was an error
l=len(self.result)
if compileans.returncode >0 :
for cc in range(l):
self.result[cc] ={"passed":False,
"output":self.formatRunOutput(compileans.stderr.strip()),
"errput":"CompileError",
"expectedoutput":self.answercase[cc]
}
return
self.runPerCase(l,[binary]+run_options,capture_output=True,timeout= self.timelimit,\
encoding="utf-8",memorylimit=self.memlimit)
    def run(self,ClientConnection):
        """Execute the submission end-to-end: compile/run all cases, persist
        the submission (and solve status) to the database, clean up the
        temporary source file, and notify the client.

        :param ClientConnection: object with a ``send`` method; receives
            ``["DONE", <task id>]`` when grading has finished.
        """
        setattr(self,"runtime",time())
        l=len(self.result)
        self.state=self.PossibelTasksState[1]
        #some languagues have to compile then run
        if self.lang == "java":
            # javac writes classes/sources/headers into the per-task folder;
            # the entry class must be named Solution
            options_compile=["-d",self.folder,"-s",self.folder,"-h",self.folder]
            options_run=["-classpath",self.folder,"Solution"]
            self.runCompile("javac",options_compile,options_run,l,"java")
        elif self.lang == "c":
            options_compile=["-o",self.filepath+".out"]
            options_run=[]
            self.runCompile("gcc",options_compile,options_run,l,self.filepath+".out")
        elif self.lang=="cpp":
            options_compile=["-o",self.filepath+".out"]
            options_run=[]
            self.runCompile("g++",options_compile,options_run,l,self.filepath+".out")
        else:
            # languages like python, js, php should be fine.
            self.memlimit= self.memlimit+100 # extra 100mb for interpreted languanges
            args=self.resolveFolder(self.lang)+[self.filepath]
            self.runPerCase(l,args,capture_output=True,timeout= self.timelimit,\
                    encoding="utf-8",memorylimit=self.memlimit,lang=self.lang)
        self.state=self.PossibelTasksState[2]
        #create a submission in the database
        # hidden-test submissions outside a contest are stored anonymously
        # (user/contest fields stripped below)
        submission_data = {'prblmid':self.problem.getprblmid(),'name':self.problem.getName(),'userid':self.userid,'contestid':self.contestid,'ctype':self.ctype,'codecontent':self.content,
        'lang':self.lang,'stype':self.stype,'result': self.result,'verdict': self.verdict,'timesubmitted':self.timeofsubmission}
        if self.stype == "test":
            if not self.contestid:
                submission_data.pop('userid', None)
                submission_data.pop('contestid', None)
                submission_data.pop('ctype', None)
            Submission(self.userid).addDoc(submission_data)
            if self.verdict == "Passed":
                # problem solved, update this particular problem document
                update = {"$addToSet": {"solvedby": self.userid}}
                Problem().flexibleUpdate(update, _id=ObjectId(self.problem.getprblmid()))
        else:
            Submission(self.userid).addDoc(submission_data) #add this submission to the submission document
        self.gradeSubmission(submission_data)
        # remove the temporary source file (and javac output folder)
        os.remove(self.filepath)
        if self.lang.lower()=="java":
            rmtree(self.folder,ignore_errors=True)
        self.free()
        ClientConnection.send(["DONE",self.id])
    def gradeSubmission(self, data):
        """Update contest standings after a contest submission has been run.

        Failed verdict: apply a -10 penalty to the user's registered-contest
        document (only while the problem is not already solved at full score).
        Passed verdict: set the problem score and add a time penalty measured
        from the contest start, then recompute and store the user's total
        score both on the registration document and on the contest's
        participants map, and broadcast the new score over the scoreboard
        websocket namespace.
        """
        userid = data.get('userid')
        contestid = data.get('contestid')
        ctype = data.get('ctype')
        prblmid = data.get('prblmid')
        verdict = data.get('verdict')
        contest_problem = ContestProblem(ctype, contestid).getBy(_id=ObjectId(prblmid))
        contest = Contest(ctype).getBy(_id=ObjectId(contestid))
        contest_start_time = contest.get('starttime')
        # Seconds elapsed since the contest started; used as the time penalty.
        submission_time = datetime.now().timestamp() - contest_start_time
        prblmscorefield = 'problemscore.{}'.format(prblmid)
        if verdict != "Passed":
            score = 0
            penalty = -10
            # Update the penalty field, but only while this problem is not
            # already solved at full score (the $ne filter).
            update = {'$inc': {'penalty': penalty}}
            pScore = contest_problem.get('prblmscore')
            argDict={"contestid":contestid, prblmscorefield:{'$ne': pScore}}
            UserRegisteredContest(userid).flexibleUpdate(update, **argDict)
        else:
            score = contest_problem.get('prblmscore')
            penalty = 0
            # Set the user's score for this problem and accumulate the time
            # penalty -- only if the new score beats the previous one ($lte).
            update = {'$set': {'problemscore.{}'.format(prblmid): score}, '$inc': {'timepenalty': submission_time}}
            argDict={"contestid":contestid,prblmscorefield:{'$lte': score}}
            if UserRegisteredContest(userid).flexibleUpdate(update, **argDict):
                # Recompute the total score: penalty plus all problem scores.
                reg_contest = UserRegisteredContest(userid).getBy(contestid=contestid)
                problemscore = reg_contest.get('problemscore')
                totalscore = reg_contest.get('penalty')
                timepenalty = reg_contest.get('timepenalty')
                for each in problemscore:
                    totalscore += problemscore[each]
                # Persist the total score on the registration document.
                update = {"$set": {'totalscore': totalscore}}
                UserRegisteredContest(userid).flexibleUpdate(update, contestid=contestid)
                # Mirror the participant's current score on the contest document.
                update = {"$set": {'participants.{}.currscore'.format(userid): totalscore,
                                    'participants.{}.timepenalty'.format(userid): timepenalty}}
                Contest(ctype).flexibleUpdate(update, _id=ObjectId(contestid))
                data['score'] = totalscore
                # Push the fresh score to the live scoreboard.
                wsdata={"point":totalscore,"username":self.username,"attempted":len(problemscore)}
                current_app.socketio.emit('newscore', wsdata,namespace='/scoreboard/')
| Harjacober/HackAlgo | coderunner/task.py | task.py | py | 16,600 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.getcwd",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "platform.system",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "subprocess.PIPE",
... |
39612322172 | import numpy as np
import os.path
import math
import scipy.linalg as _la
from math import factorial
import itertools
import time
import os
from scipy.sparse import csc_matrix
# Precomputed 16-bit popcount lookup table: entry i holds the number of set
# bits in i, filled via the recurrence popcount(i) = popcount(i >> 1) + (i & 1).
POPCOUNT_TABLE16 = [0] * 2**16
for _i in range(1, len(POPCOUNT_TABLE16)):
    POPCOUNT_TABLE16[_i] = POPCOUNT_TABLE16[_i >> 1] + (_i & 1)
def one_count(v):
    """Number of set bits in the low 32 bits of *v* (two 16-bit table lookups)."""
    low16 = v & 0xffff
    high16 = (v >> 16) & 0xffff
    return POPCOUNT_TABLE16[low16] + POPCOUNT_TABLE16[high16]
#..................................Binomial
def comb(n, k):
    """Binomial coefficient C(n, k) as an ``np.int64``.

    Fix: the original used float division
    ``factorial(n) / factorial(k) / factorial(n - k)``, which silently loses
    precision once an intermediate exceeds 2**53.  Exact integer arithmetic
    (``//``) keeps the result correct for every value that fits in int64.
    """
    kk = factorial(n) // (factorial(k) * factorial(n - k))
    return np.int64(kk)
#..................................from configuration to bin number
def TO_bin(xx):
    """Integer value of a '0'/'1' configuration string."""
    return int(xx, base=2)
#..................................from bin number to configuration
def TO_con(x,L):
    """Binary-string representation of *x*, zero-padded to width *L*."""
    return np.binary_repr(np.int64(x), width=np.int64(L))
#..................................base preparation
def Base_prep(n,k):
    """All length-*n* bit strings with exactly *k* ones, in combinations order."""
    configs = []
    for occupied in itertools.combinations(range(n), k):
        chosen = set(occupied)
        configs.append(''.join('1' if site in chosen else '0' for site in range(n)))
    return configs
def BaseNumRes_creation(Dim,LL,B):
    """Map each of the *Dim* configuration strings in *B* (length *LL*) to a
    row of spin values: '1' -> +0.5, '0' -> -0.5.

    Fix: ``np.float`` was removed in NumPy 1.24; use ``np.float64``.
    """
    A = np.zeros((Dim, LL), dtype=np.float64)
    for i in range(Dim):
        for k, ch in enumerate(B[i]):
            A[i, k] = float(ch) - 0.5
    return A
#..................................hop. preparation
def Hop_prep(L,BC):
if BC == 1:
Hop_dim=L-1
else:
Hop_dim=L
return [TO_con(2**i+2**((i+1)%L),L) for i in range(Hop_dim)]
#..................................................Disorder creation
def Dis_Creation(LL,Dis_gen):
    """On-site disorder pattern of length *LL*.

    Dis_gen == 0: i.i.d. uniform values in [-1, 1).
    Otherwise: deterministic quasi-periodic cosine, cos(2*pi*0.721*i/LL).

    Fix: ``np.float`` was removed in NumPy 1.24; use ``np.float64``.
    """
    dis = np.zeros(LL, dtype=np.float64)
    for i in range(LL):
        if Dis_gen == 0:
            dis[i] = 2*np.random.random() - 1
        else:
            dis[i] = np.cos(2*math.pi*0.721*i/LL)
    return dis
#..................................creation Lin Tables
def LinTab_Creation(LL,Base,di):
    """Build the Lin lookup table for an *LL*-site basis of dimension *di*.

    Each basis string in *Base* is split into a left and a right half; the
    table stores, for every half-configuration, its integer value and a
    partial index (j1 for the left half, j2 for the right half) such that
    j1 + j2 - 1 recovers the position of the full configuration in *Base*
    (see ``LinLook``).  Columns: [left bin, j1, right bin, j2].
    """
    L = np.int64(LL)
    Dim=np.int64(di)
    #..........................Table Creation
    # Largest integer value a half-configuration can take.
    MaxSizeLINVEC = sum([2**(i-1) for i in range(1,np.int64(L/2+1))])
    #....creates a table LinTab_L+LinTab_R
    #.....................[   ,  ]+[   ,  ]
    LinTab = np.zeros((MaxSizeLINVEC+1,4),dtype=int)
    Jold = JJ=j1=j2=0
    Conf_old = TO_con(0,np.int64(L/2))
    #...........................Table Filling
    # Walks the (sorted) basis; j1 advances whenever the left half changes,
    # while j2 counts consecutive entries sharing the same left half.
    for i in range(Dim):
        Conf_lx = Base[i][0:np.int64(L/2)]
        Bin_lx = TO_bin(Conf_lx)
        Conf_rx = Base[i][np.int64(L/2):L]
        Bin_rx = TO_bin(Conf_rx)
        if Conf_lx==Conf_old:
            j1 = Jold
        else:
            j1 += j2
            Conf_old = Conf_lx
        if Jold != j1:
            JJ = Jold = 0
        j2 = JJ+1
        Jold = j1
        JJ += 1
        #print Conf_lx, np.int64(Bin_lx), np.int64(j1), Conf_rx, np.int64(Bin_rx), np.int64(j2)
        LinTab[Bin_lx,0]= np.int64(Bin_lx)
        LinTab[Bin_lx,1]= np.int64(j1)
        LinTab[Bin_rx,2]= np.int64(Bin_rx)
        LinTab[Bin_rx,3]= np.int64(j2)
    # print LinTab
    return LinTab
#..................................Lin Look for complete state
def LinLook(vec,LL,arr):
    """Basis index of configuration *vec* (an integer) via the Lin table *arr*.

    Splits the LL-bit string into halves and sums the two stored partial
    indices, minus one (the table is 1-based in each half).
    """
    conf = np.binary_repr(np.int64(vec), width=np.int64(LL))
    half = LL // 2
    left_bin = int(conf[:half], 2)
    right_bin = int(conf[half:], 2)
    return arr[left_bin, 1] + arr[right_bin, 3] - 1
#..................................Lin Look for RIGHT state
def LinLook_LL(vec,arr):
    """Partial Lin index (column 1) for a half-configuration string *vec*."""
    row = int(vec, 2) + 1
    return arr[row, 1]
#..................................Lin Look for RIGHT state
def LinLook_RR(vec,arr):
    """Partial Lin index (column 3) for a half-configuration string *vec*."""
    row = int(vec, 2) + 1
    return arr[row, 3]
#..................................................Hamiltonian Creation
def Ham_Dense_Creation(LL,NN,Dim,D,Dis_real,BC,Base_Bin,Base_Num,Hop_Bin,LinTab):
    """Dense spin-chain Hamiltonian in the NN-particle sector.

    Off-diagonal t/2 hopping terms connect configurations differing by one
    hop mask in *Hop_Bin*; the diagonal collects the on-site disorder
    (strength *D*, pattern *Dis_real*).  BC == 1 selects open boundaries.

    Fix: ``np.float`` was removed in NumPy 1.24; use ``np.float64``.
    """
    t=1.
    # everything in units of t
    ham = np.zeros((Dim,Dim), dtype=np.float64)
    if BC == 1:
        Hop_dim=LL-1
    else:
        Hop_dim=LL
    for i in range(Dim):
        n_int = 0.0
        n_dis = 0.0
        bra = LinLook(Base_Bin[i],LL,LinTab)
        for j in range(Hop_dim):
            # Flip the two sites of hop j; valid only if particle number is conserved.
            xx = Base_Bin[i]^Hop_Bin[j]
            ket = LinLook(xx,LL,LinTab)
            if one_count(xx) == NN:
                ham[bra,ket] = t/2
            # Nearest-neighbour occupation term (currently weighted by 0 below).
            uu = Base_Bin[i] & Hop_Bin[j]
            if one_count(uu) == 1:
                n_int -= 0.25
            else:
                n_int += 0.25
            # Disorder contribution of site j: +/- Dis_real[j]/2 by occupation.
            n_ones = Base_Bin[i] & np.int64(2**(LL-j-1))
            if n_ones != 0:
                n_dis += 0.5*Dis_real[j]
            else:
                n_dis -= 0.5*Dis_real[j]
        ham[bra,bra] = t*(0*n_int + D*n_dis)
    return ham
#..................................................Hamiltonian Creation
def Ham_Sparse_Creation(LL,NN,Dim,D,Dis_real,BC,Base_Bin,Base_Num,Hop_Bin,LinTab):
    """Sparse (CSC) spin-chain Hamiltonian in the NN-particle sector.

    Same matrix as ``Ham_Dense_Creation``, assembled in COO triplet form and
    converted to ``csc_matrix``: off-diagonal t/2 hopping entries plus a
    diagonal collecting the disorder term (strength *D*, pattern *Dis_real*).
    BC == 1 selects open boundaries.
    """
    t=1.
    # everything in units of t
    ham_ind1 = []
    ham_ind2 = []
    ham_val = []
    if BC == 1:
        Hop_dim=LL-1
    else:
        Hop_dim=LL
    for i in range(Dim):
        n_int = 0.0
        n_dis = 0.0
        bra = LinLook(Base_Bin[i],LL,LinTab)
        for j in range(Hop_dim):
            # Flip the two sites of hop j; keep only particle-conserving moves.
            xx = Base_Bin[i]^Hop_Bin[j]
            ket = LinLook(xx,LL,LinTab)
            if one_count(xx) == NN:
                ham_ind1.append( bra )
                ham_ind2.append( ket )
                ham_val.append( t/2 )
            # Nearest-neighbour occupation term (weighted by 0 on the diagonal below).
            uu = Base_Bin[i] & Hop_Bin[j]
            if one_count(uu) == 1:
                n_int -= 0.25
            else:
                n_int += 0.25
            # Disorder contribution of site j: +/- Dis_real[j]/2 by occupation.
            n_ones = Base_Bin[i] & np.int64(2**(LL-j-1))
            if n_ones != 0:
                n_dis += 0.5*Dis_real[j]
            else:
                n_dis -= 0.5*Dis_real[j]
        ham_ind1.append( bra )
        ham_ind2.append( bra )
        ham_val.append( t*(0*n_int + D*n_dis) )
    ham = csc_matrix((ham_val, (ham_ind1,ham_ind2)), shape=(Dim,Dim), dtype=np.double)
    return ham
#..................................................Hamiltonian Dense Diagonalization
def eigval(A):
    """Full symmetric/Hermitian eigendecomposition of dense *A*.

    Returns the ``scipy.linalg.eigh`` pair (eigenvalues, eigenvectors).
    """
    return _la.eigh(A)
#..................................................Hamiltonian Dense Diagonalization
def levstat(E,Dim):
    """Level-spacing r-statistic of the sorted spectrum *E*.

    For consecutive gaps g_i = E[i+1] - E[i], returns
    min(g_{i+1}, g_i) / max(g_{i+1}, g_i) for i = 0 .. Dim-3.

    Fixes: ``np.float`` was removed in NumPy 1.24; the element-wise loop is
    replaced by vectorized ``np.minimum``/``np.maximum`` (same values).
    *Dim* is kept for interface compatibility (it must equal ``len(E)``).
    """
    gap = E[1:] - E[:-1]
    return np.minimum(gap[1:], gap[:-1]) / np.maximum(gap[1:], gap[:-1])
#..................................................Hamiltonian Sparse Diagonalization
def eigsh(A,n):
    """Partial eigendecomposition of a sparse symmetric matrix *A* (*n* pairs).

    Fix: the original called ``_la.sparse.linalg.eigsh`` but ``scipy.linalg``
    has no ``sparse`` attribute (AttributeError); the intended routine is
    ``scipy.sparse.linalg.eigsh``.  Imported locally so the module's existing
    import block is untouched.
    """
    from scipy.sparse.linalg import eigsh as _sparse_eigsh
    return _sparse_eigsh(A, n)
#..................................................Initial state
def Psi_0(Dim):
    """Random initial basis-state index.

    Fix: the original line ``np.random.randnp.int64(0,Dim-1)`` is garbled
    (a mangled ``randint``) and raises AttributeError.  Restored to
    ``np.random.randint(0, Dim-1)``.
    NOTE(review): randint's upper bound is exclusive, so this draws from
    [0, Dim-2]; confirm whether index Dim-1 should be reachable.
    """
    n = np.random.randint(0, Dim-1)
    #n = 0
    return n
def Proj_Psi0(a,V):
    """Projection of the initial state: the row/element of *V* at index *a*."""
    return V[a]
#..................................................Traslations MEAN
def Trasl_Mean(A):
    """Translation average of a correlation matrix.

    Rolls row i left by i sites (so every row is referenced to its own site)
    and averages the rolled rows.

    Fix: ``np.float`` was removed in NumPy 1.24; use ``np.float64``.
    """
    n = A.shape[1]
    B = np.zeros((n, n), dtype=np.float64)
    for i in range(n):
        B[i] = np.roll(A[i], -i)
    return np.mean(B, axis=0)
#..................................................dens
def density(V,Base_NumRes):
    """Per-eigenstate site densities: |V[j,n]|^2 weights over the basis rows.

    Equivalent to ``np.dot(np.transpose(V**2), Base_NumRes)``.
    """
    return np.einsum('jn,ji -> ni', np.square(V), Base_NumRes)
def density_t(Pro,V,BDens):
    """Density at time t: projections *Pro* contracted through eigenvectors *V*
    into the per-configuration densities *BDens*.

    Equivalent to ``np.einsum('i,ij,jk-> k', Pro, V, BDens)``.
    """
    state_weights = np.dot(Pro, V)
    return np.dot(state_weights, BDens)
#..................................................NiNj
def OUTER_creation(L,Dim,A):
    """Stack of outer products: B[i] = A[i] (x) A[i], shape (Dim, L, L).

    Fix: ``np.float`` was removed in NumPy 1.24; use ``np.float64``.
    """
    B = np.zeros((Dim, L, L), dtype=np.float64)
    for i in range(Dim):
        B[i] = np.outer(A[i], A[i])
    return B
def SzSz_con_P(A,B,C):
    """SzSz correlations weighted by squared amplitudes *A*, minus *C*."""
    weighted = np.einsum('il,ijk -> ljk', np.square(A), B)
    return weighted - C
def SzSz_con_P_Psi0(A,B):
    """Contract weights *A* (over eigenstates) with the stack *B*: sum_i A[i]*B[i]."""
    return np.tensordot(A, B, axes=1)
def SzSz_con_Huse(A):
    """Mean over the first axis of |A| (Huse-style averaged correlator)."""
    n_states = A.shape[0]
    summed = np.abs(A).sum(axis=0)
    return summed / n_states
def SzSz_con_Huse_t(A):
    """Mean over the first axis of log|A| (typical/geometric average)."""
    n_states = A.shape[0]
    return np.log(np.abs(A)).sum(axis=0) / n_states
def Mat_SzSz_DE(A,B,C):
    """Diagonal-ensemble SzSz matrix.

    A: eigenvectors, B: basis correlation stack, C: |psi_0| projections.
    Equivalent to ``np.einsum('l,il,ijk -> jk', C, A**2, B)``.
    """
    state_weights = np.dot(np.square(A), C)
    return np.tensordot(state_weights, B, axes=1)
def Mat_Sz_DE(A,B):
    """Diagonal-ensemble Sz: weights *B* over rows of the density matrix *A*.

    Equivalent to ``np.einsum('i,ij -> j', B, A)``.
    """
    return np.dot(B, A)
def SzSz_con_DE(A,B,C):
    """Connected correlations: B - outer(C, C).

    NOTE: parameter *A* (projections) is unused in the computation; it is
    kept only for interface compatibility with existing callers.
    """
    disconnected = np.outer(C, C)
    return B - disconnected
def SPARSE_SzSz_con_DE(psi_t,Base_Corr,Base_NumRes):
    """Connected SzSz correlations of state *psi_t* over the computational basis."""
    weights = np.abs(psi_t)**2
    # <SzSz>: weighted sum of the per-configuration correlation stack.
    SzSz = np.tensordot(weights, Base_Corr, axes=1)
    # <Sz>: weighted site densities, then the disconnected part.
    Dens = np.dot(weights, Base_NumRes)
    return SzSz - np.outer(Dens, Dens)
#..................................................CdiCj
def prep_tab(L):
    """Precompute the c^dag_i c_j index table for an L-site, half-filled chain.

    Builds the half-filling basis, its Lin lookup table, and delegates to
    ``CdC_tabCreation`` for the (bra, ket) index pairs of every (i, j).
    """
    Dim = comb(L, np.int64(L/2))
    Base_Num = Base_prep(L,np.int64(L/2))
    Base_Bin = [int(Base_Num [i],2) for i in range(Dim)]
    LinTab = LinTab_Creation(L,Base_Num,Dim)
    CdC_Tab = CdC_tabCreation (L,np.int64(L/2),Dim,Base_Num,Base_Bin,LinTab)
    return CdC_Tab
def CdC_tabCreation (LL,NN,Dim,Base_Num,Base_Bin,LinTab):
    """Index table for the hopping operator c^dag_i c_j on an *LL*-site basis.

    For every site pair (i, j) it lists the (bra, ket) basis-index pairs of
    configurations connected by moving a particle from j to i, i.e. states
    obtained by forcing site i to '1' and site j to '0' while conserving the
    particle number *NN*.  Shape: (LL, LL, C(LL-2, NN-1), 2).
    """
    dimCiCj = comb(LL-2, NN-1)
    CdC_Tab = np.zeros((LL,LL,dimCiCj,2), dtype=int)
    for i in range(LL):
        for j in range(LL):
            xx = np.zeros((dimCiCj,2), dtype=int)
            x0 = 0
            for l in range(Dim):
                # Force site i to '1' ...
                a = Base_Num[l][0:i]
                b = Base_Num[l][i+1:LL]
                c = ''.join([a,'1',b])
                # ... then site j to '0'.
                a = c[0:j]
                b = c[j+1:LL]
                d = ''.join([a,'0',b])
                # Keep only particle-conserving, state-changing moves.
                if (one_count(int(d,2)) == NN and int(d,2) != Base_Bin[l]):
                    bra = LinLook(Base_Bin[l],LL,LinTab)
                    ket = LinLook(int(d,2),LL,LinTab)
                    xx[x0,0] = np.int64(bra)
                    xx[x0,1] = np.int64(ket)
                    x0 += 1
            CdC_Tab[i,j] = xx
    return CdC_Tab
def Mat_CdC_i(UU1,LL,V,l):
    """<c^dag_i c_j> matrix of eigenstate *l* from precomputed index pairs *UU1*.

    The matrix is symmetric; the diagonal is fixed to 0.25.
    """
    CC = np.zeros((LL, LL), dtype=float)
    for row in range(LL):
        for col in range(row, LL):
            pairs = UU1[row, col]
            val = np.inner(V[pairs[:, 0], l], V[pairs[:, 1], l])
            CC[row, col] = val
            CC[col, row] = val
    np.fill_diagonal(CC, 0.25)
    return CC
def Mat_CdC_Psi0(UU1,Proj_Psi0,Dim,LL,V):
    """Diagonal-ensemble <c^dag_i c_j>: per-eigenstate matrices weighted by
    the projections *Proj_Psi0* and summed over all *Dim* eigenstates.

    Each per-state matrix is symmetric with diagonal fixed to 0.25.
    """
    CC = np.empty((Dim, LL, LL), dtype=float)
    for state in range(Dim):
        for row in range(LL):
            for col in range(row, LL):
                pairs = UU1[row, col]
                val = np.inner(V[pairs[:, 0], state], V[pairs[:, 1], state])
                CC[state, row, col] = val
                CC[state, col, row] = val
        np.fill_diagonal(CC[state], 0.25)
        CC[state] *= Proj_Psi0[state]
    return CC.sum(axis=0)
def generate_filename(basename):
    """Millisecond-timestamped '<basename><ms>.dat' path that does not exist yet.

    On a collision, sleeps one second (so the timestamp changes) and retries.
    """
    stamp = np.int64(round(time.time() * 1000))
    candidate = "{}{}.dat".format(basename, stamp)
    if not os.path.isfile(candidate):
        return candidate
    time.sleep(1)
    return generate_filename(basename)
#..................................................Entropy
#..................................................Time - Evolution
def Corr_Evolution(Proj_Psi0,E,V,t,Base_NumRes,Base_Corr):
    """Time-evolved densities and correlations at time *t*.

    Evolves the eigenbasis projections by phases exp(+-i E t), contracts the
    resulting coherence matrix with the eigenvectors to get basis weights,
    and from those builds the site densities, the SzSz correlations, and the
    translation-averaged connected correlations.

    Returns (dens_t, corr_t, corr_con_t_AVER).
    """
    Pro_t0 = Proj_Psi0
    # (there was one V too many here before: np.einsum('i,ki-> k', Proj_Psi0, V))
    Pro_t = np.outer(Pro_t0*np.exp(1j*E*t),Pro_t0*np.exp(-1j*E*t))
    # Weight of each basis configuration at time t.
    coef = np.real(np.einsum('nm,jn,jm -> j', Pro_t, V, V))
    dens_t = np.real(np.einsum('j,ji -> i', coef, Base_NumRes))
    corr_t = np.real(np.einsum('j,jli -> li', coef, Base_Corr))
    # Connected part, then translation average over sites.
    corr_con_t = np.real(corr_t - np.outer(dens_t,dens_t))
    corr_con_t_AVER = Trasl_Mean(corr_con_t)
    return dens_t,corr_t,corr_con_t_AVER
#..................................................Print_MATRIX
def print_matrix(H):
    """Print matrix *H*, densifying it first when it is a sparse csc_matrix.

    Always returns 0.
    """
    if isinstance(H, csc_matrix):
        print(H.todense())
    else:
        print(H)
    return 0
def split(container, count):
    """
    Partition *container* into *count* round-robin chunks of near-equal length.
    Order within the original container is not preserved across chunks, which
    can be an advantage depending on the use case.
    """
    return [container[start::count] for start in range(count)]
| JeanClaude87/J1_J2 | code/f_function.py | f_function.py | py | 10,651 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "math.factorial",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.int64",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.int64",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.int64",
"line_number... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.