index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
13,700 | bc444d42940bbf2070916b9ca8db688910c98086 | import cv2
import numpy as np
import face_recognition
# Load and encode images
# NOTE(review): face_recognition loads images as RGB, while cv2.imshow expects
# BGR — the COLOR_BGR2RGB conversions below therefore swap the displayed
# channels; confirm the intended color handling.
imgElon = face_recognition.load_image_file('ImageBasic/Elon Musk.jpg')
imgElon = cv2.cvtColor(imgElon, cv2.COLOR_BGR2RGB)
imgTest = face_recognition.load_image_file('ImageBasic/elon musky test.jpg')
imgTest = cv2.cvtColor(imgTest, cv2.COLOR_BGR2RGB)
imgMads = face_recognition.load_image_file('ImageAttendance/Mads Mikkelsen.jpg')
imgMads = cv2.cvtColor(imgMads, cv2.COLOR_BGR2RGB)
# place square around detected face
# face_locations returns boxes as (top, right, bottom, left); only the first
# detected face is used here.
faceLoc = face_recognition.face_locations(imgElon)[0]
encodeElon = face_recognition.face_encodings(imgElon)[0]
#print(faceLoc)
# rectangle takes (left, top) and (right, bottom) corner points
cv2.rectangle(imgElon, (faceLoc[3], faceLoc[0]), (faceLoc[1], faceLoc[2]), (255, 0, 230), 3)
faceLocTest = face_recognition.face_locations(imgTest)[0]
encodeTest = face_recognition.face_encodings(imgTest)[0]
cv2.rectangle(imgTest, (faceLocTest[3], faceLocTest[0]), (faceLocTest[1], faceLocTest[2]), (255, 0, 230), 3)
# compare
# compare_faces returns a list of booleans; face_distance gives the raw
# embedding distance (lower = more similar).
results = face_recognition.compare_faces([encodeElon], encodeTest)
faceDis = face_recognition.face_distance([encodeElon], encodeTest)
print(results, faceDis)
# overlay the match result and distance on the test image
cv2.putText(imgTest, f'{results} {round(faceDis[0], 2)}', (50, 50), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 255), 2)
cv2.imshow('Elon Muskrat', imgElon)
cv2.imshow('Elon Test', imgTest)
cv2.waitKey(0) |
13,701 | 17574dfb4bb968b8966e8ca24a29c7bda3af9ad2 | import matplotlib
matplotlib.use('Agg')
import json
import numpy as np
from collections import Counter
import matplotlib.pyplot as plt
import sys
NODES_FILE = sys.argv[1] #'results/tree/gisaid_hcov-19_test.nodes.json'
DELETIONS_COUNTER = sys.argv[2] #'results/alignment/gisaid_hcov-19_test.deletions_counter.npy'
REF_DICT = sys.argv[3]
HISTOGRAM_FILE = sys.argv[4]
with open(NODES_FILE) as json_file:
data = json.load(json_file)
# Reference-position mapping saved as a pickled dict inside a .npy file.
ref_dict = np.load(REF_DICT).item()
# Count every mutation across all tree nodes, most frequent first.
mutations = Counter(np.concatenate([np.array(data['nodes'][i]['muts']) for i in data['nodes']])).most_common()
# Structural variants are the mutations whose label contains '-'.
svs = [(i, j) for i, j in mutations if '-' in i]
# Deletions end with '-'; insertions start with '-'.
d_pos = [ref_dict[int(d[1:-1])] for d, i in svs if d[0] if d[-1] == '-']
d_n_times = [i for d, i in svs if d[-1] == '-']
i_pos = [ref_dict[int(d[1:-1])] for d, i in svs if d[0] if d[0] == '-']
i_n_times = [i for d, i in svs if d[0] == '-']
# BUG FIX: the original referenced an undefined name SVS_COUNTER (NameError
# at runtime); the counter file is supplied as the second CLI argument,
# bound to DELETIONS_COUNTER above.
svs_counter = np.load(DELETIONS_COUNTER).item()
print(len(svs_counter), 'unique SVs.')
f,ax =plt.subplots(figsize=(10,6), nrows=2, sharey='row', sharex='row')
ax[0].plot([d[0] for d in svs_counter.keys() if d[2]=='D'],
[svs_counter[d] for d in svs_counter.keys() if d[2]=='D'], '.', color='orange')
ax[0].plot(d_pos, d_n_times, 'bx')
ax[0].hist(np.concatenate([np.array([d[0] for _ in range(svs_counter[d])]) for d in svs_counter.keys() if d[2]=='D']),
bins=np.linspace(0,30000,100), alpha=.2, color='orange')
ax[0].set_yscale('log')
ax[0].set_xlim(0,30000)
ax[0].legend(['Sequences', 'Lineages'])
ax[0].set_title('Deletions')
ax[1].plot([d[0] for d in svs_counter.keys() if d[2]=='I'],
[svs_counter[d] for d in svs_counter.keys() if d[2]=='I'], '.', color='orange')
ax[1].plot(i_pos, i_n_times, 'bx')
ax[1].hist(np.concatenate([np.array([d[0] for _ in range(svs_counter[d])]) for d in svs_counter.keys() if d[2]=='I']),
bins=np.linspace(0,30000,100), alpha=.3, color='orange')
ax[1].set_yscale('log')
ax[1].set_xlim(0,30000)
ax[1].set_title('Insertions')
plt.xlabel('Start Position')
plt.ylabel('# With Structural Variant')
plt.tight_layout()
plt.show()
f.savefig(HISTOGRAM_FILE, bbox_inches='tight') |
13,702 | 76c07f33590fa29e86cdc7024edae2e64c2b8ab4 | # Generated by Django 2.1 on 2018-09-04 08:02
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the Book and Publisher models and links
    Book to Publisher via a foreign key."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Book',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('title', models.CharField(max_length=64, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='Publisher',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=64, unique=True)),
            ],
        ),
        migrations.AddField(
            model_name='book',
            name='publish_id',
            # BUG FIX: on_delete must be a callable such as models.CASCADE;
            # the original passed the boolean False, which Django rejects
            # when the relation is resolved.
            field=models.ForeignKey(on_delete=models.CASCADE, to='MyApp.Publisher'),
        ),
    ]
|
13,703 | 5f7a999f3144615743a5e73c778d635f78993b67 | import datetime
from twilio.rest import Client
import requests
import pandas
import csv
import config
STOCK_NAME = "TSLA"
COMPANY_NAME = "Tesla Inc"
STOCK_ENDPOINT = "https://www.alphavantage.co/query"
NEWS_ENDPOINT = "https://newsapi.org/v2/everything"
## STEP 1: Use https://www.alphavantage.co/documentation/#daily
# When stock price increase/decreases by 5% between yesterday and the day before yesterday then print("Get News").
# today_data = datetime.date.today()
# yesterday_data = today_date - datetime.timedelta(days=3)
# yesterday_data = yesterday_date.strftime("%Y-%m-%d")
parameters = {
"function": "TIME_SERIES_DAILY",
"symbol": STOCK_NAME,
"apikey": config.API_PRICE,
}
response = requests.get(STOCK_ENDPOINT, params=parameters)
response.raise_for_status()
tesla_data = response.json()['Time Series (Daily)']
tesla_data_list = [value for (key, value) in tesla_data.items()]
yesterday_data = tesla_data_list[0]
yesterday_closing_price = float(yesterday_data['4. close'])
before_yesterday_data = tesla_data_list[1]
before_yesterday_closing_price = float(before_yesterday_data['4. close'])
print(f"Close prise yesterday: {yesterday_closing_price}")
print(f"Close prise before yesterday: {before_yesterday_closing_price}")
# Absolute price change from the day before yesterday to yesterday.
difference = yesterday_closing_price - before_yesterday_closing_price
# BUG FIX: the original computed the percentage with an inverted sign and
# divided by the newer price, so a rising price printed a negative percent
# and the `percent > 5` news trigger could never fire. Use the magnitude of
# the change relative to the earlier closing price.
percent = abs(difference) / before_yesterday_closing_price * 100
if difference > 0:
    print(f"Upper 💹 by {round(percent, 2)} percent")
    if percent > 5:
        print("Get News")
elif difference < 0:
    print(f"Down 🔻 by {round(percent, 2)} percent")
    # per the step-1 comment, a 5% move in EITHER direction should fetch news
    if percent > 5:
        print("Get News")
else:
    print("Same price")
new_parameters = {
"q": COMPANY_NAME,
"apiKey": config.API_NEWS,
}
response = requests.get(NEWS_ENDPOINT, params=new_parameters)
response.raise_for_status()
news_data = response.json()['articles'][:3]
formated_articles = [f"Headline: {new['title']}. \nBrief: {new['description']}" for new in news_data]
print(formated_articles)
## STEP 3: Use twilio.com/docs/sms/quickstart/python
# to send a separate message with each article's title and description to your phone number.
# Send each article as a separate message via Twilio.
client = Client(config.TWILIO_SID, config.TWILIO_TOKEN)
for article in formated_articles:
message = client.messages.create(
body=article,
from_="+14159910027",
to="+79183209852",
)
# Optional Format the message like this:
"""
TSLA: 🔺2%
Headline: Were Hedge Funds Right About Piling Into Tesla Inc. (TSLA)?.
Brief: We at Insider Monkey have gone over 821 13F filings that hedge funds and prominent investors are required to file by the SEC The 13F filings show the funds' and investors' portfolio positions as of March 31st, near the height of the coronavirus market crash.
or
"TSLA: 🔻5%
Headline: Were Hedge Funds Right About Piling Into Tesla Inc. (TSLA)?.
Brief: We at Insider Monkey have gone over 821 13F filings that hedge funds and prominent investors are required to file by the SEC The 13F filings show the funds' and investors' portfolio positions as of March 31st, near the height of the coronavirus market crash.
"""
|
13,704 | 5feb9f434414856f8bda00a599650f9b6c3f0460 | import copy
import numpy as np
import pandas as pd
from scipy import sparse
def put_column(base, ix, v):
    """Write values ``v`` into column ``ix`` of ``base``, in place.

    Supports numpy arrays, scipy sparse matrices and pandas DataFrames;
    raises a plain ``Exception`` for any other container type.
    """
    # pandas Series are unwrapped to their underlying ndarray first
    values = v.values if isinstance(v, pd.Series) else v
    if sparse.issparse(base) or isinstance(base, np.ndarray):
        # match the target slot's shape so 1-D and 2-D targets both work
        target_shape = base[:, ix].shape
        base[:, ix] = values.reshape(target_shape)
    elif isinstance(base, pd.DataFrame):
        base[ix] = values
    else:
        raise Exception("Unhandled base type")
def get_columns(base, ix):
    """Select column(s) ``ix`` from ``base``, always returning a 2-D result.

    A single selected column is reshaped to an ``(n, 1)`` column vector.
    Raises a plain ``Exception`` for unsupported container types.
    """
    if sparse.issparse(base) or isinstance(base, np.ndarray):
        picked = base[:, ix]
        return picked.reshape(-1, 1) if len(picked.shape) == 1 else picked
    if isinstance(base, pd.DataFrame):
        picked = base[ix]
        if len(picked.shape) == 1:
            # a single label yields a Series: drop to raw values, make 2-D
            return picked.values.reshape(-1, 1)
        return picked
    raise Exception("Unhandled base type")
def get_column_ixs(base):
    """Return the iterable of column identifiers present in ``base``.

    DataFrames yield their column labels, sparse matrices yield only the
    columns that hold nonzeros, and dense arrays yield a positional range.
    """
    if isinstance(base, pd.DataFrame):
        return base.columns
    if sparse.issparse(base):
        return np.unique(base.nonzero()[1])
    if isinstance(base, np.ndarray):
        return range(base.shape[1])
    raise Exception("Unhandled base type")
def remove_column_ixs(base, remove_ixs):
    """Return ``base`` restricted to the columns NOT listed in ``remove_ixs``."""
    keep = [ix for ix in get_column_ixs(base) if ix not in remove_ixs]
    return get_columns(base, keep)
def combine_values(base, results):
    """Concatenate new result columns onto ``base`` column-wise, choosing
    the output container type from the types of ``base`` and ``results``.

    If any result is sparse, everything is coerced to sparse CSR; otherwise
    the type of ``base`` (ndarray or DataFrame) is favored. Raises a plain
    ``Exception`` for unhandled combinations.
    """
    # the first result serves as the representative result type
    init = results[0]
    # if new results are sparse
    # we are forced to make everything sparse
    if sparse.issparse(init):
        if not sparse.issparse(base):
            if isinstance(base, pd.DataFrame):
                base = base.values
            # needs to be numeric type to convert to sparse matrix
            if base.dtype not in [np.dtype(float), np.dtype(int)]:
                base = base.astype(np.dtype(float))
            base = sparse.csr_matrix(base)
        # return csr since it is column indexable
        return sparse.hstack([base] + results, format='csr')
    # for other cases, favor type of base
    elif isinstance(init, np.ndarray):
        if isinstance(base, np.ndarray):
            return np.hstack([base] + results)
        elif isinstance(base, pd.DataFrame):
            # dense results onto a DataFrame: wrap the results in a new
            # frame (integer column labels) and concatenate column-wise
            new_cols = np.hstack(results)
            new_df = pd.DataFrame(new_cols)
            return pd.concat([base, new_df], axis=1)
        else:
            raise Exception("Unhandled base type")
    elif isinstance(init, pd.DataFrame):
        if isinstance(base, np.ndarray):
            # DataFrame results onto an ndarray: drop down to raw values
            results = [v.values for v in results]
            return np.hstack([base] + results)
        elif isinstance(base, pd.DataFrame):
            return pd.concat([base] + results, axis=1)
        else:
            raise Exception("Unhandled base type")
    else:
        raise Exception("Unhandled result type")
def smudge_column(col):
return np.apply_along_axis(lambda x: '__%s' % str(x[0]), 1, col)
# An upper limit on the size of the table (in number of columns)
# that we're willing to iterate over.
COLUMN_LOOP_LIMIT = 1000


class ColumnLoop(object):
    """Fit/transform a scikit-learn style op independently on each column.

    For every column of the input table, the base op is instantiated and
    fit, trying a small grid of input-massaging options (string "smudging"
    and flattening) until one succeeds; columns that cannot be fit by any
    variant are silently skipped.
    """

    def __init__(self, sklearn_op):
        # sklearn_op is a class (constructor), instantiated fresh per column
        self.base_op = sklearn_op
        self.ops = []        # fitted op per successfully-fit column
        self.ixs = []        # column identifier per fitted op
        self.flatten = []    # whether the column was flattened before fitting
        self.smudge = []     # whether the column was string-smudged before fitting
        self._fit = False
        self._import = 'runtime_helpers'

    @staticmethod
    def code():
        return 'runtime_helpers.ColumnLoop'

    @staticmethod
    def is_allowed(arg, state):
        # Refuse overly wide tables: fitting is performed per-column.
        ncols = len(get_column_ixs(arg.value(state)))
        return ncols <= COLUMN_LOOP_LIMIT

    def _fit_column(self, X, ix):
        """Try to fit a fresh op on column ``ix``, massaging input as needed.

        The first (smudge, flatten) combination that fits without raising is
        recorded; if none succeeds the column is skipped silently.
        """
        o = self.base_op()
        col = get_columns(X, ix)
        # we may need to massage the column
        for smudge in [False, True]:
            for flatten in [False, True]:
                try:
                    mod_col = col
                    is_str_col = col.dtype == np.dtype('object')
                    if smudge and is_str_col:
                        mod_col = smudge_column(mod_col)
                    if flatten:
                        mod_col = mod_col.flatten()
                    o.fit(mod_col)
                    self.ixs.append(ix)
                    self.ops.append(o)
                    self.smudge.append(smudge and is_str_col)
                    self.flatten.append(flatten)
                    # at least one transform was successfully fit
                    self._fit = True
                    return
                # was a bare `except:` — narrowed so SystemExit and
                # KeyboardInterrupt are no longer swallowed
                except Exception:
                    # this massaging combination failed; try the next one
                    pass
        return

    def fit(self, X):
        """Fit the base op on every column of ``X`` (always re-fits)."""
        # removed two dead local accumulators (`ixs`, `ops`) that were
        # assigned but never used
        for i in get_column_ixs(X):
            self._fit_column(X, i)
        return self

    def transform(self, X):
        """Apply each fitted per-column op, re-applying the same massaging
        (smudge/flatten) that was chosen at fit time."""
        if not self._fit:
            raise Exception("Must fit first")
        results = []
        for o, must_smudge, must_flatten, i in zip(self.ops, self.smudge,
                                                   self.flatten, self.ixs):
            col = get_columns(X, i)
            if must_smudge:
                col = smudge_column(col)
            if must_flatten:
                col = col.flatten()
            transformed_col = o.transform(col)
            # make it a column vector explicitly, if necessary
            if len(transformed_col.shape) == 1:
                transformed_col = transformed_col.reshape(-1, 1)
            results.append(transformed_col)
        return self.update_values(X, results)

    def fit_transform(self, X, y=None):
        """Convenience: fit on ``X`` then transform it (sklearn convention)."""
        self.fit(X)
        return self.transform(X)

    def update_values(self, X, results):
        """Write the transformed columns back into a deep copy of ``X``.

        Single-column results overwrite in place; multi-column results
        replace the touched columns via concatenation at the end.
        """
        X = copy.deepcopy(X)
        max_num_cols = max([r.shape[1] for r in results])
        # can just override directly
        if max_num_cols == 1:
            for i, v in zip(self.ixs, results):
                put_column(X, i, v)
        else:
            # remove columns that may have been modified
            X_removed = remove_column_ixs(X, self.ixs)
            # concatenate column-wise at the end, the results
            X = combine_values(X_removed, results)
        return X
|
13,705 | 14caf4d4f39ff47c2b399c4887c0ba57fbe8afab | # para input perceber que é um número tenho que declarar o tipo da variável
n1 = int(input('Digite um número: '))
n2 = int(input('Digite outro número: '))
# no type declaration is needed to add — Python infers it
s = n1 + n2
# type() shows the variable's type
print("O tipo de variável é :", type(s))
print("A soma desses números é:", s)
# using {} with .format lets several variables go into a single message
print("A soma entre {} e {} é: {}".format(n1, n2, s))
|
13,706 | 8170b10f6d66faaed2a1ee0d191c20af848c8158 |
"""
Implementations of goodness-of-fit tests.
Goodness-of-fit
---------------
.. autosummary::
:toctree: generated/
ChiSquareTest
JarqueBera
References
----------
B. W. Yap & C. H. Sim (2011) Comparisons of various types of normality tests,
Journal of Statistical Computation and Simulation, 81:12, 2141-2155, DOI: 10.1080/00949655.2010.520163
Ukponmwan H. Nosakhare, Ajibade F. Bright. Evaluation of Techniques for Univariate Normality Test Using Monte
Carlo Simulation. American Journal of Theoretical and Applied Statistics.
Special Issue: Statistical Distributions and Modeling in Applied Mathematics.
Vol. 6, No. 5-1, 2017, pp. 51-61. doi: 10.11648/j.ajtas.s.2017060501.18
Wikipedia contributors. (2018, March 20). Jarque–Bera test. In Wikipedia, The Free Encyclopedia.
Retrieved 14:46, September 15, 2018,
from https://en.wikipedia.org/w/index.php?title=Jarque%E2%80%93Bera_test&oldid=831439673
"""
import numpy as np
from scipy.stats import chi2
from hypothetical.descriptive import kurtosis, skewness
class ChiSquareTest(object):
    r"""Performs the one-sample chi-square goodness-of-fit test.

    The test evaluates whether a significant difference exists between the
    observed frequencies and the expected frequencies under a null
    hypothesis, using the statistic

    .. math::

        \chi^2 = \sum^k_{i=1} \frac{(O_i - E_i)^2}{E_i}

    where :math:`O_i` and :math:`E_i` are the observed and expected
    frequencies of the :math:`i`-th category and :math:`k` the number of
    categories. The p-value compares the statistic against a chi-square
    distribution.

    Parameters
    ----------
    observed : array-like
        One-dimensional array of observed frequencies.
    expected : array-like, optional
        One-dimensional array of expected frequencies. If not given, each
        category is assumed equally likely and the expected frequencies are
        the mean of the observed frequencies.
    continuity : bool, optional
        Applies Yates's continuity correction. Defaults to False as the
        correction can tend to overcorrect and result in a type II error.
    degrees_freedom : int, optional
        Additional reduction of the degrees of freedom; the p-value is
        computed with :math:`k - 1 - \mathrm{degrees\_freedom}` degrees
        of freedom.

    Attributes
    ----------
    observed, expected : numpy.ndarray
        Observed and expected frequencies.
    degrees_of_freedom : int
        Degrees of freedom used when computing the p-value.
    continuity_correction : bool
        Whether Yates's continuity correction is applied.
    n : int
        Number of observed frequencies.
    chi_square : float
        The computed :math:`\chi^2` statistic.
    p_value : float
        The p-value of the test.
    test_summary : dict
        Collection of the resulting test statistics.

    Raises
    ------
    ValueError
        If ``expected`` is given but not of the same length as ``observed``.

    Examples
    --------
    >>> observed = [29, 19, 18, 25, 17, 10, 15, 11]
    >>> expected = [18, 18, 18, 18, 18, 18, 18, 18]
    >>> ch = ChiSquareTest(observed, expected)
    >>> ch.test_summary['p-value']
    0.022239477462390588

    References
    ----------
    Siegel, S. (1956). Nonparametric statistics: For the behavioral sciences.
    McGraw-Hill. ISBN 07-057348-4

    Weisstein, Eric W. "Chi-Squared Test." From MathWorld--A Wolfram Web
    Resource. http://mathworld.wolfram.com/Chi-SquaredTest.html
    """
    def __init__(self, observed, expected=None, continuity=False, degrees_freedom=0):
        # np.asarray is a no-op for existing ndarrays, otherwise converts
        self.observed = np.asarray(observed)
        if expected is None:
            obs_mean = np.mean(self.observed)
            # BUG FIX: np.full_like inherits the dtype of `observed`; for
            # integer counts that truncated the (generally non-integral)
            # mean. Force a float dtype so expected frequencies are exact.
            self.expected = np.full_like(self.observed, obs_mean, dtype=float)
        else:
            self.expected = np.asarray(expected)
        if self.observed.shape[0] != self.expected.shape[0]:
            raise ValueError('number of observations must be of the same length as expected values.')
        self.degrees_of_freedom = self.observed.shape[0] - 1 - degrees_freedom
        self.continuity_correction = continuity
        self.n = self.observed.shape[0]
        self.chi_square = self._chisquare_value()
        self.p_value = self._p_value()
        self.test_summary = {
            'chi-square': self.chi_square,
            'p-value': self.p_value,
            'degrees of freedom': self.degrees_of_freedom,
            'continuity correction': self.continuity_correction
        }

    def _chisquare_value(self):
        r"""Compute the chi-square statistic
        :math:`\chi^2 = \sum^k_{i=1} \frac{(O_i - E_i)^2}{E_i}`.

        When ``continuity_correction`` is True, Yates's 0.5 correction is
        subtracted from each absolute deviation (bool multiplies as 0/1).

        Returns
        -------
        x2 : float
            The computed chi-square value, with continuity correction
            applied if specified.
        """
        x2 = np.sum((np.absolute(self.observed - self.expected) -
                     (0.5 * self.continuity_correction)) ** 2 / self.expected)
        return x2

    def _p_value(self):
        r"""Find the p-value of the chi-square statistic.

        The p-value is the chi-square distribution's survival function
        evaluated at the statistic with ``degrees_of_freedom`` degrees of
        freedom.

        Returns
        -------
        p_value : float
            The p-value for the statistic and degrees of freedom.
        """
        pval = chi2.sf(self.chi_square, self.degrees_of_freedom)
        return pval
class JarqueBera(object):
    r"""Performs the Jarque-Bera goodness-of-fit test for normality.

    The test combines the sample skewness :math:`s` and kurtosis :math:`k`
    into the statistic

    .. math::

        JB = \frac{n}{6} \left( s^2 + \frac{k^2}{4} \right)

    which, for adequate sample sizes, follows a chi-square distribution
    with two degrees of freedom. The statistic is always non-negative; the
    farther from zero, the stronger the indication the data is not normal.
    For small samples (roughly :math:`n < 30`) the test is overly sensitive
    and can produce large Type 1 error rates.

    Parameters
    ----------
    x : array-like
        One-dimensional collection of observed sample values.

    Attributes
    ----------
    x : numpy.ndarray
        The sample values.
    test_statistic : float
        Computed Jarque-Bera test statistic.
    p_value : float
        p-value of the Jarque-Bera test statistic.
    test_summary : dict
        The test statistic and its associated p-value.

    Raises
    ------
    ValueError
        If the sample data is not one-dimensional.

    References
    ----------
    Jarque, C., & Bera, A. (1987). A Test for Normality of Observations and
    Regression Residuals. International Statistical Review, 55(2), 163-172.
    """
    def __init__(self, x):
        self.x = x if isinstance(x, np.ndarray) else np.array(x)
        if self.x.ndim != 1:
            raise ValueError('sample data must be one-dimensional')
        self.test_statistic = self._jarque_bera_statistic()
        self.p_value = self._p_value()
        self.test_summary = {
            'Jarque-Bera statistic': self.test_statistic,
            'p-value': self.p_value
        }

    def _jarque_bera_statistic(self):
        r"""Compute :math:`JB = \frac{n}{6}(s^2 + \frac{k^2}{4})` from the
        sample's skewness and kurtosis."""
        sample_size = len(self.x)
        skew_term = skewness(self.x) ** 2
        kurt_term = kurtosis(self.x) ** 2 / 4
        return sample_size / 6. * (skew_term + kurt_term)

    def _p_value(self):
        r"""Return the chi-square (df=2) survival function evaluated at the
        test statistic."""
        return chi2.sf(self.test_statistic, 2)
|
13,707 | 5347170ae8a7fbf62e97716c36ed9cd771c4a3a4 |
import pygame
import random
import math
pygame.mixer.pre_init(44100, 16, 2, 4096)
pygame.init()
from pygame import mixer
# Screen size, game caption and game icon
screen = pygame.display.set_mode((880, 680))
pygame.display.set_caption("Corona Invaders")
icon = pygame.image.load("virus.png")
pygame.display.set_icon(icon)
# Game backround
backround = pygame.image.load("6870.jpg")
backroundX = 0
backroundY = 0
# Backround music
mixer.music.load("bensound-evolution.mp3")
mixer.music.play(-1)
# Goal
goal = pygame.image.load("objective.png")
goalX = float(440)
goalY = float(0)
# Player
player = pygame.image.load("coronavirus.png")
playerX = float(440)
playerY = float(550)
playerX_move = 0
playerY_move = 0
# Enemies
enemy = pygame.image.load("boss3.png")
enemyX = random.randint(0, 880)
enemyY = float(100)
enemyX_move = 8
enemyY_move = 0
enemy2 = pygame.image.load("boss2.png")
enemy2X = random.randint(0, 880)
enemy2Y = float(200)
enemy2X_move = 10
enemy2Y_move = 0
enemy3 = pygame.image.load("boss1.png")
enemy3X = random.randint(0, 880)
enemy3Y = float(300)
enemy3X_move = 10
enemy3Y_move = 0
enemy4 = pygame.image.load("boss4.png")
enemy4X = random.randint(0, 880)
enemy4Y = float(450)
enemy4X_move = 5
enemy4Y_move = 0
def _is_within_collision_radius(ax, ay, bx, by, radius=45):
    """Return True when points (ax, ay) and (bx, by) are closer than
    ``radius`` pixels (Euclidean distance). Shared helper replacing five
    copy-pasted collision functions with identical bodies."""
    return math.sqrt((ax - bx) ** 2 + (ay - by) ** 2) < radius


def iscollision_G(goalX, goalY, playerX, playerY):
    """True when the player touches the goal."""
    return _is_within_collision_radius(goalX, goalY, playerX, playerY)


def iscollision(enemyX, enemyY, playerX, playerY):
    """True when the player touches enemy 1."""
    return _is_within_collision_radius(enemyX, enemyY, playerX, playerY)


def iscollision_2(enemy2X, enemy2Y, playerX, playerY):
    """True when the player touches enemy 2."""
    return _is_within_collision_radius(enemy2X, enemy2Y, playerX, playerY)


def iscollision_3(enemy3X, enemy3Y, playerX, playerY):
    """True when the player touches enemy 3."""
    return _is_within_collision_radius(enemy3X, enemy3Y, playerX, playerY)


def iscollision_4(enemy4X, enemy4Y, playerX, playerY):
    """True when the player touches enemy 4."""
    return _is_within_collision_radius(enemy4X, enemy4Y, playerX, playerY)
run = True
while run:
screen.fill((137, 96, 167))
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
exit(0)
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
playerX_move = -4
if event.key == pygame.K_RIGHT:
playerX_move = 4
if event.key == pygame.K_UP:
playerY_move = -4
if event.key == pygame.K_DOWN:
playerY_move = 4
if event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
playerX_move = 0
if event.key == pygame.K_UP or event.key == pygame.K_DOWN:
playerY_move = 0
playerX += playerX_move
playerY += playerY_move
if playerX <= 0:
playerX = 0
elif playerX >= 815:
playerX = 815
if playerY <= 0:
playerY = 0
elif playerY >= 615:
playerY = 615
enemyX += enemyX_move
if enemyX <= 0:
enemyX_move = 8
elif enemyX >= 815:
enemyX_move = - 8
enemy2X += enemy2X_move
if enemy2X <= 0:
enemy2X_move = 10
elif enemy2X >= 815:
enemy2X_move = - 10
enemy3X += enemy3X_move
if enemy3X <= 0:
enemy3X_move = 10
elif enemy3X >= 815:
enemy3X_move = - 10
enemy4X += enemy4X_move
if enemy4X <= 0:
enemy4X_move = 5
elif enemy4X >= 815:
enemy4X_move = - 5
collision = iscollision_G(goalX,goalY,playerX,playerY)
if collision:
die_sound = mixer.Sound("applause2.wav")
die_sound.play()
print("You Win!")
exit(0)
collision = iscollision(enemyX,enemyY,playerX,playerY)
if collision:
die_sound = mixer.Sound("wickedwitchlaugh.wav")
die_sound.play()
print("You loose")
exit(0)
collision = iscollision_2(enemy2X,enemy2Y,playerX,playerY)
if collision:
die_sound = mixer.Sound("wickedmalelaugh1.wav")
die_sound.play()
print("You loose")
exit(0)
collision = iscollision_3(enemy3X,enemy3Y,playerX,playerY)
if collision:
die_sound = mixer.Sound("Laugh+2.wav")
die_sound.play()
print("You loose")
exit(0)
collision = iscollision_4(enemy4X,enemy4Y,playerX,playerY)
if collision:
die_sound = mixer.Sound("wickedwitchlaugh.wav")
die_sound.play()
print("You loose")
exit(0)
screen.blit(backround,(backroundX, backroundY))
screen.blit(goal, (goalX, goalY))
screen.blit(player, (playerX, playerY))
screen.blit(enemy, (enemyX, enemyY))
screen.blit(enemy2, (enemy2X, enemy2Y))
screen.blit(enemy3, (enemy3X, enemy3Y))
screen.blit(enemy4, (enemy4X, enemy4Y))
pygame.display.flip()
pygame.display.update()
|
13,708 | c0d2987b6b8bd16aeed3648fcf7883374c8cf6c9 | """Processing objects typically used as losses.
These classes are quite similar to Nodes (subclasses, in fact), with the
important distinction that each object has a singular scalar output.
"""
import theano.tensor as T
import optimus.core as core
from optimus.nodes import Node
import optimus.functions as functions
class NegativeLogLikelihood(Node):
"""Indexed negative log-likelihood loss, i.e. for 1-of-k classifiers.
In numpy syntax, computes the following:
output = mean(-log(likelihoods[:, index]))
The provided likelihoods must be 2D, and should satisfy (likelihoods > 0);
life will be very miserable if the second condition is violated, but no
checks are performed in-line.
See also: optimus.nodes.SelectIndex
"""
def __init__(self, name):
# Input Validation
Node.__init__(self, name=name)
self.likelihoods = core.Port(name=self.__own__("likelihoods"))
self.index = core.Port(name=self.__own__("index"), shape=[])
self._inputs.extend([self.likelihoods, self.index])
self.output = core.Port(name=self.__own__('output'))
self._outputs.append(self.output)
def transform(self):
"""writeme"""
self.validate_ports()
assert self.likelihoods.variable.ndim == 2
col_index = self.index.variable
row_index = T.arange(col_index.shape[0], dtype='int32')
self.output.variable = -T.mean(T.log(
self.likelihoods.variable[row_index, col_index]))
class CrossEntropy(Node):
"""Pointwise cross-entropy between a `prediction` and `target`.
NOTE: Both inputs *must* be non-negative, and only `target` may contain
zeros. Expect all hell to break loose if this is violated.
"""
def __init__(self, name, epsilon=10.0**-6):
# Input Validation
Node.__init__(self, name=name, epsilon=epsilon)
self.prediction = core.Port(name=self.__own__("prediction"))
self.target = core.Port(name=self.__own__("target"))
self._inputs.extend([self.prediction, self.target])
self.output = core.Port(name=self.__own__('output'))
self._outputs.append(self.output)
self.epsilon = epsilon
def transform(self):
"""writeme"""
self.validate_ports()
prediction = self.prediction.variable
target = self.target.variable
eps_p1 = (1.0 + self.epsilon)
output = target * T.log((prediction + self.epsilon) / eps_p1)
output += (1.0 - target) * T.log(
(1.0 - prediction + self.epsilon) / eps_p1)
self.output.variable = -output.mean(axis=1)
class CrossEntropyLoss(CrossEntropy):
    """Scalar cross-entropy loss between a `prediction` and `target`.

    Identical to `CrossEntropy`, but reduces the per-example output to a
    single scalar by taking the mean.

    NOTE: Both inputs *must* be non-negative, and only `target` may contain
    zeros. Expect all hell to break loose if this is violated.
    """
    def transform(self):
        """Compute the parent's pointwise cross-entropy, then reduce the
        output port's variable to its scalar mean."""
        super(CrossEntropyLoss, self).transform()
        self.output.variable = T.mean(self.output.variable)
class MeanSquaredError(Node):
    """Compute the mean squared error between a `prediction` and a `target`.

    See also: optimus.nodes.SquaredEuclidean
    """
    def __init__(self, name):
        # Input Validation
        Node.__init__(self, name=name)
        # Input ports: predicted values and ground-truth targets.
        self.prediction = core.Port(name=self.__own__("prediction"))
        self.target = core.Port(name=self.__own__("target"))
        self._inputs.extend([self.prediction, self.target])
        # Single scalar output port.
        self.output = core.Port(name=self.__own__('output'))
        self._outputs.append(self.output)
    def transform(self):
        """Wire the symbolic MSE expression into the output port.

        Inputs with ndim >= 2 are flattened to matrices and squared errors
        are summed per example (axis=1) before averaging over examples.

        NOTE(review): for 1-D inputs, axis=None makes ``.sum()`` collapse
        everything to a scalar, so the outer ``T.mean`` is over a single
        value — the result is the total sum of squared errors, not their
        mean. Confirm this asymmetry with the vector case is intended.
        """
        self.validate_ports()
        if self.prediction.variable.ndim >= 2:
            xA = T.flatten(self.prediction.variable, outdim=2)
            xB = T.flatten(self.target.variable, outdim=2)
            axis = 1
        else:
            xA = self.prediction.variable
            xB = self.target.variable
            axis = None
        self.output.variable = T.mean(T.pow(xA - xB, 2.0).sum(axis=axis))
class WeightDecayPenalty(Node):
    """Compute the mean L2-magnitude of an `input`, scaled by `weight`.

    See also: optimus.nodes.L2Magnitude.
    """
    def __init__(self, name):
        Node.__init__(self, name=name)
        # Ports: the tensor to penalize and the scalar decay weight.
        self.input = core.Port(name=self.__own__('input'))
        self.weight = core.Port(name=self.__own__('weight'))
        self._inputs.extend([self.input, self.weight])
        # Single scalar output port.
        self.output = core.Port(name=self.__own__('output'))
        self._outputs.append(self.output)
    def transform(self):
        """Wire the symbolic penalty: weight * mean(per-row L2 norm of the
        input flattened to a matrix)."""
        self.validate_ports()
        x_in = self.input.variable.flatten(2)
        w_mag = T.sqrt(T.sum(T.pow(x_in, 2.0), axis=-1))
        self.output.variable = self.weight.variable * T.mean(w_mag)
class SimilarityMargin(Node):
    """Contrastive margin loss gated by an `equivalence` score.

    Inputs
    ------
    distance : vector
        Observed distance between datapoints.
    equivalence : vector
        Similarity scores normalized to [0, 1] (least to most similar).
    sim_margin : scalar
        Similar pairs closer than this incur no penalty; <= 0 means a
        penalty is always incurred.
    diff_margin : scalar
        Dissimilar pairs farther than this incur no penalty; <= 0 means a
        penalty is always incurred.

    Outputs
    -------
    output : scalar
        ave(y * hwr(D - sim_margin)^2 + (1 - y) * hwr(diff_margin - D)^2)
        for distance D and equivalence y.
    """
    def __init__(self, name):
        super(SimilarityMargin, self).__init__(name=name)
        self.distance = core.Port(name=self.__own__('distance'))
        self.equivalence = core.Port(name=self.__own__('equivalence'))
        self.sim_margin = core.Port(name=self.__own__('sim_margin'))
        self.diff_margin = core.Port(name=self.__own__('diff_margin'))
        for port in (self.distance, self.equivalence,
                     self.sim_margin, self.diff_margin):
            self._inputs.append(port)
        self.output = core.Port(name=self.__own__('output'))
        self._outputs.append(self.output)

    def transform(self):
        """Wire the symbolic contrastive loss to `output`."""
        self.validate_ports()
        # TODO: Find a more reusable way of enforcing this behavior.
        if self.distance.variable.ndim != 1:
            raise ValueError("`distance` must be a vector.")
        if self.equivalence.variable.ndim != 1:
            raise ValueError("`equivalence` must be a vector.")
        dist = self.distance.variable
        equiv = self.equivalence.variable
        # Half-wave rectified, squared hinge terms for each regime.
        sim_penalty = functions.relu(dist - self.sim_margin.variable) ** 2.0
        diff_penalty = functions.relu(self.diff_margin.variable - dist) ** 2.0
        blended = equiv * sim_penalty + (1 - equiv) * diff_penalty
        self.output.variable = T.mean(blended)
class ContrastiveMargin(Node):
    """Two-sided contrastive margin loss over paired costs.

    Inputs
    ------
    cost_sim : vector
        Observed cost between two inputs defined as "similar".
    cost_diff : vector
        Observed cost between two inputs defined as "different".
    margin_sim : scalar
        Similar costs below this margin incur no penalty; <= 0 means a
        penalty is always incurred.
    margin_diff : scalar
        Dissimilar costs above this margin incur no penalty; <= 0 means a
        penalty is always incurred.

    Outputs
    -------
    output : scalar
        mean(hwr(cost_sim - margin_sim)^2) + mean(hwr(margin_diff - cost_diff)^2);
        with `filter_zeros` the means run over non-zero penalties only.
    """
    def __init__(self, name, filter_zeros=True):
        super(ContrastiveMargin, self).__init__(name=name)
        self.cost_sim = core.Port(name=self.__own__('cost_sim'))
        self.cost_diff = core.Port(name=self.__own__('cost_diff'))
        self.margin_sim = core.Port(name=self.__own__('margin_sim'))
        self.margin_diff = core.Port(name=self.__own__('margin_diff'))
        for port in (self.cost_sim, self.cost_diff,
                     self.margin_sim, self.margin_diff):
            self._inputs.append(port)
        self.output = core.Port(name=self.__own__('output'))
        self._outputs.append(self.output)
        self.filter_zeros = filter_zeros

    def transform(self):
        """Wire the symbolic two-sided margin loss to `output`."""
        self.validate_ports()
        # TODO: Find a more reusable way of enforcing this behavior.
        if self.cost_sim.variable.ndim != 1:
            raise ValueError("`cost_sim` must be a vector.")
        if self.cost_diff.variable.ndim != 1:
            raise ValueError("`cost_diff` must be a vector.")
        sim_penalty = functions.relu(
            self.cost_sim.variable - self.margin_sim.variable) ** 2.0
        diff_penalty = functions.relu(
            self.margin_diff.variable - self.cost_diff.variable) ** 2.0
        if self.filter_zeros:
            # Drop zero-penalty entries so they do not dilute the means.
            sim_penalty = sim_penalty[(sim_penalty > 0).nonzero()]
            diff_penalty = diff_penalty[(diff_penalty > 0).nonzero()]
        self.output.variable = T.mean(sim_penalty) + T.mean(diff_penalty)
class PairwiseRank(Node):
    """Pairwise rank loss relating the costs of a similar and a
    dissimilar pair through a scale factor `alpha` and a `margin`.

    Inputs
    ------
    cost_sim : vector
        Observed cost between two inputs defined as "similar".
    cost_diff : vector
        Observed cost between two inputs defined as "different".
    alpha : scalar
        Separation parameter between dissimilar manifolds; when in
        doubt, set to 1.
    margin : scalar
        Additive margin inside the hinge.

    Outputs
    -------
    output : scalar
        mean(hwr(cost_diff - alpha * cost_sim + margin)^2)

    NOTE(review): the original prose says the similar cost "should be
    alpha-times smaller" than the dissimilar one, which suggests the
    opposite sign inside the hinge; the code follows the documented
    equation above — confirm the intended direction with the callers.
    """
    def __init__(self, name):
        super(PairwiseRank, self).__init__(name=name)
        self.cost_sim = core.Port(name=self.__own__('cost_sim'))
        self.cost_diff = core.Port(name=self.__own__('cost_diff'))
        self.alpha = core.Port(name=self.__own__('alpha'))
        self.margin = core.Port(name=self.__own__('margin'))
        for port in (self.cost_sim, self.cost_diff, self.alpha, self.margin):
            self._inputs.append(port)
        self.output = core.Port(name=self.__own__('output'))
        self._outputs.append(self.output)

    def transform(self):
        """Wire the symbolic rank hinge loss to `output`."""
        self.validate_ports()
        # TODO: Find a more reusable way of enforcing this behavior.
        if self.cost_sim.variable.ndim != 1:
            raise ValueError("`cost_sim` must be a vector.")
        if self.cost_diff.variable.ndim != 1:
            raise ValueError("`cost_diff` must be a vector.")
        rank_gap = (self.cost_diff.variable
                    - self.alpha.variable * self.cost_sim.variable
                    + self.margin.variable)
        self.output.variable = T.mean(functions.relu(rank_gap) ** 2.0)
|
13,709 | e27c9b8a5dbb135c9c864e3596f465024938be16 | #!/usr/bin/python3
def best_score(my_dict):
    """Return the key with the highest value in `my_dict`.

    Ties are broken in favor of the key encountered first during
    iteration. Returns None when `my_dict` is None or empty.

    Bug fix: the previous implementation compared values with `is`,
    which tests identity rather than equality — reliable only for
    interned small ints, and arbitrary for duplicated floats/large
    ints. It also sorted all scores (O(n log n)) where a single
    max() pass suffices.
    """
    if not my_dict:
        return None
    # max() over the keys, keyed by their values: one O(n) pass.
    return max(my_dict, key=my_dict.get)
|
13,710 | 3566d80d5705529d32714b1ceb369656513d8f5f | def solution(str1, str2):
j1, j2 = [], []
for i in range(len(str1) - 1):
if str1[i:i + 2].isalpha():
j1.append(str1[i:i + 2].lower())
for i in range(len(str2) - 1):
if str2[i:i + 2].isalpha():
j2.append(str2[i:i + 2].lower())
if not j1 and not j2: return 65536
if len(j1) > len(j2):
inter = [j1.remove(s) for s in j2 if s in j1]
else:
inter = [j2.remove(s) for s in j1 if s in j2]
union = j1 + j2
return int(len(inter) / len(union) * 65536) |
13,711 | ffd0ba4cba3ce4205e49331f4630eeaaf25a91ef | '''
@Time :
@Author : Jingsen Zheng
@File : waymo_dataset_loader
@Brief :
'''
from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
import math
import torch
import torch.utils.data as data
import numpy as np
import os
import subprocess
import shlex
from scipy.spatial.transform import Rotation
def _get_data_files(list_filename):
    """Return the lines of `list_filename` with trailing whitespace removed."""
    with open(list_filename) as list_file:
        return [entry.rstrip() for entry in list_file.read().splitlines()]
def _load_data_file(name):
    """Load one Nx4 float64 binary file.

    Returns (points, labels): columns 0-2 are the xyz coordinates and
    column 3 is the per-point label cast to int64.
    """
    columns = np.fromfile(name).reshape(-1, 4)
    return columns[:, :3], columns[:, 3].astype(np.int64)
class WaymoDatasetLoader(data.Dataset):
    """Map-style dataset over a directory of Nx4 binary point-cloud files.

    Each item is a (points, labels) pair of torch tensors. When
    `num_points` is set, that many points are sampled without
    replacement from the cloud on every access.
    """

    def __init__(self, data_path, num_points):
        super(WaymoDatasetLoader, self).__init__()
        self.data_dir = data_path
        self.num_points = num_points
        # Eagerly load every file in the directory into memory.
        clouds, annotations = [], []
        for entry in os.listdir(data_path):
            xyz, class_ids = _load_data_file(os.path.join(data_path, entry))
            clouds.append(xyz)
            annotations.append(class_ids)
        self.points = clouds
        self.labels = annotations

    def __getitem__(self, idx):
        cloud = self.points[idx]
        if self.num_points is None:
            chosen = np.arange(cloud.shape[0])
        else:
            chosen = np.random.choice(cloud.shape[0], self.num_points, replace=False)
        points = torch.from_numpy(cloud[chosen, :].copy()).type(torch.FloatTensor)
        labels = torch.from_numpy(self.labels[idx][chosen].copy()).type(torch.LongTensor)
        return points, labels

    def __len__(self):
        return len(self.points)

    def set_num_points(self, pts):
        """Change how many points are sampled per item (None = all)."""
        self.num_points = pts

    def randomize(self):
        # Sampling already happens per-access; nothing extra to do.
        pass
class DatasetLoader:
    """Self-batching iterator over a directory of Nx4 binary point clouds.

    Unlike `WaymoDatasetLoader`, batching, (training-only) augmentation
    and range clipping/normalization happen here; every batch is yielded
    as flat per-point tensors tagged with their in-batch cloud id.
    """
    def __init__(self, data_path, batch_size, training = True,
                 num_points = None, augment = False,
                 min_range_x = -75, max_range_x = 75,
                 min_range_y = -75, max_range_y = 75,
                 min_range_z = -1, max_range_z = 5,
                 max_shift_x = 0.1, max_shift_y = 0.1,
                 max_shift_z = 0.1, max_rotation = 1):
        # Sampling / batching configuration.
        self.data_path = data_path
        self.batch_size = batch_size
        self.training = training
        self.num_points = num_points
        self.augment = augment
        # Crop box: points outside (min, max) per axis are discarded.
        self.min_range_x = min_range_x
        self.max_range_x = max_range_x
        self.min_range_y = min_range_y
        self.max_range_y = max_range_y
        self.min_range_z = min_range_z
        self.max_range_z = max_range_z
        # Augmentation limits: max translation per axis and max rotation
        # about z in whole degrees.
        self.max_shift_x = max_shift_x
        self.max_shift_y = max_shift_y
        self.max_shift_z = max_shift_z
        self.max_rotation = max_rotation
        # Eagerly load every file in the directory into memory.
        all_files = os.listdir(data_path)
        self.data_size = len(all_files)
        data_batchlist, label_batchlist = [], []
        for f in all_files:
            data, label = self.load_pointcloud(os.path.join(data_path, f))
            data_batchlist.append(data)
            label_batchlist.append(label)
        self.points = data_batchlist
        self.labels = label_batchlist

    def data_augment(self, points, labels, pt_idxs):
        """Randomly shift, rotate (about z) and shuffle one point cloud.

        A single shared permutation shuffles points, labels and pt_idxs
        so the three stay aligned.
        """
        # 1) No ring information. Hence skipping ring based augmentation
        # 2) Add random noise [-rnd, rnd]
        # points[:, 3] += 2 * self.cfg.RAND_INTENSITY_VARIATION * np.random.rand() - self.cfg.RAND_INTENSITY_VARIATION
        # 3) Clamp the intensity between [0, 1]
        # points[:, 3] = np.clip(points[:, 3], 0.0, 1.0)
        # 4) Shift augment along X and Y - direction
        shift_x = 2 * self.max_shift_x * np.random.rand() - self.max_shift_x
        shift_y = 2 * self.max_shift_y * np.random.rand() - self.max_shift_y
        points[:, 0] += shift_x
        points[:, 1] += shift_y
        # 5) Augment along z-axis
        move_z = 2 * self.max_shift_z * np.random.rand() - self.max_shift_z
        points[:, 2] += move_z
        # Random whole-degree rotation angle in [-max_rotation, max_rotation].
        rand_rot = np.random.randint(2 * self.max_rotation + 1) - self.max_rotation
        rotMat = Rotation.from_euler('z', rand_rot, degrees=True)
        points[:, :3] = rotMat.apply(points[:, :3])
        # Randomly shuffle points & labels to remove ordering dependence.
        p = np.random.permutation(len(points))
        return points[p], labels[p], pt_idxs[p]

    def normalize_pointcloud(self, points, labels, pt_idxs):
        """Crop to the configured range box and scale x/y/z by their maxima.

        z is additionally centered on its per-cloud mean after scaling.
        """
        # Delete points outside the crop box (open interval per axis).
        req_mask = (points[:, 0] > self.min_range_x) & (points[:, 0] < self.max_range_x) & (
                points[:, 1] > self.min_range_y) & (points[:, 1] < self.max_range_y) & (
                points[:, 2] > self.min_range_z) & (points[:, 2] < self.max_range_z)
        points = points[req_mask]
        labels = labels[req_mask]
        pt_idxs = pt_idxs[req_mask]
        # Scale each axis into roughly [-1, 1].
        points[:, 0] = points[:, 0] / self.max_range_x
        points[:, 1] = points[:, 1] / self.max_range_y
        points[:, 2] = points[:, 2] / self.max_range_z
        mean_z = np.mean(points[:, 2])
        points[:, 2] = points[:, 2] - mean_z
        return points, labels, pt_idxs

    def load_pointcloud(self, name):
        """Read one Nx4 float64 binary file -> (xyz points, int64 labels)."""
        f = np.fromfile(name)
        f = f.reshape(-1, 4)
        data = f[:, :3]
        label = f[:, 3].astype(np.int64)
        return data, label

    def __iter__(self):
        """Start a new epoch: reset the cursor and reshuffle file order."""
        self.cur_it = 0
        self.index = np.arange(self.data_size)
        np.random.shuffle(self.index)
        return self

    def __next__(self):
        """Assemble and return the next batch.

        Returns (batch_ids, points, labels, origin_points_or_None,
        batch_size). The last partial batch is padded with randomly
        chosen clouds; raises StopIteration once every cloud was used.
        """
        if self.cur_it < 0:
            raise StopIteration
        batch_ids = []
        points_batch = []
        origin_points_batch = []
        labels_batch = []
        for i in range(self.batch_size):
            if 0 <= self.cur_it < self.data_size:
                idx = self.index[self.cur_it]
                self.cur_it = self.cur_it + 1
                if self.cur_it >= self.data_size:
                    # Sentinel: epoch exhausted; next __next__ call stops.
                    self.cur_it = -1
            else:
                # BUG FIX: previously `cur_it == -1` still satisfied
                # `cur_it < data_size`, so this padding branch was
                # unreachable and the epoch never terminated when
                # data_size % batch_size != 0. Also randint's upper bound
                # is exclusive, so `data_size - 1` could never pick the
                # last cloud (and raised for data_size == 1).
                idx = self.index[np.random.randint(0, self.data_size)]
            if self.num_points is not None:
                pt_idxs = np.random.choice(self.points[idx].shape[0], self.num_points, replace=False)
            else:
                pt_idxs = np.arange(self.points[idx].shape[0])
            points = self.points[idx][pt_idxs, :].copy()
            labels = self.labels[idx][pt_idxs].copy()
            if self.training and self.augment:
                points, labels, pt_idxs = self.data_augment(points, labels, pt_idxs)
            points, labels, pt_idxs = self.normalize_pointcloud(points, labels, pt_idxs)
            batch_ids.append(np.repeat(i, len(points)))
            points_batch.append(points)
            labels_batch.append(labels)
            if not self.training:
                # Keep the raw (pre-normalization) points for evaluation.
                origin_points_batch.append(self.points[idx][pt_idxs, :].copy())
        return torch.from_numpy(np.hstack(batch_ids)), \
               torch.from_numpy(np.concatenate(points_batch, axis=0)).type(torch.FloatTensor), \
               torch.from_numpy(np.concatenate(labels_batch, axis=0)).type(torch.LongTensor), \
               None if self.training else np.concatenate(origin_points_batch, axis=0), \
               self.batch_size

    def iter_num_per_epoch(self):
        """Number of batches per epoch (a trailing partial batch counts)."""
        return math.ceil(self.data_size / self.batch_size)
13,712 | 40ab219246e17cbea2f5693d9f61e8dc6415e552 | from flask_wtf import FlaskForm
from wtforms import StringField, SelectField, SubmitField
from wtforms.fields.html5 import EmailField
from wtforms.validators import DataRequired, Email
class AddTaskForm(FlaskForm):
    """Form for adding one to-do item: task name, owner email and priority.

    NOTE(review): `choices` is passed as a flat list of strings, which
    requires WTForms >= 2.3 (older versions expect (value, label) pairs),
    and `wtforms.fields.html5` is removed in WTForms 3 — confirm the
    pinned wtforms version.
    """
    # Field order here is the order the form renders in.
    task_name = StringField('To Do Item', validators=[DataRequired()])
    email = EmailField('Email', validators=[DataRequired(), Email()])
    priority = SelectField('Priority', choices=['Low', 'Medium', 'High'], validators=[DataRequired()])
    submit = SubmitField('Add To Do Item')
class ClearTasksForm(FlaskForm):
    """Single-button form that clears the whole to-do list."""
    submit = SubmitField('Clear To-Do List')
13,713 | 8bef12e64268b458794a37edd941e6adabe19f87 | from eagertools import domap, emap
from hypothesis import given
import hypothesis.strategies as ST
from util import no_args, one_arg, two_args
def test_empty_input():
    """emap of an empty iterable produces an empty list."""
    outcome = emap(no_args, [])
    assert outcome == []
@given(ST.lists(ST.integers()))
def test_one_iter(vals):
    """emap with a single iterable agrees with the builtin map."""
    expected = [one_arg(v) for v in vals]
    assert emap(one_arg, vals) == expected
@given(ST.lists(ST.integers()), ST.lists(ST.integers()))
def test_two_iters(xs, ys):
    """emap over two iterables agrees with map (truncates at the shortest)."""
    expected = [two_args(a, b) for a, b in zip(xs, ys)]
    assert emap(two_args, xs, ys) == expected
def test_domap_does_not_call_func_for_empty_input():
    """domap on an empty iterable returns None without invoking func."""
    outcome = domap(no_args, [])
    assert outcome is None
@given(ST.lists(ST.integers()))
def test_domap_gives_none_for_unary_function(vals):
    """domap is side-effect-only: the unary case returns None."""
    outcome = domap(one_arg, vals)
    assert outcome is None
@given(ST.lists(ST.integers()), ST.lists(ST.integers()))
def test_domap_gives_none_for_binary_function(xs, ys):
    """domap is side-effect-only: the binary case returns None."""
    outcome = domap(two_args, xs, ys)
    assert outcome is None
|
13,714 | a212442d91b2807e6353f21b0a68c4ee74ec8db9 | #
# PySNMP MIB module TPT-POLICY-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/TPT-POLICY-MIB
# Produced by pysmi-0.3.4 at Wed May 1 15:26:23 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ValueSizeConstraint")
Ipv6Address, = mibBuilder.importSymbols("IPV6-TC", "Ipv6Address")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Bits, Counter64, Integer32, iso, Counter32, NotificationType, TimeTicks, Unsigned32, ModuleIdentity, Gauge32, IpAddress, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "Counter64", "Integer32", "iso", "Counter32", "NotificationType", "TimeTicks", "Unsigned32", "ModuleIdentity", "Gauge32", "IpAddress", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
tpt_tpa_objs, tpt_tpa_unkparams, tpt_tpa_eventsV2 = mibBuilder.importSymbols("TPT-TPAMIBS-MIB", "tpt-tpa-objs", "tpt-tpa-unkparams", "tpt-tpa-eventsV2")
# --- Module identity -------------------------------------------------------
# NOTE(review): this file was generated by pysmi from the TPT-POLICY-MIB
# ASN.1 source (see header) — prefer regenerating over hand-editing.
tpt_policy = ModuleIdentity((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1)).setLabel("tpt-policy")
tpt_policy.setRevisions(('2016-05-25 18:54', '2015-06-19 18:30', '2015-05-28 13:30', '2014-12-15 11:42',))
# Revision descriptions are only supported by newer pysnmp builds.
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: tpt_policy.setRevisionsDescriptions(('Updated copyright information. Minor MIB syntax fixes.', 'Added SSL inspection notification.', 'Added SSL inspected flag parameter to policy notifications.', 'Updated table sequence entries to be SMI compliant.',))
if mibBuilder.loadTexts: tpt_policy.setLastUpdated('201605251854Z')
if mibBuilder.loadTexts: tpt_policy.setOrganization('Trend Micro, Inc.')
if mibBuilder.loadTexts: tpt_policy.setContactInfo('www.trendmicro.com')
if mibBuilder.loadTexts: tpt_policy.setDescription("TPA policy counters. Copyright (C) 2016 Trend Micro Incorporated. All Rights Reserved. Trend Micro makes no warranty of any kind with regard to this material, including, but not limited to, the implied warranties of merchantability and fitness for a particular purpose. Trend Micro shall not be liable for errors contained herein or for incidental or consequential damages in connection with the furnishing, performance, or use of this material. This document contains proprietary information, which is protected by copyright. No part of this document may be photocopied, reproduced, or translated into another language without the prior written consent of Trend Micro. The information is provided 'as is' without warranty of any kind and is subject to change without notice. The only warranties for Trend Micro products and services are set forth in the express warranty statements accompanying such products and services. Nothing herein should be construed as constituting an additional warranty. Trend Micro shall not be liable for technical or editorial errors or omissions contained herein. TippingPoint(R), the TippingPoint logo, and Digital Vaccine(R) are registered trademarks of Trend Micro. All other company and product names may be trademarks of their respective holders. All rights reserved. This document contains confidential information, trade secrets or both, which are the property of Trend Micro. No part of this documentation may be reproduced in any form or by any means or used to make any derivative work (such as translation, transformation, or adaptation) without written permission from Trend Micro or one of its subsidiaries. All other company and product names may be trademarks of their respective holders. ")
# --- 32-bit policy packet counters (generated; do not hand-edit) -----------
policyPacketsDropped = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: policyPacketsDropped.setStatus('current')
if mibBuilder.loadTexts: policyPacketsDropped.setDescription('The total number of packets discarded due to network congestion.')
policyPacketsBlocked = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: policyPacketsBlocked.setStatus('current')
if mibBuilder.loadTexts: policyPacketsBlocked.setDescription('The cumulative number of packets blocked because of policy actions.')
policyPacketsIncoming = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: policyPacketsIncoming.setStatus('current')
if mibBuilder.loadTexts: policyPacketsIncoming.setDescription('The total number of incoming packets.')
policyPacketsOutgoing = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: policyPacketsOutgoing.setStatus('current')
if mibBuilder.loadTexts: policyPacketsOutgoing.setDescription('The total number of outgoing packets.')
policyPacketsInvalid = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: policyPacketsInvalid.setStatus('current')
if mibBuilder.loadTexts: policyPacketsInvalid.setDescription('The total number of packets discarded because they were invalid.')
policyPacketsPermitted = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: policyPacketsPermitted.setStatus('current')
if mibBuilder.loadTexts: policyPacketsPermitted.setDescription('The cumulative number of packets permitted because of policy actions.')
# --- 64-bit equivalents of the counters above ------------------------------
policyPacketsDropped64 = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 31), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: policyPacketsDropped64.setStatus('current')
if mibBuilder.loadTexts: policyPacketsDropped64.setDescription('The total number of packets discarded due to network congestion.')
policyPacketsBlocked64 = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 32), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: policyPacketsBlocked64.setStatus('current')
if mibBuilder.loadTexts: policyPacketsBlocked64.setDescription('The cumulative number of packets blocked because of policy actions.')
policyPacketsIncoming64 = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 33), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: policyPacketsIncoming64.setStatus('current')
if mibBuilder.loadTexts: policyPacketsIncoming64.setDescription('The total number of incoming packets.')
policyPacketsOutgoing64 = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 34), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: policyPacketsOutgoing64.setStatus('current')
if mibBuilder.loadTexts: policyPacketsOutgoing64.setDescription('The total number of outgoing packets.')
policyPacketsInvalid64 = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 36), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: policyPacketsInvalid64.setStatus('current')
if mibBuilder.loadTexts: policyPacketsInvalid64.setDescription('The total number of packets discarded because they were invalid.')
policyPacketsPermitted64 = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 37), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: policyPacketsPermitted64.setStatus('current')
if mibBuilder.loadTexts: policyPacketsPermitted64.setDescription('The total number of packets permitted because of policy actions.')
policyPacketsRateLimited64 = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 38), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: policyPacketsRateLimited64.setStatus('current')
if mibBuilder.loadTexts: policyPacketsRateLimited64.setDescription('The total number of packets discarded by rate limiting filters.')
policyPacketsTrusted64 = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 39), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: policyPacketsTrusted64.setStatus('current')
if mibBuilder.loadTexts: policyPacketsTrusted64.setDescription('The cumulative number of packets trusted because of policy actions.')
# --- Digital Vaccine subtree -----------------------------------------------
policyDVObjs = ObjectIdentity((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 10))
if mibBuilder.loadTexts: policyDVObjs.setStatus('current')
if mibBuilder.loadTexts: policyDVObjs.setDescription('Sub-tree of Digital Vaccine information.')
policyDVVersion = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 10, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: policyDVVersion.setStatus('current')
if mibBuilder.loadTexts: policyDVVersion.setDescription('The version number of the Digital Vaccine on this machine.')
# --- Obsolete per-policy counter table (kept for MIB compatibility) --------
policyCounterTable = MibTable((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 5), )
if mibBuilder.loadTexts: policyCounterTable.setStatus('obsolete')
if mibBuilder.loadTexts: policyCounterTable.setDescription('Table of per-policy counter values.')
policyCounterEntry = MibTableRow((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 5, 1), ).setIndexNames((0, "TPT-POLICY-MIB", "policyGlobalID"))
if mibBuilder.loadTexts: policyCounterEntry.setStatus('obsolete')
if mibBuilder.loadTexts: policyCounterEntry.setDescription('An entry in the policy counter table. Rows cannot be created or deleted. ')
policyGlobalID = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 5, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 40)))
if mibBuilder.loadTexts: policyGlobalID.setStatus('obsolete')
if mibBuilder.loadTexts: policyGlobalID.setDescription('The global identifier of a policy.')
policyDescriptiveName = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 5, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 80)))
if mibBuilder.loadTexts: policyDescriptiveName.setStatus('obsolete')
if mibBuilder.loadTexts: policyDescriptiveName.setDescription('The human-readable name of a policy.')
policyCountBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 5, 1, 3), Counter64())
if mibBuilder.loadTexts: policyCountBytes.setStatus('obsolete')
if mibBuilder.loadTexts: policyCountBytes.setDescription('The total number of bytes affected by the given policy.')
policyCountPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 5, 1, 4), Counter64())
if mibBuilder.loadTexts: policyCountPackets.setStatus('obsolete')
if mibBuilder.loadTexts: policyCountPackets.setDescription('The total number of packets affected by the given policy.')
policyCreationTime = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 5, 1, 5), Unsigned32())
if mibBuilder.loadTexts: policyCreationTime.setStatus('obsolete')
if mibBuilder.loadTexts: policyCreationTime.setDescription('The time the policy was pushed to NetPAL, in seconds since the epoch.')
class PolicyProtocol(TextualConvention, Integer32):
    # Generated enum-style textual convention mapping protocol names to 1-8.
    description = 'A selection from a set of networking protocols detected by a policy.'
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))
    namedValues = NamedValues(("icmp", 1), ("udp", 2), ("tcp", 3), ("other-ip", 4), ("arp", 5), ("other-eth", 6), ("icmpv6", 7), ("other-ipv6", 8))
class PolicyFrameSize(TextualConvention, Integer32):
    # Generated enum-style textual convention of frame-size buckets (1-11).
    description = 'A selection from a set of layer-2 frame size categories.'
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))
    namedValues = NamedValues(("fs64B", 1), ("fs65to127B", 2), ("fs128to255B", 3), ("fs256to511B", 4), ("fs512to1023B", 5), ("fs1024toMaxB", 6), ("fsMaxto4095B", 7), ("fs4096to9216B", 8), ("fsUnder", 9), ("fsOver", 10), ("fs9217to16383", 11))
class PolicyFrameType(TextualConvention, Integer32):
    # Generated enum-style textual convention of frame addressing/error types.
    description = 'A selection from a set of layer-2 frame types based on addressing and error status.'
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))
    namedValues = NamedValues(("unicast", 1), ("broadcast", 2), ("multicast", 3), ("macControl", 4), ("fcsError", 5), ("alignError", 6), ("symbolError", 7))
class PolicySeverity(TextualConvention, Integer32):
    """Textual convention: policy severity level (statistics and notifications)."""
    status = 'current'
    description = 'A selection from a set of severity levels used by policies. Used for both statistical reports and notifications.'
    namedValues = NamedValues(
        ("warning", 1),
        ("minor", 2),
        ("major", 3),
        ("critical", 4),
    )
    # Restrict the underlying Integer32 to exactly the enumerated values.
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(
        SingleValueConstraint(1, 2, 3, 4)
    )
# --- topTenHitsByPolicy table (...2.1.11): ten policies with the most hits ---
# Indexed by topTenRank (1..10); read-only, rows managed by the agent.
topTenHitsByPolicyTable = MibTable((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 11), )
if mibBuilder.loadTexts: topTenHitsByPolicyTable.setStatus('current')
if mibBuilder.loadTexts: topTenHitsByPolicyTable.setDescription('Table of policies with the ten greatest hit counts.')
topTenHitsByPolicyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 11, 1), ).setIndexNames((0, "TPT-POLICY-MIB", "topTenRank"))
if mibBuilder.loadTexts: topTenHitsByPolicyEntry.setStatus('current')
if mibBuilder.loadTexts: topTenHitsByPolicyEntry.setDescription('An entry in the top ten policies table. Rows cannot be created or deleted. ')
# Index column: rank 1..10.
topTenRank = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 11, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 10))).setMaxAccess("readonly")
if mibBuilder.loadTexts: topTenRank.setStatus('current')
if mibBuilder.loadTexts: topTenRank.setDescription('The numerical ranking 1 through 10 of a policy.')
policyHitCount = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 11, 1, 2), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: policyHitCount.setStatus('current')
if mibBuilder.loadTexts: policyHitCount.setDescription('The count of alerts generated by a policy.')
policyName = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 11, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: policyName.setStatus('current')
if mibBuilder.loadTexts: policyName.setDescription('The human-readable name of a policy.')
policyUUID = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 11, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: policyUUID.setStatus('current')
if mibBuilder.loadTexts: policyUUID.setDescription('The global identifier of a policy.')
# --- alertsBySeverity table (...2.1.12): alert totals per severity level -----
# Indexed by alertSeverity (PolicySeverity TC defined above).
alertsBySeverityTable = MibTable((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 12), )
if mibBuilder.loadTexts: alertsBySeverityTable.setStatus('current')
if mibBuilder.loadTexts: alertsBySeverityTable.setDescription('Table of alert counts of all policies at each severity level.')
alertsBySeverityEntry = MibTableRow((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 12, 1), ).setIndexNames((0, "TPT-POLICY-MIB", "alertSeverity"))
if mibBuilder.loadTexts: alertsBySeverityEntry.setStatus('current')
if mibBuilder.loadTexts: alertsBySeverityEntry.setDescription('An entry in the alerts by severity table. Rows cannot be created or deleted. ')
alertSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 12, 1, 1), PolicySeverity()).setMaxAccess("readonly")
if mibBuilder.loadTexts: alertSeverity.setStatus('current')
if mibBuilder.loadTexts: alertSeverity.setDescription('The severity of a policy.')
severityAlertCount = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 12, 1, 2), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: severityAlertCount.setStatus('current')
if mibBuilder.loadTexts: severityAlertCount.setDescription('The count of alerts generated by all policies of a given severity.')
# --- alertsByProtocol table (...2.1.13): alert totals per protocol -----------
# Indexed by alertProtocol (PolicyProtocol TC defined above).
alertsByProtocolTable = MibTable((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 13), )
if mibBuilder.loadTexts: alertsByProtocolTable.setStatus('current')
if mibBuilder.loadTexts: alertsByProtocolTable.setDescription('Table of alert counts of all policies at each protocol.')
alertsByProtocolEntry = MibTableRow((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 13, 1), ).setIndexNames((0, "TPT-POLICY-MIB", "alertProtocol"))
if mibBuilder.loadTexts: alertsByProtocolEntry.setStatus('current')
if mibBuilder.loadTexts: alertsByProtocolEntry.setDescription('An entry in the alerts by protocol table. Rows cannot be created or deleted. ')
alertProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 13, 1, 1), PolicyProtocol()).setMaxAccess("readonly")
if mibBuilder.loadTexts: alertProtocol.setStatus('current')
if mibBuilder.loadTexts: alertProtocol.setDescription('The protocol of a policy.')
protocolAlertCount = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 13, 1, 2), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: protocolAlertCount.setStatus('current')
if mibBuilder.loadTexts: protocolAlertCount.setDescription('The count of alerts generated by all policies of a given protocol.')
# --- Obsolete by-zone tables (...2.1.14 - ...2.1.17) -------------------------
# Four structurally identical tables (alerts / permits / blocks / p2ps per
# zone), each indexed by a (slot, port) pair. All are marked 'obsolete' in
# the MIB; the slot/port index columns carry no max-access.
alertsByZoneTable = MibTable((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 14), )
if mibBuilder.loadTexts: alertsByZoneTable.setStatus('obsolete')
if mibBuilder.loadTexts: alertsByZoneTable.setDescription('Table of alert counts of all policies for each zone.')
alertsByZoneEntry = MibTableRow((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 14, 1), ).setIndexNames((0, "TPT-POLICY-MIB", "alertSlot"), (0, "TPT-POLICY-MIB", "alertPort"))
if mibBuilder.loadTexts: alertsByZoneEntry.setStatus('obsolete')
if mibBuilder.loadTexts: alertsByZoneEntry.setDescription('An entry in the alerts by zone table. Rows cannot be created or deleted. ')
alertSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 14, 1, 1), Unsigned32())
if mibBuilder.loadTexts: alertSlot.setStatus('obsolete')
if mibBuilder.loadTexts: alertSlot.setDescription('The slot portion identifying the zone affected by a policy.')
alertPort = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 14, 1, 2), Unsigned32())
if mibBuilder.loadTexts: alertPort.setStatus('obsolete')
if mibBuilder.loadTexts: alertPort.setDescription('The port portion identifying the zone affected by a policy.')
zoneAlertCount = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 14, 1, 3), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zoneAlertCount.setStatus('obsolete')
if mibBuilder.loadTexts: zoneAlertCount.setDescription('The count of alerts generated by all policies of a given zone.')
# permitsByZone (...2.1.15): same layout, counting permits.
permitsByZoneTable = MibTable((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 15), )
if mibBuilder.loadTexts: permitsByZoneTable.setStatus('obsolete')
if mibBuilder.loadTexts: permitsByZoneTable.setDescription('Table of permit counts of all policies for each zone.')
permitsByZoneEntry = MibTableRow((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 15, 1), ).setIndexNames((0, "TPT-POLICY-MIB", "permitSlot"), (0, "TPT-POLICY-MIB", "permitPort"))
if mibBuilder.loadTexts: permitsByZoneEntry.setStatus('obsolete')
if mibBuilder.loadTexts: permitsByZoneEntry.setDescription('An entry in the permits by zone table. Rows cannot be created or deleted. ')
permitSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 15, 1, 1), Unsigned32())
if mibBuilder.loadTexts: permitSlot.setStatus('obsolete')
if mibBuilder.loadTexts: permitSlot.setDescription('The slot portion identifying the zone affected by a policy.')
permitPort = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 15, 1, 2), Unsigned32())
if mibBuilder.loadTexts: permitPort.setStatus('obsolete')
if mibBuilder.loadTexts: permitPort.setDescription('The port portion identifying the zone affected by a policy.')
zonePermitCount = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 15, 1, 3), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zonePermitCount.setStatus('obsolete')
if mibBuilder.loadTexts: zonePermitCount.setDescription('The count of permits generated by all policies of a given zone.')
# blocksByZone (...2.1.16): same layout, counting blocks.
blocksByZoneTable = MibTable((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 16), )
if mibBuilder.loadTexts: blocksByZoneTable.setStatus('obsolete')
if mibBuilder.loadTexts: blocksByZoneTable.setDescription('Table of block counts of all policies for each zone.')
blocksByZoneEntry = MibTableRow((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 16, 1), ).setIndexNames((0, "TPT-POLICY-MIB", "blockSlot"), (0, "TPT-POLICY-MIB", "blockPort"))
if mibBuilder.loadTexts: blocksByZoneEntry.setStatus('obsolete')
if mibBuilder.loadTexts: blocksByZoneEntry.setDescription('An entry in the blocks by zone table. Rows cannot be created or deleted. ')
blockSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 16, 1, 1), Unsigned32())
if mibBuilder.loadTexts: blockSlot.setStatus('obsolete')
if mibBuilder.loadTexts: blockSlot.setDescription('The slot portion identifying the zone affected by a policy.')
blockPort = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 16, 1, 2), Unsigned32())
if mibBuilder.loadTexts: blockPort.setStatus('obsolete')
if mibBuilder.loadTexts: blockPort.setDescription('The port portion identifying the zone affected by a policy.')
zoneBlockCount = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 16, 1, 3), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zoneBlockCount.setStatus('obsolete')
if mibBuilder.loadTexts: zoneBlockCount.setDescription('The count of blocks generated by all policies of a given zone.')
# p2psByZone (...2.1.17): same layout, counting peer-to-peer events.
p2psByZoneTable = MibTable((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 17), )
if mibBuilder.loadTexts: p2psByZoneTable.setStatus('obsolete')
if mibBuilder.loadTexts: p2psByZoneTable.setDescription('Table of p2p counts of all policies for each zone.')
p2psByZoneEntry = MibTableRow((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 17, 1), ).setIndexNames((0, "TPT-POLICY-MIB", "p2pSlot"), (0, "TPT-POLICY-MIB", "p2pPort"))
if mibBuilder.loadTexts: p2psByZoneEntry.setStatus('obsolete')
if mibBuilder.loadTexts: p2psByZoneEntry.setDescription('An entry in the p2ps by zone table. Rows cannot be created or deleted. ')
p2pSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 17, 1, 1), Unsigned32())
if mibBuilder.loadTexts: p2pSlot.setStatus('obsolete')
if mibBuilder.loadTexts: p2pSlot.setDescription('The slot portion identifying the zone affected by a policy.')
p2pPort = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 17, 1, 2), Unsigned32())
if mibBuilder.loadTexts: p2pPort.setStatus('obsolete')
if mibBuilder.loadTexts: p2pPort.setDescription('The port portion identifying the zone affected by a policy.')
zoneP2pCount = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 17, 1, 3), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zoneP2pCount.setStatus('obsolete')
if mibBuilder.loadTexts: zoneP2pCount.setDescription('The count of p2ps generated by all policies of a given zone.')
# --- Frame/packet statistics tables (...2.1.18 - ...2.1.20) ------------------
# framesBySize: frame counts per size bucket, indexed by frameSize
# (PolicyFrameSize TC defined above).
framesBySizeTable = MibTable((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 18), )
if mibBuilder.loadTexts: framesBySizeTable.setStatus('current')
if mibBuilder.loadTexts: framesBySizeTable.setDescription('Table of frame counts received in each size category.')
framesBySizeEntry = MibTableRow((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 18, 1), ).setIndexNames((0, "TPT-POLICY-MIB", "frameSize"))
if mibBuilder.loadTexts: framesBySizeEntry.setStatus('current')
if mibBuilder.loadTexts: framesBySizeEntry.setDescription('An entry in the frames by size table. Rows cannot be created or deleted. ')
frameSize = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 18, 1, 1), PolicyFrameSize()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frameSize.setStatus('current')
if mibBuilder.loadTexts: frameSize.setDescription('The size category of a frame.')
sizeFrameCount = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 18, 1, 2), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sizeFrameCount.setStatus('current')
if mibBuilder.loadTexts: sizeFrameCount.setDescription('The count of frames received in a given size category.')
# framesByType: frame counts per type classification, indexed by frameType
# (PolicyFrameType TC defined above).
framesByTypeTable = MibTable((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 19), )
if mibBuilder.loadTexts: framesByTypeTable.setStatus('current')
if mibBuilder.loadTexts: framesByTypeTable.setDescription('Table of frame counts received in each type classification.')
framesByTypeEntry = MibTableRow((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 19, 1), ).setIndexNames((0, "TPT-POLICY-MIB", "frameType"))
if mibBuilder.loadTexts: framesByTypeEntry.setStatus('current')
if mibBuilder.loadTexts: framesByTypeEntry.setDescription('An entry in the frames by type table. Rows cannot be created or deleted. ')
frameType = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 19, 1, 1), PolicyFrameType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frameType.setStatus('current')
if mibBuilder.loadTexts: frameType.setDescription('The type classification (e.g., unicast, broadcast, FCS error) of a frame.')
typeFrameCount = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 19, 1, 2), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: typeFrameCount.setStatus('current')
if mibBuilder.loadTexts: typeFrameCount.setDescription('The count of frames received in a given type classification.')
# packetsByProtocol: packet counts per protocol, indexed by packetProtocol
# (PolicyProtocol TC defined above).
packetsByProtocolTable = MibTable((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 20), )
if mibBuilder.loadTexts: packetsByProtocolTable.setStatus('current')
if mibBuilder.loadTexts: packetsByProtocolTable.setDescription('Table of packet counts received for each protocol.')
packetsByProtocolEntry = MibTableRow((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 20, 1), ).setIndexNames((0, "TPT-POLICY-MIB", "packetProtocol"))
if mibBuilder.loadTexts: packetsByProtocolEntry.setStatus('current')
if mibBuilder.loadTexts: packetsByProtocolEntry.setDescription('An entry in the packets by protocol table. Rows cannot be created or deleted. ')
packetProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 20, 1, 1), PolicyProtocol()).setMaxAccess("readonly")
if mibBuilder.loadTexts: packetProtocol.setStatus('current')
if mibBuilder.loadTexts: packetProtocol.setDescription('The protocol of a policy.')
protocolPacketCount = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 20, 1, 2), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: protocolPacketCount.setStatus('current')
if mibBuilder.loadTexts: protocolPacketCount.setDescription('The count of packets received for a given protocol.')
# --- portStats table (...2.1.23): per-physical-port statistics ---------------
# Indexed by portNumber (index column, no max-access). Note this table's
# OID arc (23) is numerically out of order relative to the source layout;
# the registrations below for arcs 21 and 22 follow it.
portStatsTable = MibTable((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 23), )
if mibBuilder.loadTexts: portStatsTable.setStatus('current')
if mibBuilder.loadTexts: portStatsTable.setDescription('Table of statistics for each physical port.')
portStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 23, 1), ).setIndexNames((0, "TPT-POLICY-MIB", "portNumber"))
if mibBuilder.loadTexts: portStatsEntry.setStatus('current')
if mibBuilder.loadTexts: portStatsEntry.setDescription('An entry in the port statistics table. Rows cannot be created or deleted. ')
portNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 23, 1, 1), Unsigned32())
if mibBuilder.loadTexts: portNumber.setStatus('current')
if mibBuilder.loadTexts: portNumber.setDescription('The numeric index of a port.')
portName = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 23, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: portName.setStatus('current')
if mibBuilder.loadTexts: portName.setDescription('The name of a port.')
portVlanTranslations = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 23, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: portVlanTranslations.setStatus('current')
if mibBuilder.loadTexts: portVlanTranslations.setDescription('Number of packets leaving this egress port whose VLAN IDs were translated.')
# --- policyByNumber table (...2.1.21): policy name/description by number -----
# Indexed by policyNumber (index column, no max-access).
# NOTE(review): the table description below reads like a copy-paste from
# packetsByProtocolTable; the text is generated from the MIB source, so it
# is left as-is — confirm against the published TPT-POLICY-MIB before editing.
policyByNumberTable = MibTable((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 21), )
if mibBuilder.loadTexts: policyByNumberTable.setStatus('current')
if mibBuilder.loadTexts: policyByNumberTable.setDescription('Table of packet counts received for each protocol.')
policyByNumberEntry = MibTableRow((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 21, 1), ).setIndexNames((0, "TPT-POLICY-MIB", "policyNumber"))
if mibBuilder.loadTexts: policyByNumberEntry.setStatus('current')
if mibBuilder.loadTexts: policyByNumberEntry.setDescription('An entry in the policy by number table. Rows cannot be created or deleted. ')
policyNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 21, 1, 1), Unsigned32())
if mibBuilder.loadTexts: policyNumber.setStatus('current')
if mibBuilder.loadTexts: policyNumber.setDescription('The number of a policy.')
numberName = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 21, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 120))).setMaxAccess("readonly")
if mibBuilder.loadTexts: numberName.setStatus('current')
if mibBuilder.loadTexts: numberName.setDescription('The name of a policy.')
numberDesc = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 21, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 3000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: numberDesc.setStatus('current')
if mibBuilder.loadTexts: numberDesc.setDescription('The description of a policy.')
# --- securityZonePair table (...2.1.22): per-zone-pair info and counters -----
# Indexed by szpUUID. Columns cover identity (names/UUIDs of the pair and
# its in/out zones), traffic counters, and alert/block/permit counts.
securityZonePairTable = MibTable((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 22), )
if mibBuilder.loadTexts: securityZonePairTable.setStatus('current')
if mibBuilder.loadTexts: securityZonePairTable.setDescription('Table of information and statistics for each security zone pair.')
securityZonePairEntry = MibTableRow((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 22, 1), ).setIndexNames((0, "TPT-POLICY-MIB", "szpUUID"))
if mibBuilder.loadTexts: securityZonePairEntry.setStatus('current')
if mibBuilder.loadTexts: securityZonePairEntry.setDescription('An entry in the security zone pair table. Rows cannot be created or deleted. ')
# Identity columns.
szpName = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 22, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: szpName.setStatus('current')
if mibBuilder.loadTexts: szpName.setDescription('The name of a security zone pair.')
szpInZoneName = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 22, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: szpInZoneName.setStatus('current')
if mibBuilder.loadTexts: szpInZoneName.setDescription('The name of the input security zone of a security zone pair.')
szpOutZoneName = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 22, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: szpOutZoneName.setStatus('current')
if mibBuilder.loadTexts: szpOutZoneName.setDescription('The name of the output security zone of a security zone pair.')
szpUUID = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 22, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: szpUUID.setStatus('current')
if mibBuilder.loadTexts: szpUUID.setDescription('The UUID of a security zone pair.')
szpInZoneUUID = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 22, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: szpInZoneUUID.setStatus('current')
if mibBuilder.loadTexts: szpInZoneUUID.setDescription('The UUID of the input security zone of a security zone pair.')
szpOutZoneUUID = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 22, 1, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: szpOutZoneUUID.setStatus('current')
if mibBuilder.loadTexts: szpOutZoneUUID.setDescription('The UUID of the output security zone of a security zone pair.')
# Traffic and event counters.
szpInPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 22, 1, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: szpInPackets.setStatus('current')
if mibBuilder.loadTexts: szpInPackets.setDescription('The number of packets received on this security zone pair.')
szpInOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 22, 1, 8), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: szpInOctets.setStatus('current')
if mibBuilder.loadTexts: szpInOctets.setDescription('The number of bytes received on this security zone pair.')
szpAlerts = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 22, 1, 9), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: szpAlerts.setStatus('current')
if mibBuilder.loadTexts: szpAlerts.setDescription('The number of alerts (blocks + permits) on this security zone pair.')
szpBlocks = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 22, 1, 10), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: szpBlocks.setStatus('current')
if mibBuilder.loadTexts: szpBlocks.setDescription('The number of blocks generated on this security zone pair.')
szpPermits = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 22, 1, 11), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: szpPermits.setStatus('current')
if mibBuilder.loadTexts: szpPermits.setDescription('The number of permits generated on this security zone pair.')
szpPrecedence = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 1, 22, 1, 12), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: szpPrecedence.setStatus('current')
if mibBuilder.loadTexts: szpPrecedence.setDescription('The precedence of security zone pair.')
class PolicyAction(TextualConvention, Integer32):
    """Textual convention: the fundamental action a policy takes on traffic."""
    status = 'current'
    description = 'A selection between three fundamental actions of a policy: blocking the offending packets, detecting them but allowing them through, or rate-limiting them.'
    namedValues = NamedValues(
        ("deny", 1),
        ("allow", 2),
        ("ratelimit", 3),
    )
    # Restrict the underlying Integer32 to exactly the enumerated values.
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(
        SingleValueConstraint(1, 2, 3)
    )
class PolicyComponent(TextualConvention, Integer32):
    """Textual convention: the policy component (maps to a log file) for an event."""
    status = 'current'
    description = 'A selection from among the components of a policy, corresponding to which log file is used to track the associated information.'
    namedValues = NamedValues(
        ("invalid", 0),
        ("deny", 1),
        ("allow", 2),
        ("alert", 7),
        ("block", 8),
        ("peer", 9),
    )
    # Restrict the underlying Integer32 to exactly the enumerated (sparse) values.
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(
        SingleValueConstraint(0, 1, 2, 7, 8, 9)
    )
class SslInspectedFlag(TextualConvention, Integer32):
    """Textual convention: whether an action was taken on an inspected SSL stream."""
    status = 'current'
    description = 'A flag indicating if an action was taken on an inspected SSL data stream.'
    namedValues = NamedValues(
        ("yes", 1),
        ("no", 2),
    )
    # Restrict the underlying Integer32 to exactly the enumerated values.
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(
        SingleValueConstraint(1, 2)
    )
# --- Policy notification varbinds (...3.3.3.1.x) -----------------------------
# Scalars carried as variable bindings in policy notifications (traps).
# Sub-identifiers are not contiguous; later additions (e.g. 128/129 for
# IPv6, 130/131/137) were appended at the end of the arc.
tptPolicyNotifyDeviceID = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 11), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifyDeviceID.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifyDeviceID.setDescription('The unique identifier of the device sending this notification.')
tptPolicyNotifyPolicyID = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 12), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifyPolicyID.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifyPolicyID.setDescription('The unique identifier of the policy causing this notification.')
tptPolicyNotifySignatureID = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 13), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifySignatureID.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifySignatureID.setDescription('The unique identifier of the signature matching the incoming data stream.')
# Obsolete: superseded zone-pair naming (see tptPolicyNotifyZonePair below).
tptPolicyNotifySegmentName = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 14), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifySegmentName.setStatus('obsolete')
if mibBuilder.loadTexts: tptPolicyNotifySegmentName.setDescription('A string of the format <slot>:<index> that uniquely identifies the segment pertaining to this notification.')
# Source/destination addressing of the triggering packet(s); separate
# objects for IPv4 (IpAddress) and IPv6 (Ipv6Address).
tptPolicyNotifySrcNetAddr = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 15), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifySrcNetAddr.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifySrcNetAddr.setDescription('The network address of the source of the packet(s) triggering the policy action.')
tptPolicyNotifySrcNetAddrV6 = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 128), Ipv6Address()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifySrcNetAddrV6.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifySrcNetAddrV6.setDescription('The IPv6 network address of the source of the packet(s) triggering the policy action.')
tptPolicyNotifySrcNetPort = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 16), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifySrcNetPort.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifySrcNetPort.setDescription('The network port (if applicable) of the source of the packet(s) triggering the policy action.')
tptPolicyNotifyDestNetAddr = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 17), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifyDestNetAddr.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifyDestNetAddr.setDescription('The network address of the destination of the packet(s) triggering the policy action.')
tptPolicyNotifyDestNetAddrV6 = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 129), Ipv6Address()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifyDestNetAddrV6.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifyDestNetAddrV6.setDescription('The IPv6 network address of the destination of the packet(s) triggering the policy action.')
tptPolicyNotifyDestNetPort = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 18), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifyDestNetPort.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifyDestNetPort.setDescription('The network port (if applicable) of the destination of the packet(s) triggering the policy action.')
# Aggregation window and action/severity details.
tptPolicyNotifyStartTimeSec = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 19), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifyStartTimeSec.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifyStartTimeSec.setDescription('The time of the first policy hit, marking the start of the aggregation period for this notification (in seconds since January 1, 1970).')
tptPolicyNotifyAlertAction = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 20), PolicyAction()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifyAlertAction.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifyAlertAction.setDescription('The action associated with this notification: whether the affected packets were actually blocked, allowed through, or rate-limited.')
tptPolicyNotifyConfigAction = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 21), PolicyAction()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifyConfigAction.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifyConfigAction.setDescription('The action configured for the policy, which in some cases may differ from the action associated with this notification.')
tptPolicyNotifyComponentID = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 22), PolicyComponent()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifyComponentID.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifyComponentID.setDescription('The component identifier of the policy causing this notification.')
tptPolicyNotifyHitCount = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 23), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifyHitCount.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifyHitCount.setDescription('The number of policy hits occurring during the aggregation period for this notification.')
tptPolicyNotifyAggregationPeriod = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 24), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifyAggregationPeriod.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifyAggregationPeriod.setDescription('The duration (in minutes) of the aggregation period for this notification.')
tptPolicyNotifySeverity = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 25), PolicySeverity()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifySeverity.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifySeverity.setDescription('The severity of the attack for this notification.')
tptPolicyNotifyProtocol = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 26), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 20))).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifyProtocol.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifyProtocol.setDescription('The network protocol of the packet(s) triggering the policy action.')
tptPolicyNotifyAlertTimeSec = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 27), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifyAlertTimeSec.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifyAlertTimeSec.setDescription('The time this alert was initiated, marking the end of the aggregation period for this notification (in seconds since January 1, 1970).')
tptPolicyNotifyAlertTimeNano = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 28), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifyAlertTimeNano.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifyAlertTimeNano.setDescription('The nanoseconds portion of tptPolicyNotifyAlertTimeSec.')
# Packet-trace bookkeeping.
tptPolicyNotifyPacketTrace = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 29), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifyPacketTrace.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifyPacketTrace.setDescription('The value 1 if a corresponding packet trace was logged; 0 if not.')
tptPolicyNotifySequence = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 30), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifySequence.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifySequence.setDescription('The log file entry sequence number corresponding to this notification.')
tptPolicyNotifyTraceBucket = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 36), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifyTraceBucket.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifyTraceBucket.setDescription('The bucket identifier for a packet trace.')
tptPolicyNotifyTraceBegin = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 37), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifyTraceBegin.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifyTraceBegin.setDescription('The starting sequence number for a packet trace.')
tptPolicyNotifyTraceEnd = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 38), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifyTraceEnd.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifyTraceEnd.setDescription('The ending sequence number for a packet trace.')
tptPolicyNotifyMessageParams = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 39), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifyMessageParams.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifyMessageParams.setDescription('A string containing parameters (separated by vertical bars) matching the Message in the Digital Vaccine (the XML tag is Message).')
tptPolicyNotifyStartTimeNano = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 40), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifyStartTimeNano.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifyStartTimeNano.setDescription('The nanoseconds portion of tptPolicyNotifyStartTimeSec.')
tptPolicyNotifyAlertType = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 41), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifyAlertType.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifyAlertType.setDescription('A bit field defined as follows: 0x0001 = Alert 0x0002 = Block 0x0020 = Peer-to-peer 0x0040 = Invalid 0x0080 = Threshold 0x0100 = Management.')
# Ingress/zone context of the triggering traffic.
tptPolicyNotifyInputMphy = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 57), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifyInputMphy.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifyInputMphy.setDescription('The physical input port of the triggering packet(s).')
tptPolicyNotifyVlanTag = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 58), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifyVlanTag.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifyVlanTag.setDescription('The VLAN tag of the triggering packet(s).')
tptPolicyNotifyZonePair = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 59), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifyZonePair.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifyZonePair.setDescription('A string of the format <in zone UUID>:<out zone UUID> that identifies the zone pair pertaining to this notification.')
# Action-set details appended later in the arc (130, 131, 137).
tptPolicyNotifyActionSetID = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 130), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifyActionSetID.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifyActionSetID.setDescription('The action set uuid associated with this notification.')
tptPolicyNotifyRate = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 131), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifyRate.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifyRate.setDescription('The rate-limit, in kbps, of the action set associated with this notification.')
tptPolicyNotifyFlowControl = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 137), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifyFlowControl.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifyFlowControl.setDescription('The action set flow control associated with this notification.')
tptPolicyNotifyActionSetName = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 138), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifyActionSetName.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifyActionSetName.setDescription('The action set name associated with this notification.')
tptPolicyNotifyClientip = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 139), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifyClientip.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifyClientip.setDescription('The client-ip associated with this notification.')
tptPolicyNotifyMetadata = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 140), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifyMetadata.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifyMetadata.setDescription('The metadata associated with this notification.')
tptPolicyNotifySslInspected = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 180), SslInspectedFlag()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyNotifySslInspected.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifySslInspected.setDescription("A flag indicting if the notification is associated with an inspected SSL data stream. This flag is only present on IPS and Quarantine events and doesn't apply to Reputation.")
tptPolicyNotifyVirtualSegment = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 182), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 127))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: tptPolicyNotifyVirtualSegment.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifyVirtualSegment.setDescription('Virtual segment associated with this notification. ')
tptPolicyNotify = NotificationType((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 0, 8)).setObjects(("TPT-POLICY-MIB", "tptPolicyNotifyDeviceID"), ("TPT-POLICY-MIB", "tptPolicyNotifyPolicyID"), ("TPT-POLICY-MIB", "tptPolicyNotifySignatureID"), ("TPT-POLICY-MIB", "tptPolicyNotifyZonePair"), ("TPT-POLICY-MIB", "tptPolicyNotifyInputMphy"), ("TPT-POLICY-MIB", "tptPolicyNotifyVlanTag"), ("TPT-POLICY-MIB", "tptPolicyNotifySrcNetAddr"), ("TPT-POLICY-MIB", "tptPolicyNotifySrcNetPort"), ("TPT-POLICY-MIB", "tptPolicyNotifyDestNetAddr"), ("TPT-POLICY-MIB", "tptPolicyNotifyDestNetPort"), ("TPT-POLICY-MIB", "tptPolicyNotifyProtocol"), ("TPT-POLICY-MIB", "tptPolicyNotifyMessageParams"), ("TPT-POLICY-MIB", "tptPolicyNotifyHitCount"), ("TPT-POLICY-MIB", "tptPolicyNotifyAggregationPeriod"), ("TPT-POLICY-MIB", "tptPolicyNotifyStartTimeSec"), ("TPT-POLICY-MIB", "tptPolicyNotifyStartTimeNano"), ("TPT-POLICY-MIB", "tptPolicyNotifyAlertTimeSec"), ("TPT-POLICY-MIB", "tptPolicyNotifyAlertTimeNano"), ("TPT-POLICY-MIB", "tptPolicyNotifyPacketTrace"), ("TPT-POLICY-MIB", "tptPolicyNotifyTraceBucket"), ("TPT-POLICY-MIB", "tptPolicyNotifyTraceBegin"), ("TPT-POLICY-MIB", "tptPolicyNotifyTraceEnd"), ("TPT-POLICY-MIB", "tptPolicyNotifyAlertAction"), ("TPT-POLICY-MIB", "tptPolicyNotifyConfigAction"), ("TPT-POLICY-MIB", "tptPolicyNotifyComponentID"), ("TPT-POLICY-MIB", "tptPolicyNotifyAlertType"), ("TPT-POLICY-MIB", "tptPolicyNotifySeverity"), ("TPT-POLICY-MIB", "tptPolicyNotifySequence"), ("TPT-POLICY-MIB", "tptPolicyNotifySrcNetAddrV6"), ("TPT-POLICY-MIB", "tptPolicyNotifyDestNetAddrV6"), ("TPT-POLICY-MIB", "tptPolicyNotifyActionSetID"), ("TPT-POLICY-MIB", "tptPolicyNotifyRate"), ("TPT-POLICY-MIB", "tptPolicyNotifyFlowControl"), ("TPT-POLICY-MIB", "tptPolicyNotifyActionSetName"), ("TPT-POLICY-MIB", "tptPolicyNotifyClientip"), ("TPT-POLICY-MIB", "tptPolicyNotifyMetadata"), ("TPT-POLICY-MIB", "tptPolicyNotifySslInspected"))
if mibBuilder.loadTexts: tptPolicyNotify.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotify.setDescription('Notification: Used to inform the management station of a policy alert action (either deny or allow) resulting from a signature match.')
class SslProtocol(TextualConvention, Integer32):
    """SNMP textual convention: SSL/TLS protocol version (1=unknown .. 5=tls12)."""
    description = 'The SSL protocol version. '
    status = 'current'
    # Constrain the INTEGER to the five enumerated protocol codes below.
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))
    namedValues = NamedValues(("unknown", 1), ("sslv3", 2), ("tls10", 3), ("tls11", 4), ("tls12", 5))
class SslInspEventType(TextualConvention, Integer32):
    """SNMP textual convention: direction of an inspected SSL connection."""
    description = 'The type of SSL connection, either inbound or outbound. '
    status = 'current'
    # Only the two enumerated direction codes are valid.
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
    namedValues = NamedValues(("inbound", 1), ("outbound", 2))
class SslInspAction(TextualConvention, Integer32):
    """SNMP textual convention: action taken on an inspected SSL connection."""
    description = 'The action taken on an SSL connection. '
    status = 'current'
    # Only the three enumerated action codes are valid.
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
    namedValues = NamedValues(("decrypted", 1), ("notDecrypted", 2), ("blocked", 3))
tptPolicyNotifySslInspEventType = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 190), SslInspEventType()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: tptPolicyNotifySslInspEventType.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifySslInspEventType.setDescription('The SSL connection type. ')
tptPolicyNotifySslInspAction = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 191), SslInspAction()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: tptPolicyNotifySslInspAction.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifySslInspAction.setDescription('The SSL connection action taken. ')
tptPolicyNotifySslInspDetails = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 192), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: tptPolicyNotifySslInspDetails.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifySslInspDetails.setDescription('Free-form field that provides additional details for the action taken on a SSL connection. ')
tptPolicyNotifySslInspPolicy = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 193), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 127))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: tptPolicyNotifySslInspPolicy.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifySslInspPolicy.setDescription('The SSL inspection policy. ')
tptPolicyNotifySslInspCert = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 194), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 127))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: tptPolicyNotifySslInspCert.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifySslInspCert.setDescription('The certificate used to decrypt SSL traffic. ')
tptPolicyNotifySslInspCltIF = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 195), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: tptPolicyNotifySslInspCltIF.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifySslInspCltIF.setDescription('The client-side interface receiving SSL traffic. ')
tptPolicyNotifySslInspCltSslVer = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 196), SslProtocol()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: tptPolicyNotifySslInspCltSslVer.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifySslInspCltSslVer.setDescription('The client-side SSL protocol version. ')
tptPolicyNotifySslInspCltCrypto = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 197), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: tptPolicyNotifySslInspCltCrypto.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifySslInspCltCrypto.setDescription('The client-side SSL crypto-suite. ')
tptPolicyNotifySslInspSrvIF = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 198), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: tptPolicyNotifySslInspSrvIF.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifySslInspSrvIF.setDescription('The server-side interface sending SSL traffic. ')
tptPolicyNotifySslInspSrvSslVer = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 199), SslProtocol()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: tptPolicyNotifySslInspSrvSslVer.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifySslInspSrvSslVer.setDescription('The server-side SSL protocol version. ')
tptPolicyNotifySslInspSrvCrypto = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 200), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: tptPolicyNotifySslInspSrvCrypto.setStatus('current')
if mibBuilder.loadTexts: tptPolicyNotifySslInspSrvCrypto.setDescription('The server-side SSL crypto-suite. ')
tptPolicySslInspNotify = NotificationType((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 0, 27)).setObjects(("TPT-POLICY-MIB", "tptPolicyNotifyDeviceID"), ("TPT-POLICY-MIB", "tptPolicyNotifyAlertTimeSec"), ("TPT-POLICY-MIB", "tptPolicyNotifyAlertTimeNano"), ("TPT-POLICY-MIB", "tptPolicyNotifySslInspEventType"), ("TPT-POLICY-MIB", "tptPolicyNotifySeverity"), ("TPT-POLICY-MIB", "tptPolicyNotifySslInspAction"), ("TPT-POLICY-MIB", "tptPolicyNotifySslInspDetails"), ("TPT-POLICY-MIB", "tptPolicyNotifyVirtualSegment"), ("TPT-POLICY-MIB", "tptPolicyNotifySslInspPolicy"), ("TPT-POLICY-MIB", "tptPolicyNotifySslInspCert"), ("TPT-POLICY-MIB", "tptPolicyNotifySslInspCltIF"), ("TPT-POLICY-MIB", "tptPolicyNotifySslInspCltSslVer"), ("TPT-POLICY-MIB", "tptPolicyNotifySslInspCltCrypto"), ("TPT-POLICY-MIB", "tptPolicyNotifySslInspSrvIF"), ("TPT-POLICY-MIB", "tptPolicyNotifySslInspSrvSslVer"), ("TPT-POLICY-MIB", "tptPolicyNotifySslInspSrvCrypto"), ("TPT-POLICY-MIB", "tptPolicyNotifySrcNetAddr"), ("TPT-POLICY-MIB", "tptPolicyNotifySrcNetPort"), ("TPT-POLICY-MIB", "tptPolicyNotifyDestNetAddr"), ("TPT-POLICY-MIB", "tptPolicyNotifyDestNetPort"))
if mibBuilder.loadTexts: tptPolicySslInspNotify.setStatus('current')
if mibBuilder.loadTexts: tptPolicySslInspNotify.setDescription('A notification sent when an action is taken on a SSL connection. ')
tptPolicyLogNotifyDeviceID = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 121), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyLogNotifyDeviceID.setStatus('current')
if mibBuilder.loadTexts: tptPolicyLogNotifyDeviceID.setDescription('The unique identifier of the device sending this notification.')
tptPolicyLogNotifyComponentID = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 122), PolicyComponent()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyLogNotifyComponentID.setStatus('current')
if mibBuilder.loadTexts: tptPolicyLogNotifyComponentID.setDescription('The type of log (alert, block, or peer) pertaining to this notification.')
tptPolicyLogNotifyNumber = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 123), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyLogNotifyNumber.setStatus('current')
if mibBuilder.loadTexts: tptPolicyLogNotifyNumber.setDescription('The number of policy log entries since the last SMS log file retrieval.')
tptPolicyLogNotifyTrigger = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 124), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyLogNotifyTrigger.setStatus('current')
if mibBuilder.loadTexts: tptPolicyLogNotifyTrigger.setDescription('The number of policy log entries needed to trigger this notification.')
tptPolicyLogNotifySequence = MibScalar((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 1, 125), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tptPolicyLogNotifySequence.setStatus('current')
if mibBuilder.loadTexts: tptPolicyLogNotifySequence.setDescription('The current log file entry sequence number.')
tptPolicyLogNotify = NotificationType((1, 3, 6, 1, 4, 1, 10734, 3, 3, 3, 0, 19)).setObjects(("TPT-POLICY-MIB", "tptPolicyLogNotifyDeviceID"), ("TPT-POLICY-MIB", "tptPolicyLogNotifyComponentID"), ("TPT-POLICY-MIB", "tptPolicyLogNotifyNumber"), ("TPT-POLICY-MIB", "tptPolicyLogNotifyTrigger"), ("TPT-POLICY-MIB", "tptPolicyLogNotifySequence"))
if mibBuilder.loadTexts: tptPolicyLogNotify.setStatus('current')
if mibBuilder.loadTexts: tptPolicyLogNotify.setDescription('Notification: Used to inform the management station that some number of policy log entries of a particular type (alert, block, or peer) occurred since the last time the management station retrieved the corresponding log file.')
mibBuilder.exportSymbols("TPT-POLICY-MIB", tptPolicyNotifySslInspSrvSslVer=tptPolicyNotifySslInspSrvSslVer, tptPolicyLogNotifyComponentID=tptPolicyLogNotifyComponentID, tptPolicyNotifySrcNetPort=tptPolicyNotifySrcNetPort, policyCountBytes=policyCountBytes, alertSlot=alertSlot, framesBySizeEntry=framesBySizeEntry, tptPolicyNotify=tptPolicyNotify, tptPolicySslInspNotify=tptPolicySslInspNotify, alertsBySeverityTable=alertsBySeverityTable, blockSlot=blockSlot, szpInPackets=szpInPackets, alertsByProtocolTable=alertsByProtocolTable, tptPolicyNotifyClientip=tptPolicyNotifyClientip, szpPrecedence=szpPrecedence, tptPolicyNotifyActionSetID=tptPolicyNotifyActionSetID, policyCounterEntry=policyCounterEntry, tptPolicyNotifyMetadata=tptPolicyNotifyMetadata, policyPacketsDropped=policyPacketsDropped, tptPolicyNotifyInputMphy=tptPolicyNotifyInputMphy, tptPolicyNotifyDestNetAddr=tptPolicyNotifyDestNetAddr, policyHitCount=policyHitCount, tptPolicyNotifySslInspCltSslVer=tptPolicyNotifySslInspCltSslVer, policyPacketsIncoming=policyPacketsIncoming, policyDVVersion=policyDVVersion, tptPolicyNotifyTraceEnd=tptPolicyNotifyTraceEnd, policyGlobalID=policyGlobalID, policyPacketsBlocked64=policyPacketsBlocked64, packetsByProtocolTable=packetsByProtocolTable, szpUUID=szpUUID, permitPort=permitPort, portNumber=portNumber, framesByTypeEntry=framesByTypeEntry, policyDVObjs=policyDVObjs, topTenRank=topTenRank, sizeFrameCount=sizeFrameCount, p2psByZoneTable=p2psByZoneTable, permitsByZoneTable=permitsByZoneTable, tptPolicyNotifySrcNetAddr=tptPolicyNotifySrcNetAddr, portStatsTable=portStatsTable, tptPolicyNotifySslInspCltCrypto=tptPolicyNotifySslInspCltCrypto, alertsBySeverityEntry=alertsBySeverityEntry, policyPacketsBlocked=policyPacketsBlocked, policyName=policyName, p2psByZoneEntry=p2psByZoneEntry, tptPolicyNotifyProtocol=tptPolicyNotifyProtocol, tptPolicyNotifySslInspCltIF=tptPolicyNotifySslInspCltIF, alertsByZoneTable=alertsByZoneTable, szpBlocks=szpBlocks, 
tptPolicyNotifyRate=tptPolicyNotifyRate, PolicySeverity=PolicySeverity, tptPolicyNotifyStartTimeNano=tptPolicyNotifyStartTimeNano, topTenHitsByPolicyEntry=topTenHitsByPolicyEntry, policyNumber=policyNumber, PolicyFrameType=PolicyFrameType, tptPolicyNotifyAlertTimeNano=tptPolicyNotifyAlertTimeNano, szpOutZoneName=szpOutZoneName, policyByNumberTable=policyByNumberTable, tptPolicyLogNotifyDeviceID=tptPolicyLogNotifyDeviceID, tptPolicyNotifySslInspSrvCrypto=tptPolicyNotifySslInspSrvCrypto, szpOutZoneUUID=szpOutZoneUUID, szpInOctets=szpInOctets, PolicyComponent=PolicyComponent, packetsByProtocolEntry=packetsByProtocolEntry, policyPacketsOutgoing=policyPacketsOutgoing, p2pPort=p2pPort, framesBySizeTable=framesBySizeTable, tptPolicyNotifyMessageParams=tptPolicyNotifyMessageParams, tptPolicyNotifySslInspPolicy=tptPolicyNotifySslInspPolicy, tptPolicyNotifyZonePair=tptPolicyNotifyZonePair, tptPolicyNotifySslInspSrvIF=tptPolicyNotifySslInspSrvIF, policyPacketsInvalid=policyPacketsInvalid, tptPolicyNotifyFlowControl=tptPolicyNotifyFlowControl, tptPolicyNotifyTraceBegin=tptPolicyNotifyTraceBegin, policyByNumberEntry=policyByNumberEntry, PolicyFrameSize=PolicyFrameSize, SslInspectedFlag=SslInspectedFlag, topTenHitsByPolicyTable=topTenHitsByPolicyTable, blocksByZoneEntry=blocksByZoneEntry, SslInspEventType=SslInspEventType, zoneP2pCount=zoneP2pCount, policyPacketsInvalid64=policyPacketsInvalid64, tptPolicyNotifyConfigAction=tptPolicyNotifyConfigAction, zoneBlockCount=zoneBlockCount, permitsByZoneEntry=permitsByZoneEntry, tptPolicyNotifyDeviceID=tptPolicyNotifyDeviceID, tptPolicyNotifyVirtualSegment=tptPolicyNotifyVirtualSegment, tptPolicyNotifySslInspected=tptPolicyNotifySslInspected, policyPacketsTrusted64=policyPacketsTrusted64, policyPacketsPermitted64=policyPacketsPermitted64, policyPacketsOutgoing64=policyPacketsOutgoing64, tptPolicyNotifyPacketTrace=tptPolicyNotifyPacketTrace, tptPolicyNotifySignatureID=tptPolicyNotifySignatureID, 
policyPacketsPermitted=policyPacketsPermitted, alertsByZoneEntry=alertsByZoneEntry, blocksByZoneTable=blocksByZoneTable, protocolAlertCount=protocolAlertCount, permitSlot=permitSlot, frameSize=frameSize, packetProtocol=packetProtocol, portName=portName, PolicyProtocol=PolicyProtocol, securityZonePairTable=securityZonePairTable, szpInZoneName=szpInZoneName, PolicyAction=PolicyAction, policyPacketsRateLimited64=policyPacketsRateLimited64, tptPolicyNotifyComponentID=tptPolicyNotifyComponentID, tptPolicyNotifySslInspDetails=tptPolicyNotifySslInspDetails, tptPolicyNotifyAlertTimeSec=tptPolicyNotifyAlertTimeSec, numberName=numberName, tptPolicyLogNotifySequence=tptPolicyLogNotifySequence, policyCreationTime=policyCreationTime, tptPolicyNotifySslInspEventType=tptPolicyNotifySslInspEventType, tptPolicyLogNotifyNumber=tptPolicyLogNotifyNumber, framesByTypeTable=framesByTypeTable, tptPolicyNotifyAlertAction=tptPolicyNotifyAlertAction, tptPolicyNotifySegmentName=tptPolicyNotifySegmentName, policyDescriptiveName=policyDescriptiveName, tptPolicyNotifyTraceBucket=tptPolicyNotifyTraceBucket, alertPort=alertPort, policyCountPackets=policyCountPackets, zoneAlertCount=zoneAlertCount, tptPolicyNotifyDestNetAddrV6=tptPolicyNotifyDestNetAddrV6, SslInspAction=SslInspAction, tpt_policy=tpt_policy, alertsByProtocolEntry=alertsByProtocolEntry, policyCounterTable=policyCounterTable, szpInZoneUUID=szpInZoneUUID, policyPacketsDropped64=policyPacketsDropped64, tptPolicyLogNotify=tptPolicyLogNotify, tptPolicyNotifySeverity=tptPolicyNotifySeverity, tptPolicyLogNotifyTrigger=tptPolicyLogNotifyTrigger, policyPacketsIncoming64=policyPacketsIncoming64, PYSNMP_MODULE_ID=tpt_policy, SslProtocol=SslProtocol, typeFrameCount=typeFrameCount, alertSeverity=alertSeverity, tptPolicyNotifyAggregationPeriod=tptPolicyNotifyAggregationPeriod, szpAlerts=szpAlerts, severityAlertCount=severityAlertCount, portVlanTranslations=portVlanTranslations, tptPolicyNotifyVlanTag=tptPolicyNotifyVlanTag, 
tptPolicyNotifyHitCount=tptPolicyNotifyHitCount, zonePermitCount=zonePermitCount, tptPolicyNotifyAlertType=tptPolicyNotifyAlertType, szpPermits=szpPermits, tptPolicyNotifyActionSetName=tptPolicyNotifyActionSetName, tptPolicyNotifySslInspCert=tptPolicyNotifySslInspCert, securityZonePairEntry=securityZonePairEntry, tptPolicyNotifyStartTimeSec=tptPolicyNotifyStartTimeSec, tptPolicyNotifyPolicyID=tptPolicyNotifyPolicyID, szpName=szpName, tptPolicyNotifyDestNetPort=tptPolicyNotifyDestNetPort, protocolPacketCount=protocolPacketCount, numberDesc=numberDesc, tptPolicyNotifySrcNetAddrV6=tptPolicyNotifySrcNetAddrV6, tptPolicyNotifySequence=tptPolicyNotifySequence, policyUUID=policyUUID, alertProtocol=alertProtocol, frameType=frameType, p2pSlot=p2pSlot, blockPort=blockPort, portStatsEntry=portStatsEntry, tptPolicyNotifySslInspAction=tptPolicyNotifySslInspAction)
|
13,715 | 0dd82e2b99a581645866783774d55869889b751a | '''Tools for simulating long processes, randomly erroring, infinite-loop hangs, memory leaks, and
CPU-overload situations. Run with caution.
'''
import multiprocessing
import random
import sys
import time
# Most Python errors.
# Note, Exception does not catch these exceptions: GeneratorExit, KeyboardInterrupt, SystemExit.
# BaseException will catch them all.
PYTHON_ERRORS = [AssertionError, AttributeError, EOFError, FloatingPointError, GeneratorExit,
ImportError, IndexError, KeyError, KeyboardInterrupt, MemoryError, NameError,
NotImplementedError, OSError, OverflowError, ReferenceError, RuntimeError, StopIteration,
SyntaxError, IndentationError, TabError, SystemError, SystemExit, TypeError, UnboundLocalError,
UnicodeEncodeError, UnicodeDecodeError, UnicodeTranslateError, ValueError, ZeroDivisionError]
def sleep_random(seconds=5):
    '''Sleep for a random duration between 0 and *seconds*.

    Args:
        seconds (float): Upper bound on the nap length. Values <= 0 skip
            sleeping entirely.

    Returns:
        float: The number of seconds actually slept (0 when skipped).
    '''
    if seconds <= 0:
        return 0
    nap_time = random.uniform(0, seconds)
    print('Sleeping for {:.2f}/{} seconds...'.format(nap_time, seconds))
    time.sleep(nap_time)
    # Bug fix: previously returned the upper bound `seconds`, contradicting
    # the documented contract of returning the seconds actually slept.
    return nap_time
def randomly_error(percentage=50):
    '''Randomly raises a random Python exception, given percentage of the time.

    Args:
        percentage (float): Percentage between 0 and 100.

    Returns:
        float: The *percentage* argument, when nothing was raised.
    '''
    threshold = 0.01 * percentage
    if random.random() >= threshold:
        return percentage
    err_cls = random.choice(PYTHON_ERRORS)
    raise err_cls('Randomly raising {}'.format(err_cls.__name__))
def infinite_loop():
    '''Hangs current process in a busy-wait loop. Session will have high power usage.

    Never returns; the only way out is to interrupt or kill the process.
    '''
    print('Hanging in infinite loop...')
    while True:
        pass
def memory_leak():
    '''Infinite loop with memory leak.

    Run this and watch machine memory increase. Once maxed, it'll start writing to disk. Will also
    slow down all other apps. To go back to normal, force quit the terminal.

    Creates list where every value is the previous value times two. Can quit with Ctrl+C but will
    have to close session to clear memory.
    '''
    print('Starting memory leak...')
    items = [2]
    while True:
        # Each appended int doubles in magnitude, so per-element storage also grows.
        items.append(items[-1] * 2)
def memory_leak_2():
    '''Infinite loop with memory leak. Memory rises slower than memory_leak().

    Each iteration creates a fresh function object and points its __doc__
    back at itself, forming a self-referential cycle that keeps allocations
    accumulating until the cycle collector catches up.
    '''
    # Fix: the docstring was previously placed after the print statement,
    # making it a dead string literal instead of the function's __doc__.
    print('Starting memory leak...')
    while True:
        def func(): pass
        func.__doc__ = func  # self-reference; see docstring
def overload_cpu():
    '''Maximize usage on all CPUs. Creates a pool and runs infinite loop of calculations.

    Blocks forever: each pool worker runs _overload_cpu, which never returns.
    '''
    cpus = multiprocessing.cpu_count()
    pool = multiprocessing.Pool(processes=cpus)
    print('Overloading {} CPUs. Quit with Ctrl+C...'.format(cpus))
    # map() waits for the workers to finish, which they never do,
    # so `res` is never actually assigned a result.
    res = pool.map(_overload_cpu, [2] * cpus)
def _overload_cpu(x):
    '''Infinite loop with calculation. Worker body for overload_cpu(); never returns.'''
    while True:
        x * x  # busy-work multiply keeps the CPU pegged
def crash():
    '''Crashes current Python session using recursion.

    Popup error will say "python.exe has stopped working."
    '''
    # Raise the recursion limit far beyond what the native stack can hold so
    # the interpreter crashes instead of raising RecursionError first.
    sys.setrecursionlimit(999999999)
    def func():
        func()
    print('Crashing using recursion...')
    func()
|
13,716 | b1eac345d00e5a9ada05f46763d354452d98ac5e | __author__ = 'Tony Liu'
# -*- coding: utf-8 -*-
# pass does nothing; it is commonly used as a placeholder statement.
for letter in "python":
if letter == "h":
pass
print("pass块")
print(letter) |
13,717 | 95d4c3e58550cdf45c85d483b11090fa3132a58c | from pyramid.threadlocal import get_current_request
from pyramid.exceptions import ConfigurationError
from pyramid.security import authenticated_userid
from pyramid.url import route_url
from git import Repo
def add_renderer_globals(event):
    """Subscriber for ``pyramid.events.BeforeRender`` events.

    Injects Pylons-style helpers into the :term:`renderer globals`:
    ``url``/``h``/``a_url``/``user``, the git ``repo`` configured via the
    ``git_repo`` setting, plus ``c``/``tmpl_context`` and ``session`` when
    the request provides them.
    """
    request = event.get('request')
    if request is None:
        # Fall back to the threadlocal request (may itself be None).
        request = get_current_request()
    extra = {}
    extra['url'] = route_url
    extra['h'] = None
    extra['a_url'] = request.application_url
    extra['user'] = authenticated_userid(request)
    extra['repo'] = Repo(request.registry.settings.get('git_repo', '.'))
    if request is not None:
        ctx = request.tmpl_context
        extra['c'] = ctx
        extra['tmpl_context'] = ctx
    try:
        extra['session'] = request.session
    except ConfigurationError:
        # No session factory configured; omit the helper.
        pass
    event.update(extra)
|
13,718 | 77d748856979b1ac0545037973c3f97bbde2c30e | import sys
# Seed user record; command-line arguments add or overwrite one key.
user = {
    "first_name": "Егор",
    "last_name": "Михеев",
    "sex": "m"
}
# argv[1] is the key and argv[2] the value; plain dict assignment both
# updates an existing key and inserts a new one.
user[sys.argv[1]] = sys.argv[2]
print(user)
"""
ИЗМЕНЕНИЕ ЗНАЧЕНИЯ
Ниже в редакторе содержится словарь user
с пользовательскими данными.
Напишите программу,
которая принимает из аргументов командной строки два параметра:
ключ и значение,
а затем обновляет значение в словаре user если ключ существует
или добавляет в словарь новый ключ с переданным значением.
Пример использования
> python program.py age 18
> {'first_name': 'Егор', 'last_name': 'Михеев', 'sex': 'm', 'age': '18'}
""" |
13,719 | b0b5e59651925d1af47fa6b5db8848fb406e1a76 | from gradientone import InstrumentDataHandler
from gradientone import query_to_dict
from gradientone import create_psettings
from gradientone import convert_str_to_cha_list
from gradientone import render_json_cached
from gradientone import author_creation
from onedb import BscopeDB
from onedb import BscopeDB_key
import ast
import collections
import csv
import datetime
import hashlib
import itertools
import jinja2
import json
import logging
import os
import re
import time
import webapp2
import math
from google.appengine.api import memcache
from google.appengine.api import oauth
from google.appengine.api import users
from google.appengine.ext import db
from time import gmtime, strftime
from collections import OrderedDict
import numpy as np
import appengine_config
import decimate
from google.appengine.ext import blobstore
from google.appengine.ext.webapp import blobstore_handlers
from string import maketrans
class Handler(InstrumentDataHandler):
    """Google App Engine (Python 2) handler that serves and stores BitScope
    capture data, keyed by company/hardware/config/slice, via memcache with a
    datastore fallback on reads."""

    def get(self,company_nickname="", hardware_name="",config_name="",slicename=""):
        """Retrieve BitScope data by instrument name and time slice name.

        Serves the cached JSON payload when present; otherwise queries the
        datastore, rebuilds the payload, caches it, and renders it.
        """
        #if not self.authcheck():
        #    return
        # Cache key combines every path component so each slice caches separately.
        key = 'bscopedata' + company_nickname + hardware_name + config_name + slicename
        cached_copy = memcache.get(key)
        if cached_copy is None:
            logging.error("BscopeData:get: query")
            rows = db.GqlQuery("""SELECT * FROM BscopeDB WHERE config_name =:1
                            AND slicename = :2""", config_name, slicename)
            rows = list(rows)
            data = query_to_dict(rows)
            print data[0]
            # 'cha' is stored as a string; expand it back into a channel list.
            cha_list = convert_str_to_cha_list(data[0]['cha'])
            data[0]['cha'] = cha_list
            e = data[0]
            print e
            print type(e)
            # NOTE(review): the {"data": ...} wrapper below is immediately
            # overwritten by the bare record dump -- confirm which shape
            # clients actually expect.
            output = {"data":data[0]}
            output = json.dumps(e)
            memcache.set(key, output)
            render_json_cached(self, output)
        else:
            render_json_cached(self, cached_copy)

    def post(self,company_nickname="", hardware_name="", config_name="",start_tse=""):
        """Store data by instrument name and time slice name.

        Splits the posted channel samples into fixed-duration slices and
        caches each slice as its own JSON blob in memcache.
        """
        #key = 'bscopedata' + company_nickname + hardware_name + config_name + start_tse
        #memcache.set(key, self.request.body)
        test_results = json.loads(self.request.body)
        test_results_data = test_results['cha']
        data_length = len(test_results_data)
        slice_size = int(test_results['p_settings']['Slice_Size_msec'])
        sample_rate = int(test_results['i_settings']['Sample_Rate_Hz'])
        test_plan = test_results['test_plan']
        testplan_name = test_results['testplan_name']
        print testplan_name
        # Samples per slice = (samples per millisecond) * slice duration in ms.
        sample_per_slice = int((float(sample_rate)/1000)*float(slice_size))
        print slice_size, sample_rate, sample_per_slice
        print data_length
        # Slice names start at start_tse and advance by slice_size per chunk
        # (presumably epoch milliseconds -- confirm against the GET path).
        slicename = int(start_tse)
        stuffing = []
        for i in range(0, data_length, sample_per_slice):
            chunk = str(test_results_data[i:i + sample_per_slice])
            stuffing = chunk
            key = 'bscopedata' + company_nickname + hardware_name + config_name + str(slicename)
            print key
            stuffing = convert_str_to_cha_list(stuffing)
            window_bscope = {'i_settings':test_results['i_settings'], 'p_settings':test_results['p_settings'], 'cha':stuffing, 'testplan_name':testplan_name,
                'start_tse':start_tse, 'company_nickname':company_nickname, 'slicename':slicename, 'hardware_name':hardware_name, 'config_name':config_name, 'test_plan':test_plan}
            out_bscope = json.dumps(window_bscope, ensure_ascii=True)
            memcache.set(key, out_bscope)
slicename += slice_size |
13,720 | 191fd46978f6c25c4b39d7419e368282f0c8aa5a |
#Python doesnt have a main() function unless you code for it
#Python interpreter only executes the launched script file internally known as __main__
#if the launched script wants to reuse qnd run code from another module, the imported or
#called module will be known to the Python interpreter with its own file "name" and therefore
#not be confused with the __main__ script
def OutSideScript():
    """Import the sibling module MyScript and invoke its main() explicitly."""
    import MyScript
    MyScript.main()
if __name__ == '__main__':
    # Only runs when this file is launched directly, i.e. when the
    # interpreter names this module "__main__".
    OutSideScript()
    print("From Outside script")
|
13,721 | 659ee335556058f13b555ce7b5e98538f7ac9d0e | '''
Created on Nov 2, 2012
@author: marek
'''
import unittest
from sk.marek.barak.app.UtilClass import Util
class Test(unittest.TestCase):
def setUp(self):
self.__util__ = Util()
def testIsInteger(self):
self.assertTrue(self.__util__.isInteger(10))
def testIsNotINteger(self):
self.assertFalse(self.__util__.isInteger("a"));
def testCanConvertToInteger(self):
self.assertTrue(self.__util__.canBeCastedToInteger("10"))
def testCanNotConvertToInteger(self):
self.assertFalse(self.__util__.canBeCastedToInteger("b1"))
def testCastToInteger(self):
self.assertTrue(self.__util__.castToInteger("100"))
def testIsPositive(self):
self.assertTrue(self.__util__.isPositive(10))
def testIsBoolean(self):
self.assertTrue(self.__util__.isBoolean("False"))
self.assertTrue(self.__util__.isBoolean("True"))
self.assertFalse(self.__util__.isBoolean("Tkrue"))
self.assertFalse(self.__util__.isBoolean("nie"))
def testCastStringIntegerToBoolean(self):
self.assertTrue(self.__util__.castStringIntegerToBoolean("1"))
self.assertFalse(self.__util__.castStringIntegerToBoolean("0"))
def testCastStringToBoolean(self):
self.assertTrue(self.__util__.castStringToBoolean("tRue"))
self.assertFalse(self.__util__.castStringToBoolean("falSe"))
self.assertEquals(self.__util__.castStringToBoolean("fsa"),None)
self.assertTrue(self.__util__.castStringToBoolean("Ano"))
def testCanCastStringToBoolean(self):
self.assertTrue(self.__util__.canCastStringToBoolean("true"))
self.assertTrue(self.__util__.canCastStringToBoolean("false"))
self.assertFalse(self.__util__.canCastStringToBoolean("trssd"))
self.assertTrue(self.__util__.canCastStringToBoolean("ano"))
def testIsPrevzatie(self):
self.assertEquals(self.__util__.isPrevzatie("kurier"),True)
self.assertEquals(self.__util__.isPrevzatie("posta"),True)
self.assertEquals(self.__util__.isPrevzatie("osobny odber"),True)
self.assertEquals(self.__util__.isPrevzatie("osob odber"),False)
def testIsPlatba(self):
self.assertEquals(self.__util__.isPlatba("hotovost"),True)
self.assertEquals(self.__util__.isPlatba("online"),True)
self.assertEquals(self.__util__.isPlatba("prevod"),True)
self.assertEquals(self.__util__.isPlatba("hotoVost"),False)
def testPlatbaValidate(self):
    """validatePlatba lowercases valid methods and returns None for junk."""
    self.assertEqual(self.__util__.validatePlatba("Hotovost"), "hotovost")
    self.assertEqual(self.__util__.validatePlatba("oNliNe"), "online")
    # assertIsNone states the None expectation directly.
    self.assertIsNone(self.__util__.validatePlatba("otovost"))
def testIsStavObjednavky(self):
    """Exact order-state strings are accepted; misspellings are not."""
    # assertEquals is a deprecated alias of assertEqual (removed in 3.12).
    self.assertEqual(self.__util__.isStavObjednavky("pripravena na expediciu"), True)
    self.assertEqual(self.__util__.isStavObjednavky("priprvena na expediciu"), False)
def testStavObjednavkyValidate(self):
    """validateStavObjednavky normalises case and rejects misspellings."""
    self.assertEqual(self.__util__.validateStavObjednavky("Pripravena na expediciu"), "pripravena na expediciu")
    # assertIsNone states the None expectation directly.
    self.assertIsNone(self.__util__.validateStavObjednavky("Priprave na expediciu"))
def testNameIsValid(self):
    """A capitalised name is valid; a lowercase one is not."""
    # assertEquals is a deprecated alias of assertEqual (removed in 3.12).
    self.assertEqual(self.__util__.isValidName("Marek"), True)
    self.assertEqual(self.__util__.isValidName("jano"), False)
def testNameValidate(self):
    """validateName capitalises valid names and returns None otherwise."""
    self.assertEqual(self.__util__.validateName("marek"), "Marek")
    # assertIsNone states the None expectation directly.
    self.assertIsNone(self.__util__.validateName("mar ek"))
def testTextValid(self):
    """Short text is valid; an over-long string is rejected."""
    # assertEquals is a deprecated alias of assertEqual (removed in 3.12).
    self.assertEqual(self.__util__.isTextValid("Ano je valid"), True)
    # str.join instead of the quadratic += loop; same resulting string.
    value = "".join(str(i) for i in range(1000))
    self.assertEqual(self.__util__.isTextValid(value), False)
if __name__ == "__main__":
    # Run the whole test suite when this module is executed directly.
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
13,722 | ee2ec4c30b5735f69a7280dd318139165040f6e4 | from BlissFramework import BaseComponents
from BlissFramework import Icons
import math
from qt import *
import logging
from BlissFramework.Utils import widget_colors
'''
Motor control brick using a spin box (as an input field, and for the steps)
and buttons to move the motors (while pressed)
'''
__category__ = 'Motor'
class MotorSpinBoxBrick(BaseComponents.BlissWidget):
    """Motor control brick.

    A spin box serves as position display/input and as a step-by-step
    mover; extra buttons move the motor while pressed, stop it, and edit
    the step size. The motor hardware object is selected through the
    'mnemonic' property.
    """

    # Spin-box editor background colours indexed by motor state
    # (NOTINITIALIZED, UNUSABLE, READY, MOVESTARTED, MOVING, ONLIMIT).
    STATE_COLORS = (widget_colors.LIGHT_RED,
                    widget_colors.DARK_GRAY,
                    widget_colors.LIGHT_GREEN,
                    widget_colors.LIGHT_YELLOW,
                    widget_colors.LIGHT_YELLOW,
                    widget_colors.LIGHT_YELLOW)

    # Maximum number of positions kept in the right-click history menu.
    MAX_HISTORY = 20

    def __init__(self,*args):
        """Build the widget hierarchy, declare properties, connect signals."""
        BaseComponents.BlissWidget.__init__(self,*args)

        self.stepEditor=None   # lazily-created StepEditorDialog
        self.motor=None        # motor hardware object (None until configured)
        self.demandMove=0      # requested continuous-move direction: -1/0/+1
        self.inExpert=None     # last expert-mode flag seen (None until set)

        self.addProperty('mnemonic','string','')
        self.addProperty('formatString','formatString','+##.##')
        self.addProperty('label','string','')
        self.addProperty('showLabel', 'boolean', True)
        self.addProperty('showMoveButtons', 'boolean', True)
        self.addProperty('showBox', 'boolean', True)
        self.addProperty('showStop', 'boolean', True)
        self.addProperty('showStep', 'boolean', True)
        self.addProperty('showStepList', 'boolean', False)
        self.addProperty('showPosition', 'boolean', True)
        self.addProperty('invertButtons', 'boolean', False)
        self.addProperty('delta', 'string', '')
        self.addProperty('icons', 'string', '')
        self.addProperty('helpDecrease', 'string', '')
        self.addProperty('helpIncrease', 'string', '')
        self.addProperty('decimalPlaces', 'string', '')
        self.addProperty('hideInUser', 'boolean', False)
        self.addProperty('defaultStep', 'string', '')

        # Outer group box holding the label area and the motor controls.
        self.containerBox=QHGroupBox(self)
        self.containerBox.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Fixed)
        self.labelBox=QHBox(self.containerBox)
        self.motorBox=QHBox(self.containerBox)

        self.label=QLabel(self.labelBox)

        # Buttons that move the motor while they stay pressed.
        self.moveLeftButton=QPushButton(self.motorBox)
        self.moveLeftButton.setPixmap(Icons.load('far_left'))
        self.moveRightButton=QPushButton(self.motorBox)
        self.moveRightButton.setPixmap(Icons.load('far_right'))
        QToolTip.add(self.moveLeftButton,"Moves the motor down (while pressed)")
        QToolTip.add(self.moveRightButton,"Moves the motor up (while pressed)")

        self.box=QHBox(self.motorBox)
        self.box.setSizePolicy(QSizePolicy.MinimumExpanding,QSizePolicy.Minimum)

        # Floating-point spin box showing/accepting the motor position.
        self.spinBox=mySpinBox(self.box)
        self.spinBox.setDecimalPlaces(4)
        self.spinBox.setMinValue(-10000)
        self.spinBox.setMaxValue(10000)
        self.spinBox.setMinimumSize(QSize(75,25))
        self.spinBox.setMaximumSize(QSize(75,25))
        self.spinBox.setSizePolicy(QSizePolicy.MinimumExpanding,QSizePolicy.Minimum)
        QToolTip.add(self.spinBox,"Moves the motor to a specific position or step by step; right-click for motor history")

        self.extraButtonsBox=QHBox(self.motorBox)
        self.extraButtonsBox.setSizePolicy(QSizePolicy.Minimum,QSizePolicy.Minimum)

        # Stop button: only enabled while the motor is moving.
        self.stopButton=QPushButton(self.extraButtonsBox)
        self.stopButton.setPixmap(Icons.load('stop_small'))
        self.stopButton.setEnabled(False)
        QToolTip.add(self.stopButton,"Stops the motor")

        # Button opening the step-editor dialog.
        self.stepButton=QPushButton(self.extraButtonsBox)
        self.stepButtonIcon=Icons.load('steps_small')
        self.stepButton.setPixmap(self.stepButtonIcon)
        QToolTip.add(self.stepButton,"Changes the motor step")

        # Editable combo box with previously used step values.
        self.stepList=myComboBox(self.extraButtonsBox)
        self.stepList.setValidator(QDoubleValidator(self))
        self.stepList.setDuplicatesEnabled(False)
        #self.stepList.setSizePolicy(QSizePolicy.Fixed,QSizePolicy.Minimum)
        pol=self.stepList.sizePolicy()
        pol.setVerData(QSizePolicy.MinimumExpanding)
        self.stepList.setSizePolicy(pol)
        QObject.connect(self.stepList,SIGNAL('activated(int)'),self.goToStep)

        self.moveLeftButton.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Minimum)
        self.moveRightButton.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Minimum)
        self.stopButton.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Minimum)
        self.stepButton.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Minimum)

        self.connect(self.spinBox,PYSIGNAL('stepUp'),self.stepUp)
        self.connect(self.spinBox,PYSIGNAL('stepDown'),self.stepDown)
        self.connect(self.stopButton,SIGNAL('clicked()'),self.stopMotor)
        self.connect(self.stepButton,SIGNAL('clicked()'),self.openStepEditor)
        self.connect(self.spinBox,PYSIGNAL('contextMenu'),self.openHistoryMenu)
        self.connect(self.spinBox.editor(),SIGNAL('returnPressed()'),self.valueChangedStr)
        self.connect(self.moveLeftButton,SIGNAL('pressed()'),self.moveDown)
        self.connect(self.moveLeftButton,SIGNAL('released()'),self.stopMoving)
        self.connect(self.moveRightButton,SIGNAL('pressed()'),self.moveUp)
        self.connect(self.moveRightButton,SIGNAL('released()'),self.stopMoving)

        self.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Fixed)
        self.instanceSynchronize("spinBox","stepList")

        QVBoxLayout(self)
        self.layout().addWidget(self.containerBox)

        self.defineSlot('setEnabled',())
        self.defineSlot('setDisabled',())
        self.defineSlot('toggle_enabled',())

    def setExpertMode(self,mode):
        """Show/hide the whole brick according to the 'hideInUser' property."""
        self.inExpert=mode
        if self['hideInUser']:
            if mode:
                self.containerBox.show()
            else:
                self.containerBox.hide()

    def toggle_enabled(self):
        """Slot: flip the enabled state of the brick."""
        self.setEnabled(not self.isEnabled())

    def run(self):
        """Apply the expert-mode visibility when the GUI starts running."""
        if self.inExpert is not None:
            self.setExpertMode(self.inExpert)

    def stop(self):
        """Always show the brick again when the GUI stops running."""
        self.containerBox.show()

    def getLineStep(self):
        """Return the spin box's current step size."""
        return self.spinBox.lineStep()

    def setLineStep(self,val):
        """Set the step size and make sure it appears in the step list."""
        self.spinBox.setLineStep(float(val))
        found=False
        for i in range(self.stepList.count()):
            if float(str(self.stepList.text(i)))==float(val):
                found=True
        if not found:
            self.stepList.insertItem(self.stepButtonIcon,str(val))
            self.stepList.setCurrentItem(self.stepList.count()-1)

    def goToStep(self,step_index):
        """Slot: a step was chosen in the combo box; make it current."""
        step=str(self.stepList.currentText())
        if step!="":
            self.stepList.changeItem(self.stepButtonIcon,step,step_index)
            self.stepList.setCurrentItem(step_index)
            self.setLineStep(step)

    def setStepButtonIcon(self,icon_name):
        """Change the step icon on the button and on every list entry."""
        self.stepButtonIcon=Icons.load(icon_name)
        self.stepButton.setPixmap(self.stepButtonIcon)
        for i in range(self.stepList.count()):
            txt=self.stepList.text(i)
            self.stepList.changeItem(self.stepButtonIcon,txt,i)

    # Stop the motor
    def stopMotor(self):
        self.motor.stop()

    def stopMoving(self):
        """Slot: a move button was released; cancel the continuous move."""
        self.demandMove=0

    # Move motor to top limit
    def moveUp(self):
        self.demandMove=1
        self.updateGUI()
        state=self.motor.getState()
        if state==self.motor.READY:
            if self['invertButtons']:
                self.reallyMoveDown()
            else:
                self.reallyMoveUp()

    # Move motor to bottom limit
    def moveDown(self):
        self.demandMove=-1
        self.updateGUI()
        state=self.motor.getState()
        if state==self.motor.READY:
            if self['invertButtons']:
                self.reallyMoveUp()
            else:
                self.reallyMoveDown()

    def reallyMoveUp(self):
        """Perform one relative move upwards by 'delta' (or the motor's GUI step)."""
        if self['delta']!="":
            s=float(self['delta'])
        else:
            try:
                s=self.motor.GUIstep
            except:
                s=1.0
        if self.motor is not None:
            if self.motor.isReady():
                self.motor.moveRelative(s)

    def reallyMoveDown(self):
        """Perform one relative move downwards by 'delta' (or the motor's GUI step)."""
        if self['delta']!="":
            s=float(self['delta'])
        else:
            try:
                s=self.motor.GUIstep
            except:
                s=1.0
        if self.motor is not None:
            if self.motor.isReady():
                self.setSpinBoxColor(self.motor.READY)
                self.motor.moveRelative(-s)

    # Force an update on the brick interface
    def updateGUI(self):
        if self.motor is not None:
            self.containerBox.setEnabled(True)
            try:
                if self.motor.isReady():
                    self.limitsChanged(self.motor.getLimits())
                    self.positionChanged(self.motor.getPosition())
                    self.stateChanged(self.motor.getState())
            except:
                # Any failure talking to the hardware marks it unusable.
                if self.motor:
                    self.stateChanged(self.motor.UNUSABLE)
                else:
                    pass
        else:
            self.containerBox.setEnabled(False)

    # Set the limits for the spin box
    def limitsChanged(self,limits):
        # Block signals so setting the range does not trigger a move.
        self.spinBox.blockSignals(True)
        self.spinBox.setMinValue(limits[0])
        self.spinBox.setMaxValue(limits[1])
        self.spinBox.blockSignals(False)
        self.setToolTip(limits=limits)

    def openHistoryMenu(self):
        """Slot: right-click on the spin box; show the position history."""
        menu=QPopupMenu(self)
        menu.insertItem(QLabel('<nobr><b>%s history</b></nobr>' % self.motor.userName(), menu))
        menu.insertSeparator()
        for i in range(len(self.posHistory)):
            menu.insertItem(self.posHistory[i],i)
        menu.popup(QCursor.pos())
        QObject.connect(menu,SIGNAL('activated(int)'),self.goToHistoryPos)

    def goToHistoryPos(self,id):
        """Slot: move the motor to the selected history position."""
        pos=self.posHistory[id]
        self.motor.move(float(pos))

    def updateHistory(self,pos):
        """Prepend a position to the history, bounded by MAX_HISTORY."""
        pos=str(pos)
        if pos not in self.posHistory:
            if len(self.posHistory)==MotorSpinBoxBrick.MAX_HISTORY:
                del self.posHistory[-1]
            self.posHistory.insert(0,pos)

    # Opens a dialog to change the motor step
    def openStepEditor(self):
        if self.isRunning():
            if self.stepEditor is None:
                self.stepEditor=StepEditorDialog(self)
                icons_list=self['icons'].split()
                try:
                    self.stepEditor.setIcons(icons_list[4],icons_list[5])
                except IndexError:
                    pass
            self.stepEditor.setMotor(self.motor,self,self['label'],self['defaultStep'])
            # Match the dialog's font size to the brick's.
            s=self.font().pointSize()
            f = self.stepEditor.font()
            f.setPointSize(s)
            self.stepEditor.setFont(f)
            self.stepEditor.updateGeometry()
            self.stepEditor.show()
            self.stepEditor.setActiveWindow()
            self.stepEditor.raiseW()

    # Updates the spin box when the motor moves
    def positionChanged(self,newPosition):
        self.spinBox.setValue(float(newPosition))

    def setSpinBoxColor(self,state):
        """Colour the spin box editor according to the motor state."""
        color=MotorSpinBoxBrick.STATE_COLORS[state]
        self.spinBox.setEditorBackgroundColor(color)

    # Enables/disables the interface when the motor changes state
    def stateChanged(self,state):
        #if self.demandMove==0:
        self.setSpinBoxColor(state)
        if state==self.motor.MOVESTARTED:
            self.updateHistory(self.motor.getPosition())
        if state==self.motor.READY:
            # A move button is still pressed: chain the next relative move.
            if self.demandMove==1:
                if self['invertButtons']:
                    self.reallyMoveDown()
                else:
                    self.reallyMoveUp()
                return
            elif self.demandMove==-1:
                if self['invertButtons']:
                    self.reallyMoveUp()
                else:
                    self.reallyMoveDown()
                return
            self.spinBox.setMoving(False)
            self.stopButton.setEnabled(False)
            self.moveLeftButton.setEnabled(True)
            self.moveRightButton.setEnabled(True)
        elif state in (self.motor.NOTINITIALIZED, self.motor.UNUSABLE):
            self.spinBox.setEnabled(False)
            self.stopButton.setEnabled(False)
            self.moveLeftButton.setEnabled(False)
            self.moveRightButton.setEnabled(False)
        elif state in (self.motor.MOVING, self.motor.MOVESTARTED):
            self.stopButton.setEnabled(True)
            self.spinBox.setMoving(True)
        elif state==self.motor.ONLIMIT:
            self.spinBox.setEnabled(True)
            self.stopButton.setEnabled(False)
            self.moveLeftButton.setEnabled(True)
            self.moveRightButton.setEnabled(True)
        self.setToolTip(state=state)

    # Move the motor one step up
    def stepUp(self):
        if self.motor is not None:
            if self.motor.isReady():
                self.motor.moveRelative(self.spinBox.lineStep())

    # Move the motor one step down
    def stepDown(self):
        if self.motor is not None:
            if self.motor.isReady():
                self.motor.moveRelative(-self.spinBox.lineStep())

    # Moves the motor when the spin box text is changed
    def valueChangedInt(self,value):
        self.updateGUI()
        if self.motor is not None:
            self.motor.move(value)

    # Moves the motor when the spin box text is changed
    def valueChangedStr(self): #,value):
        if self.motor is not None:
            self.motor.move(float(str(self.spinBox.editor().text())))

    # Updates the tooltip in the correct widgets
    def setToolTip(self,name=None,state=None,limits=None):
        states=("NOTINITIALIZED","UNUSABLE","READY","MOVESTARTED","MOVING","ONLIMIT")
        if name is None:
            name=self['mnemonic']
        if self.motor is None:
            tip="Status: unknown motor "+name
        else:
            try:
                if state is None:
                    state=self.motor.getState()
            except:
                logging.exception("%s: could not get motor state", self.name())
                state=self.motor.UNUSABLE
            try:
                if limits is None and self.motor.isReady():
                    limits=self.motor.getLimits()
            except:
                logging.exception("%s: could not get motor limits", self.name())
                limits=None
            try:
                state_str=states[state]
            except IndexError:
                state_str="UNKNOWN"
            limits_str=""
            if limits is not None:
                l_bot=self['formatString'] % float(limits[0])
                l_top=self['formatString'] % float(limits[1])
                limits_str=" Limits:%s,%s" % (l_bot,l_top)
            tip="State:"+state_str+limits_str
        QToolTip.add(self.label,tip)
        if not self['showBox']:
            tip=""
        QToolTip.add(self.containerBox,tip)

    def setLabel(self,label):
        """Place the label text either in the group-box title or in the QLabel."""
        if not self['showLabel']:
            label=None
        if label is None:
            self.labelBox.hide()
            self.containerBox.setTitle("")
            return
        if label=="":
            if self.motor is not None:
                label=self.motor.username
        if self['showBox']:
            self.labelBox.hide()
            self.containerBox.setTitle(label)
        else:
            if label!="":
                label+=": "
            self.containerBox.setTitle("")
            self.label.setText(label)
            self.labelBox.show()

    def setMotor(self,motor,motor_ho_name=None):
        """Attach a (new) motor hardware object and resynchronise the GUI."""
        if self.motor is not None:
            self.disconnect(self.motor,PYSIGNAL('limitsChanged'),self.limitsChanged)
            self.disconnect(self.motor,PYSIGNAL('positionChanged'),self.positionChanged)
            self.disconnect(self.motor,PYSIGNAL('stateChanged'),self.stateChanged)
        if motor_ho_name is not None:
            motor=self.getHardwareObject(motor_ho_name)
        if self.motor is None:
            # first time motor is set
            try:
                s=float(self['defaultStep'])
            except:
                try:
                    s=motor.GUIstep
                except:
                    s=1.0
            self.setLineStep(s)
        self.motor = motor
        if self.motor is not None:
            self.connect(self.motor,PYSIGNAL('limitsChanged'),self.limitsChanged)
            self.connect(self.motor,PYSIGNAL('positionChanged'),self.positionChanged,instanceFilter=True)
            self.connect(self.motor,PYSIGNAL('stateChanged'),self.stateChanged,instanceFilter=True)
        self.posHistory=[]
        self.updateGUI()
        # Re-apply the label (it may show the motor's user name now).
        self['label']=self['label']
        #self['defaultStep']=self['defaultStep']

    def propertyChanged(self,propertyName,oldValue,newValue):
        """Framework callback: react to property edits from the GUI builder."""
        if propertyName=='mnemonic':
            self.setMotor(self.motor,newValue)
        elif propertyName=='formatString':
            if self.motor is not None:
                self.updateGUI()
        elif propertyName=='label':
            self.setLabel(newValue)
        elif propertyName=='showLabel':
            if newValue:
                self.setLabel(self['label'])
            else:
                self.setLabel(None)
        elif propertyName=='showMoveButtons':
            if newValue:
                self.moveLeftButton.show()
                self.moveRightButton.show()
            else:
                self.moveLeftButton.hide()
                self.moveRightButton.hide()
        elif propertyName=='showStop':
            if newValue:
                self.stopButton.show()
            else:
                self.stopButton.hide()
        elif propertyName=='showStep':
            if newValue:
                self.stepButton.show()
            else:
                self.stepButton.hide()
        elif propertyName=='showStepList':
            if newValue:
                self.stepList.show()
            else:
                self.stepList.hide()
        elif propertyName=='showPosition':
            if newValue:
                self.spinBox.show()
            else:
                self.spinBox.hide()
        elif propertyName=='showBox':
            if newValue:
                self.containerBox.setFrameShape(self.containerBox.GroupBoxPanel)
                self.containerBox.setInsideMargin(4)
                self.containerBox.setInsideSpacing(0)
            else:
                self.containerBox.setFrameShape(self.containerBox.NoFrame)
                self.containerBox.setInsideMargin(0)
                self.containerBox.setInsideSpacing(0)
            self.setLabel(self['label'])
        elif propertyName=='icons':
            icons_list=newValue.split()
            try:
                self.moveLeftButton.setPixmap(Icons.load(icons_list[0]))
                self.moveRightButton.setPixmap(Icons.load(icons_list[1]))
                self.stopButton.setPixmap(Icons.load(icons_list[2]))
                self.setStepButtonIcon(icons_list[3])
            except IndexError:
                pass
        elif propertyName=='helpDecrease':
            if newValue=="":
                QToolTip.add(self.moveLeftButton,"Moves the motor down (while pressed)")
            else:
                QToolTip.add(self.moveLeftButton,newValue)
        elif propertyName=='helpIncrease':
            if newValue=="":
                QToolTip.add(self.moveRightButton,"Moves the motor up (while pressed)")
            else:
                QToolTip.add(self.moveRightButton,newValue)
        elif propertyName=='decimalPlaces':
            try:
                dec_places=int(newValue)
            except ValueError:
                dec_places=2
            self.spinBox.setDecimalPlaces(dec_places)
        elif propertyName=='defaultStep':
            if newValue!="":
                self.setLineStep(float(newValue))
        else:
            BaseComponents.BlissWidget.propertyChanged(self,propertyName,oldValue,newValue)
###
### Auxiliary class for a floating-point spinbox
###
class mySpinBox(QSpinBox):
    """QSpinBox variant working with floating-point values.

    A float v is stored internally as the scaled integer
    round(v * 10**decimalPlaces) and converted back for display. Custom
    PYSIGNALs ('stepUp', 'stepDown', 'contextMenu') let the brick turn
    the arrow buttons into relative motor moves and add a history menu.
    """
    # Editor background while a typed value has not been sent to the motor.
    CHANGED_COLOR = QColor(255,165,0)

    def __init__(self,parent):
        QSpinBox.__init__(self,parent)
        self.decimalPlaces=1    # digits after the decimal point (scaling power)
        self.__moving = False   # True while the motor moves (editor disabled)
        self.colorGroupDict={}  # QColorGroup cache keyed by colour RGB value
        self.setValidator(QDoubleValidator(self))
        self.editor().setAlignment(QWidget.AlignRight)
        QObject.connect(self.editor(),SIGNAL("textChanged(const QString &)"),self.textChanged)
        self.rangeChange()
        self.updateDisplay()

    def setMoving(self, moving):
        """Disable the widget while the motor is moving."""
        self.setEnabled(not moving)
        self.__moving = moving

    def textChanged(self):
        """Highlight the editor when the user edits the value by hand."""
        if self.__moving:
            return
        else:
            self.setEditorBackgroundColor(mySpinBox.CHANGED_COLOR)

    def i2d(self, v):
        """Internal scaled integer -> float."""
        return v/math.pow(10, self.decimalPlaces)

    def d2i(self, v):
        """Float -> internal scaled integer (rounded to nearest)."""
        d=1 if v >= 0 else -1
        return int(d*0.5+math.pow(10, self.decimalPlaces)*v)

    def rangeChange(self):
        # Keep the validator's range in sync with the (float) min/max.
        self.validator().setRange(self.minValue(), self.maxValue(), self.decimalPlaces)
        return QSpinBox.rangeChange(self)

    def setValue(self, value):
        # Floats are converted to the scaled-integer representation;
        # anything else is first converted to a float (one recursion step).
        if type(value)==type(0.0):
            return QSpinBox.setValue(self, self.d2i(value))
        else:
            return self.setValue(self.i2d(value))

    def value(self):
        return self.i2d(QSpinBox.value(self))

    def stepUp(self):
        # Overridden: emit a signal instead of changing the value directly.
        self.emit(PYSIGNAL("stepUp"), ())

    def stepDown(self):
        self.emit(PYSIGNAL("stepDown"), ())

    def setDecimalPlaces(self,places):
        """Change the scaling factor while preserving range, value and step."""
        minval = self.minValue()
        maxval = self.maxValue()
        val = self.value()
        ls = self.lineStep()
        self.decimalPlaces=places
        self.setMaxValue(maxval)
        self.setMinValue(minval)
        self.setValue(val)
        self.setLineStep(ls)
        self.rangeChange()
        self.updateDisplay()

    def setMinValue(self, value):
        value = self.d2i(value)
        # Clamp so the scaled value cannot overflow Qt's integer range.
        if math.fabs(value) > 1E8:
            value = int(math.copysign(1E8, value))
        return QSpinBox.setMinValue(self, value)

    def setMaxValue(self, value):
        value = self.d2i(value)
        # Clamp so the scaled value cannot overflow Qt's integer range.
        if math.fabs(value) > 1E8:
            value = int(math.copysign(1E8, value))
        return QSpinBox.setMaxValue(self, value)

    def minValue(self):
        return self.i2d(QSpinBox.minValue(self))

    def maxValue(self):
        return self.i2d(QSpinBox.maxValue(self))

    def decimalPlaces(self):
        # NOTE(review): shadowed by the instance attribute of the same name
        # set in __init__, so this method is unreachable on instances.
        return self.decimalPlaces

    def mapValueToText(self,value):
        # Format the scaled integer as a float with decimalPlaces digits.
        frmt="%."+"%df" % self.decimalPlaces
        return QString(frmt % self.i2d(value))

    def mapTextToValue(self):
        t = str(self.text())
        try:
            return (self.d2i(float(t)), True)
        except:
            return (0, False)

    def lineStep(self):
        return self.i2d(QSpinBox.lineStep(self))

    def setLineStep(self,step):
        return QSpinBox.setLineStep(self, self.d2i(step))

    def eventFilter(self,obj,ev):
        # Right-click opens the history menu instead of Qt's default menu.
        if isinstance(ev,QContextMenuEvent):
            self.emit(PYSIGNAL("contextMenu"),())
            return True
        else:
            return QSpinBox.eventFilter(self,obj,ev)

    def setEditorBackgroundColor(self,color):
        """Set the editor background, caching one QColorGroup per colour."""
        editor=self.editor()
        editor.setPaletteBackgroundColor(color)
        spinbox_palette=editor.palette()
        try:
            cg=self.colorGroupDict[color.rgb()]
        except KeyError:
            cg=QColorGroup(spinbox_palette.disabled())
            cg.setColor(cg.Background,color)
            self.colorGroupDict[color.rgb()]=cg
        spinbox_palette.setDisabled(cg)
###
### Dialog box to change the motor step
###
class StepEditorDialog(QDialog):
    """Modeless dialog to display and change a brick's motor step size."""

    def __init__(self,parent):
        QDialog.__init__(self,parent,'',False)
        # Main group with the current step (read-only) and the new step entry.
        self.contentsBox=QHGroupBox('Motor step',self)
        box2=QHBox(self)
        grid1=QWidget(self.contentsBox)
        QGridLayout(grid1, 2, 3, 0, 2)
        label1=QLabel("Current:",grid1)
        grid1.layout().addWidget(label1, 0, 0)
        self.currentStep=myLineEdit(grid1)
        grid1.layout().addMultiCellWidget(self.currentStep, 0, 0,1,2)
        label2=QLabel("Set to:",grid1)
        grid1.layout().addWidget(label2, 1, 0)
        self.newStep=QLineEdit(grid1)
        self.newStep.setAlignment(QWidget.AlignRight)
        self.newStep.setValidator(QDoubleValidator(self))
        QObject.connect(self.newStep,SIGNAL('returnPressed()'),self.applyClicked)
        grid1.layout().addWidget(self.newStep, 1, 1)
        self.applyButton=QPushButton("Apply",grid1)
        grid1.layout().addWidget(self.applyButton, 1, 2)
        QObject.connect(self.applyButton,SIGNAL('clicked()'),self.applyClicked)
        self.closeButton=QToolButton(box2)
        self.closeButton.setTextLabel("Dismiss")
        self.closeButton.setUsesTextLabel(True)
        self.closeButton.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        QObject.connect(self.closeButton,SIGNAL("clicked()"),self.accept)
        HorizontalSpacer(box2)
        QVBoxLayout(self)
        self.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)
        self.layout().addWidget(self.contentsBox)
        self.layout().addWidget(box2)

    def setMotor(self,motor,brick,name,default_step):
        """Point the dialog at a motor/brick pair and refresh the display."""
        self.motor=motor
        self.brick=brick
        if name is None or name=="":
            name=motor.userName()
        self.setCaption(name)
        self.contentsBox.setTitle('%s step' % name)
        self.currentStep.setText(str(brick.getLineStep()))

    def applyClicked(self):
        """Parse the typed value and apply it as the brick's new step."""
        try:
            val=float(str(self.newStep.text()))
        except ValueError:
            # Not a number: ignore silently (validator normally prevents this).
            return
        self.brick.setLineStep(val)
        self.newStep.setText('')
        self.currentStep.setText(str(val))

    def setIcons(self,apply_icon,dismiss_icon):
        """Set custom icons on the Apply and Dismiss buttons."""
        self.applyButton.setPixmap(Icons.load(apply_icon))
        self.closeButton.setPixmap(Icons.load(dismiss_icon))
class myLineEdit(QLineEdit):
    """Read-only line edit that keeps black text while disabled."""

    def __init__(self,parent):
        QLineEdit.__init__(self,parent)
        palette=self.palette()
        # Force the disabled colour group to use black text so the value
        # stays readable even though the widget is permanently disabled.
        self.disabledCG=QColorGroup(palette.disabled())
        self.disabledCG.setColor(QColorGroup.Text,QWidget.black)
        self.setEnabled(False)
        self.setAlignment(QWidget.AlignRight)
        palette.setDisabled(self.disabledCG)
class myComboBox(QComboBox):
    """Editable combo box that highlights its editor while being edited."""
    # Editor background while the typed step has not been applied yet.
    CHANGED_COLOR = QColor(255,165,0)

    def __init__(self,*args):
        QComboBox.__init__(self,*args)
        self.setEditable(True)
        QObject.connect(self,SIGNAL('activated(int)'),self.stepChanged)
        QObject.connect(self,SIGNAL('textChanged(const QString &)'),self.stepEdited)

    def sizeHint(self):
        # Widen the default hint by 10% so step values are not clipped.
        hint=QComboBox.sizeHint(self)
        hint.setWidth(1.10*hint.width())
        return hint

    def stepEdited(self,step):
        """Slot: text typed by hand -> show the 'changed' colour."""
        self.setEditorBackgroundColor(myComboBox.CHANGED_COLOR)

    def stepChanged(self,step):
        """Slot: an item was selected -> restore the normal colour."""
        self.setEditorBackgroundColor(QWidget.white)

    def setEditorBackgroundColor(self,color):
        editor=self.lineEdit()
        editor.setPaletteBackgroundColor(color)
###
### Auxiliary class for positioning
###
class HorizontalSpacer(QWidget):
    """Empty widget that expands horizontally to push siblings apart."""

    def __init__(self,*args):
        QWidget.__init__(self,*args)
        self.setSizePolicy(QSizePolicy.Expanding,QSizePolicy.Fixed)
|
13,723 | c8ebb2547db3da81d6b48f125760fcf9460bbf1c | import sys
input = sys.stdin.readline
from collections import deque
# Offsets for the four moves: up, down, left, right
dx = [-1, 1, 0, 0]
dy = [0, 0, -1, 1]
# Read the grid dimensions n (rows) and m (columns)
n, m = map(int, input().split())
# Read the maze rows ('1' marks a walkable cell)
maze = []
for _ in range(n):
    maze.append(list(input().rstrip()))
# Visited matrix for the BFS
visited = [[False] * m for _ in range(n)]
# Create the queue and enqueue the start cell with path length 1
q = deque()
q.append((0, 0, 1))
visited[0][0] = True
# BFS over the grid
while q:
    # Pop the next cell; t is the path length so far (start counts as 1)
    x, y, t = q.popleft()
    # Stop as soon as the goal (bottom-right) cell is dequeued
    if x == n - 1 and y == m - 1:
        break
    # Explore the four neighbouring cells
    for d in range(4):
        # Neighbour coordinates
        nx = x + dx[d]
        ny = y + dy[d]
        # Skip positions outside the maze
        if nx < 0 or nx >= n or ny < 0 or ny >= m:
            continue
        # Skip cells already visited
        if visited[nx][ny]:
            continue
        # Enqueue walkable cells with an incremented path length
        if maze[nx][ny] == '1':
            q.append((nx, ny, t + 1))
            visited[nx][ny] = True
# Print the shortest path length to the goal
# NOTE(review): if the goal is unreachable this prints the length of the
# last dequeued cell instead of an error value — confirm this is intended.
print(t)
|
13,724 | 70bb4a3398cb0e1255d3f5bad1ad3d26feb9de83 | import sys
import numpy as np
import matplotlib.pyplot as plt
import plots as P
# Global epidemiological parameters shared by the SIRS right-hand-side
# functions (fS, fI, fR) and returned by RK4.
e = 0.25 # Birth rate
d = 0.2 # Death rate
dI = 0.35 # Death rate of infected people due to the disease
f = 0.5 # Vaccination rate
# Runge-Kutta 4
# ----------------------------------------------------------------------------
def _seasonal_rate(a0, time):
    """Seasonally varying transmission rate a(t) = A*cos(omega*t) + a0.

    A = 4 is the maximum deviation from the average rate a0; omega = 0.5
    puts the maximum at the beginning and end of the year (winter).
    """
    A = 4
    omega = 0.5
    return A * np.cos(omega * time) + a0


def _rk4_step2(fx, fy, a, b, c, N, xi, yi, dt):
    """One RK4 step of the reduced two-variable (S, I) system.

    Returns the increments (dx, dy); R is then recovered as N - S - I.
    """
    kx1 = dt * fx(a, b, c, N, xi, yi)
    ky1 = dt * fy(a, b, c, N, xi, yi)
    kx2 = dt * fx(a, b, c, N, xi + kx1 / 2, yi + ky1 / 2)
    ky2 = dt * fy(a, b, c, N, xi + kx1 / 2, yi + ky1 / 2)
    kx3 = dt * fx(a, b, c, N, xi + kx2 / 2, yi + ky2 / 2)
    ky3 = dt * fy(a, b, c, N, xi + kx2 / 2, yi + ky2 / 2)
    kx4 = dt * fx(a, b, c, N, xi + kx3, yi + ky3)
    ky4 = dt * fy(a, b, c, N, xi + kx3, yi + ky3)
    return ((kx1 + 2 * (kx2 + kx3) + kx4) / 6,
            (ky1 + 2 * (ky2 + ky3) + ky4) / 6)


def _rk4_step3(fx, fy, fz, a, b, c, N, xi, yi, zi, dt, **flags):
    """One RK4 step of the full three-variable (S, I, R) system.

    ``flags`` is forwarded to the RHS functions (vital=True, vaccine=True
    or combined=True). Returns the increments (dx, dy, dz).

    BUGFIX: the original loops passed ``z + ky1/2`` (instead of kz1/2) as
    the z midpoint of the second stage — a copy-paste typo fixed here.
    """
    kx1 = dt * fx(a, b, c, N, xi, yi, zi, **flags)
    ky1 = dt * fy(a, b, c, N, xi, yi, zi, **flags)
    kz1 = dt * fz(a, b, c, N, xi, yi, zi, **flags)
    kx2 = dt * fx(a, b, c, N, xi + kx1 / 2, yi + ky1 / 2, zi + kz1 / 2, **flags)
    ky2 = dt * fy(a, b, c, N, xi + kx1 / 2, yi + ky1 / 2, zi + kz1 / 2, **flags)
    kz2 = dt * fz(a, b, c, N, xi + kx1 / 2, yi + ky1 / 2, zi + kz1 / 2, **flags)
    kx3 = dt * fx(a, b, c, N, xi + kx2 / 2, yi + ky2 / 2, zi + kz2 / 2, **flags)
    ky3 = dt * fy(a, b, c, N, xi + kx2 / 2, yi + ky2 / 2, zi + kz2 / 2, **flags)
    kz3 = dt * fz(a, b, c, N, xi + kx2 / 2, yi + ky2 / 2, zi + kz2 / 2, **flags)
    kx4 = dt * fx(a, b, c, N, xi + kx3, yi + ky3, zi + kz3, **flags)
    ky4 = dt * fy(a, b, c, N, xi + kx3, yi + ky3, zi + kz3, **flags)
    kz4 = dt * fz(a, b, c, N, xi + kx3, yi + ky3, zi + kz3, **flags)
    return ((kx1 + 2 * (kx2 + kx3) + kx4) / 6,
            (ky1 + 2 * (ky2 + ky3) + ky4) / 6,
            (kz1 + 2 * (kz2 + kz3) + kz4) / 6)


def RK4(a_in, b, c, x0, y0, z0, N, T, n, fx, fy, fz=None, Basic=False, Vital=False, Season=False, Vaccine=False, CombinedModel=False):
    """4th Order Runge-Kutta method (RK4) for the SIRS model variants.

    Solves the coupled S/I/R system over ``n`` steps spanning time ``T``.
    The boolean flags select the model variant (they are checked
    independently, as in the original; normally exactly one is True):

    Basic         : plain SIRS (R follows from N - S - I).
    Vital         : include birth and death rates (full 3-variable system).
    Season        : transmission rate varies with time, a(t).
    Vaccine       : vaccinations start after T/2.
    CombinedModel : vital dynamics + seasonal variation (+ vaccines after T/2).

    Parameters
    ----------
    a_in, b, c : float - transmission, recovery and immunity-loss rates.
    x0, y0, z0 : float - initial S, I and R populations.
    N : float - total population.  T : float - total time.  n : int - steps.
    fx, fy, fz : callables - right-hand sides dS/dt, dI/dt, dR/dt.

    Returns
    -------
    x, y, z, t : ndarrays - S, I, R and time values.
    f : float - the module-level vaccination rate (kept in the return
        tuple for backward compatibility with existing callers).
    """
    x = np.zeros(n)
    y = np.zeros(n)
    z = np.zeros(n)
    t = np.zeros(n)
    dt = T / n  # size of time step
    x[0] = x0
    y[0] = y0
    z[0] = z0

    if Basic:  # basic SIRS model
        for i in range(n - 1):
            dx_, dy_ = _rk4_step2(fx, fy, a_in, b, c, N, x[i], y[i], dt)
            x[i + 1] = x[i] + dx_
            y[i + 1] = y[i] + dy_
            # BUGFIX: use the *updated* S and I; the original used x[i], y[i],
            # leaving R one step behind the conservation law R = N - S - I.
            z[i + 1] = N - x[i + 1] - y[i + 1]
            t[i + 1] = t[i] + dt

    if Vital:  # vital dynamics (birth/death rates)
        for i in range(n - 1):
            dx_, dy_, dz_ = _rk4_step3(fx, fy, fz, a_in, b, c, N,
                                       x[i], y[i], z[i], dt, vital=True)
            x[i + 1] = x[i] + dx_
            y[i + 1] = y[i] + dy_
            z[i + 1] = z[i] + dz_
            t[i + 1] = t[i] + dt

    if Season:  # seasonal variation of the transmission rate
        for i in range(n - 1):
            a = _seasonal_rate(a_in, t[i])
            dx_, dy_ = _rk4_step2(fx, fy, a, b, c, N, x[i], y[i], dt)
            x[i + 1] = x[i] + dx_
            y[i + 1] = y[i] + dy_
            z[i + 1] = N - x[i + 1] - y[i + 1]
            t[i + 1] = t[i] + dt

    if Vaccine:  # vaccinations are introduced after T/2
        t_v = T / 2
        for i in range(n - 1):
            if t[i] >= t_v:
                dx_, dy_, dz_ = _rk4_step3(fx, fy, fz, a_in, b, c, N,
                                           x[i], y[i], z[i], dt, vaccine=True)
                x[i + 1] = x[i] + dx_
                y[i + 1] = y[i] + dy_
                z[i + 1] = z[i] + dz_
            else:
                # Before vaccination: plain SIRS dynamics.
                dx_, dy_ = _rk4_step2(fx, fy, a_in, b, c, N, x[i], y[i], dt)
                x[i + 1] = x[i] + dx_
                y[i + 1] = y[i] + dy_
                z[i + 1] = N - x[i + 1] - y[i + 1]
            t[i + 1] = t[i] + dt

    if CombinedModel:  # vital + seasonal (+ vaccines after T/2)
        t_v = T / 2
        for i in range(n - 1):
            a = _seasonal_rate(a_in, t[i])
            flags = {'combined': True} if t[i] >= t_v else {'vital': True}
            dx_, dy_, dz_ = _rk4_step3(fx, fy, fz, a, b, c, N,
                                       x[i], y[i], z[i], dt, **flags)
            x[i + 1] = x[i] + dx_
            y[i + 1] = y[i] + dy_
            z[i + 1] = z[i] + dz_
            t[i + 1] = t[i] + dt

    return x, y, z, t, f
def fS(a, b, c, N, S, I, R=None, vital=False, vaccine=False, combined=False):
    """Right hand side of S' = dS/dt.

    Covers the basic SIRS model plus the vital-dynamics, vaccine and
    combined variants (uses the module-level rates d, e, f for those).
    """
    infection = a * S * I / N  # new infections per unit time
    if vital:
        return c * R - infection - d * S + e * N
    if vaccine:
        R = N - S - I
        return c * R - infection - f * S
    if combined:
        return c * R - infection - d * S + e * N - f * S
    # Basic SIRS: R is implied by the conservation law N = S + I + R.
    return c * (N - S - I) - infection
def fI(a, b, c, N, S, I, R=None, vital=False, vaccine=False, combined=False):
"""Right hand side of I' = dI/dt
For basic SIRS, with vital dynamics,
seasonal variation, vaccine and a combined model
"""
if vital:
temp = a*S*I/N - b*I - d*I - dI*I
elif vaccine:
temp = a*S*I/N - b*I
elif combined:
temp = a*S*I/N - b*I - d*I - dI*I
else:
temp = a*S*I/N - b*I
return temp
def fR(a, b, c, N, S, I, R, vital=False, vaccine=False, combined=False):
    """Right-hand side of R' = dR/dt for the SIRS model family.

    NOTE: the vital/vaccine/combined branches read the module-level
    rates d and f.  The basic model returns 0 because R is tracked
    implicitly through R = N - S - I.
    """
    if vital:
        return b*I - c*R - d*R
    if vaccine:
        # R recomputed from conservation; f is the vaccination rate
        recovered = N - S - I
        return b*I - c*recovered + f*S
    if combined:
        return b*I - c*R - d*R + f*S
    return 0
# Monte Carlo
# ----------------------------------------------------------------------------
def MC(a_in, b, c, S_0, I_0, R_0, N, T, vitality=False, seasonal=False, vaccine=False):
    """Disease modelling using Monte-Carlo.

    This function uses randomness and transition probabilities
    as a basis for the disease modelling.
    Additional parameters not explained below, are described in
    the main.py program located in the same folder as this file.

    NOTE(review): the module-level rates d, dI, e and f are read as
    globals, and f is also returned -- confirm the driver script defines
    them before calling.

    Parameters
    ----------
    vitality : boolean
        if True: vital dynamics - include birth and death rates.
    seasonal : boolean
        if True: seasonal variation included, meaning the
        transmission rate `a` is now a function of time a(t).
    vaccine : boolean
        if True: vaccines - introduce vaccinations after T/2.

    Returns
    -------
    S, I, R : ndarrays
        number of susceptibles, infected and recovered
        over a certain time period.
    t : ndarray
        the time values
    f : float
        the module-level vaccination rate, returned for convenience.
    """
    if seasonal:
        a0 = a_in    # average transmission rate
        A = 4        # max. deviation from a0
        omega = 0.5  # frequency of oscillation
        a = A*np.cos(omega*0) + a0
    else:
        a = a_in
    # Size of time step, chosen so each transition probability stays < 1
    dt = np.min([4/(a*N), 1/(b*N), 1/(c*N)])
    # Nr of time steps
    N_time = int(T/dt)
    # Set up and initialise arrays
    S = np.zeros(N_time)
    I = np.zeros_like(S)
    R = np.zeros_like(S)
    t = np.zeros_like(S)
    S[0] = S_0
    I[0] = I_0
    R[0] = R_0
    t[0] = 0
    # time loop
    for i in range(N_time - 1):
        if seasonal:
            # a0, A and omega were set once above; only a(t) changes
            a = A*np.cos(omega*t[i]) + a0
        else:
            a = a_in
        S[i+1] = S[i]
        I[i+1] = I[i]
        R[i+1] = R[i]
        # One shared random draw per step for the three SIRS transitions.
        # NOTE(review): a single draw correlates the three transitions;
        # independent draws per transition may have been intended.
        rdm = np.random.random()
        # S to I
        r_SI = rdm
        if r_SI < (a*S[i]*I[i]*dt/N):
            S[i+1] -= 1
            I[i+1] += 1
        # I to R
        r_IR = rdm
        if r_IR < (b*I[i]*dt):
            I[i+1] -= 1
            R[i+1] += 1
        # R to S
        r_RS = rdm
        if r_RS < (c*R[i]*dt):
            R[i+1] -= 1
            S[i+1] += 1
        if vitality:
            rdm1 = np.random.random()  # shared draw for vital dynamics
            # death rate d in general population S, I and R
            r_dS = rdm1
            if r_dS < (d*S[i]*dt):  # d*S*dt: probability of 1 individ. dying in S
                S[i+1] -= 1
            r_dI = rdm1
            if r_dI < (d*I[i]*dt):  # fix: previously tested r_dS by mistake
                I[i+1] -= 1
            r_dR = rdm1
            if r_dR < (d*R[i]*dt):
                R[i+1] -= 1
            # death rate dI for infected population I
            r_dII = rdm1
            if r_dII < (dI*I[i]*dt):
                I[i+1] -= 1
            # birth rate e for general population S, I and R
            r_eS = rdm1
            if r_eS < (e*S[i]*dt):  # e*S*dt: probability of 1 individ. born in S
                S[i+1] += 1
            r_eI = rdm1
            if r_eI < (e*I[i]*dt):  # fix: previously tested r_eS by mistake
                I[i+1] += 1
            r_eR = rdm1
            if r_eR < (e*R[i]*dt):
                R[i+1] += 1
        if vaccine:
            tv = T/2  # vaccination programme starts halfway through
            if t[i] >= tv:
                r_v = rdm
                if r_v < (f*S[i]*dt):  # f*S*dt: prob. of 1 individ. vaccinated
                    S[i+1] -= 1
                    R[i+1] += 1
        t[i+1] = t[i] + dt
    return S, I, R, t, f
|
13,725 | 1f2afc1df7ca80eea3ce4dd85cff7006ab0049b1 | """ Style utilities, templates, and defaults for syntax highlighting widgets.
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from colorsys import rgb_to_hls
from pygments.styles import get_style_by_name
from pygments.token import Token
#-----------------------------------------------------------------------------
# Constants
#-----------------------------------------------------------------------------
# The default light style sheet: black text on a white background.
default_light_style_template = '''
QPlainTextEdit, QTextEdit { background-color: %(bgcolor)s;
color: %(fgcolor)s ;
selection-background-color: %(select)s}
.error { color: red; }
.in-prompt { color: navy; }
.in-prompt-number { font-weight: bold; }
.out-prompt { color: darkred; }
.out-prompt-number { font-weight: bold; }
.inverted { background-color: %(fgcolor)s ; color:%(bgcolor)s;}
'''
default_light_style_sheet = default_light_style_template % dict(
bgcolor='white', fgcolor='black', select="#ccc")
default_light_syntax_style = 'default'
# The default dark style sheet: white text on a black background.
default_dark_style_template = '''
QPlainTextEdit, QTextEdit { background-color: %(bgcolor)s;
color: %(fgcolor)s ;
selection-background-color: %(select)s}
QFrame { border: 1px solid grey; }
.error { color: red; }
.in-prompt { color: lime; }
.in-prompt-number { color: lime; font-weight: bold; }
.out-prompt { color: red; }
.out-prompt-number { color: red; font-weight: bold; }
.inverted { background-color: %(fgcolor)s ; color:%(bgcolor)s;}
'''
default_dark_style_sheet = default_dark_style_template % dict(
bgcolor='black', fgcolor='white', select="#555")
default_dark_syntax_style = 'monokai'
# The default monochrome
default_bw_style_sheet = '''
QPlainTextEdit, QTextEdit { background-color: white;
color: black ;
selection-background-color: #cccccc}
.in-prompt-number { font-weight: bold; }
.out-prompt-number { font-weight: bold; }
.inverted { background-color: black ; color: white;}
'''
default_bw_syntax_style = 'bw'
def hex_to_rgb(color):
    """Convert a hex color string ('#abc', 'aabbcc', ...) to an
    (r, g, b) integer tuple, or False when it cannot be parsed."""
    digits = color[1:] if color.startswith('#') else color
    if len(digits) == 3:
        # expand shorthand 'abc' to 'aabbcc'
        digits = ''.join(ch * 2 for ch in digits)
    if len(digits) != 6:
        return False
    try:
        channels = tuple(int(digits[k:k + 2], 16) for k in range(0, 6, 2))
    except ValueError:
        return False
    return channels
def dark_color(color):
    """Check whether a color is 'dark'.

    Currently, this is simply whether the luminance is below 50%."""
    rgb = hex_to_rgb(color)
    if not rgb:
        # unparseable colors default to 'not dark'
        return False
    # rgb_to_hls is fed 0-255 channels, so the lightness it returns is
    # also on a 0-255 scale; 128 is the 50% threshold.
    return rgb_to_hls(*rgb)[1] < 128
def dark_style(stylename):
    """Guess whether the background of the style with name 'stylename'
    counts as 'dark'."""
    # Pygments styles expose their background as a hex color string.
    return dark_color(get_style_by_name(stylename).background_color)
def get_colors(stylename):
    """Construct the keys to be used building the base stylesheet
    from a template.

    Returns a dict with 'bgcolor', 'select' and 'fgcolor' entries,
    suitable for %-interpolation into the style templates above.
    """
    style = get_style_by_name(stylename)
    fgcolor = style.style_for_token(Token.Text)['color'] or ''
    if len(fgcolor) in (3, 6):
        # could be 'abcdef' or 'ace' hex, which needs '#' prefix
        try:
            int(fgcolor, 16)
        except ValueError:
            # Not valid hex (e.g. a named color) -- leave it untouched.
            # fix: int() raises ValueError, not TypeError, for a bad str
            # literal, so such values previously crashed this function.
            pass
        else:
            fgcolor = "#" + fgcolor
    return dict(
        bgcolor=style.background_color,
        select=style.highlight_color,
        fgcolor=fgcolor,
    )
def sheet_from_template(name, colors='lightbg'):
    """Use one of the base templates, and set bg/fg/select colors."""
    scheme = colors.lower()
    if scheme == 'nocolor':
        # monochrome sheet takes no colors at all
        return default_bw_style_sheet
    templates = {
        'lightbg': default_light_style_template,
        'linux': default_dark_style_template,
    }
    try:
        template = templates[scheme]
    except KeyError:
        raise KeyError("No such color scheme: %s" % scheme)
    return template % get_colors(name)
|
13,726 | e85aec69efad7cfd7b5406addb78befafab26a9d | import numpy as np
import pandas as pd
from sklearn import ensemble
from sklearn import metrics
from sklearn import model_selection
if __name__ == '__main__':
    # Load the mobile pricing training data and split features/target.
    df = pd.read_csv('../data/mobile-pricing/train.csv')
    y = df['price_range'].values
    X = df.drop('price_range', axis=1).values

    classifier = ensemble.RandomForestClassifier(n_jobs=-1)
    param_grid = {
        'n_estimators': [100, 200, 250, 300, 400, 500],
        'max_depth': [1, 2, 5, 7, 11, 15],
        'criterion': ['gini', 'entropy'],
    }

    # Exhaustive 5-fold cross-validated search over the parameter grid.
    model = model_selection.GridSearchCV(
        estimator=classifier,
        param_grid=param_grid,
        scoring='accuracy',
        verbose=1,
        n_jobs=-1,
        cv=5,
    )
    model.fit(X, y)

    print('Best score: {}'.format(model.best_score_))
    print('Best parameters set:')
    best_params = model.best_estimator_.get_params()
    for param in sorted(param_grid.keys()):
        print(f'\t{param}: {best_params[param]}')
|
13,727 | a72f5b9362c7c35133846c357b8e2e35dd78991b | #
# Author: Tiberiu Boros
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import dynet as dy
import numpy as np
class Encoder:
    """Attention-based sequence-to-sequence acoustic model built on DyNet.

    A stacked bidirectional LSTM encodes the phoneme/feature sequence and
    an attention LSTM decoder emits MGC (spectral) frames three at a time.
    Speaker identity is injected through a dedicated embedding.
    """

    def __init__(self, params, encodings, model=None, runtime=False):
        self.model = model
        self.params = params
        # Network hyper-parameters.
        self.PHONE_EMBEDDINGS_SIZE = 100
        self.SPEAKER_EMBEDDINGS_SIZE = 200
        self.ENCODER_SIZE = 200
        self.ENCODER_LAYERS = 2
        self.DECODER_SIZE = 400
        self.DECODER_LAYERS = 2
        self.MGC_PROJ_SIZE = 100
        self.encodings = encodings
        # Orthonormal initialisation only matters for training; at runtime
        # the weights are loaded from disk anyway.
        from models.utils import orthonormal_VanillaLSTMBuilder
        lstm_builder = orthonormal_VanillaLSTMBuilder
        if runtime:
            lstm_builder = dy.VanillaLSTMBuilder
        if self.model is None:
            self.model = dy.Model()
        self.trainer = dy.AdamTrainer(self.model, alpha=params.learning_rate)
        self.trainer.set_sparse_updates(True)
        self.trainer.set_clip_threshold(5.0)
        # Input embeddings (do not reorder parameter creation: the order
        # defines the on-disk layout used by store()/load()).
        self.phone_lookup = self.model.add_lookup_parameters((len(encodings.char2int), self.PHONE_EMBEDDINGS_SIZE))
        self.feature_lookup = self.model.add_lookup_parameters((len(encodings.context2int), self.PHONE_EMBEDDINGS_SIZE))
        self.speaker_lookup = self.model.add_lookup_parameters(
            (len(encodings.speaker2int), self.SPEAKER_EMBEDDINGS_SIZE))
        # Stacked BiLSTM encoder.
        self.encoder_fw = []
        self.encoder_bw = []
        self.encoder_fw.append(
            lstm_builder(1, self.PHONE_EMBEDDINGS_SIZE, self.ENCODER_SIZE, self.model))
        self.encoder_bw.append(
            lstm_builder(1, self.PHONE_EMBEDDINGS_SIZE, self.ENCODER_SIZE, self.model))
        for zz in range(1, self.ENCODER_LAYERS):
            self.encoder_fw.append(
                lstm_builder(1, self.ENCODER_SIZE * 2, self.ENCODER_SIZE, self.model))
            self.encoder_bw.append(
                lstm_builder(1, self.ENCODER_SIZE * 2, self.ENCODER_SIZE, self.model))
        # Decoder consumes [attention context, previous-frame projection,
        # speaker embedding].
        self.decoder = lstm_builder(self.DECODER_LAYERS,
                                    self.ENCODER_SIZE * 2 + self.MGC_PROJ_SIZE + self.SPEAKER_EMBEDDINGS_SIZE,
                                    self.DECODER_SIZE, self.model)
        # Output projections: three MGC frames per decoder step plus a
        # highway connection straight from the attention context.
        self.hid_w = self.model.add_parameters((500, self.DECODER_SIZE))
        self.hid_b = self.model.add_parameters((500))
        self.proj_w_1 = self.model.add_parameters((params.mgc_order, 500))
        self.proj_b_1 = self.model.add_parameters((params.mgc_order))
        self.proj_w_2 = self.model.add_parameters((params.mgc_order, 500))
        self.proj_b_2 = self.model.add_parameters((params.mgc_order))
        self.proj_w_3 = self.model.add_parameters((params.mgc_order, 500))
        self.proj_b_3 = self.model.add_parameters((params.mgc_order))
        self.highway_w = self.model.add_parameters(
            (params.mgc_order, self.ENCODER_SIZE * 2 + self.SPEAKER_EMBEDDINGS_SIZE))
        self.last_mgc_proj_w = self.model.add_parameters((self.MGC_PROJ_SIZE, self.params.mgc_order))
        self.last_mgc_proj_b = self.model.add_parameters((self.MGC_PROJ_SIZE))
        # Stop-token predictor: output below -0.5 means "stop generating".
        self.stop_w = self.model.add_parameters((1, self.DECODER_SIZE))
        self.stop_b = self.model.add_parameters((1))
        # Bahdanau-style additive attention.
        self.att_w1 = self.model.add_parameters((100, self.ENCODER_SIZE * 2 + self.SPEAKER_EMBEDDINGS_SIZE))
        self.att_w2 = self.model.add_parameters((100, self.DECODER_SIZE))
        self.att_v = self.model.add_parameters((1, 100))
        # Learned start symbols for the first decoder step.
        self.start_lookup = self.model.add_lookup_parameters((1, params.mgc_order))
        self.decoder_start_lookup = self.model.add_lookup_parameters(
            (1, self.ENCODER_SIZE * 2 + self.MGC_PROJ_SIZE + self.SPEAKER_EMBEDDINGS_SIZE))

    def _make_input(self, seq):
        """Embed a phone sequence, averaging any known context features
        into each phone embedding, and wrap it in START/STOP symbols."""
        x_list = [self.phone_lookup[self.encodings.char2int['START']]]
        for pi in seq:
            char_emb = self.phone_lookup[self.encodings.char2int[pi.char]]
            context = []
            for feature in pi.context:
                if feature in self.encodings.context2int:
                    context.append(self.feature_lookup[self.encodings.context2int[feature]])
            if len(context) == 0:
                x_list.append(char_emb)
            else:
                # average of the context-feature embeddings
                x_list.append(char_emb + dy.esum(context) * dy.scalarInput(1.0 / len(context)))
        x_list.append(self.phone_lookup[self.encodings.char2int['STOP']])
        return x_list

    def _get_speaker_embedding(self, seq):
        """Return the embedding of the first 'SPEAKER:' context feature in
        the sequence, or None when no speaker is annotated."""
        for entry in seq:
            for feature in entry.context:
                if feature.startswith('SPEAKER:'):
                    return self.speaker_lookup[self.encodings.speaker2int[feature]]
        return None

    def _predict(self, characters, gold_mgc=None, max_size=-1):
        """Run the encoder/decoder over one utterance.

        With gold_mgc given (training) the decoder is teacher-forced and
        stops after covering the target; without it (runtime) generation
        continues until the stop predictor fires, attention parks on the
        final input position, or a safeguard limit is reached.

        Returns (mgc outputs, stop outputs, attention vectors); three MGC
        frames are produced per attention/stop step.
        """
        runtime = gold_mgc is None
        mgc_index = 0
        output_mgc = []
        output_stop = []
        output_att = []
        last_mgc = self.start_lookup[0]
        # Encoder: stacked BiLSTM over the embedded input.
        x_input = self._make_input(characters)
        for lstm_fw, lstm_bw in zip(self.encoder_fw, self.encoder_bw):
            x_fw = lstm_fw.initial_state().transduce(x_input)
            x_bw = lstm_bw.initial_state().transduce(reversed(x_input))
            x_input = [dy.concatenate([fw, bw]) for fw, bw in zip(x_fw, reversed(x_bw))]
        x_speaker = self._get_speaker_embedding(characters)
        # Append the speaker embedding to every encoder state.
        encoder = [dy.concatenate([x, x_speaker]) for x in x_input]
        decoder = self.decoder.initial_state().add_input(self.decoder_start_lookup[0])
        last_att_pos = None
        if runtime:
            last_att_pos = 0
            stationed_count = 0
        while True:
            att, align = self._attend(encoder, decoder, last_att_pos)
            if runtime:
                last_att_pos = np.argmax(align.value())
                # Stop once attention has parked on the last character for
                # several consecutive steps.
                if last_att_pos == len(characters) - 1:
                    stationed_count += 1
                    if stationed_count > 5:
                        break
            output_att.append(align)
            # Decoder step: previous-frame projection + attention context.
            mgc_proj = dy.tanh(self.last_mgc_proj_w.expr(update=True) * last_mgc + self.last_mgc_proj_b.expr(update=True))
            decoder = decoder.add_input(dy.concatenate([mgc_proj, att]))
            hidden = dy.tanh(self.hid_w.expr(update=True) * decoder.output() + self.hid_b.expr(update=True))
            # Three output frames per step, each with a highway connection.
            output = dy.logistic(self.highway_w.expr(update=True) * att + self.proj_w_1.expr(update=True) * hidden + self.proj_b_1.expr(update=True))
            output_mgc.append(output)
            output = dy.logistic(self.highway_w.expr(update=True) * att + self.proj_w_2.expr(update=True) * hidden + self.proj_b_2.expr(update=True))
            output_mgc.append(output)
            output = dy.logistic(self.highway_w.expr(update=True) * att + self.proj_w_3.expr(update=True) * hidden + self.proj_b_3.expr(update=True))
            output_mgc.append(output)
            output_stop.append(dy.tanh(self.stop_w.expr(update=True) * decoder.output() + self.stop_b.expr(update=True)))
            if runtime:
                if max_size != -1 and mgc_index > max_size:
                    break
                # feed the last generated frame back in
                last_mgc = dy.inputVector(output.value())
                if max_size == -1 and output_stop[-1].value() < -0.5:
                    break
                if mgc_index >= len(characters) * 7:  # safeguard
                    break
            else:
                # Teacher forcing: feed the gold frame two ahead of the
                # current group start.
                last_mgc = dy.inputVector(gold_mgc[min(mgc_index + 2, len(gold_mgc) - 1)])
            mgc_index += 3
            if not runtime and mgc_index >= gold_mgc.shape[0]:
                break
        return output_mgc, output_stop, output_att

    def _compute_guided_attention(self, att_vect, decoder_step, num_characters, num_mgcs):
        """Guided-attention loss: penalise attention mass that lies far
        from the diagonal of the (decoder, encoder) alignment matrix."""
        t1 = float(decoder_step) / num_mgcs
        target_probs = []
        for encoder_step in range(num_characters):
            target_probs.append(1.0 - np.exp(-((float(encoder_step) / num_characters - t1) ** 2) / 0.08))
        target_probs = dy.inputVector(target_probs)
        return dy.transpose(target_probs) * att_vect

    def _compute_binary_divergence(self, pred, target):
        """Binary log loss between predicted and target frames."""
        return dy.binary_log_loss(pred, target)

    def learn(self, characters, target_mgc, guided_att=True):
        """Train on one (characters, target MGC matrix) pair and update
        the parameters.  Returns the per-frame loss value."""
        num_mgc = target_mgc.shape[0]
        dy.renew_cg()
        output_mgc, output_stop, output_attention = self._predict(characters, target_mgc)
        losses = []
        index = 0
        for mgc, real_mgc in zip(output_mgc, target_mgc):
            t_mgc = dy.inputVector(real_mgc)
            losses.append(dy.l1_distance(mgc, t_mgc))
            if index % 3 == 0:
                # One attention/stop step per group of three frames.
                # fix: use integer division -- '/' yields a float, which is
                # not a valid list index on Python 3.
                step = index // 3
                if guided_att:
                    att = output_attention[step]
                    # +2 accounts for the START/STOP symbols added by _make_input
                    losses.append(self._compute_guided_attention(att, step, len(characters) + 2, num_mgc // 3))
                # EOS loss: push the stop predictor negative near the end
                stop = output_stop[step]
                if index >= num_mgc - 6:
                    losses.append(dy.l1_distance(stop, dy.scalarInput(-0.8)))
                else:
                    losses.append(dy.l1_distance(stop, dy.scalarInput(0.8)))
            index += 1
        loss = dy.esum(losses)
        loss_val = loss.value() / num_mgc
        loss.backward()
        self.trainer.update()
        return loss_val

    def generate(self, characters, max_size=-1):
        """Synthesise MGC frames for *characters*.

        Returns (frame matrix of shape [num_frames, mgc_order],
        attention vectors)."""
        dy.renew_cg()
        output_mgc, _, att = self._predict(characters, max_size=max_size)
        mgc_output = [mgc.npvalue() for mgc in output_mgc]
        mgc_final = np.zeros((len(mgc_output), mgc_output[-1].shape[0]))
        for i in range(len(mgc_output)):
            mgc_final[i, :] = mgc_output[i]
        return mgc_final, att

    def store(self, output_base):
        """Save all network parameters to <output_base>.network."""
        self.model.save(output_base + ".network")

    def load(self, output_base):
        """Load network parameters from <output_base>.network."""
        self.model.populate(output_base + ".network")

    def _attend(self, input_list, decoder_state, last_pos=None):
        """Additive attention over the encoder states.

        When last_pos is given (runtime), attention is forced to advance
        monotonically by at most two positions per step and the returned
        alignment is a one-hot vector over the input positions.
        """
        w1 = self.att_w1.expr(update=True)
        w2 = self.att_w2.expr(update=True)
        v = self.att_v.expr(update=True)
        attention_weights = []
        w2dt = w2 * dy.concatenate([decoder_state.s()[-1]])
        for input_vector in input_list:
            attention_weight = v * dy.tanh(w1 * input_vector + w2dt)
            attention_weights.append(attention_weight)
        attention_weights = dy.softmax(dy.concatenate(attention_weights))
        # force incremental attention if this is runtime
        if last_pos is not None:
            current_pos = np.argmax(attention_weights.value())
            if current_pos < last_pos or current_pos >= last_pos + 3:
                current_pos = last_pos + 1
            if current_pos >= len(input_list):
                current_pos = len(input_list) - 1
            output_vectors = input_list[current_pos]
            simulated_att = np.zeros((len(input_list)))
            simulated_att[current_pos] = 1.0
            new_att_vec = dy.inputVector(simulated_att)
            return output_vectors, new_att_vec
        output_vectors = dy.esum(
            [vector * attention_weight for vector, attention_weight in zip(input_list, attention_weights)])
        return output_vectors, attention_weights
|
13,728 | bea1511a0b24cc29d2bf125cd627a8d7f9f4e682 | # -*- coding: utf-8 -*-
""" Code developed during 2018 MLH hackathon at University of Manitoba.
main.py: this contains the main code for running the application.
"""
##############################################################################
__author__ = ["Chris Cadonic", "Cassandra Aldaba"]
__credits__ = ["Chris Cadonic", "Cassandra Aldaba"]
__version__ = "0.1"
##############################################################################
import os
import yaml
import util
import numpy as np
import cv2
from lk import LK
from custom_lk import CustomLK
#import cnn
def test_lk():
    """
    Run hierarchical Lucas-Kanade optical flow over a sample video and
    display (optionally record) the per-frame flow visualisation.

    Paths come from config.yaml; frames are cropped to the region of
    interest before processing.
    """
    # get current directory to work relative to current file path
    curdir = os.path.dirname(__file__)

    # Load configuration for system.  safe_load avoids executing arbitrary
    # YAML tags (yaml.load without an explicit Loader is unsafe/deprecated).
    yaml_file = os.path.join(curdir, 'config.yaml')
    with open(yaml_file, "r") as f:
        config = yaml.safe_load(f)

    # extract list of videos from data dir
    vid_dir = os.path.join(curdir, config['traindir'])
    vid_names = util.load_data(vid_dir)

    # extract background subtraction image from bg vid
    bg_file = os.path.join(curdir, config['bg_img'])
    bg_valid, bg_video, bg_frame = util.load_video(bg_file)

    valid, video, frame = util.load_video(vid_names[1])
    init_frame = frame[40: 680, 70: 1210]
    valid, next_frame = video.read()
    if not valid:
        # single-frame video: nothing to track
        return
    orig_next_frame = next_frame.copy()
    next_frame = next_frame[40: 680, 70: 1210]

    # rescale to gray
    if len(init_frame.shape) > 2:
        init_frame = cv2.cvtColor(init_frame, cv2.COLOR_BGR2GRAY)
    if len(next_frame.shape) > 2:
        next_frame = cv2.cvtColor(next_frame, cv2.COLOR_BGR2GRAY)

    # Foreground mask from background subtraction, with regions outside
    # the arena zeroed out, then dilated to cover the full subject.
    _, mask = util.background_subtraction(init_frame, bg_frame, thresh=0.25)
    mask[:140, :] = 0
    mask[520:, :] = 0
    mask[:, 150: 220] = 0
    mask[:, :100] = 0
    mask[:, 1000:] = 0
    elem = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
    dilated_mask = cv2.dilate(mask, kernel=elem)

    custom_lk = CustomLK()
    # NOTE(review): fourcc -1 opens a codec chooser dialog on Windows, and
    # the writer is effectively unused while write() below is commented out.
    writer = cv2.VideoWriter('output.avi', -1, 20, (1140, 640))

    frame_num = 1
    while valid:
        print("Frame:", frame_num)
        u, v, img, next_frame = custom_lk.hierarchical_lk(img_a=init_frame,
                                                          img_b=next_frame,
                                                          orig_b=orig_next_frame,
                                                          levels=5,
                                                          k_size=8,
                                                          k_type="uniform",
                                                          sigma=0,
                                                          interpolation=cv2.INTER_CUBIC,
                                                          border_mode=cv2.BORDER_REPLICATE,
                                                          mask=dilated_mask)
        cv2.imshow('img.png', img)
        cv2.waitKey(10)
        # writer.write(img)

        init_frame = next_frame.copy()
        valid, next_frame = video.read()
        if not valid:
            # fix: at end of stream read() returns (False, None); the old
            # code called None.copy() here and crashed.
            break
        orig_next_frame = next_frame.copy()
        next_frame = next_frame[40: 680, 70: 1210]
        next_frame = cv2.cvtColor(next_frame, cv2.COLOR_BGR2GRAY)
        frame_num += 1

    writer.release()
if __name__ == '__main__':
    # Manual entry point for the optical-flow demo.
    test_lk()
|
13,729 | 7b5c96442c8bd07a710c8c971988e691349ceb25 | #config/urls.py
from django.contrib import admin
from django.urls import path, include
from . import index
# Route table: the sign-in view serves both the site root and signin.html.
urlpatterns = [
    #path('admin/', admin.site.urls),
    #path('', include('.urls')),
    path('', index.signin),
    path('signup.html', index.signup),
    path('signin.html', index.signin),
]
|
13,730 | f7d4464db016379e88f3f044c04c9a353909346e | import sys
import struct
import idautils
import idc
from patchwork.DataTransferObjects import Selector, Selection
import patchwork.ida_lib as ida_lib
import patchwork.core as core
##############################################
# patterns
# Regexes matched against raw x86 bytes to select candidate obfuscation
# sites: push/push/call and push/call detours, "push imm8; call" register
# pushes, and small xor/mov helper functions.
push_push_call_regex = (
    r"\x68(?P<operand_1>[\S\s]{4})"
    r"\x68(?P<operand_2>[\S\s]{4})"
    r"\xE8"
)
push_call_regex = (
    r"\x68(?P<operand_1>[\S\s]{4})"
    r"\xE8"
)
push_reg_regex = (
    r"\x6a(?P<operand_1>[\x30-\x37]{1})"
    r"\xE8"
)
xor_reg_regex = (
    r"\x53"
    r"\xBB(?P<operand_1>[\S\s]{4})"
    r"\x31\xD8"
    r"\x5B"
    r"\xC3"
)
mov_eax_regex = (
    r"\xB8(?P<operand_1>[\S\s]{4})"
    r"\xC3"
)
# Expected disassembly traces used to confirm that an emulated candidate
# really is the named obfuscation construct (empty entries are wildcard
# slots for the variable add/sub/xor operand instruction).
ppc_validators = {
    "call_detour": [
        'push dword',
        'push dword',
        'push ebp',
        'mov ebp,esp',
        'push eax',
        'mov eax,[ebp+0x4]',
        'mov [ebp+0x10],eax',
        'mov eax,[ebp+0xc]',
        '',  # contains the operand -> add, sub, xor
        'add [ebp+0x4],eax',
        'pop eax',
        'leave'],
    "short_call_detour": [
        'push dword',
        'push dword',
        'push ebp',
        'mov ebp,esp',
        'push eax',
        'mov eax,[ebp+0xc]',
        '',  # contains the operand -> add, sub, xor
        'add [ebp+0x4],eax',
        'pop eax',
        'leave'],
    "jump_dual_detour": [
        'push dword',
        'push dword',
        'push ebp',
        'mov ebp,esp',
        'push eax',
        'push ecx',
        'mov eax,[ebp+0xc]',
        'mov ecx,[ebp+0x8]',
        'lea eax,[eax+ecx]',
        'mov ecx,[ebp+0x4]',
        'lea eax,[eax+ecx]',
        'mov [ebp+0x4],eax',
        'pop ecx',
        'pop eax',
        'leave'],
}
pc_validators = {
    "jump_single_detour": [
        'push dword',
        'push ebp',
        'mov ebp,esp',
        'push eax',
        'push ecx',
        'mov eax,[ebp+0x8]',
        'mov ecx,[ebp+0x4]',
        'mov ecx,[ecx]',
        '',  # contains the operand -> add, sub, xor
        'add [ebp+0x4],eax',
        'pop ecx',
        'pop eax',
        'leave'],
    "jump_single_detour_2": [
        'push dword',
        'push ebp',
        'mov ebp,esp',
        'push eax',
        'push ecx',
        'mov ecx,[ebp+0x4]',
        'mov ecx,[ecx',
        'mov eax,[ebp+0x8]',
        'lea eax,[eax+ecx]',
        'mov ecx,[ebp+0x4]',
        'lea eax,[eax+ecx]',
        'mov [ebp+0x4],eax',
        'pop ecx',
        'pop eax',
        'leave']
}
pr_validators = {
    "push_reg_deobfuscation": [
        'push byte',
        'cmp dword [esp+0x4]']
}
##############################################
# Stitch
class NymaimDeobfuscation():
    """Deobfuscator for Nymaim control-flow obfuscation (Python 2 / IDA).

    Scans the .text section for the byte patterns above, validates
    candidate hits by emulation, and patches the obfuscated bytes in IDA
    with equivalent plain call/jmp/push/xor/mov instructions.
    """

    def __init__(self, emulator):
        # super(NymaimDeobfuscation, self).__init__(emulator)
        self.emulator = emulator
        # Snapshot of the .text section that the selectors scan over.
        self.memory = self.emulator.get_memory(emulator.textstart, emulator.textend - emulator.textstart)
        # Validator/selector name -> patch routine for that pattern.
        self.transformators = {
            "push_reg_deobfuscation": self._deobfuscate_push_reg,
            "call_detour": self._deobfuscate_call_detour,
            "short_call_detour": self._deobfuscate_call_detour,
            "jump_dual_detour": self._deobfuscate_jump_dual_detour,
            "jump_single_detour": self._deobfuscate_jump_single_detour,
            "jump_single_detour_2": self._deobfuscate_jump_single_detour,
            "xor_eax": self._deobfuscate_xor_eax,
            "mov_eax": self._deobfuscate_mov_eax,
        }

    def run(self):
        """Select, validate and patch every supported obfuscation pattern,
        then report how many transformations were performed."""
        # push/push/call detours (emulation-validated)
        selector = Selector(push_push_call_regex)
        ppc_selections = core.select(selector, self.memory, self.emulator.textstart)
        ppc_validations = []
        print "\nFound %d ppc_selection hits." % len(ppc_selections)
        ppc_validations = self.validate_selections(ppc_selections, ppc_validators)
        # push/call detours (emulation-validated)
        selector = Selector(push_call_regex)
        pc_selections = core.select(selector, self.memory, self.emulator.textstart)
        pc_validations = []
        print "\nFound %d pc_selection hits." % len(pc_selections)
        pc_validations = self.validate_selections(pc_selections, pc_validators)
        # push-register obfuscation (emulation-validated)
        selector = Selector(push_reg_regex)
        pr_selections = core.select(selector, self.memory, self.emulator.textstart)
        pr_validations = []
        print "\nFound %d pr_selection hits." % len(pr_selections)
        pr_validations = self.validate_selections(pr_selections, pr_validators)
        # xor/mov helper functions: exact byte matches, patched directly
        selector = Selector(xor_reg_regex, transformator="xor_eax")
        xr_selections = core.select(selector, self.memory, self.emulator.textstart)
        print "\nFound %d xr_selection hits (no validation required)." % len(xr_selections)
        selector = Selector(mov_eax_regex, transformator="mov_eax")
        me_selections = core.select(selector, self.memory, self.emulator.textstart)
        print "\nFound %d me_selection hits (no validation required)." % len(me_selections)
        print "\n** Results:"
        print "ppc - %d/%d validated hits" % (len([hit for hit in ppc_validations if hit.positivePatterns]), len(ppc_selections))
        print "pc - %d/%d validated hits" % (len([hit for hit in pc_validations if hit.positivePatterns]), len(pc_selections))
        print "pr - %d/%d validated hits" % (len([hit for hit in pr_validations if hit.positivePatterns]), len(pr_selections))
        print "xr - %d hits" % len(xr_selections)
        print "me - %d hits" % len(me_selections)
        # Patch everything and collect the offsets whose operands were fixed.
        num_all_transformations = 0
        fixed_offsets = set([])
        for candidates in [pr_validations, ppc_validations, pc_validations, xr_selections, me_selections]:
            for candidate in candidates:
                num_transformations, offsets = core.transform(candidate, self.transformators)
                num_all_transformations += num_transformations
                fixed_offsets.update(offsets)
        print "performed %d (hopefully) successful transformations" % num_all_transformations
        print "fixed offsets (%d): " % len(fixed_offsets)
        # print "undefine_offsets = ["
        # for offset in fixed_offsets:
        #     print "    0x%x," % offset
        # print "]"

    def emulate_single(self, start_addr):
        """Debug helper: emulate one candidate at start_addr, dump its
        instruction trace and exit the process."""
        result = core.emulate(self.emulator, Selection(start_addr), self._cbCatchDetourAddress)
        print "details for 0x%x" % (result.selection.selectionOffset)
        for ins in result.instructionTrace:
            print ins
        print result.instructionTrace
        sys.exit()

    def validate_selections(self, selections, validators):
        """Emulate every selection and keep those whose instruction trace
        matches one of the given validator patterns."""
        emu_outcomes = []
        for index, selection in enumerate(selections):
            # progress indicator (one dot per 50 selections)
            if (index % 50) == 0:
                sys.stdout.write(".")
                sys.stdout.flush()
            emu_outcomes.append(core.emulate(self.emulator, selection, self._cbCatchDetourAddress))
        validations = []
        for emu_outcome in emu_outcomes:
            validation = core.validate(validators, emu_outcome)
            if validation:
                validations.append(validation)
        return validations

    def _cbCatchDetourAddress(self, emulator):
        """Emulation callback: read the computed detour target from the
        top of the emulated stack (ESP)."""
        return emulator.get_memory(emulator.get_register("ESP"))

    def updateCallXref(self, source, new_dest):
        """Update IDA's code xref for a patched call/jmp (currently a no-op;
        the actual xref rewriting is commented out)."""
        # NOTE(review): 'source + 5' is an int while x is an xref object --
        # presumably 'x.to != source + 5' was intended; confirm.
        orig_dest = [x for x in idautils.XrefsFrom(source) if x != source + 5]
        if orig_dest:
            pass
            # print "from 0x%x to 0x%x becomes 0x%x to 0x%x" % (source, orig_dest[0].to, source, new_dest)
            # idc.DelCodeXref(source, orig_dest[0])
            # idc.DelCodeXref(source, new_dest)
        else:
            # NOTE(review): missing 'print' -- this string literal is a no-op.
            "didn't obtain orig address... 0x%x" % source

    def _deobfuscate_push_reg(self, validation_outcome):
        """Rewrite a 'push imm8; call' register push as NOPs + 'push reg'."""
        obfuscation_start_addr = validation_outcome.selection.selectionOffset
        # reg IDs: EAX - 0x30, EDX: 0x31, ... (was 0x31, ... in older versions?)
        reg_id = idc.Byte(obfuscation_start_addr + 1)
        # rewrite deobfuscation as <6x NOP>, <push reg> where reg can be numerically derived from
        # reg ID by adding 0x1F. E.g. EAX has parameter 31, so <push EAX> has opcode byte 0x50
        deobfuscated = ida_lib.get_multi_nop_buf(6) + chr(reg_id + 0x20)
        ida_lib.patch_bytes(obfuscation_start_addr, deobfuscated)
        return 1, []

    def _deobfuscate_call_detour(self, validation_outcome):
        """Replace a push/push/call (or push/call) detour with NOPs and a
        direct E8 call to the emulated target address."""
        obfuscation_start_addr = validation_outcome.selection.selectionOffset
        # relative displacement from the end of the new 5-byte call
        rel_call_offset = validation_outcome.emulation.callbackResult - (obfuscation_start_addr + 10 + 5)
        deobfuscated_call = "\x90" * 10 + "\xE8" + struct.pack("I", (rel_call_offset) & 0xffffffff)
        ida_lib.patch_bytes(obfuscation_start_addr, deobfuscated_call)
        self.updateCallXref(obfuscation_start_addr + 10, validation_outcome.emulation.callbackResult)
        # recover the return address encoded in the original first push
        rel_s = obfuscation_start_addr + 5 + 5 - self.emulator.textstart + 1
        dw = self.memory[rel_s:rel_s + 4]
        fixed_destination = (5 + obfuscation_start_addr + 5 + 5 + struct.unpack("I", dw)[0]) & 0xffffffff
        return 1, [fixed_destination]

    def _deobfuscate_jump_dual_detour(self, validation_outcome):
        """Replace a dual jump detour with NOPs and a direct E9 jmp."""
        obfuscation_start_addr = validation_outcome.selection.selectionOffset
        rel_jmp_offset = validation_outcome.emulation.callbackResult - (obfuscation_start_addr + 10 + 5)
        deobfuscated_jmp = "\x90" * 10 + "\xE9" + struct.pack("I", (rel_jmp_offset) & 0xffffffff)
        ida_lib.patch_bytes(obfuscation_start_addr, deobfuscated_jmp)
        self.updateCallXref(obfuscation_start_addr + 10, validation_outcome.emulation.callbackResult)
        rel_s = obfuscation_start_addr + 5 + 5 - self.emulator.textstart + 1
        dw = self.memory[rel_s:rel_s + 4]
        fixed_destination = (5 + obfuscation_start_addr + 5 + 5 + struct.unpack("I", dw)[0]) & 0xffffffff
        return 1, [fixed_destination]

    def _deobfuscate_jump_single_detour(self, validation_outcome):
        """Replace a single jump detour with NOPs and a direct E9 jmp, then
        NOP the now-dead trailing bytes to aid IDA's code recognition."""
        obfuscation_start_addr = validation_outcome.selection.selectionOffset
        rel_jmp_offset = validation_outcome.emulation.callbackResult - (obfuscation_start_addr + 5 + 5)
        deobfuscated_jmp = "\x90" * 5 + "\xE9" + struct.pack("I", (rel_jmp_offset) & 0xffffffff)
        ida_lib.patch_bytes(obfuscation_start_addr, deobfuscated_jmp)
        self.updateCallXref(obfuscation_start_addr + 5, validation_outcome.emulation.callbackResult)
        # replace the post jump bytes with NOP to improve IDA's code recognition
        ida_lib.patch_bytes(obfuscation_start_addr + 5 + 5, ida_lib.get_multi_nop_buf(4))
        rel_s = obfuscation_start_addr + 5 - self.emulator.textstart + 1
        dw = self.memory[rel_s:rel_s + 4]
        fixed_destination = (5 + obfuscation_start_addr + 5 + struct.unpack("I", dw)[0]) & 0xffffffff
        return 1, [fixed_destination]

    def _deobfuscate_xor_eax(self, selection_outcome):
        """Inline every call to a 'xor eax, imm32' helper function as a
        direct 'xor eax, imm32' (opcode 0x35) at the call site."""
        num_deobfuscations = 0
        for referencing_call in selection_outcome.codeRefsToFunction:
            obfuscation_start_addr = referencing_call
            deobfuscated_xor = "\x35" + selection_outcome.selectionGroupdict["operand_1"]
            ida_lib.patch_bytes(obfuscation_start_addr, deobfuscated_xor)
            num_deobfuscations += 1
        return num_deobfuscations, []

    def _deobfuscate_mov_eax(self, selection_outcome):
        """Inline every call to a 'mov eax, imm32' helper function as a
        direct 'mov eax, imm32' (opcode 0xB8) at the call site."""
        num_deobfuscations = 0
        for referencing_call in selection_outcome.codeRefsToFunction:
            obfuscation_start_addr = referencing_call
            deobfusbcated_mov = "\xb8" + selection_outcome.selectionGroupdict["operand_1"]
            ida_lib.patch_bytes(obfuscation_start_addr, deobfusbcated_mov)
            num_deobfuscations += 1
        return num_deobfuscations, []
|
13,731 | 5898fde8438ac3656fee99ed290a1a3117e98496 | #!/usr/bin/env python3
'''
Tkinter LabelEdit app
'''
import tkinter as tk
from tkinter import font
class InputLabel(tk.Label):
    """Single-line editable label with a movable text cursor."""

    def __init__(self, master=None, **kwards):
        self.text = tk.StringVar()
        self.font = font.Font(family="Consolas", size=10, weight="normal")
        super().__init__(
            master,
            font=self.font,
            textvariable=self.text,
            takefocus=True,
            highlightthickness=1,
            anchor="nw",
            **kwards)
        # Thin frame used as the text cursor; repositioned with place().
        self.Cursor = tk.Frame(self, width=2, bd=2, relief=tk.RIDGE)
        self.bind("<Button-1>", self.mouse_click_handler)
        self.bind('<Any-KeyPress>', self.key_press_handler)

    def get_cursor_pos(self, x):
        """Map a pixel x-offset to the character index it falls before."""
        content = self.text.get()
        for i in range(len(content) + 1):
            if self.font.measure(content[:i]) > x:
                return i - 1
        return len(content)

    def move_cursor(self, i):
        """Place the cursor before character index i.

        The index is clamped to the text bounds (fix: moving Left at the
        start or BackSpace at index 0 previously produced index -1, which
        silently measured all-but-the-last character and jumped the cursor
        to the wrong end).
        """
        i = max(0, min(i, len(self.text.get())))
        self.Cursor.place(
            relheight=1,
            x=self.font.measure(self.text.get()[:i]))

    def mouse_click_handler(self, event):
        """Focus the widget and move the cursor to the clicked position."""
        self.focus_set()
        self.move_cursor(self.get_cursor_pos(event.x))

    def key_press_handler(self, event):
        """Edit the text / move the cursor according to the pressed key."""
        i = self.get_cursor_pos(int(self.Cursor.place_info()['x']))
        text = self.text.get()
        if event.keysym == "BackSpace":
            # fix: at i == 0 the old code set text[:-1] + text[0:], nearly
            # duplicating the content; deleting at the start is a no-op.
            if i > 0:
                self.text.set(text[:i - 1] + text[i:])
                self.move_cursor(i - 1)
        elif event.keysym == "Left":
            self.move_cursor(i - 1)
        elif event.keysym == "Right":
            self.move_cursor(i + 1)
        elif event.keysym == "Home":
            self.move_cursor(0)
        elif event.keysym == "End":
            self.move_cursor(len(text))
        elif event.keysym == "Up" or event.keysym == "Down" or event.keysym == "Next" or event.keysym == "Prior":
            # single-line widget: vertical navigation keys are ignored
            pass
        elif event.char.isprintable():
            self.text.set(text[:i] + event.char + text[i:])
            self.move_cursor(i + 1)
class Application(tk.Frame):
    """Main application frame hosting an InputLabel and a Quit button."""

    def __init__(self, master=None, title="<application>", **kwargs):
        '''Create root window with frame, tune weight and resize'''
        super().__init__(master, **kwargs)
        self.master.title(title)
        # Row/column 0 absorb all extra space on both the toplevel and this
        # frame so the content stretches with the window.
        for widget in (self.master, self):
            widget.columnconfigure(0, weight=1)
            widget.rowconfigure(0, weight=1)
        self.grid(sticky="NEWS")
        self.createWidgets()

    def createWidgets(self):
        '''Create all the widgets'''
        self.L = tk.Label(self)
        self.L.grid(sticky="NEWS")
        self.IL = InputLabel(self.L)
        self.IL.grid(row=0, column=0, sticky="NEWS")
        self.Q = tk.Button(self.L, text="Quit", command=self.master.quit)
        self.Q.grid(row=1, column=0)
# Instantiate and run the editor; this executes on import because the file
# is written as a straight script (no `if __name__ == "__main__"` guard).
app = Application(title="LabelEdit")
app.mainloop()
|
13,732 | 37b70925f958f9da99f78c852c8cfe2d9f3db6ed | import base64
import hashlib
from Crypto import Random
from Crypto.Cipher import AES
class AESCipher(object):
    """AES-256-CBC helper: the key is SHA-256(key), a random IV is prepended
    to the ciphertext, and the result is returned base64-encoded.
    """

    def __init__(self, key):
        # Padding block size. NOTE(review): AES's cipher block size is 16;
        # padding to 32 still round-trips correctly but is not strict PKCS#7.
        # Kept at 32 for compatibility with data produced by this class.
        self.bs = 32
        self.key = hashlib.sha256(key.encode()).digest()

    def encrypt(self, raw):
        """Encrypt *raw* (str or bytes); return base64(iv + ciphertext)."""
        raw = self._pad(raw)
        if isinstance(raw, str):
            # AES.new(...).encrypt requires bytes on Python 3.
            raw = raw.encode()
        iv = Random.new().read(AES.block_size)
        cipher = AES.new(self.key, AES.MODE_CBC, iv)
        return base64.b64encode(iv + cipher.encrypt(raw))

    def decrypt(self, enc):
        """Decrypt base64(iv + ciphertext); return the unpadded plaintext bytes."""
        enc = base64.b64decode(enc)
        iv = enc[:AES.block_size]
        cipher = AES.new(self.key, AES.MODE_CBC, iv)
        return self._unpad(cipher.decrypt(enc[AES.block_size:]))

    def _pad(self, s):
        """Pad *s* to a multiple of self.bs, PKCS#7-style.

        Fixed: the original always appended a *str* pad, which raised
        TypeError on Python 3 whenever *s* was bytes (as FileCryptor supplies).
        """
        pad_len = self.bs - len(s) % self.bs
        if isinstance(s, bytes):
            return s + bytes([pad_len]) * pad_len
        return s + pad_len * chr(pad_len)

    def encryptB64(self, raw):
        # NOTE: encrypt() already base64-encodes, so this double-encodes;
        # kept because decryptB64 symmetrically double-decodes.
        return base64.b64encode(self.encrypt(raw))

    def decryptB64(self, enc):
        return self.decrypt(base64.b64decode(enc))

    @staticmethod
    def _unpad(s):
        # Strip PKCS#7-style padding: the last byte encodes the pad length.
        return s[:-ord(s[len(s)-1:])]
class FileCryptor(object):
    """Encrypt or decrypt whole files through an AESCipher keyed on *key*."""

    def __init__(self, key):
        self.aes = AESCipher(key)

    def encryptToFile(self, inPath, outPath, b64=False):
        """Encrypt *inPath* into *outPath* (extra base64 layer when b64=True)."""
        transform = self.aes.encryptB64 if b64 else self.aes.encrypt
        self._rewrite(inPath, outPath, transform)

    def decryptFromFile(self, inPath, outPath, b64=False):
        """Decrypt *inPath* into *outPath* (extra base64 layer when b64=True)."""
        transform = self.aes.decryptB64 if b64 else self.aes.decrypt
        self._rewrite(inPath, outPath, transform)

    @staticmethod
    def _rewrite(src, dst, transform):
        # Read the whole source, transform it, then write the destination.
        with open(src, "rb") as fin:
            payload = fin.read()
        with open(dst, "wb") as fout:
            fout.write(transform(payload))
|
13,733 | 48bcbeb573fa701dab676aacf074140b0fdf5221 | # Generated by Django 2.2.24 on 2021-09-08 13:35
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds an optional unit-price column to OrderConsignment.
    dependencies = [
        ('consignment', '0030_remove_orderconsignment_num'),
    ]
    operations = [
        migrations.AddField(
            model_name='orderconsignment',
            name='price',
            # Nullable decimal defaulting to 0; verbose_name is Russian for "Price".
            field=models.DecimalField(blank=True, decimal_places=3, default=0, max_digits=10, null=True, verbose_name='Цена'),
        ),
    ]
|
13,734 | e89f0429d5f4469daa68984f24642940f43e1207 | import pytest
import sys
from pycpslib import lib as P
ALL = set("darwin linux2 win32".split())
def pytest_runtest_setup(item):
    # Skip platform-marked tests when not running on a matching platform:
    # if a test carries any marker from ALL ("darwin linux2 win32") but not
    # the marker for the current sys.platform, it is skipped. This is the
    # classic pytest-docs pattern; it relies on the long-deprecated
    # item.get_marker / item.Function APIs -- TODO confirm the pinned
    # pytest version still provides them.
    if isinstance(item, item.Function):
        plat = sys.platform
        if not item.get_marker(plat):
            if ALL.intersection(item.keywords):
                pytest.skip("cannot run on platform %s" %(plat))
@pytest.fixture
def flush(request):
    """Register a finalizer that flushes gcov coverage data to disk
    after the test has run."""
    # P.gcov_flush is already a zero-argument callable, so it can be
    # handed to addfinalizer directly instead of wrapping it.
    request.addfinalizer(P.gcov_flush)
# Sentinel distinguishing "delta not supplied" from an explicit value. With
# the old `delta=0.1` default, calling almost_equal(..., places=N) always
# raised TypeError ("specify delta or places not both"), so the `places`
# branch was unreachable unless delta=None was passed explicitly.
_DELTA_UNSET = object()


@pytest.fixture
def almost_equal():
    # This function is adapted from the unittest.case module.
    def almost_equal(first, second, places=None, delta=_DELTA_UNSET):
        """Return True if the two objects are equal as determined by their
        difference rounded to the given number of decimal places
        (default 2) and comparing to zero, or by comparing that the
        difference between the two objects is no more than the given
        delta (default 0.1).

        Note that decimal places (from zero) are usually not the same
        as significant digits (measured from the most signficant digit).

        If the two objects compare equal then they will automatically
        compare almost equal.
        """
        if first == second:
            # shortcut
            return True
        if delta is not _DELTA_UNSET and delta is not None and places is not None:
            raise TypeError("specify delta or places not both")
        if delta is _DELTA_UNSET:
            # Historical default tolerance when neither was supplied;
            # an explicit `places` switches to the rounding comparison.
            delta = None if places is not None else 0.1
        if delta is not None:
            return abs(first - second) <= delta
        if places is None:
            places = 2
        return round(abs(second - first), places) == 0
    return almost_equal
@pytest.fixture
def compare_cpu_times(almost_equal):
    # Fixture returning a platform-specific comparator that checks two
    # cpu-times tuples field by field within the almost_equal tolerance.
    def compare_linux(t1, t2):
        # Linux exposes the full set of cpu-time fields.
        return all([almost_equal(t1.user, t2.user),
                    almost_equal(t1.system, t2.system),
                    almost_equal(t1.idle, t2.idle),
                    almost_equal(t1.nice, t2.nice),
                    almost_equal(t1.iowait, t2.iowait),
                    almost_equal(t1.irq, t2.irq),
                    almost_equal(t1.softirq, t2.softirq),
                    almost_equal(t1.steal, t2.steal),
                    almost_equal(t1.guest, t2.guest),
                    almost_equal(t1.guest_nice, t2.guest_nice)])
    def compare_darwin(t1, t2):
        # macOS only reports user/system/idle/nice.
        return all([almost_equal(t1.user, t2.user),
                    almost_equal(t1.system, t2.system),
                    almost_equal(t1.idle, t2.idle),
                    almost_equal(t1.nice, t2.nice)])
    if sys.platform == 'darwin':
        return compare_darwin
    # TODO: add more cases as more platforms get implemented
    # See https://github.com/nibrahim/cpslib/issues/9 as well.
    return compare_linux
|
13,735 | 4059ce646b1171bf6d31ae1129eede05f8c822a9 | from sqlalchemy import Column, Integer, String, Date, create_engine, MetaData
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from settings import DB_CONNECTION_STRING, SCHEMA
# Declarative base bound to the configured schema.
Base = declarative_base(metadata=MetaData(schema=SCHEMA))
engine = create_engine(DB_CONNECTION_STRING, echo=True)
# Keep the factory and the instance under distinct names; the original code
# rebound the single name `session` three times (factory -> configured ->
# instance), which obscured what each line produced. `session` is still
# exported as the ready-to-use Session instance.
Session = sessionmaker(bind=engine)
session = Session()
class Client(Base):
    # Basic client record with demographic fields.
    __tablename__ = "tt_clients"
    id = Column(Integer, primary_key=True)
    last_name = Column(String(255))
    first_name = Column(String(255))
    dob = Column(Date)  # date of birth
    # Presumably references an entry in tt_dictionary -- no FK constraint
    # is declared here; confirm against the schema.
    social_status_id = Column(Integer)
    gender = Column(String(1))
class Dictionary(Base):
    """Generic lookup table whose entries are keyed either by a string id
    (str_id) or an integer id (int_id) within a category."""
    __tablename__ = 'tt_dictionary'
    id = Column(Integer, primary_key=True)
    category = Column(String(25))
    str_id = Column(String(50))
    # Fixed: was Column(String(Integer)), which passed the Integer *type*
    # as the VARCHAR length argument; an integer id column should simply
    # be INTEGER, mirroring str_id's String(50).
    int_id = Column(Integer)
    value = Column(String(255))
|
13,736 | ba2bce02eb0b44c43ac1acca1513cdedea07697b | # Generated by Django 3.1.10 on 2021-05-11 23:20
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial migration: creates the Game model (a sellable product key).
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Game',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('key', models.CharField(max_length=64)),
                # NOTE(review): despite the plural name, `owners` is a plain
                # ForeignKey (one owner per row) -- confirm a ManyToManyField
                # was not intended before building on this schema.
                ('owners', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='owned_games', to=settings.AUTH_USER_MODEL)),
                ('seller', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='products', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
13,737 | a4af67979baf1f782211359a2f6236ff18ffee73 | from __future__ import annotations
import collections
import sys
from collections.abc import Iterable
from typing import IO, Any
from peewee import CharField, ForeignKeyField
from taxonomy.apis.cloud_search import SearchField, SearchFieldType
from ... import events, getinput
from .. import constants, models
from ..derived_data import DerivedField
from .base import BaseModel, EnumField, get_tag_based_derived_field
class Region(BaseModel):
    """A geographic region arranged in a self-referential parent/child tree.

    Each region has a kind (from constants.RegionKind) and hangs
    collections, citation groups, locations, periods and associated people
    off it; most helpers below either aggregate over or display that
    subtree recursively.
    """

    creation_event = events.Event["Region"]()
    save_event = events.Event["Region"]()
    label_field = "name"
    call_sign = "R"
    name = CharField()
    comment = CharField(null=True)
    # Self-referential tree link; a null parent marks a top-level region.
    parent = ForeignKeyField(
        "self", related_name="children", db_column="parent_id", null=True
    )
    kind = EnumField(constants.RegionKind)
    # Cached summaries computed from this region's subtree.
    derived_fields = [
        DerivedField("has_collections", bool, lambda region: region.has_collections()),
        DerivedField(
            "has_citation_groups", bool, lambda region: region.has_citation_groups()
        ),
        DerivedField("has_locations", bool, lambda region: region.has_locations()),
        DerivedField("has_periods", bool, lambda region: region.has_periods()),
        DerivedField("has_type_localities", bool, lambda region: not region.is_empty()),
        DerivedField(
            "has_associated_people", bool, lambda region: region.has_associated_people()
        ),
        get_tag_based_derived_field(
            "associated_people",
            lambda: models.Person,
            "tags",
            lambda: models.tags.PersonTag.ActiveRegion,
            1,
        ),
    ]
    search_fields = [
        SearchField(SearchFieldType.text, "name"),
        SearchField(SearchFieldType.literal, "kind"),
    ]

    def get_search_dicts(self) -> list[dict[str, Any]]:
        """Return the documents to index for cloud search."""
        return [{"name": self.name, "kind": self.kind.name}]

    @classmethod
    def make(
        cls, name: str, kind: constants.RegionKind, parent: Region | None = None
    ) -> Region:
        """Create a region together with its corresponding Recent Location."""
        region = cls.create(name=name, kind=kind, parent=parent)
        models.Location.make(
            name=name,
            period=models.Period.filter(models.Period.name == "Recent").get(),
            region=region,
        )
        return region

    def __repr__(self) -> str:
        out = self.name
        if self.parent:
            out += f", {self.parent.name}"
        out += f" ({self.kind.name})"
        return out

    def get_adt_callbacks(self) -> getinput.CallbackMap:
        """Extend the base interactive callbacks with region-specific displays."""
        return {
            **super().get_adt_callbacks(),
            "display_collections": self.display_collections,
            "display_citation_groups": self.display_citation_groups,
            "display_periods": self.display_periods,
            "display_type_localities": lambda: self.display(full=False, locations=True),
        }

    def get_general_localities(self) -> list[models.Location]:
        """Find the general-purpose locations named after this region
        (the bare name, its "fossil"/"Pleistocene" variants, and names
        ending in "(<name>)")."""
        name_field = models.Location.name
        my_name = self.name
        return models.Location.bfind(
            models.Location.region == self,
            (name_field == my_name)
            | (name_field == f"{my_name} Pleistocene")
            | (name_field == f"{my_name} fossil")
            | (name_field.endswith(f"({my_name})")),
        )

    def rename(self, new_name: str | None = None) -> None:
        """Rename the region, updating its general localities to match."""
        old_name = self.name
        if new_name is None:
            # Prompt interactively when no new name was supplied.
            new_name = self.getter("name").get_one_key(
                default=old_name, allow_empty=False
            )
        for loc in self.get_general_localities():
            # Mirror the rename into each derived locality name pattern.
            if loc.name.endswith(f"({old_name})"):
                loc_name = loc.name.replace(f"({old_name})", f"({new_name})")
            elif loc.name == old_name:
                loc_name = new_name
            elif loc.name == f"{old_name} fossil":
                loc_name = f"{new_name} fossil"
            elif loc.name == f"{old_name} Pleistocene":
                loc_name = f"{new_name} Pleistocene"
            else:
                print("Skipping unrecognized name", loc.name)
                continue
            print(f"Renaming {loc.name!r} -> {loc_name!r}")
            loc.name = loc_name
        self.name = new_name

    def display(
        self,
        full: bool = False,
        depth: int = 0,
        file: IO[str] = sys.stdout,
        children: bool = False,
        skip_empty: bool = True,
        locations: bool = False,
    ) -> None:
        """Write this region (and optionally its locations/children) to *file*
        as an indented tree."""
        if skip_empty and self.is_empty():
            return
        getinput.flush()
        file.write("{}{}\n".format(" " * (depth + 4), repr(self)))
        if self.comment:
            file.write("{}Comment: {}\n".format(" " * (depth + 12), self.comment))
        if locations or full:
            for location in self.sorted_locations():
                if skip_empty and location.type_localities.count() == 0:
                    continue
                location.display(full=full, depth=depth + 4, file=file)
        if children or full:
            for child in self.sorted_children():
                child.display(
                    full=full,
                    depth=depth + 4,
                    file=file,
                    skip_empty=skip_empty,
                    locations=locations,
                )

    def display_without_stratigraphy(
        self,
        full: bool = False,
        depth: int = 0,
        file: IO[str] = sys.stdout,
        skip_empty: bool = False,
    ) -> None:
        """Display locations lacking a stratigraphic unit and not tagged General."""
        for location in self.sorted_locations():
            if skip_empty and location.type_localities.count() == 0:
                continue
            if location.stratigraphic_unit is not None:
                continue
            if location.has_tag(models.location.LocationTag.General):
                continue
            location.display(full=full, depth=depth + 4, file=file)

    def is_empty(self) -> bool:
        """True when no non-deleted location in this subtree has a type locality."""
        for loc in self.locations.filter(models.Location.deleted != True):
            if loc.type_localities.count() > 0:
                return False
        for child in self.children:
            if not child.is_empty():
                return False
        return True

    def sorted_children(self) -> list[Region]:
        return sorted(self.children, key=lambda c: c.name)

    def sorted_locations(self) -> list[models.Location]:
        return sorted(
            self.locations.filter(models.Location.deleted != True),
            key=models.Location.sort_key,
        )

    def get_location(self) -> models.Location:
        """Returns the corresponding Recent Location."""
        return models.Location.get(region=self, name=self.name, deleted=False)

    def all_parents(self) -> Iterable[Region]:
        """Returns all parent regions of this region."""
        if self.parent is not None:
            yield self.parent
            yield from self.parent.all_parents()

    def parent_of_kind(self, kind: constants.RegionKind) -> Region | None:
        """Return the nearest ancestor (or self) of the given kind, if any."""
        if self.kind is kind:
            return self
        for parent in self.all_parents():
            if parent.kind is kind:
                return parent
        return None

    def all_citation_groups(self) -> Iterable[models.CitationGroup]:
        """Yield citation groups of this region and all of its descendants."""
        yield from self.citation_groups
        for child in self.children:
            yield from child.all_citation_groups()

    def has_citation_groups(self, type: constants.ArticleType | None = None) -> bool:
        for cg in self.citation_groups:
            if type is None or cg.type is type:
                return True
        return any(child.has_citation_groups(type) for child in self.children)

    def display_citation_groups(
        self,
        full: bool = False,
        only_nonempty: bool = True,
        depth: int = 0,
        type: constants.ArticleType | None = None,
    ) -> None:
        """Print the region's citation groups, grouped by article type,
        then recurse into children."""
        if only_nonempty and not self.has_citation_groups(type=type):
            return
        print(" " * depth + self.name)
        by_type: dict[constants.ArticleType, list[models.CitationGroup]] = (
            collections.defaultdict(list)
        )
        for group in sorted(self.citation_groups, key=lambda cg: cg.name):
            if type is not None and group.type is not type:
                continue
            by_type[group.type].append(group)
        for typ, groups in sorted(by_type.items(), key=lambda pair: pair[0].name):
            if type is None:
                # Only print the type header when not already filtered to one type.
                print(f"{' ' * (depth + 4)}{typ.name}")
            for group in groups:
                if not group.deleted:
                    group.display(full=full, include_articles=full, depth=depth + 8)
        for child in self.sorted_children():
            child.display_citation_groups(
                full=full, only_nonempty=only_nonempty, depth=depth + 4, type=type
            )

    def has_collections(self) -> bool:
        for _ in self.collections:
            return True
        return any(child.has_collections() for child in self.children)

    def display_collections(
        self, full: bool = False, only_nonempty: bool = True, depth: int = 0
    ) -> None:
        """Print the region's collections, grouped by city when any is set."""
        if only_nonempty and not self.has_collections():
            return
        print(" " * depth + self.name)
        by_city: dict[str, list[models.Collection]] = collections.defaultdict(list)
        cities = set()
        for collection in sorted(self.collections, key=lambda c: c.label):
            by_city[collection.city or ""].append(collection)
            cities.add(collection.city)
        if cities == {None}:
            # No collection has a city; print them flat.
            for collection in by_city[""]:
                collection.display(full=full, depth=depth + 4)
        else:
            for city, colls in sorted(by_city.items()):
                print(" " * (depth + 4) + city)
                for collection in colls:
                    collection.display(full=full, depth=depth + 8)
        for child in self.sorted_children():
            child.display_collections(
                full=full, only_nonempty=only_nonempty, depth=depth + 4
            )

    def has_locations(self) -> bool:
        for _ in self.locations:
            return True
        return any(child.has_locations() for child in self.children)

    def has_associated_people(self) -> bool:
        if self.get_raw_derived_field("associated_people"):
            return True
        return any(child.has_associated_people() for child in self.children)

    def has_periods(self) -> bool:
        for _ in self.periods:
            return True
        return any(child.has_periods() for child in self.children)

    def display_periods(self, full: bool = False, depth: int = 0) -> None:
        """Print the periods attached to this region and its descendants."""
        if not self.has_periods():
            return
        print(" " * depth + self.name)
        for period in sorted(self.periods, key=lambda p: p.name):
            if full:
                period.display(depth=depth + 4)
            else:
                print(" " * (depth + 4) + period.name)
        for child in self.sorted_children():
            child.display_periods(full=full, depth=depth + 4)

    def add_cities(self) -> None:
        """Interactively fill in the city for collections that lack one."""
        for collection in self.collections.filter(models.Collection.city == None):
            collection.display()
            collection.fill_field("city")
        for child in self.children:
            child.add_cities()

    def has_parent(self, parent: Region) -> bool:
        """True if *parent* is this region itself or any of its ancestors."""
        if self == parent:
            return True
        elif self.parent is None:
            return False
        else:
            return self.parent.has_parent(parent)
|
13,738 | 715da2472c1b8ce32e76316f9809b29e797717ae | from __future__ import print_function
__author__ = 'max'
"""
Implementation of Bi-directional LSTM-CNNs-TreeCRF model for Graph-based dependency parsing.
"""
import sys
import os
sys.path.append(".")
sys.path.append("..")
import time
import argparse
import uuid
import json
import numpy as np
import torch
from neuronlp2.io import get_logger, conllx_stacked_data, conllx_data
from neuronlp2.io import CoNLLXWriter
from neuronlp2.tasks import parser
from neuronlp2.models import StackPtrNet, BiRecurrentConvBiAffine
from neuronlp2.io_multi import guess_language_id, create_alphabets, lang_specific_word
from neuronlp2.io_multi.multi_vocab import iter_file
from neuronlp2.utils import load_embedding_dict
from neuronlp2.io.utils import DIGIT_RE
from neuronlp2.nn import Embedding
uid = uuid.uuid4().hex[:6]
def main():
    """Parse CLI arguments and dispatch to the selected parser's analyzer."""
    args_parser = argparse.ArgumentParser(description='Tuning with stack pointer parser')
    args_parser.add_argument('--parser', choices=['stackptr', 'biaffine'], help='Parser', required=True)
    args_parser.add_argument('--test')  # "data/POS-penn/wsj/split1/wsj1.test.original"
    args_parser.add_argument('--model_path', help='path for saving model file.', required=True)
    args_parser.add_argument('--model_name', help='name for saving model file.', required=True)
    args_parser.add_argument('--out_filename', help='filename to save analysis results.', required=True)
    args_parser.add_argument('--punctuation', nargs='+', type=str, help='List of punctuations')
    args_parser.add_argument('--beam', type=int, default=1, help='Beam size for decoding')
    args_parser.add_argument('--ordered', action='store_true', help='Using order constraints in decoding')
    args_parser.add_argument('--decode', choices=['mst', 'greedy'], default='mst', help='decoding algorithm')
    args_parser.add_argument('--display', action='store_true', help='Display wrong examples')
    args_parser.add_argument('--gpu', action='store_true', help='Using GPU')
    #
    args_parser.add_argument('--extra_embed', type=str,
                             help="Path for extra embedding file for extra language testing.")
    args_parser.add_argument('--extra_embed_src', type=str,
                             help="Path for extra embedding file for src language (maybe need adding new ones).")
    args = args_parser.parse_args()

    logger = get_logger("Analyzer")

    test_path = args.test
    model_path = args.model_path
    model_name = args.model_name

    punct_set = None
    punctuation = args.punctuation
    if punctuation is not None:
        punct_set = set(punctuation)
        logger.info("punctuations(%d): %s" % (len(punct_set), ' '.join(punct_set)))

    use_gpu = args.gpu
    # Renamed from `parser`: the old local shadowed the imported
    # `neuronlp2.tasks.parser` module inside this function.
    parser_name = args.parser
    if parser_name == 'stackptr':
        stackptr(model_path, model_name, test_path, punct_set, use_gpu, logger, args)
    elif parser_name == 'biaffine':
        biaffine(model_path, model_name, test_path, punct_set, use_gpu, logger, args)
    else:
        raise ValueError('Unknown parser: %s' % parser_name)
# ==========
# about augmenting the vocabs and embeddings with the current test file and extra embeddings
def augment_with_extra_embedding(the_alphabet, extra_embed_file, extra_embed_src_file, test_file, logger):
    """Extend *the_alphabet* in place with test-file words found in the extra
    embedding file(s), returning the list of their embedding vectors
    (one per newly added word, in addition order)."""
    extra_embeds_arr = []
    if extra_embed_file is not None:
        # reopen the vocab
        the_alphabet.open()
        # read the embed
        extra_word_dict, _ = load_embedding_dict('word2vec', extra_embed_file)
        # NOTE(review): src_extra_word_dict is only bound when
        # extra_embed_src_file is given, yet it is referenced below for
        # "!en_"-prefixed words -- that path raises NameError if such a
        # word appears without --extra_embed_src.
        if extra_embed_src_file is not None:
            src_extra_word_dict, _ = load_embedding_dict('word2vec', extra_embed_src_file)
        lang_id = guess_language_id(test_file)
        for one_sent in iter_file(test_file):
            for w in one_sent["word"]:
                # Words already prefixed "!en_" were pre-normalized upstream.
                already_spec = w.startswith("!en_")
                if already_spec:
                    normed_word = w
                else:
                    # NOTE(review): a bytes pattern replacement (b"0") on a
                    # presumably-str word only works on Python 2 -- confirm
                    # the intended interpreter for this script.
                    normed_word = DIGIT_RE.sub(b"0", w)
                    normed_word = lang_specific_word(normed_word, lang_id=lang_id)
                #
                if normed_word in the_alphabet.instance2index:
                    continue
                # TODO: assume english is the source for run-translate
                if already_spec:
                    w = w[4:]
                    check_dict = src_extra_word_dict
                else:
                    check_dict = extra_word_dict
                #
                if w in check_dict:
                    new_embed_arr = check_dict[w]
                elif w.lower() in check_dict:
                    new_embed_arr = check_dict[w.lower()]
                else:
                    new_embed_arr = None
                if new_embed_arr is not None:
                    # Record the vector and grow the vocabulary together so
                    # indices stay aligned with extra_embeds_arr.
                    extra_embeds_arr.append(new_embed_arr)
                    the_alphabet.add(normed_word)
        # close the vocab
        the_alphabet.close()
        logger.info(
            "Augmenting the vocab with new words of %s, now vocab is %s." % (len(extra_embeds_arr), the_alphabet.size()))
    return extra_embeds_arr
def augment_network_embed(new_size, network, extra_embeds_arr):
    """Grow the network's word-embedding table with the extra vectors.

    A no-op when there is nothing to add or the network has no word
    embedding layer; otherwise replaces ``network.word_embedd`` with a new
    Embedding whose rows are the old weights followed by the extras.
    """
    if len(extra_embeds_arr) == 0:
        return
    if network.word_embedd is None:
        return
    current = network.word_embedd.weight.data.cpu().numpy()
    combined = np.concatenate([current, extra_embeds_arr], 0)
    # The augmented table must exactly match the augmented vocab size.
    assert combined.shape[0] == new_size
    network.word_embedd = Embedding(
        new_size, combined.shape[1], init_embedding=torch.from_numpy(combined))
# ==========
def biaffine(model_path, model_name, test_path, punct_set, use_gpu, logger, args):
    """Evaluate a trained BiRecurrentConvBiAffine parser on *test_path*.

    Loads the saved alphabets and model weights, optionally augments the
    word vocabulary/embeddings with extra vectors, decodes the test set
    one sentence at a time, writes predicted and gold CoNLL-X files
    (``<out_filename>_pred`` / ``_gold``), and prints UAS/LAS statistics
    with and without punctuation plus root accuracy.
    """
    alphabet_path = os.path.join(model_path, 'alphabets/')
    model_name = os.path.join(model_path, model_name)
    # Recreate the alphabets exactly as saved at training time (no new data).
    word_alphabet, char_alphabet, pos_alphabet, type_alphabet, max_sent_length = conllx_data.create_alphabets(
        alphabet_path,
        None, data_paths=[None, None], max_vocabulary_size=50000, embedd_dict=None)
    # word_alphabet, char_alphabet, pos_alphabet, type_alphabet = create_alphabets(alphabet_path,
    #     None, data_paths=[None, None], max_vocabulary_size=50000, embedd_dict=None)

    num_words = word_alphabet.size()
    num_chars = char_alphabet.size()
    num_pos = pos_alphabet.size()
    num_types = type_alphabet.size()

    logger.info("Word Alphabet Size: %d" % num_words)
    logger.info("Character Alphabet Size: %d" % num_chars)
    logger.info("POS Alphabet Size: %d" % num_pos)
    logger.info("Type Alphabet Size: %d" % num_types)

    decoding = args.decode
    out_filename = args.out_filename
    logger.info('use gpu: %s, decoding: %s' % (use_gpu, decoding))

    # Grow the vocab with words from the extra embedding files (if given)
    # BEFORE reading the data, so indices cover the new words.
    extra_embeds_arr = augment_with_extra_embedding(word_alphabet, args.extra_embed, args.extra_embed_src, test_path,
                                                    logger)

    # ===== the reading
    def _read_one(path, is_train):
        # Language is inferred from the file name/path.
        lang_id = guess_language_id(path)
        logger.info("Reading: guess that the language of file %s is %s." % (path, lang_id))
        one_data = conllx_data.read_data_to_variable(path, word_alphabet, char_alphabet, pos_alphabet, type_alphabet,
                                                     use_gpu=use_gpu, volatile=(not is_train), symbolic_root=True,
                                                     lang_id=lang_id)
        return one_data

    data_test = _read_one(test_path, False)
    # data_test = conllx_data.read_data_to_variable(test_path, word_alphabet, char_alphabet, pos_alphabet, type_alphabet,
    #                                               use_gpu=use_gpu, volatile=True, symbolic_root=True)

    pred_writer = CoNLLXWriter(word_alphabet, char_alphabet, pos_alphabet, type_alphabet)
    gold_writer = CoNLLXWriter(word_alphabet, char_alphabet, pos_alphabet, type_alphabet)

    logger.info('model: %s' % model_name)

    def load_model_arguments_from_json():
        arguments = json.load(open(arg_path, 'r'))
        return arguments['args'], arguments['kwargs']

    arg_path = model_name + '.arg.json'
    # NOTE(review): this rebinds the function parameter `args` (the CLI
    # namespace) to the saved constructor args; intentional but easy to miss.
    args, kwargs = load_model_arguments_from_json()
    network = BiRecurrentConvBiAffine(use_gpu=use_gpu, *args, **kwargs)
    network.load_state_dict(torch.load(model_name))

    # Resize the embedding table to match the augmented vocabulary.
    augment_network_embed(word_alphabet.size(), network, extra_embeds_arr)

    if use_gpu:
        network.cuda()
    else:
        network.cpu()

    network.eval()

    # Accumulators: (u|l)correct = unlabeled/labeled attachment counts,
    # *_nopunc variants exclude punctuation, *complete_match counts whole
    # sentences, root stats track root-attachment accuracy.
    test_ucorrect = 0.0
    test_lcorrect = 0.0
    test_ucomlpete_match = 0.0
    test_lcomplete_match = 0.0
    test_total = 0

    test_ucorrect_nopunc = 0.0
    test_lcorrect_nopunc = 0.0
    test_ucomlpete_match_nopunc = 0.0
    test_lcomplete_match_nopunc = 0.0
    test_total_nopunc = 0
    test_total_inst = 0

    test_root_correct = 0.0
    test_total_root = 0

    if decoding == 'greedy':
        decode = network.decode
    elif decoding == 'mst':
        decode = network.decode_mst
    else:
        raise ValueError('Unknown decoding algorithm: %s' % decoding)

    # pred_writer.start('tmp/analyze_pred_%s' % str(uid))
    # gold_writer.start('tmp/analyze_gold_%s' % str(uid))
    # pred_writer.start(model_path + out_filename + '_pred')
    # gold_writer.start(model_path + out_filename + '_gold')
    pred_writer.start(out_filename + '_pred')
    gold_writer.start(out_filename + '_gold')

    sent = 0
    start_time = time.time()
    # Batch size 1: decode and score each sentence individually.
    for batch in conllx_data.iterate_batch_variable(data_test, 1):
        sys.stdout.write('%d, ' % sent)
        sys.stdout.flush()
        sent += 1

        word, char, pos, heads, types, masks, lengths = batch
        heads_pred, types_pred = decode(word, char, pos, mask=masks, length=lengths,
                                        leading_symbolic=conllx_data.NUM_SYMBOLIC_TAGS)
        word = word.data.cpu().numpy()
        pos = pos.data.cpu().numpy()
        lengths = lengths.cpu().numpy()
        heads = heads.data.cpu().numpy()
        types = types.data.cpu().numpy()

        pred_writer.write(word, pos, heads_pred, types_pred, lengths, symbolic_root=True)
        gold_writer.write(word, pos, heads, types, lengths, symbolic_root=True)

        stats, stats_nopunc, stats_root, num_inst = parser.eval(word, pos, heads_pred, types_pred, heads, types,
                                                                word_alphabet, pos_alphabet, lengths,
                                                                punct_set=punct_set, symbolic_root=True)
        ucorr, lcorr, total, ucm, lcm = stats
        ucorr_nopunc, lcorr_nopunc, total_nopunc, ucm_nopunc, lcm_nopunc = stats_nopunc
        corr_root, total_root = stats_root

        test_ucorrect += ucorr
        test_lcorrect += lcorr
        test_total += total
        test_ucomlpete_match += ucm
        test_lcomplete_match += lcm

        test_ucorrect_nopunc += ucorr_nopunc
        test_lcorrect_nopunc += lcorr_nopunc
        test_total_nopunc += total_nopunc
        test_ucomlpete_match_nopunc += ucm_nopunc
        test_lcomplete_match_nopunc += lcm_nopunc

        test_root_correct += corr_root
        test_total_root += total_root

        test_total_inst += num_inst

    pred_writer.close()
    gold_writer.close()

    print('\ntime: %.2fs' % (time.time() - start_time))

    print('test W. Punct: ucorr: %d, lcorr: %d, total: %d, uas: %.2f%%, las: %.2f%%, ucm: %.2f%%, lcm: %.2f%%' % (
        test_ucorrect, test_lcorrect, test_total, test_ucorrect * 100 / test_total, test_lcorrect * 100 / test_total,
        test_ucomlpete_match * 100 / test_total_inst, test_lcomplete_match * 100 / test_total_inst))
    print('test Wo Punct: ucorr: %d, lcorr: %d, total: %d, uas: %.2f%%, las: %.2f%%, ucm: %.2f%%, lcm: %.2f%%' % (
        test_ucorrect_nopunc, test_lcorrect_nopunc, test_total_nopunc,
        test_ucorrect_nopunc * 100 / test_total_nopunc, test_lcorrect_nopunc * 100 / test_total_nopunc,
        test_ucomlpete_match_nopunc * 100 / test_total_inst, test_lcomplete_match_nopunc * 100 / test_total_inst))
    print('test Root: corr: %d, total: %d, acc: %.2f%%' % (
        test_root_correct, test_total_root, test_root_correct * 100 / test_total_root))
def stackptr(model_path, model_name, test_path, punct_set, use_gpu, logger, args):
alphabet_path = os.path.join(model_path, 'alphabets/')
model_name = os.path.join(model_path, model_name)
word_alphabet, char_alphabet, pos_alphabet, type_alphabet, \
max_sent_length = conllx_stacked_data.create_alphabets(alphabet_path, None, data_paths=[None, None],
max_vocabulary_size=50000, embedd_dict=None)
num_words = word_alphabet.size()
num_chars = char_alphabet.size()
num_pos = pos_alphabet.size()
num_types = type_alphabet.size()
logger.info("Word Alphabet Size: %d" % num_words)
logger.info("Character Alphabet Size: %d" % num_chars)
logger.info("POS Alphabet Size: %d" % num_pos)
logger.info("Type Alphabet Size: %d" % num_types)
beam = args.beam
ordered = args.ordered
display_inst = args.display
out_filename = args.out_filename
extra_embed = args.extra_embed
extra_embed_src = args.extra_embed_src
def load_model_arguments_from_json():
arguments = json.load(open(arg_path, 'r'))
return arguments['args'], arguments['kwargs']
arg_path = model_name + '.arg.json'
args, kwargs = load_model_arguments_from_json()
prior_order = kwargs['prior_order']
logger.info('use gpu: %s, beam: %d, order: %s (%s)' % (use_gpu, beam, prior_order, ordered))
#
extra_embeds_arr = augment_with_extra_embedding(word_alphabet, extra_embed, extra_embed_src, test_path, logger)
# ===== the reading
def _read_one(path, is_train):
lang_id = guess_language_id(path)
logger.info("Reading: guess that the language of file %s is %s." % (path, lang_id))
one_data = conllx_stacked_data.read_stacked_data_to_variable(path, word_alphabet, char_alphabet, pos_alphabet,
type_alphabet, use_gpu=use_gpu,
volatile=(not is_train), prior_order=prior_order,
lang_id=lang_id)
return one_data
data_test = _read_one(test_path, False)
pred_writer = CoNLLXWriter(word_alphabet, char_alphabet, pos_alphabet, type_alphabet)
gold_writer = CoNLLXWriter(word_alphabet, char_alphabet, pos_alphabet, type_alphabet)
logger.info('model: %s' % model_name)
network = StackPtrNet(use_gpu=use_gpu, *args, **kwargs)
network.load_state_dict(torch.load(model_name))
#
augment_network_embed(word_alphabet.size(), network, extra_embeds_arr)
if use_gpu:
network.cuda()
else:
network.cpu()
network.eval()
test_ucorrect = 0.0
test_lcorrect = 0.0
test_ucomlpete_match = 0.0
test_lcomplete_match = 0.0
test_total = 0
test_ucorrect_nopunc = 0.0
test_lcorrect_nopunc = 0.0
test_ucomlpete_match_nopunc = 0.0
test_lcomplete_match_nopunc = 0.0
test_total_nopunc = 0
test_total_inst = 0
test_root_correct = 0.0
test_total_root = 0
test_ucorrect_stack_leaf = 0.0
test_ucorrect_stack_non_leaf = 0.0
test_lcorrect_stack_leaf = 0.0
test_lcorrect_stack_non_leaf = 0.0
test_leaf = 0
test_non_leaf = 0
# pred_writer.start('tmp/analyze_pred_%s' % str(uid))
# gold_writer.start('tmp/analyze_gold_%s' % str(uid))
# pred_writer.start(model_path + out_filename + '_pred')
# gold_writer.start(model_path + out_filename + '_gold')
pred_writer.start(out_filename + '_pred')
gold_writer.start(out_filename + '_gold')
sent = 0
start_time = time.time()
for batch in conllx_stacked_data.iterate_batch_stacked_variable(data_test, 1):
sys.stdout.write('%d, ' % sent)
sys.stdout.flush()
sent += 1
input_encoder, input_decoder = batch
word, char, pos, heads, types, masks, lengths = input_encoder
stacked_heads, children, siblings, stacked_types, skip_connect, mask_d, lengths_d = input_decoder
heads_pred, types_pred, children_pred, stacked_types_pred = network.decode(word, char, pos, mask=masks,
length=lengths, beam=beam,
ordered=ordered,
leading_symbolic=conllx_stacked_data.NUM_SYMBOLIC_TAGS)
stacked_heads = stacked_heads.data
children = children.data
stacked_types = stacked_types.data
children_pred = torch.from_numpy(children_pred).long()
stacked_types_pred = torch.from_numpy(stacked_types_pred).long()
if use_gpu:
children_pred = children_pred.cuda()
stacked_types_pred = stacked_types_pred.cuda()
mask_d = mask_d.data
mask_leaf = torch.eq(children, stacked_heads).float()
mask_non_leaf = (1.0 - mask_leaf)
mask_leaf = mask_leaf * mask_d
mask_non_leaf = mask_non_leaf * mask_d
num_leaf = mask_leaf.sum()
num_non_leaf = mask_non_leaf.sum()
ucorr_stack = torch.eq(children_pred, children).float()
lcorr_stack = ucorr_stack * torch.eq(stacked_types_pred, stacked_types).float()
ucorr_stack_leaf = (ucorr_stack * mask_leaf).sum()
ucorr_stack_non_leaf = (ucorr_stack * mask_non_leaf).sum()
lcorr_stack_leaf = (lcorr_stack * mask_leaf).sum()
lcorr_stack_non_leaf = (lcorr_stack * mask_non_leaf).sum()
test_ucorrect_stack_leaf += ucorr_stack_leaf
test_ucorrect_stack_non_leaf += ucorr_stack_non_leaf
test_lcorrect_stack_leaf += lcorr_stack_leaf
test_lcorrect_stack_non_leaf += lcorr_stack_non_leaf
test_leaf += num_leaf
test_non_leaf += num_non_leaf
# ------------------------------------------------------------------------------------------------
word = word.data.cpu().numpy()
pos = pos.data.cpu().numpy()
lengths = lengths.cpu().numpy()
heads = heads.data.cpu().numpy()
types = types.data.cpu().numpy()
pred_writer.write(word, pos, heads_pred, types_pred, lengths, symbolic_root=True)
gold_writer.write(word, pos, heads, types, lengths, symbolic_root=True)
stats, stats_nopunc, stats_root, num_inst = parser.eval(word, pos, heads_pred, types_pred, heads, types,
word_alphabet, pos_alphabet, lengths,
punct_set=punct_set, symbolic_root=True)
ucorr, lcorr, total, ucm, lcm = stats
ucorr_nopunc, lcorr_nopunc, total_nopunc, ucm_nopunc, lcm_nopunc = stats_nopunc
corr_root, total_root = stats_root
test_ucorrect += ucorr
test_lcorrect += lcorr
test_total += total
test_ucomlpete_match += ucm
test_lcomplete_match += lcm
test_ucorrect_nopunc += ucorr_nopunc
test_lcorrect_nopunc += lcorr_nopunc
test_total_nopunc += total_nopunc
test_ucomlpete_match_nopunc += ucm_nopunc
test_lcomplete_match_nopunc += lcm_nopunc
test_root_correct += corr_root
test_total_root += total_root
test_total_inst += num_inst
pred_writer.close()
gold_writer.close()
print('\ntime: %.2fs' % (time.time() - start_time))
print('test W. Punct: ucorr: %d, lcorr: %d, total: %d, uas: %.2f%%, las: %.2f%%, ucm: %.2f%%, lcm: %.2f%%' % (
test_ucorrect, test_lcorrect, test_total, test_ucorrect * 100 / test_total, test_lcorrect * 100 / test_total,
test_ucomlpete_match * 100 / test_total_inst, test_lcomplete_match * 100 / test_total_inst))
print('test Wo Punct: ucorr: %d, lcorr: %d, total: %d, uas: %.2f%%, las: %.2f%%, ucm: %.2f%%, lcm: %.2f%%' % (
test_ucorrect_nopunc, test_lcorrect_nopunc, test_total_nopunc,
test_ucorrect_nopunc * 100 / test_total_nopunc, test_lcorrect_nopunc * 100 / test_total_nopunc,
test_ucomlpete_match_nopunc * 100 / test_total_inst, test_lcomplete_match_nopunc * 100 / test_total_inst))
print('test Root: corr: %d, total: %d, acc: %.2f%%' % (
test_root_correct, test_total_root, test_root_correct * 100 / test_total_root))
print(
'============================================================================================================================')
print('Stack leaf: ucorr: %d, lcorr: %d, total: %d, uas: %.2f%%, las: %.2f%%' % (
test_ucorrect_stack_leaf, test_lcorrect_stack_leaf, test_leaf,
test_ucorrect_stack_leaf * 100 / test_leaf, test_lcorrect_stack_leaf * 100 / test_leaf))
print('Stack non_leaf: ucorr: %d, lcorr: %d, total: %d, uas: %.2f%%, las: %.2f%%' % (
test_ucorrect_stack_non_leaf, test_lcorrect_stack_non_leaf, test_non_leaf,
test_ucorrect_stack_non_leaf * 100 / test_non_leaf, test_lcorrect_stack_non_leaf * 100 / test_non_leaf))
print(
'============================================================================================================================')
def analyze():
    """Classify test-set parse mistakes into type / model / search errors.

    Compares gold CoNLL-X data (test_path) against predictions written earlier
    (out_filename + '_pred'), one sentence per batch.  For every sentence that
    is not fully correct, both trees are re-scored with the network: if the
    predicted tree scores better than gold it is a model error, otherwise a
    search error; sentences with all arcs right but some labels wrong count as
    type errors.

    NOTE(review): relies on names from the enclosing scope (out_filename,
    test_path, the alphabets, network, use_gpu, prior_order, display_inst)
    and on the legacy PyTorch `.data[...]` / `volatile=True` API; `raw_input`
    makes the interactive branch Python-2 only.
    """
    np.set_printoptions(linewidth=100000)
    # pred_path = 'tmp/analyze_pred_%s' % str(uid)
    pred_path = out_filename + '_pred'
    # Load gold and predicted trees with identical settings so batches align.
    data_gold = conllx_stacked_data.read_stacked_data_to_variable(test_path, word_alphabet, char_alphabet,
                                                                  pos_alphabet, type_alphabet,
                                                                  use_gpu=use_gpu, volatile=True,
                                                                  prior_order=prior_order)
    data_pred = conllx_stacked_data.read_stacked_data_to_variable(pred_path, word_alphabet, char_alphabet,
                                                                  pos_alphabet, type_alphabet,
                                                                  use_gpu=use_gpu, volatile=True,
                                                                  prior_order=prior_order)
    # Batch size 1: each iteration inspects a single sentence.
    gold_iter = conllx_stacked_data.iterate_batch_stacked_variable(data_gold, 1)
    test_iter = conllx_stacked_data.iterate_batch_stacked_variable(data_pred, 1)
    model_err = 0
    search_err = 0
    type_err = 0
    for gold, pred in zip(gold_iter, test_iter):
        gold_encoder, gold_decoder = gold
        word, char, pos, gold_heads, gold_types, masks, lengths = gold_encoder
        gold_stacked_heads, gold_children, gold_siblings, gold_stacked_types, gold_skip_connect, gold_mask_d, gold_lengths_d = gold_decoder
        pred_encoder, pred_decoder = pred
        _, _, _, pred_heads, pred_types, _, _ = pred_encoder
        pred_stacked_heads, pred_children, pred_siblings, pred_stacked_types, pred_skip_connect, pred_mask_d, pred_lengths_d = pred_decoder
        assert gold_heads.size() == pred_heads.size(), 'sentence dis-match.'
        # Unlabeled / labeled correctness of the predicted transitions,
        # masked to the real (non-padding) positions.
        ucorr_stack = torch.eq(pred_children, gold_children).float()
        lcorr_stack = ucorr_stack * torch.eq(pred_stacked_types, gold_stacked_types).float()
        ucorr_stack = (ucorr_stack * gold_mask_d).data.sum()
        lcorr_stack = (lcorr_stack * gold_mask_d).data.sum()
        num_stack = gold_mask_d.data.sum()
        if lcorr_stack < num_stack:
            # At least one mistake in this sentence: re-score both trees.
            loss_pred, loss_pred_arc, loss_pred_type = calc_loss(network, word, char, pos, pred_heads,
                                                                 pred_stacked_heads, pred_children, pred_siblings,
                                                                 pred_stacked_types,
                                                                 pred_skip_connect, masks, lengths, pred_mask_d,
                                                                 pred_lengths_d)
            loss_gold, loss_gold_arc, loss_gold_type = calc_loss(network, word, char, pos, gold_heads,
                                                                 gold_stacked_heads, gold_children, gold_siblings,
                                                                 gold_stacked_types,
                                                                 gold_skip_connect, masks, lengths, gold_mask_d,
                                                                 gold_lengths_d)
            if display_inst:
                # Interactive inspection of the mismatching sentence.
                print('%d, %d, %d' % (ucorr_stack, lcorr_stack, num_stack))
                print('pred(arc, type): %.4f (%.4f, %.4f), gold(arc, type): %.4f (%.4f, %.4f)' % (
                    loss_pred, loss_pred_arc, loss_pred_type, loss_gold, loss_gold_arc, loss_gold_type))
                word = word[0].data.cpu().numpy()
                pos = pos[0].data.cpu().numpy()
                head_gold = gold_heads[0].data.cpu().numpy()
                type_gold = gold_types[0].data.cpu().numpy()
                head_pred = pred_heads[0].data.cpu().numpy()
                type_pred = pred_types[0].data.cpu().numpy()
                display(word, pos, head_gold, type_gold, head_pred, type_pred, lengths[0], word_alphabet,
                        pos_alphabet, type_alphabet)
                length_dec = gold_lengths_d[0]
                # Rows: stacked types / children / stacked heads.
                gold_display = np.empty([3, length_dec])
                gold_display[0] = gold_stacked_types.data[0].cpu().numpy()[:length_dec]
                gold_display[1] = gold_children.data[0].cpu().numpy()[:length_dec]
                gold_display[2] = gold_stacked_heads.data[0].cpu().numpy()[:length_dec]
                print(gold_display)
                print('--------------------------------------------------------')
                # NOTE(review): this [:length_dec] slices ROWS of a
                # (3, pred_lengths_d[0]) array, not columns — likely meant
                # np.empty([3, length_dec]) as for gold_display; confirm.
                pred_display = np.empty([3, pred_lengths_d[0]])[:length_dec]
                pred_display[0] = pred_stacked_types.data[0].cpu().numpy()[:length_dec]
                pred_display[1] = pred_children.data[0].cpu().numpy()[:length_dec]
                pred_display[2] = pred_stacked_heads.data[0].cpu().numpy()[:length_dec]
                print(pred_display)
                print('========================================================')
                raw_input()  # pause until the user presses enter (Python 2)
            if ucorr_stack == num_stack:
                # All arcs right, only labels wrong.
                type_err += 1
            elif loss_pred < loss_gold:
                # The wrong tree scores better than gold: model error.
                model_err += 1
            else:
                # Gold scores better but decoding did not find it: search error.
                search_err += 1
    print('type errors: %d' % type_err)
    print('model errors: %d' % model_err)
    print('search errors: %d' % search_err)
analyze()
def calc_loss(network, word, char, pos, heads, stacked_heads, children, sibling, stacked_types, skip_connect, mask_e,
              length_e, mask_d, length_d):
    """Re-score one batch with the parser network and return summed errors.

    network.loss() yields per-token mean losses plus the leaf / non-leaf
    token counts; each mean is scaled back up by its count so the returned
    values are comparable sums.

    Returns:
        (err, err_arc, err_type): total, arc-only, and label-only error sums.
    """
    (loss_arc_leaf, loss_arc_non_leaf,
     loss_type_leaf, loss_type_non_leaf,
     loss_cov, num_leaf, num_non_leaf) = network.loss(
        word, char, pos, heads, stacked_heads, children, sibling,
        stacked_types, 1.0, skip_connect=skip_connect,
        mask_e=mask_e, length_e=length_e, mask_d=mask_d, length_d=length_d)

    n_leaf = num_leaf.data[0]
    n_non_leaf = num_non_leaf.data[0]

    err_arc = loss_arc_leaf.data[0] * n_leaf + loss_arc_non_leaf.data[0] * n_non_leaf
    err_type = loss_type_leaf.data[0] * n_leaf + loss_type_non_leaf.data[0] * n_non_leaf
    # Coverage error is evaluated as in the original but never returned.
    err_cov = loss_cov.data[0] * (n_leaf + n_non_leaf)

    return err_arc + err_type, err_arc, err_type
def display(word, pos, head_gold, type_gold, head_pred, type_pred, length, word_alphabet, pos_alphabet, type_alphabet):
    """Print a token-per-line comparison of gold vs. predicted heads/labels.

    Each row shows: index, word, POS tag, gold head, gold label, predicted
    head, predicted label. A separator line is printed at the end.
    """
    for idx in range(length):
        token = word_alphabet.get_instance(word[idx]).encode('utf-8')
        tag = pos_alphabet.get_instance(pos[idx]).encode('utf-8')
        gold_label = type_alphabet.get_instance(type_gold[idx]).encode('utf-8')
        pred_label = type_alphabet.get_instance(type_pred[idx]).encode('utf-8')
        print('%d\t%s\t%s\t%d\t%s\t%d\t%s\n' % (
            idx, token, tag, head_gold[idx], gold_label, head_pred[idx], pred_label))
    print('-----------------------------------------------------------------------------')
if __name__ == '__main__':
main()
|
13,739 | 87d6e498b3ad4e68161c4befcb4c5281b2b8ed7c | import numpy as np
from sklearn.datasets import load_iris
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.model_selection import train_test_split, KFold, cross_val_score, GridSearchCV, RandomizedSearchCV
from sklearn.metrics import accuracy_score
from sklearn.svm import LinearSVC, SVC
import pandas as pd
# from sklearn.neighbors import KNeighborsClassifier
# from sklearn.linear_model import LogisticRegression
# from sklearn.tree import DecisionTreeClassifier
# from sklearn.ensemble import RandomForestClassifier
import warnings
warnings.filterwarnings('ignore')
import datetime

start1 = datetime.datetime.now()

# 1. Data
# x, y = load_iris(return_X_y=True)
# datasets = load_iris()
# x = datasets.data
# y = datasets.target
# BUG FIX: `indax_col` was a typo — pandas only accepts `index_col`, so the
# original call raised a TypeError before anything ran.
datasets = pd.read_csv('../data/csv/iris_sklean.csv', header=0, index_col=0)
x = datasets.iloc[:, :-1]
y = datasets.iloc[:, -1]

x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=66)
kfold = KFold(n_splits=5, shuffle=True)

# Hyper-parameter search space for SVC.
# BUG FIX: 'signodel' is not a valid SVC kernel (valid: 'linear', 'poly',
# 'rbf', 'sigmoid', 'precomputed'); the intended value is 'sigmoid'.
parameters = [
    {'C': [1, 10, 100, 1000], 'kernel': ['linear']},
    {'C': [1, 10, 100], 'kernel': ['rbf'], 'gamma': [0.001, 0.0001]},
    {'C': [1, 10, 100, 1000], 'kernel': ['sigmoid'], 'gamma': [0.001, 0.0001]}
]

# 2. Model
# model = LinearSVC()
# model = GridSearchCV(SVC(), parameters, cv=kfold)
model = RandomizedSearchCV(SVC(), parameters, cv=kfold)
# scores = cross_val_score(model, x, y, cv=kfold)
# print("scores : ", scores)

# 3. Compile / fit
model.fit(x_train, y_train)

# 4. Evaluate
# result = model.evaluate(x_test, y_test)
# result = model.score(x, y)
# print(result)
print('최적의 매개변수', model.best_estimator_)
y_pred = model.predict(x_test)
print('최종정답률', accuracy_score(y_test, y_pred))
aaa = model.score(x_test, y_test)
print(aaa)

end1 = datetime.datetime.now()
time_delta1 = end1 - start1
print('처리시간 : ', time_delta1)

# Recorded runs (output kept verbatim):
# grid
# 최적의 매개변수 SVC(C=1, kernel='linear')
# 최종정답률 0.9666666666666667
# 0.9666666666666667
# 처리시간 :  0:00:00.088735
# random
# 최적의 매개변수 SVC(C=1, kernel='linear')
# 최종정답률 0.9666666666666667
# 0.9666666666666667
# 처리시간 :  0:00:00.051631
13,740 | 437b96c8f833f25366634658e76fbff240008542 | import random
# Simulate a daily stock price random walk, logging each day's price to a
# file until the price leaves the [MIN_PRICE, MAX_PRICE] band.
MAX_INCREASE = 0.175   # largest fractional gain in one day
MAX_DECREASE = 0.05    # largest fractional loss in one day
MIN_PRICE = 0.01
MAX_PRICE = 100
INITIAL_PRICE = 10.0
day = 0
OUTPUT_FILE = "string_formatting.txt"

# `with` guarantees the file is closed even if an error occurs.
with open(OUTPUT_FILE, 'w') as out_file:
    print("Starting price: ${:,.2f}".format(INITIAL_PRICE), file=out_file)
    price = INITIAL_PRICE
    while MIN_PRICE <= price <= MAX_PRICE:
        day = day + 1
        if random.randint(1, 2) == 1:
            # coin flip came up "gain": rise by up to MAX_INCREASE
            price *= (1 + random.uniform(0, MAX_INCREASE))
        else:
            # BUG FIX: the original used MAX_INCREASE and (1 + change) in
            # this branch too, so the price only ever went up and
            # MAX_DECREASE was never used. Losses now shrink the price.
            price *= (1 - random.uniform(0, MAX_DECREASE))
        print("On day {} ${:,.2f}".format(day, price), file=out_file)
|
13,741 | 4ace8ea40ac7d0e065c9281ce44dcaf3a7e85713 | #HolaMundo.py
#Alan Manzanares
#Creation date: 18/09/2019
#To add comments to our program we use the hash sign (#), which makes
#the text be ignored by Python so it does not affect the program;
#written any other way it would be treated as code and raise an error.
#The print function below shows a message on screen; it can take a
#string or any other object, and for Python to recognize it as such it
#must be written between parentheses (strings also between quotes).
print("Hey, Buen Dia estoy usando Python")
13,742 | 6bd582320ad1e76f640c6714ebd719862366fd98 | # Generated by Django 3.0.8 on 2020-07-19 19:18
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the `home` app.

    Restores the `foto_produto` image field removed in migration 0006 and
    caps `descricao` at 255 characters.
    """

    dependencies = [
        ('home', '0006_remove_allpost_foto_produto'),
    ]

    operations = [
        migrations.AddField(
            model_name='allpost',
            name='foto_produto',
            # optional upload; path uses strftime-style placeholders
            # (day/month/year) under the fotos/ directory
            field=models.ImageField(blank=True, upload_to='fotos/%d/%m/%Y/'),
        ),
        migrations.AlterField(
            model_name='allpost',
            name='descricao',
            # NOTE(review): max_length on a TextField is enforced in forms
            # only, not at the database level — confirm this is intended.
            field=models.TextField(max_length=255),
        ),
    ]
|
13,743 | b52441b24cdae12adb32545d46bd0ac4720d6e88 | """
파이션 - 무료이지만 강력하다
` 만들고자 하는 대부분의 프로그램 가능
` 물론, 하드웨어 제어같은 복잡하고 반복 연산이 많은 프로그램은 부적절
` 그러나, 다른언어 프로그램을 파이썬에 포함 가능
[주의] 줄을 맞추지 않으면 실행 안됨
[실행] Run 메뉴를 클릭하거나 단축키로 shift + ctrl + F10
[도움말] ctrl + q
"""
""" 여러줄 주석 """
# single-line comment
# String display:
# ' and " behave the same
# the difference between ' and ''' is that triple quotes preserve
# whitespace and newlines inside the string
print("헬로우")
print('hello')
print("""
안녕
""")
print('''올
라''')
# Run: shift + ctrl + F10

# Variables:
# every Python value is an object. In `a = 7` the name a refers to the
# int object 7 (we do not say it "stores" 7) — a is a reference to an
# integer object. Here 7 is an object, not a classic constant as in
# other languages.
#
# [Naming rules]
# - letters + digits + _ only
# - may not start with a digit
# - case-sensitive
# - no length limit
# - keywords are not allowed

# import keyword
# print(keyword.kwlist)
# print(len(keyword.kwlist))

a = 7
# the name a now refers to the int object 7
b = 7
print(id(a))
print(id(b))
print(id(7))
print(a == 7)
print(b == 7)
print(a == b)

# CPython pre-creates small integers (roughly 0..256) at startup, so
# a, b and the literal 7 all share one object and the ids above match.

# several variables can be assigned at once
a, b = 10, 20
print('a+b=', a+b)
# swap two values
a, b = b, a
print('A=', a, 'B=', b)
del b
# BUG FIX: after `del b` the name b no longer exists, so the original
# `print('B=', b)` crashed the script with an uncaught NameError.
# Catch it so the deletion is demonstrated without killing the run.
try:
    print('B=', b)
except NameError as err:
    print(err)
|
13,744 | 8a179e26c0e2dc435a0291961196beeca22926bc | # /usr/bin/env python
# coding=utf-8
"""utils"""
import logging
import os
import shutil
import json
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.init as init
# Mapping from Roman-numeral category labels to integer class ids
# (56 classes). NOTE(review): 'C' appears alongside I..LV — presumably a
# distinct label rather than Roman-numeral 100; confirm against the dataset.
MAP_DICT = {'C': 0, 'III': 1, 'V': 2, 'XXXVIII': 3, 'XXIX': 4, 'LV': 5, 'XXXIX': 6, 'XIV': 7,
            'IV': 8, 'XIX': 9, 'I': 10, 'XI': 11, 'VIII': 12, 'XVIII': 13, 'XLIX': 14, 'XXII': 15,
            'XXIII': 16, 'XXXVII': 17, 'VII': 18, 'LIV': 19, 'XII': 20, 'X': 21, 'XVII': 22, 'XXI': 23,
            'XLIV': 24, 'XXIV': 25, 'XXXV': 26, 'XXXII': 27, 'IX': 28, 'L': 29, 'XXXI': 30, 'XXXVI': 31,
            'XIII': 32, 'XXVII': 33, 'XXXIII': 34, 'VI': 35, 'XV': 36, 'XLI': 37, 'LI': 38, 'XLII': 39,
            'XXVI': 40, 'XLV': 41, 'XVI': 42, 'LIII': 43, 'XX': 44, 'LII': 45, 'XL': 46, 'XLIII': 47, 'XXX': 48,
            'XLVII': 49, 'XXXIV': 50, 'XLVI': 51, 'XXV': 52, 'XLVIII': 53, 'II': 54, 'XXVIII': 55}
# Inverse mapping: class id -> label string.
ID2TAG = {v: k for k, v in MAP_DICT.items()}
class Params:
    """Central path / hyper-parameter configuration for the project."""

    def __init__(self):
        # project root: the directory containing this file
        self.root_path = Path(os.path.abspath(os.path.dirname(__file__)))
        # dataset directory
        self.data_dir = self.root_path / 'data'
        # experiment / parameter directory
        self.params_path = self.root_path / 'experiments'
        # where trained models are saved
        self.model_dir = self.root_path / 'model'
        # pre-trained model directory
        self.pretrain_model_dir = self.root_path / 'pretrain_model'

        # run on GPU whenever one is available
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.tags = list(MAP_DICT.values())
        self.n_gpu = torch.cuda.device_count()

        # reuse cached, preprocessed data
        self.data_cache = True
        self.train_batch_size = 256
        self.dev_batch_size = 128
        self.test_batch_size = 128

        # minimum number of training epochs
        self.min_epoch_num = 5
        # smallest f1-score gain that counts as an improvement
        self.patience = 0.001
        # how many non-improving evaluations to tolerate before stopping
        self.patience_num = 5
        self.seed = 2020
        # maximum (padded) sentence length
        self.max_seq_length = 256

        # fine-tuning learning rate
        self.fin_tuning_lr = 1e-4
        # downstream-head learning rate
        self.ds_lr = 1e-4 * 100
        # gradient clipping threshold
        self.clip_grad = 2
        # dropout probability
        self.drop_prob = 0.3
        # weight decay coefficient
        self.weight_decay_rate = 0.01

    def get(self):
        """Gives dict-like access to Params instance by `params.show['learning_rate']"""
        return self.__dict__

    def load(self, json_path):
        """Loads parameters from json file"""
        with open(json_path) as f:
            self.__dict__.update(json.load(f))

    def save(self, json_path):
        """Write the JSON-serializable parameters (str/int/float/bool) to a file."""
        serializable = {k: v for k, v in self.__dict__.items()
                        if isinstance(v, (str, int, float, bool))}
        with open(json_path, 'w') as f:
            json.dump(serializable, f, indent=4)
class RunningAverage:
    """Maintain the running mean of a stream of values.

    Example:
        loss_avg = RunningAverage()
        loss_avg.update(2)
        loss_avg.update(4)
        loss_avg()  # -> 3.0
    """

    def __init__(self):
        # number of observations and their running sum
        self.steps = 0
        self.total = 0

    def update(self, val):
        """Fold one observation into the average."""
        self.total = self.total + val
        self.steps = self.steps + 1

    def __call__(self):
        """Return the current mean of everything seen so far."""
        return self.total / float(self.steps)
def set_logger(save=False, log_path=None):
    """Configure the root logger to log INFO+ to the console.

    With save=True the same records are also appended to the file at
    `log_path`. Handlers are attached only once: if the root logger already
    has handlers, the existing configuration is left untouched.

    Args:
        save: (bool) also write records to a log file
        log_path: (string) where to log when save is True
    """
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    if root.handlers:
        return

    if save:
        # persist everything to a permanent file as well
        to_file = logging.FileHandler(log_path)
        to_file.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
        root.addHandler(to_file)

    to_console = logging.StreamHandler()
    to_console.setFormatter(logging.Formatter('%(message)s'))
    root.addHandler(to_console)
def save_checkpoint(state, is_best, checkpoint):
    """Save `state` to <checkpoint>/last.pth.tar; if best, copy to best.pth.tar.

    Args:
        state: (dict) model's state_dict plus any bookkeeping (epoch, optimizer, ...)
        is_best: (bool) True if this is the best model seen till now
        checkpoint: (string) target directory (created when missing)
    """
    if not os.path.exists(checkpoint):
        print("Checkpoint Directory does not exist! Making directory {}".format(checkpoint))
        os.mkdir(checkpoint)
    last_path = os.path.join(checkpoint, 'last.pth.tar')
    torch.save(state, last_path)
    # keep a separate copy of the best checkpoint so 'last' can be overwritten
    if is_best:
        shutil.copyfile(last_path, os.path.join(checkpoint, 'best.pth.tar'))
def load_checkpoint(checkpoint, model, optimizer=None):
    """Loads model parameters (state_dict) from file_path. If optimizer is
    provided, loads state_dict of optimizer assuming it is present in
    checkpoint.

    Args:
        checkpoint: (string) filename which needs to be loaded
        model: (torch.nn.Module) model for which the parameters are loaded
        optimizer: (torch.optim) optional: resume optimizer from checkpoint

    Returns:
        The loaded checkpoint dict.

    Raises:
        FileNotFoundError: if `checkpoint` does not exist.
    """
    if not os.path.exists(checkpoint):
        # BUG FIX: the original did `raise ("File doesn't exist ...")`,
        # which is a TypeError — strings are not exceptions. Raise a real
        # exception type that callers can catch.
        raise FileNotFoundError("File doesn't exist {}".format(checkpoint))
    # map to CPU so checkpoints trained on GPU load on any machine
    checkpoint = torch.load(checkpoint, map_location=torch.device('cpu'))
    model.load_state_dict(checkpoint['state_dict'])
    if optimizer:
        optimizer.load_state_dict(checkpoint['optim_dict'])
    return checkpoint
def initial_parameter(net, initial_method=None):
    r"""Initialize the weights of a PyTorch model (or list of models) in place.

    :param net: a PyTorch model or a list of PyTorch models
    :param str initial_method: one of the following initializations.

        - xavier_uniform
        - xavier_normal (default)
        - kaiming_normal, or msra
        - kaiming_uniform
        - orthogonal
        - sparse
        - normal
        - uniform
    """
    # Resolve the method name; anything unknown (including None) falls back
    # to xavier_normal_, matching the original if/elif chain.
    _METHODS = {
        'xavier_uniform': init.xavier_uniform_,
        'xavier_normal': init.xavier_normal_,
        'kaiming_normal': init.kaiming_normal_,
        'msra': init.kaiming_normal_,
        'kaiming_uniform': init.kaiming_uniform_,
        'orthogonal': init.orthogonal_,
        'sparse': init.sparse_,
        'normal': init.normal_,
        'uniform': init.uniform_,
    }
    init_method = _METHODS.get(initial_method, init.xavier_normal_)

    def weights_init(m):
        # convolutions: weight via the chosen method, bias always normal
        if isinstance(m, (nn.Conv2d, nn.Conv1d, nn.Conv3d)):
            if initial_method is not None:
                init_method(m.weight.data)
            else:
                init.xavier_normal_(m.weight.data)
            init.normal_(m.bias.data)
        elif isinstance(m, nn.LSTM):
            for w in m.parameters():
                if len(w.data.size()) > 1:
                    init_method(w.data)  # weight matrices
                else:
                    init.normal_(w.data)  # bias vectors
        elif m is not None and hasattr(m, 'weight') and \
                hasattr(m.weight, "requires_grad"):
            # generic module exposing a weight tensor (e.g. Linear);
            # note: only the weight is touched here, not any bias
            if len(m.weight.size()) > 1:
                init_method(m.weight.data)
            else:
                init.normal_(m.weight.data)
        else:
            # fallback: walk every trainable parameter of the module
            for w in m.parameters():
                if w.requires_grad:
                    if len(w.data.size()) > 1:
                        init_method(w.data)  # weight
                    else:
                        init.normal_(w.data)  # bias
                # print("init else")

    if isinstance(net, list):
        for n in net:
            n.apply(weights_init)
    else:
        net.apply(weights_init)
class FGM:
    """Adversarial training helper (Fast Gradient Method).

    attack() nudges embedding weights along their gradient direction;
    restore() puts the original weights back afterwards.
    """

    def __init__(self, model):
        self.model = model
        self.backup = {}  # parameter name -> original weight tensor

    def attack(self, epsilon=1., emb_name='embeddings.'):
        """Perturb embedding parameters in the gradient direction.

        :param epsilon: perturbation scale
        :param emb_name: substring identifying the model's embedding parameters
        """
        for name, param in self.model.named_parameters():
            if not param.requires_grad or emb_name not in name or 'LayerNorm' in name:
                continue
            # stash the clean weights so restore() can undo the change
            self.backup[name] = param.data.clone()
            grad_norm = torch.norm(param.grad)
            if grad_norm != 0 and not torch.isnan(grad_norm):
                # normalized perturbation step
                param.data.add_(epsilon * param.grad / grad_norm)

    def restore(self, emb_name='embeddings.'):
        """Undo attack(): restore the saved weights and clear the backup.

        :param emb_name: substring identifying the model's embedding parameters
        """
        for name, param in self.model.named_parameters():
            if not param.requires_grad or emb_name not in name or 'LayerNorm' in name:
                continue
            assert name in self.backup
            param.data = self.backup[name]
        self.backup = {}
class PGD:
    """Adversarial training helper (Projected Gradient Descent).

    Performs repeated gradient-direction steps on embedding parameters,
    projecting the accumulated perturbation back into an epsilon-ball
    around the original weights.
    """

    def __init__(self, model):
        self.model = model
        self.emb_backup = {}   # clean embedding weights (saved on first attack)
        self.grad_backup = {}  # gradients saved via backup_grad()

    def attack(self, epsilon=1., alpha=0.3, emb_name='emb.', is_first_attack=False):
        """One PGD step; emb_name must match your model's embedding parameter names."""
        for name, param in self.model.named_parameters():
            if not (param.requires_grad and emb_name in name):
                continue
            if is_first_attack:
                # remember the clean weights before the first perturbation
                self.emb_backup[name] = param.data.clone()
            grad_norm = torch.norm(param.grad)
            if grad_norm != 0 and not torch.isnan(grad_norm):
                param.data.add_(alpha * param.grad / grad_norm)
                param.data = self.project(name, param.data, epsilon)

    def restore(self, emb_name='emb.'):
        """Restore the clean embedding weights; emb_name as in attack()."""
        for name, param in self.model.named_parameters():
            if param.requires_grad and emb_name in name:
                assert name in self.emb_backup
                param.data = self.emb_backup[name]
        self.emb_backup = {}

    def project(self, param_name, param_data, epsilon):
        """Clip the total perturbation to an epsilon-ball around the backup."""
        offset = param_data - self.emb_backup[param_name]
        offset_norm = torch.norm(offset)
        if offset_norm > epsilon:
            offset = epsilon * offset / offset_norm
        return self.emb_backup[param_name] + offset

    def backup_grad(self):
        """Save every trainable parameter's gradient."""
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                self.grad_backup[name] = param.grad.clone()

    def restore_grad(self):
        """Put the gradients saved by backup_grad() back on the parameters."""
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                param.grad = self.grad_backup[name]
|
13,745 | 8c84b9aafe9fefbd6bbf2260587126de88108cce | #
# models.py -- Models for the "reviewboard.site" app.
#
# Copyright (c) 2010 David Trowbridge
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import annotations
from enum import Enum
from typing import Optional, Union
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import gettext_lazy as _
from djblets.db.fields import JSONField
from typing_extensions import Final, Literal, TypeAlias
from reviewboard.site.managers import LocalSiteManager
class _LocalSiteConstants(Enum):
"""Constants for special LocalSite queries.
Version Added:
6.0:
This replaces an older ALL-specific object, to aid in typing.
"""
ALL = '<ALL>'
class LocalSite(models.Model):
    """A division within a Review Board installation.

    This allows the creation of independent, isolated divisions within a given
    server. Users can be designated as members of a LocalSite, and optionally
    as admins (which allows them to manipulate the repositories, groups and
    users in the site).

    Pretty much every other model in this module can all be assigned to a
    single LocalSite, at which point only members will be able to see or
    manipulate these objects. Access control is performed at every level, and
    consistency is enforced through a liberal sprinkling of assertions and unit
    tests.
    """

    #: A sentinel indicating all LocalSites.
    #:
    #: This is supported by some functions to perform a query against all
    #: LocalSites, as opposed to either a single LocalSite instance or the
    #: global site through ``None``.
    #:
    #: Check the documentation for a function's argument to determine whether
    #: this is supported.
    #:
    #: Note that the value is considered opaque. It can be compared directly
    #: using ``is``.
    #:
    #: Version Added:
    #:     5.0
    ALL: Final[_LocalSiteConstants] = _LocalSiteConstants.ALL

    # unique, URL-safe identifier for the site
    name = models.SlugField(_('name'), max_length=32, blank=False, unique=True)

    # indexed because access-control queries filter on it constantly
    public = models.BooleanField(
        default=False,
        db_index=True,
        help_text=_('Allow people outside the team to access and post '
                    'review requests and reviews.'))

    # membership and administration; admins are not implicitly members here
    users = models.ManyToManyField(User, blank=True,
                                   related_name='local_site')
    admins = models.ManyToManyField(User, blank=True,
                                    related_name='local_site_admins')

    extra_data = JSONField(null=True)

    objects = LocalSiteManager()

    def is_accessible_by(self, user):
        """Returns whether or not the user has access to this LocalSite.

        This checks that the user is logged in, and that they're listed in the
        'users' field.
        """
        # Public sites are readable by anyone; otherwise the user must be
        # authenticated and either staff or an explicit member.
        return (self.public or
                (user.is_authenticated and
                 (user.is_staff or self.users.filter(pk=user.pk).exists())))

    def is_mutable_by(self, user, perm='site.change_localsite'):
        """Returns whether or not a user can modify settings in a LocalSite.

        This checks that the user is either staff with the proper permissions,
        or that they're listed in the 'admins' field.

        By default, this is checking whether the LocalSite itself can be
        modified, but a different permission can be passed to check for
        another object.
        """
        return user.has_perm(perm) or self.admins.filter(pk=user.pk).exists()

    def __str__(self):
        return self.name

    class Meta:
        db_table = 'site_localsite'
        verbose_name = _('Local Site')
        verbose_name_plural = _('Local Sites')
#: A filter value for matching a LocalSite, all LocalSites, or None.
#:
#: (``None`` selects the global site; :py:attr:`LocalSite.ALL` selects every
#: LocalSite.)
#:
#: Version Added:
#:     6.0
AnyOrAllLocalSites: TypeAlias = Union[Optional[LocalSite],
                                      Literal[_LocalSiteConstants.ALL]]
13,746 | 59ecb451e1d4d052d83d4077f2574db24c0eab08 | """
DESCRIPTION OF APP
Lets you add/remove/delete/update friends
"""
# Import Built-In Modules
import requests # Does API calls to other APIS
import traceback # Lets you print the error stack trace
import random # Lets you get a random number
import sys # Lets you use operating system functions
import os # Lets you get information of the os Ex. os.path | Ex. os.environ
# Import Framework Modules
from flask import Flask # Main Flask Class
from flask import jsonify # Lets flask deliver json data
from flask import render_template # Lets flask serve the htmml page
from flask import request # Lets you view the requester info (HEADERS, Data, Status Code, etc.)
# Import Other Modules/Data in the App
from data import friends_list
# Gives all the powers of flask to app
# __name__ is a special method in python that refers to the file that is running
app = Flask(__name__)
# REST API
# VERB: GET - To retreive information | Can send data in the url | Insecure way of sending data | Normal use case: Search
# Response for a GET Request HAS TO RETURN DATA OR HTML
# Example of serving html
@app.route('/', methods=['GET'])
def home():
    """Serve the app's landing page (GET /)."""
    return render_template('jHome.html')
# Example of serving json Data
@app.route('/getFriendsList', methods=['GET'])
def getFriendsList():
    """Return the full friends list wrapped in the standard JSON envelope.

    Response shape: {msg, data, err, status}.
    """
    try:
        # (Removed a leftover `user_name = 'Rakin'` assignment that was
        # never used in this handler.)
        response = {'msg': 'Successfully found friends list', 'data': friends_list, 'err': '', 'status': 'Success'}
    except Exception as e:
        print(str(e))
        print(traceback.format_exc())
        response = {'msg': 'Failed to retrieve friends list!', 'data': '', 'err': str(e), 'status': 'Fail'}
    return jsonify(response)
# Example of serving json Data
@app.route('/getUserName', methods=['GET'])
def getUserName():
    """Return the (hard-coded) current user's name in the standard JSON envelope."""
    try:
        current_name = 'Rakin'
        payload = {'name': current_name}
        response = {'msg': 'Successfully found user name', 'data': payload, 'err': '', 'status': 'Success'}
    except Exception as e:
        print(str(e))
        print(traceback.format_exc())
        response = {'msg': 'Failed to retrieve user name!', 'data': '', 'err': str(e), 'status': 'Fail'}
    return jsonify(response)
# VERB: POST - To securely send data | Typically used to save a new data | Requires requester to send data in body
# Response for a POST Request HAS TO RETURN DATA
# Exampole of a basic POST Request
@app.route('/addFriend', methods=['POST'])
def newFriend():
    """Accept a new friend record posted as JSON.

    NOTE: the payload is currently only printed/validated, not persisted.
    """
    try:
        # Retrieve data from body of request
        data = request.json
        print('THIS IS WHERE YOU CHECK THAT DATA!')
        print(data)
        # Save data to DB
        # Create a nice consistent data format for what your api sends back
        response = {'msg': 'Successfully added new friend', 'err': '', 'status': 'Success'}
    except Exception as e:
        print(str(e))
        # BUG FIX: the original called `traceback.format.exc()`, which raised
        # AttributeError inside the handler; the correct name is format_exc().
        print(traceback.format_exc())
        response = {'msg': 'Failed to save new friend!', 'err': str(e), 'status': 'Fail'}
    return jsonify(response)
#--------------------------------------------------------------------
# Examples of Different Logic usecases
# Example of how to use Jinja2 to use Python logic in HTML
@app.route('/pyHome', methods=['GET'])
def pyHome():
    """Render the Jinja2 demo page, passing Python data into the template.

    Falls back to the 404 page if rendering fails.
    """
    try:
        demo_user = 'Faraz'
        # Render Template can pass python variables in as extra arguments
        return render_template('pyHome.html', name=demo_user, friends=friends_list['friends'])
    except Exception as e:
        print(str(e))
        print(traceback.format_exc())
        return render_template('404.html')
# Example of a basic Search
# Ex. 127.0.0.1/getFriendInfo?name=Faraz
@app.route('/getFriendInfo', methods=['GET'])
def getFriendInfo():
    """Look up one friend's record by ?name=<username>.

    NOTE: when no match is found, `data` is returned as '' with a Success
    status — kept as-is for backward compatibility with existing callers.
    """
    try:
        # Gets data from the URL arguments of the GET Request
        target_name = request.args.get('name')
        friendInfo = ''
        # Idiomatic fix: iterate the records directly instead of indexing
        # with range(len(...)). As in the original (which had no break),
        # a later duplicate username overrides an earlier one.
        for friend in friends_list['friends']:
            if target_name == friend['username']:
                friendInfo = friend
        response = {'msg': 'Successfully found friend info!', 'data': friendInfo, 'err': '', 'status': 'Success'}
    except Exception as e:
        print(str(e))
        print(traceback.format_exc())
        response = {'msg': 'Failed to find friend"s info!', 'data': '', 'err': str(e), 'status': 'Fail'}
    return jsonify(response)
# Examples of API requests to other APIs
@app.route('/randomPicture', methods=['GET'])
def getPicture():
    """Fetch up to 5 Pexels photos matching ?searchItem=... and return one at random."""
    try:
        # Gets data from the URL arguments of the GET Request
        searchItem = request.args.get('searchItem')
        # Do an api request to pexels api to get a set of up to 5 pictures.
        # SECURITY NOTE: the API key is hard-coded here; move it to an
        # environment variable / config before deploying.
        headers = {'Authorization': '563492ad6f91700001000001b1fa1bfe1439432aa3ae3a3519b786b0'}
        r = requests.get(f'https://api.pexels.com/v1/search?query={searchItem}&per_page=5', headers=headers)
        print(r.json(), file=sys.stderr)
        data = r.json()
        # Collect the original-size URL of every photo actually returned
        pics = [photo['src']['original'] for photo in data['photos']]
        # BUG FIX: the original used random.randint(0, 4), which raised an
        # IndexError whenever the API returned fewer than 5 photos.
        # random.choice works for any non-empty result set.
        randomPic = random.choice(pics)
        response = {'msg': 'Successfully found pictures!', 'data': randomPic, 'err': '', 'status': 'Success'}
    except Exception as e:
        print(str(e))
        print(traceback.format_exc())
        response = {'msg': 'Failed to find pictures!', 'data': '', 'err': str(e), 'status': 'Fail'}
    return jsonify(response)
# The code below will only run if the person is running this file directly
# In other words if this file gets run by another file, the below won't run
if __name__ == '__main__':
# Debug=True lets the app restart over and over |
# Custom Port allows for the app to be on a non conflicting port
app.run(debug=True, port=7001)
|
13,747 | d2a09f72e7844d6d72885de84f2c129718cbd134 | from django.apps import AppConfig
class MicrobloggerConfig(AppConfig):
    """Django application configuration for the microBlogger app."""

    # dotted-path label Django uses to register this application
    name = 'microBlogger'
13,748 | a016332f94db0e088c326a5e1756e96d2eaa4815 | #!/usr/bin/python
import os
import numpy as np
import time as t
from PyQt4 import QtCore as qc
import functools
import traceback
try:
import h5py as mrT
except:
os.system('HDF5_DISABLE_VERSION_CHECK=1')
import h5py as mrT
try:
from ascii_table import *
except ImportError:
print 'No module ascii_table'
print 'Please checkout ascii_table.py svn://forum.astro.keele.ac.uk/utils/pylib and add to python path'
debug=True
class h5FileHolder(qc.QThread):
# Booleans recording whether the thread in h5files has had its start and
# join methods called.
h5sStarted = []
# filename of the preprocessor file
preprocName = 'h5Preproc.txt'
# This is a list of isotopes to match to.
# NOTE(review): appears to be element symbols indexed by proton number Z
# (index 0 = free neutron) — confirm against how continue_h5s() uses it.
isos = ['Neutron', 'H', 'He', 'Li', 'Be', 'B', 'C', 'N', 'O', 'F', 'Ne', 'Na', 'Mg', 'Al',
        'Si', 'P', 'S', 'Cl', 'Ar', 'K', 'Ca', 'Sc', 'Ti', 'V', 'Cr', 'Mn', 'Fe', 'Co', 'Ni',
        'Cu', 'Zn', 'Ga', 'Ge', 'As', 'Se', 'Br', 'Kr', 'Rb', 'Sr', 'Y', 'Zr', 'Nb', 'Mo', 'Tc',
        'Ru', 'Rh', 'Pd', 'Ag', 'Cd', 'In', 'Sn', 'Sb', 'Te', 'I', 'Xe', 'Cs', 'Ba', 'La', 'Ce',
        'Pr', 'Nd', 'Pm', 'Sm', 'Eu', 'Gd', 'Tb', 'Dy', 'Ho', 'Er', 'Tm', 'Yb', 'Lu', 'Hf', 'Ta',
        'W', 'Re', 'Os', 'Ir', 'Pt', 'Au', 'Hg', 'Tl', 'Pb', 'Bi', 'Po', 'At']
def findCycle(self, cycNum):
    '''
    Return the entry of self.cycles numerically nearest to cycNum
    (ties are resolved toward the lower cycle).

    Input:
        cycNum: int of the desired cycle
    '''
    target = int(cycNum)
    # index of the first stored cycle strictly greater than the target
    idx = 0
    while idx < len(self.cycles) and int(self.cycles[idx]) <= target:
        idx += 1
    if idx == 0:
        # target precedes every stored cycle
        return self.cycles[0]
    if idx == len(self.cycles):
        # target follows every stored cycle
        return self.cycles[-1]
    lower = int(self.cycles[idx - 1])
    higher = int(self.cycles[idx])
    # prefer the lower neighbour when the distances are equal
    if higher - target >= target - lower:
        return self.cycles[idx - 1]
    return self.cycles[idx]
# Upon initialization of an h5fileholder, the h5 files are defined (in their own wrapper-see below) and
# and gathers some important data from the files.
def __init__(self, filename, filepath, textEdit):
    """Start asynchronously opening the given HDF5 files.

    filename -- list of h5 file paths to load
    filepath -- directory holding the files (and the preprocessor file)
    textEdit -- Qt text widget used for status output

    Python 2 / PyQt4 code: uses print statements and old-style
    self.connect() signal wiring.
    """
    qc.QThread.__init__(self, None)
    self.textEdit = textEdit
    # Cleanup the object just in case
    self.h5files = []
    self.h5s = []  # not resetting !!
    self.cycles = []
    self.ages = []
    self.hattrs = []
    self.cattrs = []
    self.Tables = []
    self.dcols = []
    self.filepaths = []
    self.isotopes = []
    self.isomeric_states = []
    self.A = []
    self.Z = []
    self.done = []
    self.h5sStarted = []
    self.filepath = filepath  # Variable for the directory
    self.temp = []
    # NOTE(review): this local is shadowed by self.preprocExists below and
    # never read — looks like a leftover.
    preprocExists = False  # Boolean of if the preprocessor file exists
    print filename
    for name in filename:
        self.filepaths.append(name)
    # print 'Opening Files ' + str(self.filepaths)
    t0 = t.time()
    # preprocessor stuff: build the preprocessor file path with or without
    # an extra separator, depending on how filepath was given
    if self.filepath.endswith(os.sep):
        preprocName = str(self.filepath) + self.preprocName
    else:
        preprocName = str(self.filepath) + os.sep + self.preprocName
    self.preprocExists = os.path.exists(preprocName)
    # one started-flag per requested file
    for i in xrange(len(filename)):
        self.h5sStarted.append(False)
    # NOTE(review): only filepaths[0] is opened here; the remaining files
    # are opened later by continue_h5s() once this thread finishes.
    self.h5s.append(h5File(self.filepaths[0], True, True))
    self.connect(self.h5s[-1], qc.SIGNAL('finished()'), self.continue_h5s)
    self.h5s[-1].start()
    self.h5sStarted[0] = True
    print "stated:"
    print self.h5sStarted
    def continue_h5s(self):  # this is equivalent to run() in h5T.py
        '''
        Slot run after the first file's deep scan finishes: copy its metadata
        into the holder, validate/refresh the preprocessor file, then spawn
        shallow scans for the remaining files (or read them from the preproc).
        '''
        if debug:
            print "continue_h5s"
        # Adopt the first file's metadata as the holder-wide metadata.
        self.cycles.extend(self.h5s[0].cycle)
        self.ages.extend(self.h5s[0].age)
        self.hattrs = self.h5s[0].hattr
        self.cattrs = self.h5s[0].cattr
        self.Tables = self.h5s[0].Table
        self.dcols = self.h5s[0].dcol
        self.cycle_header = self.h5s[0].cycle_header #This string handles the name of the cycle
        new = self.h5s[0].new # This boolean handles the changes to the cycle nomenclature format
        #preproc stuff
        if self.filepath.endswith(os.sep):
            b = str(self.filepath)+self.preprocName
        else:
            b = str(self.filepath)+os.sep+self.preprocName
        # Invalidate the preprocessor file if files were added or renamed since it was written.
        if self.preprocExists:
            preprocTable=ascii_table(self.preprocName,self.filepath)
            if int(preprocTable.hattrs[0])<len(self.h5files):
                self.preprocExists=False
                print 'A File was added, rewriteing preprocessor file'
            if self.preprocExists:
                for i in xrange(len(self.h5files)):
                    if self.h5files[i]+'-cyc' not in preprocTable.dcols and self.preprocExists:
                        print 'A File was renamed, rewriteing preprocessor file'
                        self.preprocExists=False
        if not self.preprocExists and os.path.exists(b):
            os.system('rm '+b)
        # create list of isotopes stored in this h5 file
        try:
            for x in xrange(len(self.Tables[0])):
                self.isotopes.append([self.isos[int(self.Tables[1][x])],str(int(self.Tables[0][x]))])
            #print 'new file: ', new, self.isotopes
        except IndexError:
            print self.Tables, self.h5s[0].Table
        self.t1 = t.time()
        print "continueing"
        if len(self.filepaths) > 1:
            # Shallow-scan every remaining file; skip the threads when the preproc file is valid.
            for x in range(len(self.filepaths)-1):
                self.h5s.append(h5File(self.filepaths[x+1],False, new))
                if not self.preprocExists:
                    self.h5sStarted[x+1]=True
                    self.h5s[-1].start()
                    self.connect(self.h5s[-1], qc.SIGNAL('finished()'), self.add_data)
            if not self.preprocExists:
                print "all done?"
                self.connect(self, qc.SIGNAL('finished()'), self.all_done)
            else:
                self.all_done()
    def all_done(self):
        '''
        Final assembly step: either gather cycles/ages from every scanned file
        and write the preprocessor file, or (when a valid preprocessor file
        exists) read cycles/ages back from it; then sort everything.
        '''
        if debug:
            print "all done"
        if not self.preprocExists:
            for x in xrange(len(self.h5s)):
                print len(self.h5s[x].cycle)
                print len(self.h5s[x].age)
            # Merge the secondary files' cycles/ages into the holder-wide lists.
            for x in xrange(len(self.h5s)-1):
                self.cycles.extend(self.h5s[x+1].cycle)
                self.ages.extend(self.h5s[x+1].age)
            print len(self.ages)
            print len(self.cycles)
            header=[str(len(self.h5s)),'This is a preprocessor file for the directory: '+str(self.filepath),\
                'At the time of the creation of this file there were '+str(len(self.h5files))+\
                ' h5 files.']
            self.cycles = sorted(self.cycles, cmp=self.numeric_compare)
            #self.cycles = sorted(self.cycles, cmp=self.numeric_compare)
            """
            for cycle in self.cycles:
                print cycle
            try:
                #self.ages = self.get(self.cycles,'age',1)
            except IndexError:
                print 'enountered error fetching age'
            """
            #self.ages = sorted(self.ages, cmp=self.numeric_compare)
            self.textEdit.append('File search complete. You may begin plotting')
            print 'Writeing preprocessor files'
            # Build one '-cyc' and one '-age' column per file, padded to equal length.
            data=[]
            dcols=[]
            length=0
            for i in xrange(len(self.h5s)):
                dcols.append(self.h5s[i].filename+'-cyc')
                dcols.append(self.h5s[i].filename+'-age')
                data.append(self.h5s[i].cycle)
                data.append(self.h5s[i].age)
                if len(self.h5s[i].cycle)>length:
                    length=len(self.h5s[i].cycle)
                if len(self.h5s[i].age)>length:
                    length=len(self.h5s[i].age)
            # Pad shorter columns with a sentinel so the ascii table is rectangular.
            for i in xrange(len(data)):
                for j in xrange(length-len(data[i])):
                    data[i].append(3.14159265) #identifier number
            write(self.preprocName,header,dcols,data,sldir=self.filepath)
        else:
            print 'Reading preprocessor files'
            preprocTable=ascii_table(self.preprocName,self.filepath)
            for i in xrange(len(self.h5s)-1):
                print self.h5s[i+1].filename
                dat=preprocTable.get(self.h5s[i+1].filename+'-cyc')
                # Drop the padding sentinel, then restore the zero-padded cycle strings.
                dat1=[]
                for j in xrange(len(dat)):
                    if dat[j]!=3.14159265:
                        dat1.append(dat[j])
                dat=dat1
                for j in xrange(len(dat)):
                    dat[j]=str(int(dat[j]))
                    for k in xrange(10-len(dat[j])):
                        dat[j]='0'+dat[j]
                for j in xrange(len(dat)):
                    self.cycles.append(dat[j])
                self.h5s[i+1].cycle=dat
                dat=preprocTable.get(self.h5s[i+1].filename+'-age')
                dat1=[]
                for j in xrange(len(dat)):
                    if dat[j]!=3.14159265:
                        dat1.append(dat[j])
                dat=dat1
                self.h5s[i+1].age=dat
                for j in xrange(len(dat)):
                    self.ages.append(dat[j])
            try:
                self.cycles = sorted(self.cycles, cmp=self.numeric_compare)
            except TypeError:
                print "There was a problem sorting the cycles. You may have problems later. Please consider reloading(h5T) and trying again"
            try:
                self.ages = sorted(self.ages, cmp=self.numeric_compare)
            except TypeError:
                None
        print self.h5sStarted
        t2=t.time()
        print "Time: "+str(t2-self.t1)
def numeric_compare(self,x, y):
if int(x)>int(y):
return 1
elif int(x)==int(y):
return 0
else: # x<y
return -1
def add_data(self):
if debug:
print "add data"
self.done.append(self.h5s[-1].filename)
if len(self.done) == len(self.h5s)-1 or len(self.done) == len(self.h5s):
self.all_done()
# Clears up memory upon file deletion
    def __del__(self):
        # Stop the QThread when the holder is garbage collected.
        print 'File holder destruction event'
        self.terminate()
# This function determines which cycle, which file, which storage mechanism (cattr or data) and returns it
    def get(self, *args):
        '''
        Fetch header attributes or per-cycle data, dispatching on argument count:
        get(dataitem, scale)                      -> dataitem sampled every scale-th cycle
        get(cycle_list, dataitem, scale)          -> dataitem for the listed cycles
        get(cycle_list, dataitem, isotope, scale) -> isotope data for the cycles
        Returns the collected data (unwrapped from its list when only one value).
        '''
        if debug:
            print "get"
        # This function takes in a variety of inputs
        # option 1
        # get(dataitem)
        # fetches the dataitem for all cycles
        # option 2
        # get(cycle_list, dataitem)
        # fetches the dataitem from the list of cycles
        # option 3
        # get(cycle_list, 'iso_massf', isotope) isotope Must be in the form 'H-2'
        # fetches the isotope data for a list of cycles
        # Check out the inputs
        if len(args) > 4:
            print 'Improper use of this function'
            return None
        isotope_of_interest = []
        dat = []
        cycle_list = []
        scale = 1
        # print args
        # print len(args)
        print'args', args, self.hattrs, self.cattrs
        if len(args) == 2:
            dataitem = args[0]
            if self.hattrs.count(dataitem) == 0:
                print 'a'
                cycle_list = []
                scale = int(args[1])
            else:
                # Header attribute: read it straight from the first file and return.
                # NOTE(review): this replaces the h5File wrapper in self.h5s[0]
                # with a raw file handle and never restores it.
                print 'b'
                self.h5s[0] = mrT.File(self.h5s[0].filename,'r')
                dat = self.h5s[0].attrs.get(dataitem, None)
                return dat
        elif len(args) == 3:
            cycle_list = args[0]
            dataitem = args[1]
            scale = int(args[2])
        elif len(args) == 4:
            cycle_list = args[0]
            dataitem = args[1]
            isotope_of_interest = args[2]
            scale = int(args[3])
        # 'yps' and 'iso_massf' are interchangeable column names across file versions.
        if dataitem=='yps' and dataitem not in self.dcols:
            print self.dcols
            dataitem='iso_massf'
        if dataitem=='iso_massf' and dataitem not in self.dcols:
            print self.dcols
            dataitem='yps'
        # print dataitem
        # if it is a cattr call, it will not have a cycle value
        if cycle_list == []:
            # Open all the files.
            for h5 in self.h5s:
                h5.h5 = mrT.File(h5.filename,'r')
            # Decide which cycles are actually important
            for x in range(len(self.cycles)/scale):
                cycle_list.append(self.cycles[scale*x])
            # Fetch the data from each cycle
            for cyc in cycle_list:
                for h5 in self.h5s:
                    if h5.cycle.count(cyc) or h5.cycle.count(str(cyc)):
                        print 'hello'
                        self.h5sStarted[self.h5s.index(h5)]=True
                        h5.run()
                        try:
                            h5.wait()
                        except AttributeError:
                            print 'cheating again'
                        # NOTE(review): fetch_data_one's result is unused here;
                        # the value appended below comes from the cycle's attrs.
                        temp = h5.fetch_data_one(dataitem,cyc)
                        try:
                            dat.append(h5.h5[self.cycle_header+str(cyc)].attrs.get(dataitem, None)[0])
                        except TypeError:
                            dat.append(h5.h5[self.cycle_header+str(cyc)].attrs.get(dataitem, None))
                        except IndexError:
                            print 'looking in the wrong place'
            # Close all the files when done
            for h5 in self.h5s:
                h5.h5.close()
        else:
            # Zero-pad the requested cycle names to the stored width.
            c_len = len(str(self.h5s[0].cycle[0]))
            #print 'c_len', c_len
            if self.h5s[0].new:
                for z in xrange(len(cycle_list)):
                    while len(str(cycle_list[z])) < c_len:
                        cycle_list[z] = '0'+str(cycle_list[z])
            # We already have a list of cycles to check out
            for cyc in cycle_list:
                for h5 in self.h5s:
                    if h5.cycle.count(int(cyc)) or h5.cycle.count(str(cyc)):
                        # NOTE(review): the bare 'except :' below silently
                        # swallows every error from the fetch.
                        try:
                            if not self.h5sStarted[self.h5s.index(h5)]:
                                print "not Sarted"
                                self.h5sStarted[self.h5s.index(h5)]=True
                                print self.h5sStarted
                                h5.run()
                                h5.wait()
                                self.temp = h5.fetch_data_one(dataitem,cyc)
                            else:
                                #print "Sarted"
                                self.temp = h5.fetch_data_one(dataitem,cyc)
                                #print self.temp
                        except :
                            None
                        # Occasionally data comes out formatted in a funny way (arrays nested in arrays....)
                        # This strips the nested arrays until the actual data is found
                        # NOTE(review): 'x != a or x != b' is always True, so the
                        # else-branch below is unreachable; probably meant 'and'.
                        if dataitem != 'iso_massf' or dataitem != 'yps':
                            #print 'hello'
                            #print self.temp
                            '''
                            while np.ndim(self.temp) > 1:
                                shape = np.shape(self.temp)
                                self.temp = self.temp[0]
                            '''
                            #print self.temp
                        else:
                            while np.ndim(self.temp) > 2:
                                shape = np.shape(self.temp)
                                self.temp = self.temp[0]
                            while len(self.temp) < 2:
                                self.temp = self.temp[0]
                        try:
                            dat.append(self.temp)
                        except AttributeError:
                            np.append(dat, self.temp)
        # NOTE(review): this condition is also always True (same 'or' issue).
        if len(dat) < 2 and (dataitem != 'iso_massf' or dataitem != 'yps'):
            try:
                dat = dat[0]
            except IndexError:
                None
            # print dat
            # print 'indexerror'
        print dat
        return dat
# This function determines which cycle, which file, which storage mechanism (cattr or data) and returns it
    def get1(self, *args):
        '''
        Variant of get(): fetch a header attribute or per-cycle data.
        Call patterns:
        get1(dataitem)                      -> header attribute, or dataitem for all cycles
        get1(cycle_list, dataitem)          -> dataitem for the listed cycles
        get1(cycle_list, dataitem, isotope) -> data for one isotope ('H-2' style name)
        '''
        if debug:
            print "get1"
        # This function takes in a variety of inputs
        # option 1
        # get(dataitem)
        # fetches the dataitem for all cycles
        # option 2
        # get(cycle_list, dataitem)
        # fetches the dataitem from the list of cycles
        # option 3
        # get(cycle_list, 'iso_massf', isotope) isotope Must be in the form 'H-2'
        # fetches the isotope data for a list of cycles
        # Check out the inputs
        if len(args) > 3:
            print 'Improper use of this function'
            return None
        isotope_of_interest = []
        if len(args) == 1:
            dataitem = args[0]
            if self.hattrs.count(dataitem) == 0:
                cycle_list = self.cycles
            else:
                # Header attribute: read it straight from the first file and return.
                self.h5s[0] = mrT.File(self.h5s[0].filename,'r')
                dat = self.h5s[0].attrs.get(dataitem, None)
                return dat
        elif len(args) == 2:
            cycle_list = args[0]
            dataitem = args[1]
        elif len(args) == 3:
            cycle_list = args[0]
            dataitem = args[1]
            isotope_of_interest = args[2]
        # Just in case the user inputs integers
        try:
            for x in xrange(len(cycle_list)):
                cycle_list[x] = str(cycle_list[x])
        except TypeError:
            cycle_list = [str(cycle_list)]
        # Zero-pad the requested cycle names to the stored width.
        try:
            if cycle_list.isdigit():
                cycle_list = [cycle_list]
                for cycle in cycle_list:
                    if len(cycle) != len(self.cycles[0]):
                        diff = len(self.cycles[0])-len(cycle)
                        OO = ''
                        # NOTE(review): diff is never decremented, so this loop
                        # cannot terminate if entered (compare the except-branch
                        # below, which does 'diff-=1').  In practice cycle_list
                        # is a list here, so .isdigit() raises AttributeError
                        # first and this branch is effectively dead.
                        while diff >=1:
                            OO+='0'
                        cycle = OO+cycle
        except AttributeError:
            if cycle_list[0].isdigit():
                for x in xrange(len(cycle_list)):
                    if len(str(cycle_list[x])) != len(str(self.cycles[0])):
                        diff = len(str(self.cycles[0]))-len(str(cycle_list[x]))
                        OO = ''
                        while diff >=1:
                            OO+='0'
                            diff-=1
                        try:
                            cycle_list[x] = OO+cycle_list[x]
                        except TypeError:
                            cycle_list[0] = OO+cycle_list[0]
        # if it is a cattr call, it will not have a cycle value
        dat = []
        # for h5 in self.h5s:
        #    h5.h5 = mrT.File(h5.filename,'r')
        #'/rpod2/fherwig/tmp/tmp/'
        for cyc in cycle_list:
            for h5 in self.h5s:
                if h5.cycle.count(int(cyc)) or h5.cycle.count(str(cyc)):
                    # Lazily scan each file the first time it is touched.
                    if not self.h5sStarted[self.h5s.index(h5)]:
                        self.h5sStarted[self.h5s.index(h5)]=True
                        h5.run()
                        #try:
                        h5.wait()
                        #except:
                        #    print 'failed thread:', h5
                        temp = h5.fetch_data_one(dataitem,cyc)
                    else:
                        temp = h5.fetch_data_one(dataitem,cyc)
                    # Occasionally data comes out formatted in a funny way (arrays nested in arrays....)
                    # This strips the nested arrays until the actual data is found
                    #print 'temp', temp
                    #else:
                    #    while np.ndim(temp) > 2:
                    #        shape = np.shape(temp)
                    #        temp = temp[0]
                    #
                    #    while len(temp) < 2:
                    #        temp = temp[0]
                    if (dataitem == 'iso_massf' or dataitem == 'yps') and isotope_of_interest != []: #
                        # Figure out the index
                        #print 'yps', dataitem
                        index = 0
                        for x, iso in enumerate(self.isotopes):
                            print str(iso[0]+'-'+iso[1]), isotope_of_interest
                            if str(iso[0]+'-'+iso[1]) == isotope_of_interest:
                                index = x
                                break
                        #print 'iso_massf',temp
                        # Keep only the requested isotope's column.
                        temp = temp[:,index]
                    # Now add the information to the list we pass back
                    elif (dataitem=='iso_massf' or dataitem=='yps'):
                        print 'the right stuff', isotope_of_interest
                    else:
                        #print 'enter', dataitem
                        while np.ndim(temp) > 1:
                            shape = np.shape(temp)
                            temp = temp[0]
                    try:
                        dat.append(temp)
                        #print 'right append'
                    except AttributeError:
                        np.append(dat, temp)
                        #print 'bad append'
        # NOTE(review): 'x != a or x != b' is always True; probably meant 'and'.
        if len(dat) < 2 and (dataitem != 'iso_massf'or dataitem != 'yps'):
            try:
                dat = dat[0]
            except IndexError:
                None
            except TypeError:
                None
        try:
            if len(dat) < 2 and isotope_of_interest != []:
                dat = dat[0]
        except TypeError:
            None
        except IndexError:
            None
        print self.h5sStarted
        return dat
# Determines the file-cycle match and gets the associated information
    def fetch_datas(self,dataitem1,dataitem2,cycle, scale):
        '''
        Fetch two data items together.
        cycle == None: sample every scale-th cycle across all files and return
        [dat1, dat2]; otherwise return one [item1, item2] pair per matching
        cycle (cycle may be a single value or a list).
        '''
        if debug:
            print "fetch_datas"
        dat = []
        if cycle == None:
            dat1 = []
            dat2 = []
            index = 0
            # Loop through all the files and grab the appropriate data
            for x in xrange(len(self.h5s)):
                self.h5s[x].h5 = mrT.File(self.h5s[x].filename,'r')
            for y in xrange(len(self.cycles)/scale):
                for x in xrange(len(self.h5s)):
                    try:
                        node = self.h5s[x].h5[self.cycle_header+str(self.cycles[scale*y])].attrs
                        dat1.append(node.get(dataitem1, None))
                        dat2.append(node.get(dataitem2, None))
                        #print node
                    except (IndexError,KeyError):
                        # Cycle not present in this file; ignore (1+1 is a no-op).
                        1+1
                        #print 'bad cycle', self.cycles[scale*y]
                        #print self.h5sStarted
            #for y in xrange(len(self.h5s[x].cycle)/scale):
            #    node = self.h5s[x].h5[self.cycle_header+str(self.h5s[x].cycle[scale*y])].attrs
            #    dat1.append(node.get(dataitem1, None))
            #    dat2.append(node.get(dataitem2, None))
            for x in xrange(len(self.h5s)):
                self.h5s[x].h5.close()
            # Strip single-element nesting from both result lists/arrays.
            try:
                while len(dat1) == 1:
                    print 'trimmed dat1'
                    #print dat1
                    dat1 = dat1[0]
            except AttributeError:
                while dat1.shape[0] == 1:
                    print 'trimmed dat1'
                    #print dat1
                    dat1 = dat1[0]
            try:
                while len(dat2) == 1:
                    print 'timmed dat2'
                    # print dat2
                    dat2 = dat2[0]
            except AttributeError:
                while dat2.shape[0] == 1:
                    print 'timmed dat2'
                    # print dat2
                    dat2 = dat2[0]
            dat = [dat1,dat2]
        else:
            # We have a user-defined list of cycles to check out
            try:
                for cyc in cycle:
                    for h5 in self.h5s:
                        if h5.cycle.count(int(cyc)):
                            dat.append(h5.fetch_data(dataitem1,dataitem2,cyc))
                        else:
                            print 'not Matched'
            except TypeError:
                # cycle was a single value, not an iterable.
                for h5 in self.h5s:
                    if h5.cycle.count(cycle):
                        dat.append(h5.fetch_data(dataitem1,dataitem2,cycle))
        return dat
    def fetch_sp_kh_data(self, cycle_list, scale, ranger, element):
        '''
        Build a (len(cycle_list) x 3000) array of mass coordinates for a
        Kippenhahn-style plot, keeping shells whose abundance for the given
        element (or the 'dcoeff' column when element == -1) reaches 'limit'.
        NOTE(review): 'limit' is not defined in this method or its arguments -
        it must exist as a module-level name or this raises NameError.
        'scale' and 'ranger' are accepted but unused here.
        '''
        if debug:
            print "fetch_sp_kh_data"
        t0 = t.time()
        for h5 in self.h5s:
            h5.h5 = mrT.File(h5.filename,'r')
        t2 = t.time()
        Z = np.zeros([int(len(cycle_list)), 3000], float)
        zindex = 0
        for cyc in cycle_list:
            for h5 in self.h5s:
                if h5.cycle.count(cyc) or h5.cycle.count(str(cyc)):
                    if element == -1:
                        goal = "dcoeff"
                    else:
                        goal = "iso_massf"
                    # Fall back to the flat group layout when SE_DATASET is absent.
                    try:
                        node = h5.h5[self.cycle_header+str(cyc)].__getitem__('SE_DATASET')
                    except ValueError:
                        node = h5.h5[self.cycle_header+str(cyc)]
                    #temp1 = node.__getitem__("dcoeff")
                    try:
                        temp1 = node.__getitem__(goal)#node.col('convection_indicator')
                    except:
                        temp1 = node.__getitem__("yps")
                    temp2 = node.__getitem__("mass")#node.col('mass')
                    # 2-D abundance table first; fall back to a 1-D column on IndexError.
                    try:
                        for x in xrange(len(temp2)):
                            if float(temp1[x][element]) >= float(limit):
                                try:
                                    Z[zindex][x] = temp2[x]
                                except:
                                    print 'indexerror'
                                    break
                        zindex += 1
                    except IndexError:
                        for x in xrange(len(temp2)):
                            if float(temp1[x]) >= float(limit):
                                try:
                                    Z[zindex][x] = temp2[x]
                                except:
                                    print 'indexerror'
                                    break
                        zindex += 1
                    #break
        for h5 in self.h5s:
            h5.h5.close()
        return Z
    def fetch_KH_data(self, scale, ranger, cycle_list):
        '''
        Build a (len(cycle_list) x 3000) array holding the mass coordinate of
        convective shells (convection_indicator == 1) for a Kippenhahn diagram.
        'scale' and 'ranger' are accepted but unused here.
        '''
        #cycle_list = []
        #age = []
        if debug:
            print "fetch_KH_data"
        t0 = t.time()
        for h5 in self.h5s:
            h5.h5 = mrT.File(h5.filename,'r')
        t1 = t.time()
        print 'Opening files took: ', str(t1-t0), scale
        t2 = t.time()
        print 'Buidling cycle List took: ', str(t2-t1)
        Z = np.zeros([int(len(cycle_list)), 3000], float) # holds the convective information
        zindex = 0
        print 'starting check', len(cycle_list)
        for cyc in cycle_list:
            for h5 in self.h5s:
                if h5.cycle.count(cyc):
                    #print 'checking cycle:', cyc
                    # Fall back to the flat group layout when SE_DATASET is absent.
                    try:
                        try:
                            node = h5.h5[self.cycle_header+str(cyc)].__getitem__('SE_DATASET')
                        except ValueError:
                            node = h5.h5[self.cycle_header+str(cyc)]
                    except:
                        print 'encountered a problem with cycle:',cyc
                        break
                    temp1 = node.__getitem__("convection_indicator")#node.col('convection_indicator')
                    temp2 = node.__getitem__("mass")#node.col('mass')
                    maxi = abs(max(temp1))  # NOTE(review): computed but unused.
                    # print 'maxi', maxi
                    #temp1 = abs(temp1/(temp1-1e-10))
                    #if any(temp1) > 1:
                    # Zero every shell that is not convective, then mask the masses.
                    for x in xrange(len(temp1)):
                        if temp1[x] != 1:
                            temp1[x] = 0
                    Z[zindex][:len(temp2)] = temp1*temp2
                    zindex += 1
        t3 = t.time()
        print 'Data fetch took: ', str(t3-t2)
        for h5 in self.h5s:
            h5.h5.close()
        t4 = t.time()
        print 'file closing took: ', str(t4-t3)
        return Z#conv
    def fetch_sparse_yps(self,block_plot, scale,isotope_index,textEdit):
        '''
        Gather [mass, convective-mass] profiles for every scale-th cycle and
        return [data2, data1] (masses, masked convective masses).
        NOTE(review): block_plot is forced to True and isotope_index to 0
        below, so both incoming arguments are ignored; the else-branch appends
        to an undefined name 'data' (NameError if it were ever reached) and
        'limits' is computed but unused.
        '''
        if debug:
            print "fetch_sparse_yps"
        self.cycles.sort()
        data1 = []
        data2 = []
        all_mass = []
        cycle_list = []
        #scale = 4
        block_plot=True
        isotope_index = 0
        t0 = t.time()
        limits = [0.3645,0.04956,8.5e-4,2.4082e-3]
        for h5 in self.h5s:
            h5.h5 = mrT.File(h5.filename,'r')
        for x in range(len(self.cycles)/scale):
            cycle_list.append(self.cycles[scale*x])
        t1 = t.time()
        textEdit.append('Open Time: ' + str(t1-t0))
        textEdit.append('cycling...............')
        for cyc in cycle_list:
            for h5 in self.h5s:
                if h5.cycle.count(cyc):
                    try:
                        node = h5.h5['/cycle-'+str(cyc)].__getitem__('SE_DATASET')
                        if block_plot:
                            temp1 = []
                            temp2 = []
                            temp3 = []
                            temp1 = node.__getitem__("convection_indicator")#node.col('convection_indicator')
                            temp2 = node.__getitem__("mass")#node.col('mass')
                            #temp1 = np.dot(temp1,temp2)
                            # Mass of convective shells only (indicator is 0/1).
                            for x in range(len(temp1)):
                                temp3.append(abs(temp2[x]*float(temp1[x])))
                            data1.append(temp3)
                            data2.append(temp2)
                        else:
                            try:
                                temp1 = node.__getitem__("iso_massf")#col('iso_massf')
                            except:
                                temp1 = node.__getitem__("yps")
                            temp2 = node.__getitem__("mass")#col('mass')
                            fake1 = []
                            fake2 = []
                            for x in range(len(temp1)/scale):
                                fake1.append(temp1[scale*x][isotope_index])
                                fake2.append(temp2[scale*x])
                            data.append(fake1)
                            all_mass.append(fake2)
                            del fake1, fake2, temp1, temp2
                    except AttributeError:
                        print 'Age does not exist'
        t2 = t.time()
        textEdit.append( 'Cycle Time: ' + str(t2-t1))
        textEdit.append( 'closing a bunch of files')
        for h5 in self.h5s:
            h5.h5.close()
            textEdit.append(str( h5.h5))
        t3 = t.time()
        textEdit.append( 'Close TIme: ' + str(t3-t2))
        return [data2,data1]
# uses the index information to build list of isotopes from tables A,Z
def fetch_isotopes(self):
isos = []
try:
for x in range(len(self.Tables[1])):
isos.append([self.isotopes[int(self.Tables[1][x])], self.Tables[0][x]])
except IndexError:
None
return isos
# This wrapper class allows some automated activity when an h5file is initialized.
# Upon initialization the h5 file is opened and certain bits of data are read.
# This class also interacts with the h5fileholder class to access needed data.
class h5File(qc.QThread):
    '''
    Worker-thread wrapper around a single h5 file.
    run() scans the file (deep or shallow); the owning h5fileholder connects
    to the thread's finished() signal to collect the results.
    NOTE(review): the attributes below are mutable class-level defaults shared
    across instances; __init__ rebinds them per instance, so they only matter
    if an instance is somehow used without __init__.
    '''
    h5 = None
    filename = None
    cycle = []
    age = []
    dcol = []
    hattr = []
    data = []
    skipped_nodes = 0
    ver = ''
    classname = ''
    cattr=[]
    Table = []
    isomeric_state = []
    new = True
    # Initialize the class
    def __init__(self, filepath,deep_search, new):
        '''
        Input:
        filepath: path of the h5 file to wrap
        deep_search: True scans tables/columns/attributes; False only cycles/ages
        new: True for the 'cycleNNN' group naming, False for 'cycle-NNN'
        '''
        qc.QThread.__init__(self,None)
        print 'starting'
        # Instantiate
        self.h5 = None
        self.filename = None
        self.cycle = []
        self.age = []
        self.dcol = []
        self.data = []
        self.skipped_nodes = 0
        self.ver = ''
        self.classname = ''
        self.hattr = []
        self.cattr=[]
        self.Table = []
        self.isomeric_state = []
        self.A = []
        self.Z = []
        self.new = True
        # Build
        self.new = new
        self.filename = filepath
        self.deep_search = deep_search
        self.filename = filepath
        # Group-name prefix differs between the old and new file layouts.
        if self.new:
            self.cycle_header = 'cycle'
        else:
            self.cycle_header = 'cycle-'
    def run(self):
        # Thread entry point: scan the file, then make sure it is closed.
        if self.deep_search:
            self.search_deep()
        else:
            self.search_shallow()
        try:
            self.h5.close()
        except:
            None#print 'error closing file: ', self.h5
        print 'done'
        #print 'Table: ', self.Table
    # Fetches a single category of information
    def fetch_data_one(self,dataitem,cycle):
        '''
        Open the file, read one column/attribute for one cycle, close the file.
        Falls back from the SE_DATASET layout to cycle attributes to direct
        group indexing, then strips single-element nesting from the result.
        '''
        #print 'fetching data one'
        self.h5 = mrT.File(self.filename,'r')
        try:
            data = self.h5.__getitem__(self.cycle_header+str(cycle)).__getitem__('SE_DATASET')[dataitem]
        except ValueError:
            try:
                data = self.h5.__getitem__(self.cycle_header+str(cycle)).attrs.get(dataitem, None)[0]
            except TypeError:
                data = self.h5.__getitem__(self.cycle_header+str(cycle))[dataitem]
        #print data
        try:
            while data.shape[0] < 2:
                data = data[0]
        except IndexError:
            None
        self.h5.close()
        return data
    # same as above, but for 2 sets of data
    def fetch_data(self,dataitem1,dataitem2,cycle):
        '''Read two items for one cycle and return them as [data1, data2].'''
        self.h5 = mrT.File(self.filename,'r')
        #print 'cycle ',cycle
        try:
            dataset = self.h5.__getitem__(self.cycle_header+str(cycle)).__getitem__('SE_DATASET')
            data1 = dataset[dataitem1]
            data2 = dataset[dataitem2]
        except ValueError:
            # Older layout: the items live in the cycle group's attribute values.
            dataset = self.h5.__getitem__(self.cycle_header+str(cycle)).attrs.values()
            data1 = dataset[self.cattr.index(dataitem1)][0]
            data2 = dataset[self.cattr.index(dataitem2)][0]
        self.h5.close()
        del dataset
        data = [data1,data2]
        return data
    # The typical search algorithm when a h5file class is initialized
    def search_shallow(self):
        '''Collect only the cycle numbers and their ages from the file.'''
        self.h5 = mrT.File(self.filename,'r')
        temp = self.h5.keys()
        for te in temp:
            if te[0] == 'c':
                # 'cycleNNN' (new naming) vs 'cycle-NNN' (old naming).
                if te[5:].isdigit():
                    self.cycle.append(str(te[5:]))
                    try:
                        self.age.append(self.h5[te].attrs.get("age",None)[0])
                    except TypeError:
                        self.age.append(self.h5[te].attrs.get("age",None))
                else:
                    self.cycle.append(str(te.split('-')[1]))
                    try:
                        self.age.append(self.h5[te].attrs.get("age", None)[0])
                    except TypeError:
                        self.age.append(self.h5[te].attrs.get("age",None))
        self.cycle.sort()
        self.age.sort()
    def search_deep(self):
        '''
        Full scan: cycles and ages plus the A/Z/isomer tables, the data column
        names, the header attributes and the cycle attributes.
        '''
        self.h5 = mrT.File(self.filename,'r')
        temp = self.h5.keys()
        # Handles the change in cycle nomenclature
        self.new = True
        for te in temp:
            if te.count('-'):
                self.new = False
                break
        if self.new:
            self.cycle_header = 'cycle'
            for te in temp:
                if te[0] == 'c':
                    if te[5:].isdigit():
                        self.cycle.append(str(te[5:]))
                        try:
                            self.age.append(self.h5[te].attrs.get("age",None)[0])
                        except TypeError:
                            self.age.append(self.h5[te].attrs.get("age",None))
                    else:
                        self.isomeric_state.append(self.h5[te]['data'])
                else:
                    # Non-cycle groups hold the A / Z / isomer lookup tables.
                    obj = self.h5[te].__iter__()
                    if str(te).count('A'):
                        holder = []
                        for ob in obj:
                            holder.append(ob[0])
                        self.Table.append(holder)
                        self.A.append(holder)
                    elif str(te).count('Z'):
                        holder = []
                        for ob in obj:
                            holder.append(ob[0])
                        self.Table.append(holder)
                        self.Z.append(holder)
                    else:
                        holder = []
                        for ob in obj:
                            holder.append(ob[0])
                        self.Table.append(holder)
                        self.isomeric_state.append(holder)
            try:
                temp = self.h5.__getitem__(self.cycle_header+str(self.cycle[0])).__getitem__('SE_DATASET').dtype.__str__().split(',')
            except ValueError:
                temp = self.h5.__getitem__(self.cycle_header+str(self.cycle[0])).dtype.__str__().split(',')
        else:
            self.cycle_header = 'cycle-'
            for te in temp:
                try:
                    self.cycle.append(str(te.split('-')[1]))
                    try:
                        self.age.append(self.h5[te].attrs.get("age", None)[0])
                    except TypeError:
                        self.age.append(self.h5[te].attrs.get("age", None))
                except IndexError:
                    # No '-' in the key: this group is one of the lookup tables.
                    obj = self.h5[te].__iter__()
                    if str(te).count('A'):
                        holder = []
                        for ob in obj:
                            holder.append(ob[0])
                        self.Table.append(holder)
                        self.A.append(holder)
                    elif str(te).count('Z'):
                        holder = []
                        for ob in obj:
                            holder.append(ob[0])
                        self.Table.append(holder)
                        self.Z.append(holder)
                    else:
                        holder = []
                        for ob in obj:
                            holder.append(ob[0])
                        self.Table.append(holder)
                        self.isomeric_state.append(holder)
            self.cycle.sort()
        # This is kind of stupid, but I have not found a way to access this information directly.
        try:
            temp = self.h5.__getitem__(self.cycle_header+str(self.cycle[0])).__getitem__('SE_DATASET').dtype.__str__().split(',')
        except ValueError:
            temp = self.h5.__getitem__(self.cycle_header+str(self.cycle[0])).dtype.__str__().split(',')
        # Column names are parsed out of the dtype's string representation.
        for tem in temp:
            if tem.count('<') ==0:
                try:
                    self.dcol.append(tem.split('\'')[1])
                except IndexError:
                    None
        attrs = self.h5.attrs
        for at in attrs:
            self.hattr.append(at)
        self.cattr = self.h5[self.cycle_header+str(self.cycle[0])].attrs.keys()
        table = []
        grp = self.h5[self.cycle_header+str(self.cycle[0])]
        for gr in grp:
            try:
                table.append(float(gr[0]))
            except ValueError:
                None
        self.h5.close()
        return None
    """ old search deep
    def search_deep(self):
        self.h5 = mrT.File(self.filename,'r')
        temp = self.h5.keys()
        # Handles the change in cycle nomenclature
        self.new = True
        for te in temp:
            #print te
            if te.count('-'):
                #print 'checked'
                self.new = False
        #print self.new
        attrs = self.h5.attrs
        for at in attrs:
            self.hattr.append(at)
        #print self.cycle
        if self.new:
            self.cycle_header = 'cycle'
            for te in temp:
                if te[0] == 'c':
                    #print 'cycle', te
                    if te[5:].isdigit():
                        self.cycle.append(te[5:])
                    else:
                        self.isomeric_state.append(self.h5[te]['data'])
                else:
                    #print 'table', te
                    obj = self.h5[te].__iter__()
                    holder = []
                    for ob in obj:
                        holder.append(ob[0])
                    self.Table.append(holder)
            try:
                #print self.h5.__getitem__(self.cycle_header+str(self.cycle[0])).__getitem__('SE_DATASET').dtype
                temp = self.h5.__getitem__(self.cycle_header+str(self.cycle[0])).__getitem__('SE_DATASET').dtype.__str__().split(',')
            except ValueError:
                temp = self.h5.__getitem__(self.cycle_header+str(self.cycle[0])).dtype.__str__().split(',')
            #print temp
        else:
            self.cycle_header = 'cycle-'
            for te in temp:
                try:
                    self.cycle.append(te.split('-')[1])
                except IndexError:
                    obj = self.h5[te].__iter__()
                    holder = []
                    for ob in obj:
                        #print ob
                        holder.append(ob[0])
                    #print holder
                    self.Table.append(holder)
        # This is kind of stupid, but I have not found a way to access this information directly.
        try:
            temp = self.h5.__getitem__(self.cycle_header+str(self.cycle[0])).__getitem__('SE_DATASET').dtype.__str__().split(',')
        except ValueError:
            temp = self.h5.__getitem__(self.cycle_header+str(self.cycle[0])).dtype.__str__().split(',')
        #print temp
        #print self.cycle
        for tem in temp:
            if tem.count('<') ==0:
                try:
                    self.dcol.append(tem.split('\'')[1])
                except IndexError:
                    None
        self.cattr = self.h5[self.cycle_header+str(self.cycle[0])].attrs.keys()
        #print self.cattr
        table = []
        grp = self.h5[self.cycle_header+str(self.cycle[0])]
        for gr in grp:
            try:
                table.append(float(gr[0]))
            except ValueError:
                None
        # print self.cattr, self.age
        self.cycle.sort()
        print 'end search deep'
        self.h5.close()
    """
    def __del__(self):
        # Stop the worker thread when the wrapper is garbage collected.
        print 'Deleting H5File'
        self.terminate()
        print self.h5
#t0 = t.time()
#f = h5FileHolder(['M2.00Z0.010.0000001.out.h5','M2.00Z0.010.0001001.out.h5'], 'new_mesa_file/')
#datas = f.fetch_datas('logL','R_sol', f.cycles[0], 1)
#print datas
#t1 = t.time()
#print 'Total Time: ' + str(t1-t0)
|
13,749 | 507639abbdfb6698e7f59e2470a41459a8ac4268 | import sys
import weakref
from concurrent.futures import Future
class CircularFuturesChainException(Exception):
    """Raised when chaining would link a Future back into a chain it already belongs to."""
    pass
class ThenableFuture(Future):
    """A PEP 3148 Future extended with a Promise/A+-style ``then`` method.

    Supports the Promise Resolution Procedure (Promise/A+ section 2.3.2):
    when a success handler returns a Future, the downstream future adopts
    that Future's eventual state instead of resolving immediately.
    """

    @property
    def _chained_futures_log(self):
        """Lazily-created WeakSet of futures already linked into this chain.

        Used by :meth:`_chain_to_another_future` to detect circular chains.
        Stored under a private attribute so plain Future state is untouched.
        """
        prop_name = '_chained_futured_log_list'
        log = getattr(self, prop_name, None)
        if log is None:
            log = weakref.WeakSet()
            setattr(self, prop_name, log)
        return log

    def _reject(self, ex, trace_back):
        """Mark this future failed, tolerating both Future APIs.

        The original code called ``set_exception_info`` unconditionally; that
        method only exists on the Python 2 ``futures`` backport, so on the
        Python 3 standard library every rejection raised AttributeError inside
        the done-callback and the chained future never resolved.  Prefer the
        backport API when present, otherwise attach the traceback to the
        exception and use the standard ``set_exception``.

        :param BaseException ex: the exception to store
        :param trace_back: the traceback captured via ``sys.exc_info()``
        """
        set_info = getattr(self, 'set_exception_info', None)
        if set_info is not None:
            set_info(ex, trace_back)
        else:
            self.set_exception(ex.with_traceback(trace_back))

    def _chain_to_another_future(self, base_future):
        """
        Chains a Future instance directly to another Future instance.

        Used for the recursive Promise Resolution Procedure (section 2.3.2)
        specified in Promise/A+ that allows .then() to piggy-back on a Promise
        returned by a success handler.

        :param Future base_future: the future whose state this future adopts
        :raises CircularFuturesChainException: if ``base_future`` is already
            part of this resolution chain (would never resolve otherwise)
        """
        if base_future in self._chained_futures_log:
            raise CircularFuturesChainException(
                'Circular Futures chain detected. Future {} is already in the resolved chain {}'.format(
                    base_future, set(self._chained_futures_log)
                )
            )
        else:
            self._chained_futures_log.add(base_future)

        def _done_handler(base_future):
            """
            Converts results of the underlying future into results of this one.

            :param ThenableFuture base_future: original Future instance, now
                guaranteed to be resolved due to cancellation or completion.
            """
            if not base_future.done():
                # this should never ever be true.
                # having this code here just to avoid infinite timeout
                self.cancel()
                return
            if base_future.cancelled():
                self.cancel()
                return
            try:
                result = base_future.result()
                if isinstance(result, Future):
                    # Another level of indirection: keep adopting recursively.
                    self._chain_to_another_future(result)
                else:
                    self.set_result(result)
                return
            except BaseException:
                # note, that exception may come from base_future.result()
                # or from the recursive chaining call above.
                ex, trace_back = sys.exc_info()[1:]
                self._reject(ex, trace_back)
                return

        base_future.add_done_callback(_done_handler)

    def then(self, on_fulfilled=None, on_rejected=None):
        """
        JavaScript-like (https://promisesaplus.com/) .then() method allowing
        futures to be chained.

        In conformance with the Promise/A+ spec, if on_fulfilled returns an
        instance of Future, this code auto-chains the returned Future to the
        resolution of the Future returned by on_fulfilled.  Resolution of the
        Future returned by this method may therefore be pushed forward until
        recursive Futures are exhausted or an error is encountered.

        Note that while the Promise Resolution Procedure (section 2.3.2) is
        supported when the returned object is explicitly a Future subclass,
        duck-typed thenable detection per section 2.3.3 *IS NOT SUPPORTED*.

        :param on_fulfilled: (optional)
            Function called with this Future's result when it resolves
            successfully.  If None, the new Future is resolved directly with
            the underlying result.
        :type on_fulfilled: None or function
        :param on_rejected: (optional)
            Function called with the exception when this Future is rejected.
            If None, the new Future is rejected directly with that exception.
            If it returns an exception instance, the new Future is rejected
            with it; any other return value resolves the new Future.
        :type on_rejected: None or function
        :return: a new Future resolved once the present Future resolves and
            either on_fulfilled or on_rejected completes.
        :rtype: ThenableFuture
        """
        new_future = self.__class__()

        def _done_handler(base_future):
            """
            Converts results of the underlying future into results of the
            new future.

            :param ThenableFuture base_future: original Future instance, now
                guaranteed to be resolved due to cancellation or completion.
            """
            if not base_future.done():
                # this should never ever be true.
                # having this code here just to avoid infinite timeout
                new_future.cancel()
                return
            if base_future.cancelled():
                new_future.cancel()
                return
            try:
                result = base_future.result()
                if on_fulfilled:
                    result = on_fulfilled(result)
                # Per Promise/A+ spec, if the return value is a Promise,
                # our promise must adopt the state of that Promise.
                if isinstance(result, Future):
                    # this is the only outcome where resolution of new_future
                    # is deferred to the returned Promise's resolution
                    new_future._chain_to_another_future(result)
                else:
                    new_future.set_result(result)
                return
            except BaseException:
                # note, that exception may come from base_future.result()
                # and from on_fulfilled(result) calls.
                ex, trace_back = sys.exc_info()[1:]
                if not on_rejected:
                    new_future._reject(ex, trace_back)
                    return
                else:
                    try:
                        result = on_rejected(ex)
                        if isinstance(result, BaseException):
                            raise result
                        else:
                            new_future.set_result(result)
                            return
                    except BaseException:
                        ex, trace_back = sys.exc_info()[1:]
                        new_future._reject(ex, trace_back)
                        return

        self.add_done_callback(_done_handler)
        return new_future
|
13,750 | 8c283d56c7fc29de532820a82ae8b241b020ed67 | import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score, classification_report
import nltk
from functions import load_data, prepare_and_clean_data, process_data
import argparse
import sys
import os
from joblib import load
from sklearn.preprocessing import OneHotEncoder
from sklearn.pipeline import make_pipeline
def create_arg_parser():
    """Build and return the command-line parser for this evaluation script."""
    arg_parser = argparse.ArgumentParser(description='Description of your app.')
    # Both options name a file of the held-out test set: raw text and labels.
    for flag, description in (
            ('--textpath', 'Path to test dataset (text).'),
            ('--tagpath', 'Path to test dataset (tag).'),
    ):
        arg_parser.add_argument(flag, help=description)
    return arg_parser
if __name__ == "__main__":
    # Fix the RNG seed so any stochastic steps are reproducible.
    np.random.seed(500)
    arg_parser = create_arg_parser()
    parsed_args = arg_parser.parse_args(sys.argv[1:])
    path_text = parsed_args.textpath
    path_tags = parsed_args.tagpath
    # Load raw text and labels, then run the project's cleaning pipeline
    # (Polish stop-word list lives alongside the script).
    text = load_data(path_text)
    tag = load_data(path_tags)
    data = prepare_and_clean_data(text,tag)
    data = process_data(data, "./stopwords/polish.stopwords.txt" )
    test_X = data['text_final']
    test_y = data['label']
    # NOTE(review): the TF-IDF vectoriser is fitted on the *test* text here,
    # so its vocabulary/IDF weights can differ from what the model was trained
    # with - consider persisting the training-time vectoriser and loading it.
    Tfidf_vect = TfidfVectorizer(max_features=3500)
    Tfidf_vect.fit(data['text_final'])
    Test_X_Tfidf = Tfidf_vect.transform(test_X)
    # Load the pre-trained classifier and score the test set.
    model = load("trained_model.pkl")
    RFC_pred = model.predict(Test_X_Tfidf)
    report = classification_report(test_y, RFC_pred)
    print(RFC_pred)
    print(report)
|
13,751 | b2ce8562668cf16267f8ba035d83c4cff22522b6 | # -*- coding: utf-8 -*-
# Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Solves the power flow using a fast decoupled method.
"""
import warnings
from numpy import array, angle, exp, linalg, conj, r_, Inf, column_stack, real
from scipy.sparse.linalg import splu
from pandapower.pypower.makeSbus import makeSbus
from pandapower.pypower.makeB import makeB
def decoupledpf(Ybus, Sbus, V0, pv, pq, ppci, options):
    """Solves the power flow using a fast decoupled method.
    Solves for bus voltages given the full system admittance matrix (for
    all buses), the complex bus power injection vector (for all buses),
    the initial vector of complex bus voltages, the FDPF matrices B prime
    and B double prime, and column vectors with the lists of bus indices
    for the swing bus, PV buses, and PQ buses, respectively. The bus voltage
    vector contains the set point for generator (including ref bus)
    buses, and the reference angle of the swing bus, as well as an initial
    guess for remaining magnitudes and angles. C{ppopt} is a PYPOWER options
    vector which can be used to set the termination tolerance, maximum
    number of iterations, and output options (see L{ppoption} for details).
    Uses default options if this parameter is not given. Returns the
    final complex voltages, a flag which indicates whether it converged
    or not, and the number of iterations performed.
    @see: L{runpf}
    @author: Ray Zimmerman (PSERC Cornell)
    Modified to consider voltage_depend_loads
    """
    # map old algorithm option names to the numeric pypower codes
    pp2pypower_algo = {'fdbx': 2, 'fdxb': 3}
    # options
    tol = options["tolerance_mva"]
    max_it = options["max_iteration"]
    # No use currently for numba. TODO: Check if can be applied in Bp and Bpp
    # numba = options["numba"]
    # NOTE: options["algorithm"] is either 'fdbx' or 'fdxb'. Otherwise, error
    algorithm = pp2pypower_algo[options["algorithm"]]
    voltage_depend_loads = options["voltage_depend_loads"]
    v_debug = options["v_debug"]
    baseMVA = ppci["baseMVA"]
    bus = ppci["bus"]
    branch = ppci["branch"]
    gen = ppci["gen"]
    # initialize iteration counter and polar voltage components
    i = 0
    V = V0
    Va = angle(V)
    Vm = abs(V)
    dVa, dVm = None, None
    # when debugging, one column of Vm/Va is stacked per iteration
    if v_debug:
        Vm_it = Vm.copy()
        Va_it = Va.copy()
    else:
        Vm_it = None
        Va_it = None
    # set up indexing for updating V
    pvpq = r_[pv, pq]
    # evaluate initial mismatch
    P, Q = _evaluate_mis(Ybus, V, Sbus, pvpq, pq)
    # check tolerance
    converged = _check_for_convergence(P, Q, tol)
    # create and reduce B matrices
    Bp, Bpp = makeB(baseMVA, bus, real(branch), algorithm)
    # splu requires a CSC matrix
    Bp = Bp[array([pvpq]).T, pvpq].tocsc()
    Bpp = Bpp[array([pq]).T, pq].tocsc()
    # factor B matrices once; the LU factorizations are reused every iteration
    Bp_solver = splu(Bp)
    Bpp_solver = splu(Bpp)
    # do P and Q iterations
    while (not converged and i < max_it):
        # update iteration counter
        i = i + 1
        # ----- do P iteration, update Va -----
        dVa = -Bp_solver.solve(P)
        # update voltage
        Va[pvpq] = Va[pvpq] + dVa
        V = Vm * exp(1j * Va)
        # evaluate mismatch
        P, Q = _evaluate_mis(Ybus, V, Sbus, pvpq, pq)
        # check tolerance
        if _check_for_convergence(P, Q, tol):
            converged = True
            break
        # ----- do Q iteration, update Vm -----
        dVm = -Bpp_solver.solve(Q)
        # update voltage
        Vm[pq] = Vm[pq] + dVm
        V = Vm * exp(1j * Va)
        if v_debug:
            Vm_it = column_stack((Vm_it, Vm))
            Va_it = column_stack((Va_it, Va))
        # voltage-dependent loads: recompute injections with updated magnitudes
        if voltage_depend_loads:
            Sbus = makeSbus(baseMVA, bus, gen, vm=Vm)
        # evaluate mismatch
        P, Q = _evaluate_mis(Ybus, V, Sbus, pvpq, pq)
        # check tolerance
        if _check_for_convergence(P, Q, tol):
            converged = True
            break
    # the newtonpf/newtonpf function returns J. We are returning Bp and Bpp
    return V, converged, i, Bp, Bpp, Vm_it, Va_it
def _evaluate_mis(Ybus, V, Sbus, pvpq, pq):
    """Return the active/reactive power mismatch vectors.

    The complex mismatch is V * conj(Ybus * V) - Sbus; its real part is
    taken at PV+PQ buses (active power), its imaginary part at PQ buses
    (reactive power).
    """
    injected = conj(Ybus * V)
    mismatch = V * injected - Sbus
    return mismatch[pvpq].real, mismatch[pq].imag
def _check_for_convergence(mis_p, mis_q, tol):
    """True when both mismatch vectors are below *tol* in the infinity norm."""
    worst_p = linalg.norm(mis_p, Inf)
    worst_q = linalg.norm(mis_q, Inf)
    return worst_p < tol and worst_q < tol
|
13,752 | a6afb02780f5418e60601aa83c740eb85a13e07e | # class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def largestValues(self, root: "TreeNode") -> "List[int]":
        """Return the largest node value on each level of a binary tree.

        Iterative level-order (BFS) traversal; row_max[d] holds the running
        maximum for depth d. An empty tree yields an empty list.

        Fixes vs. the original: the annotations are quoted so the class can
        be defined even when TreeNode/List are not imported (they were
        evaluated eagerly and raised NameError), and the queue uses a moving
        head index instead of list.pop(0), avoiding O(n) per dequeue.
        """
        if root is None:
            return []
        row_max = [root.val]
        queue = [(root, 0)]  # (node, depth) pairs in visit order
        head = 0
        while head < len(queue):
            node, depth = queue[head]
            head += 1
            if depth == len(row_max):
                # first node seen on a new level starts that level's maximum
                row_max.append(node.val)
            elif row_max[depth] < node.val:
                row_max[depth] = node.val
            if node.left is not None:
                queue.append((node.left, depth + 1))
            if node.right is not None:
                queue.append((node.right, depth + 1))
        return row_max
|
13,753 | 1a13a6f3425bfdf4b49a1ed1059ebeb3e4a44a0a | import sys
import scrapy
if len(sys.argv) < 2:
print('This crawler takes exactly two arguments [URL] [email]')
exit(-1)
class LBCSpider(scrapy.Spider):
    """Scrapy spider for a leboncoin listing page.

    Yields one {"count": N} item with the total number of results, then one
    serialized Object per advert found on the page.
    """

    def __init__(self, *args, **kwargs):
        super(LBCSpider, self).__init__(*args, **kwargs)
        # The crawl is started with -a start_url=...; a missing argument
        # leaves [None] here and nothing useful will be requested.
        self.start_urls = [kwargs.get('start_url')]

    name = 'lbc'

    def parse(self, response):
        # Total result count from the page header, with thousands-separator
        # spaces removed before int() conversion.
        number_of_results = response.xpath('//*[@id="listingAds"]/section/header/nav/a/span/text()').extract()[
            0].replace(' ', "")
        number_of_results = int(number_of_results)
        yield {
            "count": number_of_results
        }
        for r in response.xpath('//*[@id="listingAds"]/section/section/ul/li/a'):
            try:
                o = Object()
                o.title = r.xpath(".//section/h2/text()").extract_first().strip()
                o.location = r.xpath(".//section/p[2]/meta[1]/@content").extract_first() + ' / ' \
                             + r.xpath(".//section/p[2]/meta[2]/@content").extract_first()
                o.price = self.parse_price(r.xpath(".//section/h3/text()").extract_first().strip())
                o.date_pub = r.xpath(".//section/aside/p/text()").extract_first().strip()
                o.href = r.xpath('.//@href').extract_first()
                yield o.serialize()
            except (TypeError, AttributeError):
                # Adverts with a missing field (extract_first() -> None)
                # are silently skipped.
                pass

    def parse_price(self, price):
        # Strip non-breaking spaces and the euro sign from the price string.
        return price.replace(u'\u00a0', '').replace(u'\u20ac', '')
class Object:
    """Plain record for one scraped listing; serialize() dumps it as a dict."""

    # class-level defaults; the spider overwrites them attribute by attribute
    title = ""
    location = ""
    price = ""
    date_pub = ""
    href = ""

    def serialize(self):
        """Return the listing as a plain dict (the item the spider yields)."""
        fields = ("title", "location", "price", "date_pub", "href")
        return {name: getattr(self, name) for name in fields}
|
13,754 | 91269e39b188d8bf056de0ecafab693fd9483c23 | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 29 16:35:08 2020
@author: morte
"""
from claseProfesor import Profesor
def testComposicion():
    """Smoke test: build a Profesor, print it, then delete it explicitly
    (exercises the class's construction/printing/destruction behaviour)."""
    profesor = Profesor(11334441, 'Rodríguez', 'Myriam')
    print(profesor)
    del profesor


if __name__=='__main__':
    testComposicion()
|
13,755 | cb7f79fba68c4cd0c8e2ac08de84086e9157e2d1 | n = int(input())
for i in range(n):
a, b = map(int, input().split())
if(a % 2 == 1 and b % 2 == 0 and a+b>=10):
print("OPEN")
else:
print("CLOSED")
|
13,756 | bca27e6f0aadf2b848ee651189bc8fd80c66a555 | import unittest
from unittest import TestCase
from python_translators.translators.glosbe_translator import GlosbeTranslator
from python_translators.translation_query import TranslationQuery
from translators.glosbe_over_tor_translator import GlosbeOverTorTranslator
class TestGlosbeOverTorTranslator(TestCase):
    """Exercises GlosbeOverTorTranslator for de -> nl lookups.

    Requires network access (Glosbe over Tor) to run.
    """

    def setUp(self):
        # Fresh translator per test method.
        self.translator = GlosbeOverTorTranslator(source_language='de', target_language='nl')

    def testNumberOfTranslationsWorks(self):
        # NOTE(review): this method only prints the responses; the asserts are
        # commented out below, so the test cannot fail on wrong translations.
        response = self.translator.translate(TranslationQuery(
            query="genommen",
            max_translations=5
        ))
        print (response.translations)
        response = self.translator.translate(TranslationQuery(
            query="genommen",
            max_translations=5
        ))
        print (response.translations)
        # response = self.translator.translate(TranslationQuery(
        #     query="genommen",
        #     max_translations=5
        # ))
        #
        # print (response.translations)
        # self.assertEqual(response.translations[0]['translation'], 'bed')
        # self.assertEqual(len(response.translations), 5)


if __name__ == '__main__':
    unittest.main()
|
13,757 | 32a3ad79591182c9ba50204847a810b156de646f | import os
import re
import time
import configparser
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
from ..utils.obslog import read_obslog
def load_config(pattern, verbose=True):
    """Load the first config file in the current directory matching *pattern*.

    Args:
        pattern (str): Regular expression matched against filenames in the
            current working directory (via :func:`re.match`).
        verbose (bool): If True, print the name of the loaded file.

    Returns:
        configparser.ConfigParser: Parsed configuration. When no filename
        matches, the returned parser is simply empty.
    """
    config = configparser.ConfigParser(
        inline_comment_prefixes = (';','#'),
        interpolation = configparser.ExtendedInterpolation(),
        )
    # use the first matching file in directory order, as before
    for fname in os.listdir(os.curdir):
        if re.match(pattern, fname):
            config.read(fname)
            if verbose:
                # fixed typo in user-visible message ("congfig" -> "config")
                message = 'Load config file: "{}"'.format(fname)
                print(message)
            break
    return config
def load_obslog(pattern, fmt='obslog', verbose=True):
    """Find and read the observing log file.
    Args:
        pattern (str): Pattern of the filename of observing log.
        fmt (str): Format passed through to :func:`read_obslog`.
        verbose (bool): If True, print the name of the loaded log file.
    Returns:
        :class:`astropy.io.Table`: Observing log table, or None when no
        log file matching *pattern* exists in the current directory.
    """
    # find observing log in the current working directory
    logname_lst = [fname for fname in os.listdir(os.curdir)
                   if re.match(pattern, fname)]
    if len(logname_lst)==0:
        print('No observation log found')
        return None
    elif len(logname_lst)==1:
        select_logname = logname_lst[0]
    elif len(logname_lst)>1:
        # several candidates: let the user choose interactively
        nlog = len(logname_lst)
        # maximum length of log filename
        maxlen = max([len(logname) for logname in logname_lst])
        # maximum length of log number
        maxdgt = len(str(nlog))
        fmt_string = (' - [{{:{:d}d}}] {{:{:d}s}} '
                      'Last modified in {{:s}}').format(maxdgt, maxlen)
        # build a list of (filename, modified time)
        nametime_lst = [(logname, os.path.getmtime(logname))
                        for logname in logname_lst]
        # sort with last modified time
        nametime_lst = sorted(nametime_lst, key=lambda v:v[1])
        # print lognames one by one
        for i, (logname, mtime) in enumerate(nametime_lst):
            t = time.localtime(mtime)
            time_str = '{0:02d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}'.format(
                        *t)
            print(fmt_string.format(i, logname, time_str))
        # repeat the loop until user gives a valid logname ID
        while(True):
            string = input('Select an observing log: ')
            if string.isdigit() and int(string) < nlog:
                select_logname = nametime_lst[int(string)][0]
                break
            elif len(string.strip())==0:
                print('Warning: no logfile selected')
            else:
                print('Warning: {} is not a valid log ID'.format(string))
    else:
        # unreachable: the three branches above cover all list lengths
        pass
    if verbose:
        message = 'Load obslog file: "{}"'.format(select_logname)
        print(message)
    logtable = read_obslog(select_logname, fmt=fmt)
    return logtable
def plot_spectra1d():
    """Plot 1d spectra.

    Renders the extracted 1-D spectra of every science frame in the log,
    ten orders per figure page, into an ``images/`` directory.

    NOTE(review): ``read_config``, ``find_log``, ``read_log`` and ``fits``
    are not defined or imported in this module as shown — calling this
    function will raise NameError; confirm the intended imports
    (``load_config``/``load_obslog`` above look like the replacements).
    """
    config = read_config('')
    obslog_file = find_log(os.curdir)
    log = read_log(obslog_file)
    section = config['data']
    midproc = section['midproc']
    report = section['report']
    steps_string = config['reduction']['steps']
    step_lst = steps_string.split(',')
    # suffix of the last reduction step selects the final data product
    suffix = config[step_lst[-1].strip()]['suffix']
    image_path = 'images'
    if not os.path.exists(image_path):
        os.mkdir(image_path)
    # one colour per channel, cycled
    color_lst = 'rgbcmyk'
    for item in log:
        if item.imagetype == 'sci':
            filename = os.path.join(midproc, '%s%s.fits'%(item.fileid, suffix))
            if not os.path.exists(filename):
                continue
            data = fits.getdata(filename)
            omin = data['order'].min()
            omax = data['order'].max()
            order_lst = np.arange(omin, omax+1)
            for io, order in enumerate(order_lst):
                # a new figure every 10 orders, laid out in a 2x5 grid
                if io%10 == 0:
                    fig = plt.figure(figsize=(14.14,10), dpi=150)
                ax = fig.add_axes([0.055+(io%2)*0.50,
                        0.06 + (4-int((io%10)/2.))*0.188, 0.43, 0.16])
                wavemin, wavemax = 1e9, 0
                channels = sorted(np.unique(data['channel']))
                for ich, channel in enumerate(channels):
                    # select rows for this (channel, order) pair
                    mask1 = (data['channel']==channel)
                    mask2 = (data['order']==order)
                    mask = mask1*mask2
                    if mask.sum()==0:
                        continue
                    row = data[mask][0]
                    wave = row['wavelength']
                    flux = row['flux']
                    color = color_lst[ich%7]
                    ax.plot(wave, flux, color+'-', lw=0.7, alpha=0.7)
                    wavemin = min(wavemin, wave.min())
                    wavemax = max(wavemax, wave.max())
                ax.set_xlabel(u'Wavelength (\xc5)')
                x1, x2 = wavemin, wavemax
                y1, y2 = ax.get_ylim()
                ax.text(0.97*x1+0.03*x2, 0.8*y2, 'Order %d'%order)
                ax.set_xlim(x1, x2)
                ax.set_ylim(0, y2)
                # save a completed page of 10 panels
                if io%10 == 9:
                    fig.savefig(os.path.join(image_path, 'spec_%s_%02d.png'%(item.fileid, int(io/10.))))
                    plt.close(fig)
            # save the final, possibly partial, page
            fig.savefig(os.path.join(image_path, 'spec_%s_%02d.png'%(item.fileid, int(io/10.))))
            plt.close(fig)
|
13,758 | 53eecffd9377716262f8245d4d73f944fb805da9 | import numpy as np
from PIL import Image
# Load the image and take a writable copy of its pixel array.
image_array = np.asarray(Image.open("./output.png")).copy()
# Recover a message hidden in the pixel LSBs: 2 bits from R, 2 from G and
# 3 from B form one 7-bit ASCII character per pixel, row by row.
for x in range(len(image_array)):
    for y in range(len(image_array[0])):
        r = image_array[x][y][0] & 0b00000011
        g = image_array[x][y][1] & 0b00000011
        b = image_array[x][y][2] & 0b00000111
        c = (r | (g << 2) | (b << 4))
        print(chr(c), end='')
        if chr(c) == '}':
            # '}' terminates the message; the for/else idiom below breaks
            # out of both loops once it is seen
            break
    else:
        continue
    break
print()
|
13,759 | 9bdf3cbb7dde279413066c6a3fe1929185307ee2 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/1/2 4:00 下午
# @Author : taicheng.guo
# @Email: : 2997347185@qq.com
# @File : config.py
from args import read_args
# Command-line arguments supply the runtime-tunable values below.
args = read_args()
# Hyper-parameters shared across the training scripts. Sizes refer to the
# model's input/embedding dimensions; file paths point at the preprocessed
# dictionaries and pretrained word embeddings.
hparams = {
    'title_size': 30,
    'his_size': 50,
    'npratio': 4,
    'word_emb_dim': 300,
    # model
    'encoder_size': 300,
    'v_size': 200,
    'embed_size': 300,
    'nhead': 20,
    'batch_size': args.mini_batch_s,  # taken from the CLI, not hard-coded
    'epochs': 20,
    'learning_rate': 0.0005,
    'wordDict_file': './data/process_utils/word_dict.pkl',
    'userDict_file': './data/process_utils/uid2index.pkl',
    'wordEmb_file': './data/process_utils/embedding.npy',
}
13,760 | 489ab75480561b3a90efa1e9d2c756f742f48ac6 | from argparse import ArgumentParser
from pathlib import Path
from nhl_predict.dataset_manager import DatasetManager
from nhl_predict.game_prediction.experiment import Experiment
def get_args():
    """Parse command-line options for the game-predictor training script.

    Returns:
        argparse.Namespace with ``path`` (required project root) and
        ``verbose`` (flag, defaults to False).
    """
    cli = ArgumentParser("Script for training game predictor.")
    cli.add_argument("-p", "--path", type=str, required=True,
                     help="Path to project's root dir.")
    cli.add_argument("-v", dest="verbose", action="store_true",
                     help="Plot history of the training.")
    cli.set_defaults(verbose=False)
    return cli.parse_args()
if __name__ == "__main__":
    args = get_args()
    project_root = Path(args.path)
    dm = DatasetManager(project_root / "data")
    # seasons are identified by their starting year
    train_seasons = [2011, 2012, 2013, 2014, 2015]
    val_seasons = [2016, 2017, 2018]
    exp = Experiment(
        project_root=project_root,
        hidden_layers="512-128",
        epochs=60,
        batch_size=128,
        dropout=0.1,
        verbose=True,
    )
    history = exp.train_final_model(train_seasons, val_seasons)
    if args.verbose:
        exp.model.plot_training_history(history, "auc")
        exp.model.plot_training_history(history, "loss")
    # dump per-season predictions as "<start>-<end>.pkl" pickles
    # NOTE(review): the two loops below are identical and could iterate over
    # train_seasons + val_seasons in one pass.
    for season in train_seasons:
        x, y = dm.get_dataset_by_seasons([season])
        predictions = exp.predict(x)
        predictions.to_pickle(
            project_root / "data" / "games_predictions" / f"{season}-{season+1}.pkl"
        )
    for season in val_seasons:
        x, y = dm.get_dataset_by_seasons([season])
        predictions = exp.predict(x)
        predictions.to_pickle(
            project_root / "data" / "games_predictions" / f"{season}-{season+1}.pkl"
        )
|
13,761 | 204c53e815d063c04a333033e260187c05465799 | from django.contrib import admin
from .models import Selecciona
admin.site.register(Selecciona)
|
13,762 | 2f1b9421fdd81d0cc5d25d67f69867da9b68ef52 | class Movie:
"""This class contains information about a movie"""
count = 0
def __init__(self, theTitle, theDirector, theDuration, theActors, theRating):
self.title = theTitle
self.director = theDirector
self.duration = theDuration
self.actors = theActors
self.rating = theRating
Movie.count += 1 # static variable
# find if movie director name starts with specific symbol
def startsWith(self, theSymbol):
self.symbol = theSymbol
if self.director[0] == self.symbol:
print(self.director)
else:
print("No director's name starts with ", self.symbol)
# find if some person is an actor
def isActor(self, theActorName):
self.actorName = theActorName
for actor in self.actors:
if self.actorName == actor:
print(actor)
break
else:
print("We don't have an actor with that name")
# change rating
def changeRating(self, theChangedRating):
self.changedRatign = theChangedRating
self.rating = self.changedRatign
print("The rating has been changed")
def showMovie(self):
print("Movie name {0}; Director {1}; Duration {2}; Actor {3}; Rating {4}".
format(self.title, self.director, self.duration, self.actors, self.rating))
# Cast lists used by the sample movies below.
actors1 = ['Sylvester_Stallone', 'Richard_Crenna', 'Brian_Dennehy']
actors2 = ['Mark_Hamill', 'Harrison_Ford', 'Carrie_Fisher']
actors3 = ['Chris_Pratt', 'Will_Ferrell', 'Elizabeth_Banks']
# Each construction increments the shared Movie.count class counter.
movie1 = Movie('First_Blood', 'Ted_Kotcheff', 2, actors1, 4.5)
movie2 = Movie('Star_Wars', 'Irvin_Kershner', 3, actors2, 4.8)
movie3 = Movie('Lego_Movie', 'Phil_Lord', 1, actors3, 4.6)
print("---------------Movie info---------------")
movie1.showMovie()
movie2.showMovie()
movie3.showMovie()
print("---------------Starts with---------------")
movie1.startsWith('T')
print("---------------Change rating---------------")
movie2.changeRating(5)
print("---------------Find actor---------------")
movie3.isActor('Chris_Pratt')
# count is a class attribute, so instance access reads the shared value
print("There are ", movie1.count, " movies")
13,763 | 233f3f7157c34f667058e1eec9e80896f8e2078d | from math import pi
from copy import copy
### Read in content, spread it out and create single list
def readContentIn(contentRead):
    """Tokenize raw Lisp source: pad parens with spaces, split on spaces,
    and drop the empty strings the padding produces."""
    padded = contentRead.replace('(', ' ( ').replace(')', ' ) ')
    return filter(lambda token: token != '', padded.split(' '))
### Parse content and move into nested lists
def parseContent(currentParsedInput, contentInput):
    """Recursively build a nested-list AST from a flat token list.

    Returns (parsed_expression, remaining_tokens). Raises SyntaxError on a
    premature end of input or on a stray closing paren.
    """
    tokens = contentInput
    if len(tokens) == 0:
        raise SyntaxError('Lisp program ended unexpectedly - incomplete code.')
    head, tokens = tokens[0], tokens[1:]
    if head == '(':
        children = []
        while tokens[0] != ')':
            child, tokens = parseContent('', tokens)
            children.append(child)
        return children, tokens[1:]  # drop the closing ')'
    elif head == ')':
        raise SyntaxError('EOF Error')
    else:
        return testFloatElseString(head), tokens
### Depth clean
def depthClean(inputList):
    """Recursively strip redundant single-element list nesting.

    [[x]] is unwrapped until the value is either not a list or a list with
    more than one element; the same cleaning is then applied to every list
    element. Non-list input is returned unchanged.

    Fixes the original crash on inputs such as [[3]]: the one-element
    unwrapping loop could leave a non-list value, which was then len()-ed
    and indexed, raising TypeError.
    """
    if type(inputList) is not list:
        return inputList
    # unwrap [[x]] -> [x] -> x while it remains a one-element list
    while type(inputList) is list and len(inputList) == 1:
        inputList = inputList[0]
    if type(inputList) is not list:
        return inputList
    # clean nested lists in place, leaving other elements untouched
    for i in range(len(inputList)):
        if type(inputList[i]) is list:
            inputList[i] = depthClean(inputList[i])
    return inputList
### Global environment
# Primitive procedures for the interpreter's global environment. Each one
# receives a list of already-evaluated arguments.
# NOTE: this module is Python 2 (print statements, builtin reduce).
def geAdd(args):
    # (+ a b ...)
    return sum(args)
def geSubtract(args):
    # (- a b ...): left fold
    return reduce(lambda x, y: x-y, args)
def geMultiply(args):
    # (* a b ...)
    return reduce(lambda x, y: x*y, args)
def geDivide(args):
    # (/ a b ...)
    return reduce(lambda x, y: x/y, args)
def geEqual(args):
    # true when all arguments are equal (shifted-list comparison trick)
    return args[1:] == args[:-1]
def geGreater(args):
    # NOTE(review): compares the two shifted sub-lists lexicographically;
    # matches pairwise ">" only in simple cases — confirm intended semantics.
    return args[1:] < args[:-1]
def geLess(args):
    return args[1:] > args[:-1]
def geGreaterEq(args):
    return args[1:] <= args[:-1]
def geLessEq(args):
    return args[1:] >= args[:-1]
def geAbs(args):
    # (abs x): exactly one argument expected
    try:
        assert len(args) == 1
        return abs(args[0])
    except AssertionError:
        print "Cannot take abs() of ", args
        return None
def geCar(args):
    # first element of a list
    return args[0]
def geCdr(args):
    # everything after the first element
    return args[1:]
# Symbol table mapping Lisp names to the primitives above (plus pi).
globalEnv = {
    "+": geAdd,
    "-": geSubtract,
    "*": geMultiply,
    "/": geDivide,
    "=": geEqual,
    ">": geGreater,
    "<": geLess,
    ">=": geGreaterEq,
    "<=": geLessEq,
    "abs": geAbs,
    "car": geCar,
    "cdr": geCdr,
    "pi": pi
}
### Global functions
def testFloatElseString(char):
try:
return float(char)
except ValueError:
return str(char)
def is_number(inputVal):
    """True when *inputVal* parses as an int or, failing that, a float."""
    for caster in (int, float):
        try:
            caster(inputVal)
            return True
        except ValueError:
            pass
    return False
def is_symbol(inputVal):
    """True when *inputVal* is neither int- nor float-parseable, i.e. a
    symbolic name rather than a numeric literal."""
    try:
        int(inputVal)
        return False
    except ValueError:
        pass
    try:
        float(inputVal)
        return False
    except ValueError:
        return True
    except TypeError:
        return False
def lisp_boolean(x):
    """Validate that *x* is a boolean; print an error and return None otherwise."""
    if x == True:
        return True
    elif x == False:
        return False
    else:
        print x, "is not a boolean."
        return None
### Lisp interpreter
def lisp_eval(sexp, env):
    """Evaluate an s-expression in *env*.

    Lists dispatch to the list rule, everything else to the atom rule.
    A None result is treated as failure and reported on stdout.
    """
    if type(sexp) is list:
        result = lisp_eval_list(sexp, env)
    else:
        result = lisp_eval_atom(sexp, env)
    if result is None:
        print "ERROR: S-expression", sexp, "has failed."
        return None
    else:
        return result
# atoms
def lisp_eval_atom(sexp, env):
    """Evaluate an atom: numbers are self-evaluating, symbols are looked up
    in *env*, and quote-prefixed literals lose their leading quote char."""
    if is_number(sexp):
        return sexp
    elif is_symbol(sexp):
        if sexp in env:
            return env[sexp]
        elif sexp[0] == '"' or sexp[0] == "'":
            # string literal: strip the opening quote only
            return sexp[1:]
        else:
            print "ERROR: No binding for " + sexp
            return None
    else:
        print "No idea what this atom is: " + sexp
        return None
# lists
def lisp_eval_list(sexp, env):
    """Evaluate a list form.

    Lambda heads and operators without a binding in *env* are treated as
    special forms; otherwise every element is evaluated and the head is
    applied to the rest.
    """
    operator = sexp[0]
    if type(operator) is list and operator[0] == 'lambda':
        return lisp_eval_special(sexp, env)
    elif not operator in env:
        return lisp_eval_special(sexp, env)
    else:
        # Basic evaluation rule for list
        evaluated = [ lisp_eval(exp, env) for exp in sexp ]
        fn = evaluated[0]
        args = evaluated[1:]
        return evalApply(fn, args, env)
def lisp_eval_special(sexp, env):
    """Evaluate the special forms: if, quote, define, lambda, set! and let.

    define and set! return the (mutated) environment so the driver can merge
    bindings across top-level components; other forms return a value.
    """
    operator = sexp[0]
    body = sexp[1:]
    if operator == "if":
        if body[0] == 'not':
            # (if not cond a b): swap branches instead of negating cond
            cond, no, yes = body[1:] # reverse no and yes cases to handle opposite not
        else:
            cond, yes, no = body
        if lisp_boolean(lisp_eval(cond, env)):
            return lisp_eval(yes, env)
        elif not lisp_boolean(lisp_eval(cond, env)):
            return lisp_eval(no, env)
        else:
            print "Boolean failed to return T/F; returning None."
            return None
    elif operator == "quote":
        # (quote x): return x unevaluated
        return body[0]
    elif operator == "define":
        name, value = body
        if is_symbol(name):
            # (define name expr): bind the evaluated value
            env[name] = lisp_eval(value, env)
        elif type(name) is list:
            # (define (fn a b) expr): sugar for binding a lambda
            fn = name[0]
            args = name[1:]
            env[fn] = ["lambda", args, value]
        return env
    elif operator[0] == "lambda":
        # immediate application of a literal lambda
        return evalApply(operator, body, env)
    # if operator == "begin": pass
    if operator == "set!":
        variable, value = body
        if variable in env:
            # NOTE: stores the unevaluated value, unlike define
            env[variable] = value
            return env
        else:
            print "ERROR: Variable", variable, "has not been defined yet."
            return None
    if operator == "let":
        # (let ((n1 v1) (n2 v2) ...) body): desugar into a lambda call
        nameList = []
        bodyList = []
        for i in body[0]:
            name, value = i
            nameList.append(name)
            bodyList.append(lisp_eval(value, env))
        try:
            assert len(body[1:]) == 1
            fn = ['lambda', nameList, body[1]]
            args = bodyList
            return evalApply(fn, args, env)
        except AssertionError:
            print "ERROR: Too many elements in body,", body[1:]
            return None
def evalApply(fn, args, env):
    """Apply *fn* to *args*.

    A lambda (a list of the form ["lambda", params, body]) is applied by
    extending a copy of *env* with the parameter bindings. Anything that is
    not indexable raises TypeError on fn[0], which is the signal that *fn*
    is a Python primitive: its arguments are evaluated and passed as a list.
    """
    try:
        fn[0]
        if fn[0] == 'lambda':
            variables = fn[1]
            body = fn[-1]
            # fresh environment so the caller's bindings are not mutated
            new_env = copy(env)
            assert(fn and len(variables) == len(args))
            for (variable, value) in zip(variables, args):
                new_env[variable] = value
            return lisp_eval(body, new_env)
    except TypeError:
        # fn is primitive function
        cleanArgs = []
        for i in args:
            cleanArgs.append(lisp_eval(i, env))
        return fn(cleanArgs)
# contentReadInit = "(define (fib n) (if (< n 2) n (+ (fib (- n 1)) (fib (- n 2))))) (fib 13)"
# contentReadInit = "(define (con x y) (+ x y)) (con 3 2)"
# contentReadInit = '(define (add1 x) (+ 1 x)) (+ (add1 3) ((lambda (x) (+ x 3)) (abs -4)))'
# contentReadInit = '(+ 3 ((lambda (x) (+ x 3)) (abs -23)))'
# contentReadInit = "(define (con x y) (+ x y)) (con 3 ((lambda (x) (+ x 3)) 3))"
# contentReadInit = "((lambda (x) (* x x)) 5)"
# contentReadInit = "(let ((x 1)(w 3)(e 4)) (+ x e w))"
contentReadInit = "(define g 3) (set! g 12) g"
def wrapperRun(contentInput, dictIn):
    """Tokenize, parse and evaluate a Lisp program string against *dictIn*.

    Wraps the whole input in one extra pair of parens so multiple top-level
    forms parse as a single list; bindings produced by earlier forms are
    merged into the environment before the last form is evaluated.
    NOTE: Python 2 only (print statements, dict.items() concatenation).
    """
    print 'Lisp input is:', contentInput
    read = ['('] + readContentIn(contentInput) + [')']
    # read = readContentIn(contentInput)
    parsedList, _ = parseContent('', read)
    print 'Parsed list is:', parsedList
    superGlobal = dictIn
    if len(parsedList) > 1:
        print "Multiple components in Lisp input..."
        for com in parsedList[:-1]:
            print "Processing component: ", com
            newDic = lisp_eval(com, superGlobal)
            # define/set! return an environment; merge its bindings in
            if not newDic is None:
                superGlobal = dict(superGlobal.items() + newDic.items())
        print "Final assembly with last component: ", parsedList[-1]
        return str('Final output: ' + str(lisp_eval(parsedList[-1], superGlobal)))
    else:
        return str('Final output: ' + str(lisp_eval(parsedList[0], superGlobal)))
|
13,764 | dd834b489dca722e93cd5256fc93e1efaf188f4b | #!/usr/bin/python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
from google.protobuf import text_format
import tensorflow as tf
from tensorflow.python.framework import graph_util
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_io
from tensorflow.python.platform import app
from tensorflow.python.platform import gfile
from tensorflow.python.tools import optimize_for_inference_lib
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.framework.errors_impl import NotFoundError
try:
from tensorflow.contrib.nccl.python.ops import nccl_ops
nccl_ops._maybe_load_nccl_ops_so()
except NotFoundError:
pass # only cpu or only single gpu so no nccl installed
except AttributeError:
pass # only cpu or only single gpu so no nccl installed
except ImportError:
pass # only cpu or only single gpu so no nccl installed
# The original freeze_graph function
# from tensorflow.python.tools.freeze_graph import freeze_graph
dir = os.path.dirname(os.path.realpath(__file__))
def freeze_graph(model_dir, output_node_names):
    """Extract the sub graph defined by the output nodes and convert
    all its variables into constant
    Args:
        model_dir: the root folder containing the checkpoint state file
        output_node_names: a string, containing all the output node's names,
            comma separated
    Returns:
        The frozen GraphDef, which is also written to
        frozen_graphs/unet_frozen.pb (or -1 when output_node_names is empty).
    """
    if not tf.gfile.Exists(model_dir):
        raise AssertionError(
            "Export directory doesn't exists. Please specify an export "
            "directory: %s" % model_dir)
    if not output_node_names:
        print("You need to supply the name of a node to --output_node_names.")
        return -1
    # We retrieve our checkpoint fullpath
    checkpoint = tf.train.get_checkpoint_state(model_dir)
    input_checkpoint = checkpoint.model_checkpoint_path
    print("\n\nLoading checkpoint: %s\n\n" % input_checkpoint)
    # We precise the file fullname of our freezed graph
    absolute_model_dir = "/".join(input_checkpoint.split('/')[:-1])
    # NOTE(review): absolute_model_dir is computed but never used — confirm
    # whether the output was meant to live next to the checkpoint.
    if not os.path.isdir('frozen_graphs'):
        os.mkdir('frozen_graphs')
    output_graph = "frozen_graphs/unet_frozen.pb"
    # We clear devices to allow TensorFlow to control on which device it will load operations
    clear_devices = True
    # We start a session using a temporary fresh Graph
    with tf.Session(graph=tf.Graph()) as sess:
        # We import the meta graph in the current default Graph
        saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=clear_devices)
        # We restore the weights
        saver.restore(sess, input_checkpoint)
        gd = tf.get_default_graph().as_graph_def()
        """
        # fix batch norm nodes
        for node in gd.node:
            if node.op == 'RefSwitch':
                node.op = 'Switch'
                for index in xrange(len(node.input)):
                    if 'moving_' in node.input[index]:
                        node.input[index] = node.input[index] + '/read'
            elif node.op == 'AssignSub':
                node.op = 'Sub'
                if 'use_locking' in node.attr: del node.attr['use_locking']
        """
        # We use a built-in TF helper to export variables to constants
        output_graph_def = tf.graph_util.convert_variables_to_constants(
            sess,  # The session is used to retrieve the weights
            gd,  # The graph_def is used to retrieve the nodes
            output_node_names.split(",")  # The output node names are used to select the usefull nodes
        )
        # Finally we serialize and dump the output graph to the filesystem
        with tf.gfile.GFile(output_graph, "wb") as f:
            f.write(output_graph_def.SerializeToString())
        print("%d ops in the final graph." % len(output_graph_def.node))
    return output_graph_def
def optimize_for_inference():
    """Strip training-only nodes from the frozen UNet graph.

    Reads frozen_graphs/unet_frozen.pb, runs TensorFlow's
    optimize_for_inference pass for the UNet/images -> UNet/mask path,
    and writes the result to cpp/unet.pb.
    """
    input_graph_def = graph_pb2.GraphDef()
    inpt = "frozen_graphs/unet_frozen.pb"
    output = "cpp/unet.pb"
    with gfile.Open(inpt, "rb") as f:
        data = f.read()
        #if FLAGS.frozen_graph:
        input_graph_def.ParseFromString(data)
        #else:
        #    text_format.Merge(data.decode("utf-8"), input_graph_def)
    input_names = ["UNet/images"]
    output_names = ["UNet/mask"]
    output_graph_def = optimize_for_inference_lib.optimize_for_inference(
        input_graph_def,
        input_names,
        output_names,
        dtypes.float32.as_datatype_enum,
        False)
    # NOTE(review): written in text mode "w" with serialized bytes — "wb"
    # is the usual mode for protobuf output; confirm on Python 3.
    f = gfile.FastGFile(output, "w")
    f.write(output_graph_def.SerializeToString())
if __name__ == '__main__':
    # CLI: --model_dir points at the checkpoint directory; the graph is
    # frozen at the UNet/mask output node, then optimized for inference.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_dir", type=str, default="model", help="Model folder to export")
    args = parser.parse_args()
    freeze_graph(args.model_dir, "UNet/mask")
    print("Graph frozen successfully, optimizing for inference...")
    optimize_for_inference()
    print("Optimized successfully, done.")
|
13,765 | 9ca16403d0a3870b2d8038c3b4a51b9338a924c7 | #!/usr/bin/python3
""" defines a function that writes an Object to a text file, using a
JSON representation: """
import json
def save_to_json_file(my_obj, filename):
    """Write *my_obj* to *filename* as its JSON text representation."""
    with open(filename, 'w') as f:
        json.dump(my_obj, f)
|
13,766 | 6da30bd1ad6355da4bf64c9ab260834223e91f9a | """
EXERCÍCIO 030: Par ou Ímpar?
Crie um programa que leia um número inteiro e mostre na tela se ele é PAR ou ÍMPAR.
"""
def _classify(n):
    """Return 'PAR' for an even integer, 'ÍMPAR' for an odd one."""
    return 'PAR' if n % 2 == 0 else 'ÍMPAR'


def main():
    """Read an integer and report whether it is even (PAR) or odd (ÍMPAR),
    as described in the exercise statement at the top of the file."""
    n = int(input('Digite um número inteiro: '))
    print(f'O número {n} é {_classify(n)}!')


if __name__ == '__main__':
    main()
|
13,767 | 8af43b13f10df2b7921771454b0d7cfb6a47b2b3 | # In questo tutorial utilizzerò alberi decisionali (dall'inglese Decision Tree) e foreste casuali (dall'inglese Random
# Forest) per creare un modello di machine learning in grado di dirci se una determinata persona avrebbe avuto
# possibilità di salvarsi dal disastro del Titanic.
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import log_loss
from sklearn.metrics import accuracy_score
# Load the Titanic passenger table straight from the Stanford CS109 mirror.
titanic = pd.read_csv("http://web.stanford.edu/class/archive/cs/cs109/cs109.1166/stuff/titanic.csv")
print(titanic.info())
# Drop the free-text name column and one-hot encode the remaining categoricals.
titanic = titanic.drop("Name",axis=1)
titanic = pd.get_dummies(titanic)
print(titanic.head())
# Features/target split: predict the Survived column; 70/30 train/test.
X = titanic.drop("Survived", axis=1).values
Y = titanic["Survived"].values
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3, random_state=0)
print(X_train.shape)
from sklearn.ensemble import RandomForestClassifier
# NOTE(review): random_state=False — an int seed or None was probably
# intended; confirm.
forest = RandomForestClassifier(random_state=False, max_depth=8, n_estimators=30)
forest.fit(X_train, Y_train)
# Compare train vs test accuracy to gauge overfitting.
Y_pred_train = forest.predict(X_train)
Y_pred = forest.predict(X_test)
accuracy_train = accuracy_score(Y_train, Y_pred_train)
accuracy_test = accuracy_score(Y_test, Y_pred)
print("ACCURACY: TRAIN=%.4f TEST=%.4f" % (accuracy_train, accuracy_test))
13,768 | 8b7d0f5dfc1c332af06b0998aaa9a8ac9e871ef6 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import flask
from flask import json
import logging
from dci.api.v1 import api
from dci.api.v1 import base
from dci.api.v1 import utils as v1_utils
from dci import decorators
from dci.common import exceptions as dci_exc
from dci.common.schemas import (
check_json_is_valid,
clean_json_with_schema,
create_pipeline_schema,
update_pipeline_schema,
check_and_get_args,
)
from dci.common import utils
from dci.db import declarative
from dci.db import models2
import sqlalchemy.orm as sa_orm
@api.route("/pipelines", methods=["POST"])
@decorators.login_required
def create_pipeline(user):
    """Create a pipeline owned by one of the caller's teams (HTTP 201)."""
    values = flask.request.json
    check_json_is_valid(create_pipeline_schema, values)
    # add the server-generated fields (id, etag, timestamps)
    values.update(v1_utils.common_values_dict())
    if not user.is_in_team(values["team_id"]):
        raise dci_exc.Unauthorized()
    created_pipeline = base.create_resource_orm(models2.Pipeline, values)
    result = json.dumps({"pipeline": created_pipeline})
    return flask.Response(result, 201, content_type="application/json")
@api.route("/pipelines/<uuid:p_id>", methods=["GET"])
@decorators.login_required
def get_pipeline_by_id(user, p_id):
    """Fetch one pipeline; non-privileged callers must belong to its team."""
    p = base.get_resource_orm(
        models2.Pipeline,
        p_id,
        options=[sa_orm.selectinload("team")],
    )
    # super admins, read-only users and EPM bypass the team check
    if user.is_not_super_admin() and user.is_not_read_only_user() and user.is_not_epm():
        if p.team_id not in user.teams_ids:
            raise dci_exc.Unauthorized()
    return flask.Response(
        json.dumps({"pipeline": p.serialize()}),
        200,
        content_type="application/json",
    )
@api.route("/pipelines", methods=["GET"])
@decorators.login_required
def get_pipelines(user):
    """List non-archived pipelines visible to the caller, paginated."""
    args = check_and_get_args(flask.request.args.to_dict())
    query = flask.g.session.query(models2.Pipeline)
    # plain users only see their own teams' pipelines
    if user.is_not_super_admin() and user.is_not_read_only_user() and user.is_not_epm():
        query = query.filter(models2.Pipeline.team_id.in_(user.teams_ids))
    query = query.filter(models2.Pipeline.state != "archived")
    query = query.from_self()
    query = declarative.handle_args(query, models2.Pipeline, args)
    query = query.options(sa_orm.joinedload("team", innerjoin=True))
    # total count is taken before pagination is applied
    nb_pipelines = query.count()
    query = declarative.handle_pagination(query, args)
    pipelines = [j.serialize(ignore_columns=["data"]) for j in query.all()]
    return flask.jsonify({"pipelines": pipelines, "_meta": {"count": nb_pipelines}})
@api.route("/pipelines/<uuid:p_id>/jobs", methods=["GET"])
@decorators.login_required
def get_jobs_from_pipeline(user, p_id):
    """Return all non-archived jobs of a pipeline, oldest first."""
    p = base.get_resource_orm(models2.Pipeline, p_id)
    query = flask.g.session.query(models2.Job)
    if user.is_not_super_admin() and user.is_not_read_only_user() and user.is_not_epm():
        # a non-privileged user must own the pipeline and only sees jobs
        # belonging to teams they are a member of
        if p.team_id not in user.teams_ids:
            raise dci_exc.Unauthorized()
        query = query.filter(models2.Job.team_id.in_(user.teams_ids))
    query = query.filter(models2.Job.pipeline_id == p.id)
    query = query.filter(models2.Job.state != "archived")
    query = query.order_by(models2.Job.created_at.asc())
    # eager-load related collections to avoid N+1 queries in serialize()
    query = (
        query.options(sa_orm.selectinload("results"))
        .options(sa_orm.joinedload("remoteci", innerjoin=True))
        .options(sa_orm.selectinload("components"))
        .options(sa_orm.joinedload("team", innerjoin=True))
    )
    jobs = [j.serialize() for j in query.all()]
    return flask.jsonify({"jobs": jobs, "_meta": {"count": len(jobs)}})
@api.route("/pipelines/<uuid:p_id>", methods=["PUT"])
@decorators.login_required
def update_pipeline_by_id(user, p_id):
    """Update a pipeline; requires a matching If-Match ETag header."""
    if_match_etag = utils.check_and_get_etag(flask.request.headers)
    values = clean_json_with_schema(update_pipeline_schema, flask.request.json)
    p = base.get_resource_orm(models2.Pipeline, p_id, if_match_etag)
    if user.is_not_in_team(p.team_id) and user.is_not_epm():
        raise dci_exc.Unauthorized()
    base.update_resource_orm(p, values)
    # re-fetch so the response carries the refreshed etag and field values
    p = base.get_resource_orm(models2.Pipeline, p_id)
    return flask.Response(
        json.dumps({"pipeline": p.serialize()}),
        200,
        headers={"ETag": p.etag},
        content_type="application/json",
    )
@api.route("/pipelines/<uuid:p_id>", methods=["DELETE"])
@decorators.login_required
def delete_pipeline_by_id(user, p_id):
    """Soft-delete a pipeline (state -> "archived"); requires If-Match ETag."""
    if_match_etag = utils.check_and_get_etag(flask.request.headers)
    p = base.get_resource_orm(models2.Pipeline, p_id, if_match_etag)
    # non-members and read-only users cannot delete; EPMs always can
    if (
        user.is_not_in_team(p.team_id) or user.is_read_only_user()
    ) and user.is_not_epm():
        raise dci_exc.Unauthorized()
    try:
        p.state = "archived"
        flask.g.session.add(p)
        flask.g.session.commit()
    except Exception as e:
        flask.g.session.rollback()
        logging.error("unable to delete pipeline %s: %s" % (p_id, str(e)))
        raise dci_exc.DCIException("unable to delete pipeline %s: %s" % (p_id, str(e)))
    return flask.Response(None, 204, content_type="application/json")
|
13,769 | ac6f0fbab20659164c49aadc301a80a894d2732c | from unittest import TestCase
import time
import pty
import subprocess
class TestDotfiles(TestCase):
    """Integration test: the dotfiles must provide zsh and the git aliases."""

    def test_git(self):
        """
        By now, we should have access to zsh, git, an alias for git, and git
        aliases.
        """
        # BUG FIX: check_output returns bytes; on Python 3,
        # assertIn(str, bytes) raises TypeError, so decode the output first
        # (decode() is a no-op improvement on Python 2 as well).
        g_d = subprocess.check_output(["zsh", "-i", "-c", "g d --help"]).decode()
        self.assertIn(
            "`git d' is aliased to "
            "`diff --ignore-all-space --ignore-blank-lines --word-diff=color'",
            g_d,
        )
|
13,770 | deb041277edec9dfddedd51de69a354422475772 | # coding=utf-8
# Copyright (C) 2012 Allis Tauri <allista@gmail.com>
#
# degen_primer is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# indicator_gddccontrol is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Created on Jun 24, 2012
@author: Allis Tauri <allista@gmail.com>
All calculations are based on:
1) SantaLucia, J., & Hicks, D. (2004).
The thermodynamics of DNA structural motifs. Annual review of biophysics and
biomolecular structure, 33, 415-40. doi:10.1146/annurev.biophys.32.110601.141800
2) von Ahsen, N., Wittwer, C. T., & Schütz, E. (2001). Oligonucleotide melting
temperatures under PCR conditions: nearest-neighbor corrections for Mg(2+),
deoxynucleotide triphosphate, and dimethyl sulfoxide concentrations with
comparison to alternative empirical formulas. Clinical chemistry, 47(11), 1956-61.
'''
from math import sqrt, log
from UnifiedNN import *
from StringTools import print_exception
try:
from Bio.SeqFeature import SeqFeature, FeatureLocation
except Exception, e:
print_exception(e)
raise ImportError('The BioPython must be installed in your system.')
#utility functions
def print_exception(e):
    """Print a one-line description of exception *e* (its type and message).

    Uses function-call print syntax so the module works under both
    Python 2 and Python 3 (a single parenthesised argument is valid in both).

    NOTE(review): this definition shadows the ``print_exception`` imported
    from StringTools above -- confirm which one is actually intended.
    """
    print("Exception occurred: " + str(type(e)) + " : " + str(e))
###############################################################################
#standard PCR conditions
C_Mg = 1.5 #mM
C_Na = 50 #mM; should be above 0.05M and below 1.1M
C_dNTP = 0 #mM
C_DNA = 50 #nM; DNA template concentration
C_Prim = 0.1 #uM; Primer concentration
C_DMSO = 0 #percent
def C_Na_eq():
    """Monovalent-equivalent salt concentration with the divalent-cation
    correction of von Ahsen et al. (2001): [Na+] + 120*sqrt([Mg2+]-[dNTP])."""
    global C_Na, C_Mg, C_dNTP
    divalent_correction = 120 * sqrt(C_Mg - C_dNTP)
    return C_Na + divalent_correction
#end def
def NN_Tr(seq, r):
    '''Calculate temperature for primer-template association equilibrium
    with 'r' ratio using two-state equilibrium model and the Nearest Neighbor \
    TD tables and from the paper of SantaLucia & Hicks (2004).
    Note, that two-state equilibrium model used here is based on assumption, that
    primer sequence is not self-complementary.'''
    #value constraints
    if r >=1 or r <=0:
        raise ValueError('TD_Functions.NN_Tr: equilibrium ratio should be in the (0;1) interval.')
    #definitions
    #R, K0 and the delta_H/delta_S tables come from UnifiedNN (star-import above)
    global C_Prim, C_DNA, C_DMSO, R, K0, Sym_Correction
    seq_str = str(seq)
    rev_com = str(seq.reverse_complement())
    seq_len = len(seq)
    dH, dS = 0, 0
    #concentrations, converted to molar (C_Prim is in uM, C_DNA in nM)
    P = C_Prim*1e-6
    D = C_DNA *1e-9
    #duplex concentration at the requested association ratio r
    DUP = r*min(P,D)
    #equilibrium constant of primer+template <-> duplex at that ratio
    K = DUP/((P-DUP)*(D-DUP))
    #initial corrections
    dH += delta_H('ini', 'ini')
    dS += delta_S('ini', 'ini')
    #test for AT terminals (terminal AT penalty applied per AT end)
    if seq_str[0] == 'A' or seq_str[0] == 'T':
        dH += delta_H('ter', 'ter')
        dS += delta_S('ter', 'ter')
    if seq_str[-1] == 'A' or seq_str[-1] == 'T':
        dH += delta_H('ter', 'ter')
        dS += delta_S('ter', 'ter')
    #stacking interactions: sum nearest-neighbor contributions along the duplex
    for n in range(len(seq_str)-1):
        NN = seq_str[n:n+2]
        RC = rev_com[seq_len-n-2:seq_len-n]
        dH += delta_H(NN, RC)
        dS += delta_S(NN, RC)
    #salt concentration correction
    dS = dS + dS_Na_coefficient * len(seq_str) * log(C_Na_eq()*1e-3) #C_Na mM
    #final temperature calculation: dH is in kcal/mol (hence *1000),
    #K0 converts Kelvin to Celsius
    return dH * 1000/(dS - R * log(K)) + K0 - 0.75 * C_DMSO #DMSO correction from [2]
#end def
def NN_Tm(seq):
    """Melting temperature: the association temperature at duplex ratio 0.5."""
    return NN_Tr(seq, 0.5)
def source_feature(seq_rec):
    """Return the 'source' feature of *seq_rec*, creating and appending a
    whole-sequence one if the record does not have any."""
    for candidate in seq_rec.features:
        if candidate.type == 'source':
            return candidate
    #no 'source' feature yet: build one spanning the whole sequence
    feature = SeqFeature(FeatureLocation(0,len(seq_rec.seq)),
                         type = 'source')
    seq_rec.features.append(feature)
    return feature
#end def
def format_PCR_conditions():
    """Return a human-readable, column-aligned summary of the current PCR
    conditions (one "C(...) = value unit" line per parameter)."""
    params = (('C(Na)', C_Na, 'mM'),
              ('C(Mg)', C_Mg, 'mM'),
              ('C(dNTP)', C_dNTP, 'mM'),
              ('C(DNA)', C_DNA, 'nM'),
              ('C(Primer)', C_Prim, 'uM'),
              ('C(DMSO)', C_DMSO, '%'))
    #width of the widest value string, so the unit column lines up
    width = max(len(str(value)) for _label, value, _unit in params)
    lines = []
    for label, value, unit in params:
        value_str = str(value)
        padding = ' '*(width - len(value_str))
        lines.append('%s= %s%s %s\n' % (label.ljust(11), value_str, padding, unit))
    return ''.join(lines)
#end_def
def add_PCR_conditions(feature):
    """Store the current PCR conditions as qualifiers on *feature*.

    Best effort: if the feature has no usable qualifiers mapping the error
    is only reported, not propagated."""
    try:
        feature.qualifiers['C_Na'] = str(C_Na)+ ' mM'
        feature.qualifiers['C_Mg'] = str(C_Mg)+ ' mM'
        feature.qualifiers['C_dNTP'] = str(C_dNTP)+' mM'
        feature.qualifiers['C_DNA'] = str(C_DNA)+ ' nM'
        feature.qualifiers['C_Primer'] = str(C_Prim)+' uM'
        feature.qualifiers['C_DMSO'] = str(C_DMSO)+' %'
    except Exception, e:
        print 'add_PCR_conditions:'
        print_exception(e)
#end def
def calculate_Tr(seq_rec, r):
    """Compute the association temperature at duplex ratio *r* for a
    sequence record, annotate the record's 'source' feature with the PCR
    conditions and the result, and return the temperature."""
    temperature = NN_Tr(seq_rec.seq, r)
    src = source_feature(seq_rec)
    add_PCR_conditions(src)
    src.qualifiers['T-'+str(r)] = str(temperature)
    return temperature
#end def
def calculate_Tm(seq_rec):
    """Compute the melting temperature of a sequence record, annotate the
    record's 'source' feature with the PCR conditions and the result, and
    return the temperature."""
    melting_temp = NN_Tm(seq_rec.seq)
    src = source_feature(seq_rec)
    add_PCR_conditions(src)
    src.qualifiers['Tm'] = str(melting_temp)
    return melting_temp
#end def
def dimer_dG(dimer, seq1, seq2):
    '''Return dG (kcal/mol) of a primer-primer dimer.
    *dimer* is a pair of matched-position collections: dimer[0] holds indices
    into *seq1* (forward strand), dimer[1] indices into the reversed *seq2*.
    Dangling ends, terminal mismatches, terminal AT penalties, internal
    mismatches and loops are scored with the UnifiedNN tables.'''
    fwd_matches = list(dimer[0])
    fwd_matches.sort()
    #e.g. (2 ,3 ,4 ,8 ,9 )
    rev_matches = list(dimer[1])
    rev_matches.sort()
    #e.g. (13,14,15,19,20)
    seq_str = str(seq1)
    seq_len = len(seq_str)
    rev_str = str(seq2[::-1])
    rev_len = len(rev_str)
    #per-NN salt correction term
    dG_Na = dG_Na_coefficient_oligo * 1 * log(C_Na_eq()*1e-3)
    dG = delta_G('ini', 'ini')
    #check for 'left' dangling end
    if fwd_matches[0] == 0 and rev_matches[0] > 0: #3' dangling
        dG += DanglingNN[rev_str[rev_matches[0]]+'X'][rev_str[rev_matches[0]-1]]
    elif rev_matches[0] == 0 and fwd_matches[0] > 0: #5' dangling
        dG += DanglingNN['X'+seq_str[fwd_matches[0]]][seq_str[fwd_matches[0]-1]]
    #check for 'left' terminal mismatch
    elif fwd_matches[0] > 0 and rev_matches[0] > 0:
        dG += Terminal_mismatch_mean
    #check for 'left' terminal AT
    elif fwd_matches[0] == 0 and rev_matches[0] == 0:
        if seq_str[0] == 'A' or seq_str[0] == 'T':
            dG += delta_G('ter', 'ter')
    #check for 'right' dangling end
    if fwd_matches[-1] == seq_len-1 and rev_matches[-1] < rev_len-1: #5' dangling
        dG += DanglingNN['X'+rev_str[rev_matches[-1]]][rev_str[rev_matches[-1]+1]]
    elif rev_matches[-1] == rev_len-1 and fwd_matches[-1] < seq_len-1: #3' dangling
        dG += DanglingNN[seq_str[fwd_matches[-1]]+'X'][seq_str[fwd_matches[-1]+1]]
    #check for 'right' terminal mismatch
    #BUG FIX: the right-end test must inspect the LAST reverse match
    #(rev_matches[-1]); it previously used rev_matches[0], inconsistent with
    #every other right-end branch here and with the checks in hairpin_dG
    elif fwd_matches[-1] < seq_len-1 and rev_matches[-1] < rev_len-1:
        dG += Terminal_mismatch_mean
    #check for 'right' terminal AT
    elif fwd_matches[-1] == seq_len-1 and rev_matches[-1] == rev_len-1:
        if seq_str[-1] == 'A' or seq_str[-1] == 'T':
            dG += delta_G('ter', 'ter')
    #stacking and mismatches between successive matched positions
    for i in range(len(fwd_matches)-1):
        f_match = fwd_matches[i]
        f_next = fwd_matches[i+1]
        r_match = rev_matches[i]
        r_next = rev_matches[i+1]
        #if either || or |x| or |xx|
        if f_next-f_match < 4:
            NN = seq_str[f_match:f_match+2]
            RV = rev_str[r_match:r_match+2]
            #salt-corrected dG
            dG += MismatchNN[NN][RV] + dG_Na
            #if ||
            if f_next-f_match == 1: continue
            #if |x| or |xx|
            elif f_next-f_match < 4:
                NN1 = rev_str[r_next-1:r_next+1][::-1]
                RV1 = seq_str[f_next-1:f_next+1][::-1]
                dG += MismatchNN[NN1][RV1] + dG_Na
                continue
        #internal loop of up to 30 unmatched bases
        elif f_next-f_match < 31:
            dG += loop_dG(f_next-f_match-1, 'I') + 2*Terminal_mismatch_mean
        else: pass
    return dG
#end def
def hairpin_dG(hairpin, seq):
    '''Return dG (kcal/mol) of a self-folded hairpin: hairpin[0] are the
    5'-side matched indices into *seq*, hairpin[1] the paired 3'-side
    indices. Scored with the UnifiedNN tables plus hairpin-loop terms.'''
    fwd_matches = list(hairpin[0])
    fwd_matches.sort()
    #e.g. (2 ,3 ,4 ,8 ,9 )
    rev_matches = list(hairpin[1])
    rev_matches.sort(reverse=True)
    #e.g (24,23,22,18,17)
    seq_str = str(seq)
    seq_len = len(seq_str)
    #per-NN salt correction term
    dG_Na = dG_Na_coefficient_oligo * 1 * log(C_Na_eq()*1e-3)
    dG = delta_G('ini', 'ini')
    #check for 'left' dangling end
    if fwd_matches[0] == 0 and rev_matches[0] < seq_len-1:
        dG += DanglingNN['X'+seq_str[rev_matches[0]]][seq_str[rev_matches[0]+1]]
    elif fwd_matches[0] > 0 and rev_matches[0] == seq_len-1:
        dG += DanglingNN['X'+seq_str[fwd_matches[0]]][seq_str[fwd_matches[0]-1]]
    #check for 'left' terminal mismatch
    elif fwd_matches[0] > 0 and rev_matches[0] < seq_len-1:
        dG += Terminal_mismatch_mean
    #check for 'left' terminal AT
    elif fwd_matches[0] == 0 and rev_matches[0] == seq_len-1:
        if seq_str[0] == 'A' or seq_str[0] == 'T':
            dG += delta_G('ter', 'ter')
    #stacking and mismatches along the stem
    for i in range(len(fwd_matches)-1):
        f_match = fwd_matches[i]
        f_next = fwd_matches[i+1]
        r_match = rev_matches[i]
        r_next = rev_matches[i+1]
        #if either || or |x| or |xx|
        if f_next-f_match < 4:
            NN = seq_str[f_match:f_match+2]
            RV = seq_str[r_match-1:r_match+1][::-1]
            #salt-corrected dG
            dG += MismatchNN[NN][RV] + dG_Na
            #if ||
            if f_next-f_match == 1: continue
            #if |x| or |xx|
            elif f_next-f_match < 4:
                NN1 = seq_str[r_next:r_next+2]
                RV1 = seq_str[f_next-1:f_next+1][::-1]
                dG += MismatchNN[NN1][RV1] + dG_Na
                continue
        #internal loop of up to 30 unmatched bases
        elif f_next-f_match < 31:
            dG += loop_dG(f_next-f_match-1, 'I') + 2*Terminal_mismatch_mean
        else: pass
    #hairpin loop closure penalty (bases between the innermost pair)
    hp_len = rev_matches[-1]-fwd_matches[-1]-1
    dG += loop_dG(hp_len, 'H')
    #3-4 loop: tabulated tri-/tetra-loop bonuses
    if hp_len < 5:
        hp_str = seq_str[fwd_matches[-1]:rev_matches[-1]+1]
        if hp_str in Tri_Tetra_Loops:
            dG += Tri_Tetra_Loops[hp_str]
        if hp_len == 3:
            if seq_str[fwd_matches[-1]] == 'A' or seq_str[fwd_matches[-1]] == 'T':
                dG += 0.5 #kcal/mol; AT-closing penalty
        elif hp_len == 4:
            dG += Terminal_mismatch_mean
    else: dG += Terminal_mismatch_mean
    return dG
#end def
13,771 | 4457dd2062f32ca42af3806fa31dfd87e8733c94 | """
naive algorith to find prime numbers
version 1.0
"""
import math
import time


def find_primes(limit):
    """Return all primes p with 2 <= p < limit, by trial division up to sqrt(p)."""
    primes = []
    for candidate in range(2, limit):
        is_prime = True
        for divider in range(2, int(math.sqrt(candidate)) + 1):
            if candidate % divider == 0:  # remainder of division is zero
                is_prime = False  # it is not a prime
                break  # BUG FIX: stop at the first divisor instead of testing all
        if is_prime:
            primes.append(candidate)  # store prime number
    return primes


if __name__ == '__main__':
    start_time = time.time()
    prims = find_primes(500001)  # find prims up to 500000 (comment said 50000)
    print('ready')
    print('%d prims in %f seconds' % (len(prims), time.time() - start_time))
13,772 | 75a28a449579abde1a0f261899eb4d0ba1d67de7 | import numpy as np
import pandas as pd
import os
import sys
sys.path.insert(0, os.path.join(
os.path.dirname(os.path.realpath(__file__)),
os.path.pardir,
os.path.pardir
))
import model.model as model
import logging
from nose.tools import with_setup
logging.basicConfig(level=logging.DEBUG)
class TestDataContainer(object):
    """Unit tests for model.DataContainer: construction, metadata, slicing."""

    def __init__(self):
        self.logger = logging.getLogger('TestDataContainer')
        self.logger.setLevel(logging.DEBUG)
        ch = logging.StreamHandler()
        self.logger.addHandler(ch)

    def setup(self):
        # 100 samples x 3 channels at 10Hz -> 10s of data
        self.test_data = np.column_stack((
            np.arange(100),
            np.arange(100, 200),
            np.arange(200, 300)
        ))
        self.frequency = 10
        self.channels = ['flexor', 'brachoradialis', 'novelis']
        self.container = model.DataContainer.from_array(
            self.test_data,
            self.frequency,
            self.channels
        )

    def test_num_channels(self):
        self.logger.debug('type of container: {}'.format(type(self.container)))
        assert self.container.num_channels == 3

    def test_columns(self):
        for i in range(len(self.channels)):
            assert self.channels[i] == self.container.columns[i]

    def test_data(self):
        data = self.container.data
        assert type(data) == np.ndarray
        assert data.shape[0] == self.test_data.shape[0]
        assert data.shape[1] == self.test_data.shape[1]

    def test_dataframe(self):
        frame = self.container.dataframe
        fcolumns = frame.columns
        assert type(frame) == pd.DataFrame
        for i in range(len(self.channels)):
            assert fcolumns[i] == self.channels[i]
        assert frame.shape[0] == self.test_data.shape[0]

    def test_duration(self):
        assert self.container.duration == 10

    def test_getitem(self):
        # slicing by time (seconds); renamed local to avoid shadowing builtin
        segment = self.container[2.5:5.5]
        assert type(segment) == model.DataContainer
        assert segment.samples == 30, 'Samples returned are {}'.format(segment.data.shape)
        segment = self.container[5]
        assert type(segment) == model.DataContainer
        assert segment.data.ndim == 2
        assert segment.data.shape[0] == 1

    def test_getitem_border(self):
        segment = self.container[10]
        segment = self.container[0:10]
        assert segment.data.shape[0] == 100

    def test_getitem_fails(self):
        """Out-of-bounds indexing must raise AssertionError in every case."""
        success = True
        try:
            self.container[-4]
            success = False
        except AssertionError as e:
            # str(e) instead of e.message -- the latter is Python-2 only
            self.logger.debug(str(e))
            self.logger.info('AssertionEerror for negative value occured')
        try:
            self.container[14:15]
            success = False
        except AssertionError as e:
            self.logger.debug(str(e))
            self.logger.info('AssertionError for start of slice out of bounds' + \
                    'occured')
        try:
            self.container[2:34]
            success = False
        except AssertionError as e:
            self.logger.debug(str(e))
            self.logger.info('AssertionError for stop of slice out of bounds' + \
                    'occured')
        # BUG FIX: `success` was tracked but never checked, so this test
        # could never fail even when no AssertionError was raised.
        assert success, 'expected AssertionError was not raised'

    def test_set_data(self):
        new_data = np.arange(60).reshape(20,3)
        self.container.data = new_data
        assert self.container.data.shape[0] == 20
        assert self.container.duration == 2
        assert self.container.samples == 20
class ModelTest(object):
    """Base fixture class: builds one synthetic Experiment shared by the
    subclasses below (ExperimentTest, TrialTest, RecordingTest)."""

    @classmethod
    def setup(cls):
        """ Creates an Experiment with totaly artificial data. Experiment
        has one setup with two modalities, EMG and kin. EMG has four channels,
        KIN has three channels. Two sessions are "recorded" for two
        different subjects.
        All EMG recordings have sampling rate of 20Hz, all KIN recordings
        sampling rate of 5Hz.
        EMG recording of session1 consists of the following values:
            0    0   0     0.5
            0.1  1   0.01  0.6
            ...  ... ...   ...
            0.9  9   0.09  1.4
            1.0  10  0.1   1.5
            0.9  9   0.09  1.4
            ...  ... ...   ...
            0.1  1   0.01  0.6
        Each column is repeated 10 times resulting in a recording of
        20 * 10 = 200 <--> 10s.
        EMG recording of session2 is created from this data by adding
        gaussian noise.
        KIN data for session1 is created from above array by taking the mean
        of all consecutive 4 samples. First channel is the sum of all four
        EMG channels, second channel is the product of all four EMG channels
        along the columns and third channel is square of the first KIN
        channel.
        KIN data for session2 is based on noisy version of above array.
        First channel is the sin of the sum of the four EMG channels along
        the columns, second channel the cosine and third channel the tan.
        As before mean over four samples was taken.
        For each recording five Trials of duration 2s are defined.
        """
        cls.logger = logging.getLogger('ModelTestLogger')
        cls.logger.setLevel(logging.DEBUG)
        s1 = model.Subject('subject1')
        s2 = model.Subject('subject2')
        cls.experiment = model.Experiment()
        cls.experiment.put_subject(s1)
        cls.experiment.put_subject(s2)
        setup1 = model.Setup(cls.experiment)
        # one setup with two modalities: EMG (20Hz, 4 ch), KIN (5Hz, 3 ch)
        modality1 = model.Modality(setup1, 20, 'emg')
        modality2 = model.Modality(setup1, 5, 'kin')
        model.Channel(modality1, 'brachoradialis')
        model.Channel(modality1, 'musculus sterno clavicularis')
        model.Channel(modality1, 'musculus rhombideus')
        model.Channel(modality1, 'musculus lattisimus')
        model.Channel(modality2, 'Pos-X')
        model.Channel(modality2, 'Pos-Y')
        model.Channel(modality2, 'Pos-Z')
        session1 = model.Session(cls.experiment, setup1, s1, 'session1')
        # triangular ramps: 20 samples per period, tiled 10x -> 200 samples
        arr = np.column_stack((
            np.tile(
                np.concatenate((
                    np.arange(0., 1., 0.1),
                    np.arange(1., 0., -0.1)
                )),
                10
            ),
            np.tile(
                np.concatenate((
                    np.arange(10),
                    np.arange(10, 0, -1)
                )),
                10
            ),
            np.tile(
                np.concatenate((
                    np.arange(0.0, 0.1, 0.01),
                    np.arange(0.1, 0.0, -0.01)
                )),
                10
            ),
            np.tile(
                np.concatenate((
                    np.arange(0.5, 1.5, 0.1),
                    np.arange(1.5, 0.5, -0.1)
                )),
                10
            ),
        ))
        recording1 = model.Recording(session1, modality1, data=arr,
                identifier='emg_recording1')
        # KIN channels derived from 4-sample block means of the EMG array
        arr2 = np.column_stack((
            np.sum(np.mean(arr.reshape(-1, 4, 4), axis=1), axis=1),
            np.prod(np.mean(arr.reshape(-1, 4, 4), axis=1), axis=1),
            np.square(np.sum(np.mean(arr.reshape(-1, 4, 4), axis=1), axis=1))
        ))
        recording2 = model.Recording(session1, modality2, data=arr2,
                identifier='kin_recording1')
        # five 2s trials per recording
        for i in range(5):
            model.Trial(recording1, i * 2, 2)
            model.Trial(recording2, i * 2, 2)
        session2 = model.Session(cls.experiment, setup1, s2, 'session2')
        # session2: same signal with gaussian noise added
        arr = np.add(arr, np.random.randn(*arr.shape))
        recording1 = model.Recording(session2, modality1, data=arr,
                identifier='emg_recording2')
        arr2 = np.column_stack((
            np.sin(np.mean(np.sum(arr.reshape(-1, 4, 4), axis=1))),
            np.cos(np.mean(np.sum(arr.reshape(-1, 4, 4), axis=1))),
            np.tan(np.mean(np.sum(arr.reshape(-1, 4, 4), axis=1)))
        ))
        recording2 = model.Recording(session2, modality2, data=arr2,
                identifier='kin_recording2')
        for i in range(5):
            model.Trial(recording1, i * 2, 2)
            model.Trial(recording2, i * 2, 2)

    def test_model_definition(self):
        # smoke test: building + printing the whole hierarchy must not raise
        self.logger.debug(self.experiment.recursive_to_string())
class ExperimentTest(ModelTest):
    """Tests for Experiment-level lookups (fixture inherited from ModelTest)."""

    def test_get_recording(self):
        # NOTE(review): the fixture creates 'emg_recording1'; asserting the
        # identifier equals 'emg_recording' presumably relies on prefix
        # matching in get_recording -- confirm against the model implementation.
        recording = self.experiment.get_recording('emg_recording', 'session1')
        assert recording.identifier == 'emg_recording'

    def test_get_trial(self):
        trials = ['trial0', 'trial1', 'trial2', 'trial3', 'trial4']
        for trial in trials:
            t = self.experiment.get_trial(trial, 'session1', 'emg_recording1')
            assert t.identifier == trial

    def test_get_data(self):
        # 5 trials per EMG recording x 2 sessions = 10 trials
        trials = self.experiment.get_data(modality='emg')
        assert len(trials) == 10, 'Wrong number of trials returned, ' + \
                'expected 10 got {}'.format(len(trials))
class TrialTest(ModelTest):
    """Tests for Trial metadata and data access (fixture from ModelTest)."""

    def test_start(self):
        trials = ['trial0', 'trial1', 'trial2', 'trial3', 'trial4']
        start = 0
        for tid in trials:
            trial = self.experiment.get_trial(
                session='session1',
                recording='emg_recording1',
                identifier=tid
            )
            assert trial.start == start, 'Start does not match. Start should' + \
                    'be {} but is {}'.format(start, trial.start)
            # trials are laid out back to back, 2s each
            start += 2

    def test_duration(self):
        trials = ['trial0', 'trial1', 'trial2', 'trial3', 'trial4']
        duration = 2
        for tid in trials:
            trial = self.experiment.get_trial(
                session='session1',
                recording='emg_recording1',
                identifier=tid
            )
            assert trial.duration == duration, 'Start does not match. Start should' + \
                    'be {} but is {}'.format(duration, trial.duration)

    def test_samples(self):
        # 2s at 20Hz -> 40 EMG samples; 2s at 5Hz -> 10 KIN samples
        trials = ['trial0', 'trial1', 'trial2', 'trial3', 'trial4']
        for tid in trials:
            trial = self.experiment.get_trial(
                session='session1',
                recording='emg_recording1',
                identifier=tid
            )
            assert trial.samples == 40, 'Start does not match. Start should' + \
                    'be {} but is {} for emg rec'.format(40, trial.samples)
            trial = self.experiment.get_trial(
                session='session1',
                recording='kin_recording1',
                identifier=tid
            )
            assert trial.samples == 10, 'Start does not match. Start should' + \
                    'be {} but is {} for kin rec'.format(5, trial.samples)

    def test_get_data(self):
        # one trial's worth of the fixture signal: 2 periods of the ramps
        control_emg = np.column_stack((
            np.tile(
                np.concatenate((
                    np.arange(0., 1., 0.1),
                    np.arange(1., 0., -0.1)
                )),
                2
            ),
            np.tile(
                np.concatenate((
                    np.arange(10),
                    np.arange(10, 0, -1)
                )),
                2
            ),
            np.tile(
                np.concatenate((
                    np.arange(0.0, 0.1, 0.01),
                    np.arange(0.1, 0.0, -0.01)
                )),
                2
            ),
            np.tile(
                np.concatenate((
                    np.arange(0.5, 1.5, 0.1),
                    np.arange(1.5, 0.5, -0.1)
                )),
                2
            ),
        ))
        control_kin = np.column_stack((
            np.sum(np.mean(control_emg.reshape(-1, 4, 4), axis=1), axis=1),
            np.prod(np.mean(control_emg.reshape(-1, 4, 4), axis=1), axis=1),
            np.square(np.sum(np.mean(control_emg.reshape(-1, 4, 4), axis=1), axis=1))
        ))
        trials = ['trial0', 'trial1', 'trial2', 'trial3', 'trial4']
        for tid in trials:
            trial = self.experiment.get_trial(
                session='session1',
                recording='emg_recording1',
                identifier=tid
            )
            data = trial.get_data()
            assert data.shape == control_emg.shape, ('EMG trial {} ' + \
                    'has not matching shape. Shape of data: {}, shape ' + \
                    'of control: {}').format(tid, data.shape, control_emg.shape)
            assert np.mean(np.equal(data.data, control_emg)) == 1, 'EMG trial {} ' + \
                    'has not matching elements'.format(tid)
            data = trial.get_data(channels=['musculus lattisimus', 'brachoradialis'])
            assert np.mean(np.equal(data.data, control_emg[:,0:2])) == 1, 'slicing' + \
                    'columns errorenous for emg trial {}'.format(tid)
            data = trial.get_data(begin=0.4, end=1.2)
            assert np.mean(np.equal(data.data, control_emg[8:24])) == 1, \
                    'Returned values for time slicing wrong for emg ' + \
                    'trial {}'.format(tid)
            trial = self.experiment.get_trial(
                session='session1',
                recording='kin_recording1',
                identifier=tid
            )
            data = trial.get_data()
            assert data.shape == control_kin.shape, 'EMG trial {} ' + \
                    'has not matching shape'.format(tid)
            assert np.mean(np.equal(data.data, control_kin)) == 1, 'KIN trial {} ' + \
                    'has not matching elements'.format(tid)
            data = trial.get_data(channels=['Pos-X', 'Pos-Y'])
            assert np.mean(np.equal(data.data, control_kin[:,0:2])) == 1, 'slicing' + \
                    'columns errorenous for kin trial {}'.format(tid)
            data = trial.get_data(begin=0.4, end=1.2)
            assert np.mean(np.equal(data.data, control_kin[2:6])) == 1, \
                    'Returned values for time slicing wrong for emg ' + \
                    'trial {}'.format(tid)

    def test_set_data(self):
        recording = self.experiment.get_recording(
            session='session1',
            identifier='kin_recording1'
        )
        trial = self.experiment.get_trial(
            session='session1',
            recording='kin_recording1',
            identifier='trial4'
        )
        new_data = np.ones(trial.get_data().shape)
        trial.set_data(new_data)
        assert np.mean(trial.get_data().data) == 1, ('Setting ' + \
                'new data failed for trial')
class RecordingTest(ModelTest):
    """Tests for Recording.add_events with both supported argument types."""

    def test_add_events(self):
        # events keyed by trial id: [name, start] or [name, start, duration]
        dic = {
            'trial0': [['single', 0.4], ['long', 1, 0.5]],
            'trial1': [['single', 0.4], ['long', 1, 0.5]],
            'trial2': [['single', 0.4], ['long', 1, 0.5]],
            'trial3': [['single', 0.4], ['long', 1, 0.5]],
            'trial4': [['single', 0.4], ['long', 1, 0.5]],
        }
        # same events in tabular form: trial, name, start, duration
        df = pd.DataFrame(
            [
                ['trial0', 'single', 0.4, 0],
                ['trial0', 'long', 1, 0.5],
                ['trial1', 'single', 0.4, 0],
                ['trial1', 'long', 1, 0.5],
                ['trial2', 'single', 0.4, 0],
                ['trial2', 'long', 1, 0.5],
                ['trial3', 'single', 0.4, 0],
                ['trial3', 'long', 1, 0.5],
                ['trial4', 'single', 0.4, 0],
                ['trial4', 'long', 1, 0.5]
            ])
        recording = self.experiment.get_recording('emg_recording1', 'session1')
        # add_events must accept both a dict and a DataFrame without raising
        recording.add_events(dic)
        self.logger.debug(recording.recursive_to_string())
        recording.add_events(df)
        self.logger.debug(recording.recursive_to_string())
|
13,773 | 77f50fd6256c18915b5ca652c0eb72478621b68e | # Import configugartion file
from ServerConfig import *
# Import Domain
from Domain.Users import Users
# Import other modules
from flask import Blueprint, render_template, redirect, url_for, request, make_response
import time
import os
import datetime
login = Blueprint('login', __name__, url_prefix='/', template_folder='templates')
@login.route('login.html', methods=['GET', 'POST'])
def Login():
"""
Render /login.html web page.
"""
bad_login = False
try:
if request.args.get('logout') == "1":
resp = make_response(render_template('login.html', bad_login=bad_login))
resp.set_cookie('user_id', '', expires=0)
resp.set_cookie('user_auth_token', '', expires=0)
return resp
except:
pass
if request.method == 'POST':
try:
if request.form['submit'] == "True":
email = request.form['Email']
password = request.form['Password']
users = Users()
(success, user_id, user_auth_token) = users.user_login(email, password)
if success:
expire_date = datetime.datetime.now()
expire_date = expire_date + datetime.timedelta(hours=1)
resp = make_response(redirect(url_for('configuration.Configuration')))
resp.set_cookie('user_id', str(user_id), expires=expire_date)
resp.set_cookie('user_auth_token', user_auth_token, expires=expire_date)
return resp
else:
bad_login = True
except KeyError:
pass
return render_template('login.html', bad_login=bad_login)
|
13,774 | cc494ceb255969959a5c7ea5da4e608879761ba5 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2016-01-10 11:52
# @Author : LeiYue (mr.leiyue@gmail.com)
# @Link : https://leiyue.wordpress.com/
# @Version : $Id$
from __future__ import print_function
import json
import re
import time
import pymongo
import requests
USER_AGENT = 'Mozilla/5.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) ' \
'AppleWebKit/600.1.3 (KHTML, like Gecko) Version/8.0 ' \
'Mobile/12A4345d Safari/600.1.4'
COOKIE = 'abtest_ABTest4SearchDate=b; ' \
'__utmt=1; ' \
'OZ_1U_2282=vid=v687bcdc624ccf.0&ctime=1452396673<ime=1452396659; ' \
'OZ_1Y_2282=erefer=-&eurl=http%3A//bj.xiaozhu.com/search-duanzufang-p1-0/&etime=1452396218&ctime=1452396673<ime=1452396659&compid=2282; ' \
'startDate=2016-01-10; ' \
'endDate=2016-01-11; ' \
'__utma=29082403.615837787.1452396220.1452396220.1452396220.1; ' \
'__utmb=29082403.16.10.1452396220; ' \
'__utmc=29082403; ' \
'__utmz=29082403.1452396220.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); ' \
'OZ_1U_2283=vid=v687bd8498dba5.0&ctime=1452396758<ime=1452396734; ' \
'OZ_1Y_2283=erefer=-&eurl=http%3A//m.xiaozhu.com/search.html%3Fcityid%3D12%26city%3D%2525E5%25258C%252597%2525E4%2525BA%2525AC%26offset%3D25%26step%3D15%26st%3D2016-01-10%26et%3D2016-01-11%26&etime=1452396413&ctime=1452396758<ime=1452396734&compid=2283'
REFERER = 'http://m.xiaozhu.com/search.html?' \
'cityid=12&' \
'city=%25E5%258C%2597%25E4%25BA%25AC&' \
'sort=zuigui&' \
'offset=1&' \
'step=15&' \
'st=2016-01-10&' \
'et=2016-01-11&'
HEADERS = {
'User-Agent': USER_AGENT,
'Cookie' : COOKIE,
'Referer' : REFERER,
}
OFFSET = 0
LENGTH = 500
MIN_PRICE = 499
URL = 'http://wireless.xiaozhu.com/app/xzfk/html5/201/search/result?jsonp=api_search_result&' \
'cityId=12&' \
'offset={offset}&' \
'length={length}&' \
'orderBy=zuigui&' \
'minPrice={minPrice}&' \
'userId=0&' \
'sessId=0&' \
'jsonp=api_search_result&' \
'timestamp={timestamp}7'.format(offset=OFFSET, length=LENGTH, minPrice=MIN_PRICE,
timestamp=int(round(time.time()*1000)))
'''
'http://wireless.xiaozhu.com/app/xzfk/html5/201/search/result?jsonp=api_search_result&' \
'cityId=12&' \
'offset={offset}&' \
'length={length}&' \
'orderBy=zuigui&' \
'checkInDay=&' \
'checkOutDay=&' \
'leaseType=&' \
'minPrice={minPrice}&' \
'maxPrice=&' \
'distId=&' \
'locId=&' \
'keyword=&' \
'huXing=&' \
'facilitys=&' \
'guestNum=&' \
'userId=0&' \
'sessId=0&' \
'jsonp=api_search_result&' \
'timestamp=1452396770527&' \
'_=1452396733429'.format(offset=OFFSET, length=LENGTH, minPrice=MIN_PRICE)
'''
def get_items(url):
    """Fetch the search endpoint and return its list of unit dicts, or None
    on a non-200 response.

    The endpoint returns JSONP wrapped as ``api_search_result(...)``; the
    wrapper is stripped before the JSON payload is parsed."""
    response = requests.get(url, headers=HEADERS)
    if response.status_code != 200:
        print(u'爬取数据出现错误,错误代码:' + str(response.status_code))
        return None
    # keep only the JSON inside the JSONP wrapper's parentheses
    jsonp_wrapper = re.compile(r'api_search_result\((.*)\)')
    payload = jsonp_wrapper.sub(r'\1', response.text)
    print(u'获取数据成功,正在处理数据……')
    return json.loads(payload)['content']['item']
def main():
    """Scrape one page of listings from URL and store every item in the
    local MongoDB collection xiaozhu.units."""
    client = pymongo.MongoClient('localhost', 27017)
    database = client['xiaozhu']
    table = database['units']
    print(u'正在从以下地址读取数据:' + URL)
    items = get_items(URL)
    print(u'总共获取了 {number} 项数据.'.format(number=len(items)))
    # with open('items.json', 'w') as f:
    #     json.dump(items, f)
    print(u'正在将数据导入数据库中……')
    for item in items:
        table.insert_one(item)
    print(u'数据导入完毕。')


if __name__ == '__main__':
    main()
|
13,775 | cec3d3e1d9f4eb7f2d3337b3d4d80f01f873e771 | def strangeListFunction(n):
strangeList = []
for i in range(0, n):
strangeList.insert(0, i)
return strangeList
def startfromzeroto(n):
    """Return the integers from 0 up to (but not including) n.

    Equivalent to the original append loop, expressed directly as
    ``list(range(n))``.
    """
    return list(range(n))
#print(strangeListFunction(50))
# Prompt the user for an upper bound and print the list [0, x).
x=int(input("Enter the number :"))
print(startfromzeroto(x))
|
13,776 | c5e57f88a469b2e7c1e8ad37bdb8637eb5e74478 | # -*- coding: utf-8 -*-
# Koska plottaus mahdollisuuksia on niin paljo
# tässä tiedostossa on vain urleja plot esimerkkeihin
# ********* Hox Hox********
# Ei pidä vaipua masennukseen, sillä netti esimerkeissä on myös
# koodit ja kuvat, jotka voi kopioida suoraan tiedostoon, jos haluaa testata
#
# Basic esimerkit
# http://matplotlib.org/users/pyplot_tutorial.html
#
# Scatter plot
# http://matplotlib.org/examples/shapes_and_collections/scatter_demo.html
#
# Histogram demo
# http://matplotlib.org/1.2.1/examples/pylab_examples/histogram_demo.html
# https://plot.ly/matplotlib/histograms/
#
# Contour plot demoja
# http://matplotlib.org/examples/pylab_examples/contour_demo.html
#
# 3D-plot esimerkkejä, sisältävät source koodit ja kuvat
# http://matplotlib.org/mpl_toolkits/mplot3d/tutorial.html
#
# 3D-scatter plot
# http://matplotlib.org/examples/mplot3d/scatter3d_demo.html
#
# Statistical data plotteja
# http://matplotlib.org/examples/statistics/
#
# PyPlot API
# http://matplotlib.org/api/pyplot_api.html
#
# Teksti esimerkkejä
# http://matplotlib.org/users/text_intro.html
|
13,777 | 3cd4e5222f35ac6431b98e8744c182ab3a0d33b7 | from tamcolors import tam, tam_tools, tam_io
class TAMKeyManager(tam.tam_loop.TAMFrame):
    """Demo frame: echoes 'a'/'b'/'c' key presses and quits on BACKSPACE."""

    def __init__(self):
        super().__init__(fps=10,
                         char="@",
                         foreground_color=tam_io.tam_colors.GRAY,
                         background_color=tam_io.tam_colors.BLACK,
                         min_width=70, max_width=70, min_height=40, max_height=40)
        # tracks per-key pressed state between frames
        self._key_manager = tam_tools.tam_key_manager.TAMKeyManager()
        # characters typed so far; cleared once it reaches 20 characters
        self._output = ""

    def update(self, tam_loop, keys, loop_data):
        """Collect this frame's key presses; BACKSPACE ends the loop."""
        self._key_manager.update(keys)
        if len(self._output) >= 20:
            self._output = ""
        if self._key_manager.get_key_state("a"):
            self._output += "a"
        if self._key_manager.get_key_state("b"):
            self._output += "b"
        if self._key_manager.get_key_state("c"):
            self._output += "c"
        if self._key_manager.get_key_state("BACKSPACE"):
            tam_loop.done()

    def draw(self, tam_buffer, loop_data):
        """Draw the typed characters (top) and the usage hint (row 30)."""
        tam_buffer.clear()
        tam_tools.tam_print.tam_print(tam_buffer,
                                      0,
                                      0,
                                      text=self._output,
                                      foreground_color=tam_io.tam_colors.LIGHT_GREEN,
                                      background_color=tam_io.tam_colors.ALPHA)
        tam_tools.tam_print.tam_print(tam_buffer,
                                      0,
                                      30,
                                      text="Try a, b and c.\nbackspace to quit.",
                                      foreground_color=tam_io.tam_colors.RED,
                                      background_color=tam_io.tam_colors.ALPHA)
def run():
    """Launch the key-manager demo frame inside a TAMLoop."""
    demo_frame = TAMKeyManager()
    tam.tam_loop.TAMLoop(demo_frame).run()
|
13,778 | 0a4316dc784712f0affe3764871d824576ac8d96 | # Generated by Django 3.0.7 on 2020-07-02 11:40
from django.db import migrations, models
class Migration(migrations.Migration):
    # Runs after the previous auto-generated migration for the ``post`` app.
    dependencies = [
        ('post', '0005_auto_20200702_1328'),
    ]
    # Alter ``Post.kategori`` to a 30-char CharField with default '#Kategorisiz'.
    operations = [
        migrations.AlterField(
            model_name='post',
            name='kategori',
            field=models.CharField(default='#Kategorisiz', max_length=30),
        ),
    ]
|
13,779 | 07488abfd841f0c5c54a32e5229e929f7569cca6 | """
Module for the DomainMatrix class.
A DomainMatrix represents a matrix with elements that are in a particular
Domain. Each DomainMatrix internally wraps a DDM which is used for the
lower-level operations. The idea is that the DomainMatrix class provides the
convenience routines for converting between Expr and the poly domains as well
as unifying matrices with different domains.
"""
from collections import Counter
from functools import reduce
from typing import Union as tUnion, Tuple as tTuple
from sympy.utilities.decorator import doctest_depends_on
from sympy.core.sympify import _sympify
from ..domains import Domain
from ..constructor import construct_domain
from .exceptions import (
DMFormatError,
DMBadInputError,
DMShapeError,
DMDomainError,
DMNotAField,
DMNonSquareMatrixError,
DMNonInvertibleMatrixError
)
from .domainscalar import DomainScalar
from sympy.polys.domains import ZZ, EXRAW, QQ
from sympy.polys.densearith import dup_mul
from sympy.polys.densebasic import dup_convert
from sympy.polys.densetools import (
dup_mul_ground,
dup_quo_ground,
dup_content,
dup_clear_denoms,
dup_primitive,
dup_transform,
)
from sympy.polys.factortools import dup_factor_list
from sympy.polys.polyutils import _sort_factors
from .ddm import DDM
from .sdm import SDM
from .dfm import DFM
from .rref import _dm_rref, _dm_rref_den
def DM(rows, domain):
    """Shorthand alias for :meth:`DomainMatrix.from_list`.

    Examples
    ========

    >>> from sympy import ZZ
    >>> from sympy.polys.matrices import DM
    >>> DM([[1, 2], [3, 4]], ZZ)
    DomainMatrix([[1, 2], [3, 4]], (2, 2), ZZ)

    See Also
    ========

    DomainMatrix.from_list
    """
    return DomainMatrix.from_list(rows, domain)
class DomainMatrix:
r"""
Associate Matrix with :py:class:`~.Domain`
Explanation
===========
DomainMatrix uses :py:class:`~.Domain` for its internal representation
which makes it faster than the SymPy Matrix class (currently) for many
common operations, but this advantage makes it not entirely compatible
with Matrix. DomainMatrix are analogous to numpy arrays with "dtype".
In the DomainMatrix, each element has a domain such as :ref:`ZZ`
or :ref:`QQ(a)`.
Examples
========
Creating a DomainMatrix from the existing Matrix class:
>>> from sympy import Matrix
>>> from sympy.polys.matrices import DomainMatrix
>>> Matrix1 = Matrix([
... [1, 2],
... [3, 4]])
>>> A = DomainMatrix.from_Matrix(Matrix1)
>>> A
DomainMatrix({0: {0: 1, 1: 2}, 1: {0: 3, 1: 4}}, (2, 2), ZZ)
Directly forming a DomainMatrix:
>>> from sympy import ZZ
>>> from sympy.polys.matrices import DomainMatrix
>>> A = DomainMatrix([
... [ZZ(1), ZZ(2)],
... [ZZ(3), ZZ(4)]], (2, 2), ZZ)
>>> A
DomainMatrix([[1, 2], [3, 4]], (2, 2), ZZ)
See Also
========
DDM
SDM
Domain
Poly
"""
rep: tUnion[SDM, DDM, DFM]
shape: tTuple[int, int]
domain: Domain
def __new__(cls, rows, shape, domain, *, fmt=None):
"""
Creates a :py:class:`~.DomainMatrix`.
Parameters
==========
rows : Represents elements of DomainMatrix as list of lists
shape : Represents dimension of DomainMatrix
domain : Represents :py:class:`~.Domain` of DomainMatrix
Raises
======
TypeError
If any of rows, shape and domain are not provided
"""
if isinstance(rows, (DDM, SDM, DFM)):
raise TypeError("Use from_rep to initialise from SDM/DDM")
elif isinstance(rows, list):
rep = DDM(rows, shape, domain)
elif isinstance(rows, dict):
rep = SDM(rows, shape, domain)
else:
msg = "Input should be list-of-lists or dict-of-dicts"
raise TypeError(msg)
if fmt is not None:
if fmt == 'sparse':
rep = rep.to_sdm()
elif fmt == 'dense':
rep = rep.to_ddm()
else:
raise ValueError("fmt should be 'sparse' or 'dense'")
# Use python-flint for dense matrices if possible
if rep.fmt == 'dense' and DFM._supports_domain(domain):
rep = rep.to_dfm()
return cls.from_rep(rep)
    def __reduce__(self):
        # Pickle support: serialise as (class, (elements, shape, domain)) so
        # __new__ rebuilds the matrix on unpickling.
        rep = self.rep
        if rep.fmt == 'dense':
            # DDM/DFM round-trip through a list of lists.
            arg = self.to_list()
        elif rep.fmt == 'sparse':
            # SDM round-trips through its dict-of-dicts form.
            arg = dict(rep)
        else:
            raise RuntimeError # pragma: no cover
        args = (arg, rep.shape, rep.domain)
        return (self.__class__, args)
    def __getitem__(self, key):
        # M[i, j] with two plain indices returns a DomainScalar; a slice in
        # either position returns a submatrix as a new DomainMatrix.
        i, j = key
        m, n = self.shape
        if not (isinstance(i, slice) or isinstance(j, slice)):
            return DomainScalar(self.rep.getitem(i, j), self.domain)
        if not isinstance(i, slice):
            # Normalise a single (possibly negative) row index to a 1-row slice.
            if not -m <= i < m:
                raise IndexError("Row index out of range")
            i = i % m
            i = slice(i, i+1)
        if not isinstance(j, slice):
            # Normalise a single (possibly negative) column index to a 1-col slice.
            if not -n <= j < n:
                raise IndexError("Column index out of range")
            j = j % n
            j = slice(j, j+1)
        return self.from_rep(self.rep.extract_slice(i, j))
    def getitem_sympy(self, i, j):
        # Element (i, j) converted from the ground domain to a SymPy Expr.
        return self.domain.to_sympy(self.rep.getitem(i, j))
    def extract(self, rowslist, colslist):
        # Submatrix selected by explicit row/column index lists (delegates to rep).
        return self.from_rep(self.rep.extract(rowslist, colslist))
def __setitem__(self, key, value):
i, j = key
if not self.domain.of_type(value):
raise TypeError
if isinstance(i, int) and isinstance(j, int):
self.rep.setitem(i, j, value)
else:
raise NotImplementedError
@classmethod
def from_rep(cls, rep):
"""Create a new DomainMatrix efficiently from DDM/SDM.
Examples
========
Create a :py:class:`~.DomainMatrix` with an dense internal
representation as :py:class:`~.DDM`:
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.matrices import DomainMatrix
>>> from sympy.polys.matrices.ddm import DDM
>>> drep = DDM([[ZZ(1), ZZ(2)], [ZZ(3), ZZ(4)]], (2, 2), ZZ)
>>> dM = DomainMatrix.from_rep(drep)
>>> dM
DomainMatrix([[1, 2], [3, 4]], (2, 2), ZZ)
Create a :py:class:`~.DomainMatrix` with a sparse internal
representation as :py:class:`~.SDM`:
>>> from sympy.polys.matrices import DomainMatrix
>>> from sympy.polys.matrices.sdm import SDM
>>> from sympy import ZZ
>>> drep = SDM({0:{1:ZZ(1)},1:{0:ZZ(2)}}, (2, 2), ZZ)
>>> dM = DomainMatrix.from_rep(drep)
>>> dM
DomainMatrix({0: {1: 1}, 1: {0: 2}}, (2, 2), ZZ)
Parameters
==========
rep: SDM or DDM
The internal sparse or dense representation of the matrix.
Returns
=======
DomainMatrix
A :py:class:`~.DomainMatrix` wrapping *rep*.
Notes
=====
This takes ownership of rep as its internal representation. If rep is
being mutated elsewhere then a copy should be provided to
``from_rep``. Only minimal verification or checking is done on *rep*
as this is supposed to be an efficient internal routine.
"""
if not (isinstance(rep, (DDM, SDM)) or (DFM is not None and isinstance(rep, DFM))):
raise TypeError("rep should be of type DDM or SDM")
self = super().__new__(cls)
self.rep = rep
self.shape = rep.shape
self.domain = rep.domain
return self
@classmethod
def from_list(cls, rows, domain):
r"""
Convert a list of lists into a DomainMatrix
Parameters
==========
rows: list of lists
Each element of the inner lists should be either the single arg,
or tuple of args, that would be passed to the domain constructor
in order to form an element of the domain. See examples.
Returns
=======
DomainMatrix containing elements defined in rows
Examples
========
>>> from sympy.polys.matrices import DomainMatrix
>>> from sympy import FF, QQ, ZZ
>>> A = DomainMatrix.from_list([[1, 0, 1], [0, 0, 1]], ZZ)
>>> A
DomainMatrix([[1, 0, 1], [0, 0, 1]], (2, 3), ZZ)
>>> B = DomainMatrix.from_list([[1, 0, 1], [0, 0, 1]], FF(7))
>>> B
DomainMatrix([[1 mod 7, 0 mod 7, 1 mod 7], [0 mod 7, 0 mod 7, 1 mod 7]], (2, 3), GF(7))
>>> C = DomainMatrix.from_list([[(1, 2), (3, 1)], [(1, 4), (5, 1)]], QQ)
>>> C
DomainMatrix([[1/2, 3], [1/4, 5]], (2, 2), QQ)
See Also
========
from_list_sympy
"""
nrows = len(rows)
ncols = 0 if not nrows else len(rows[0])
conv = lambda e: domain(*e) if isinstance(e, tuple) else domain(e)
domain_rows = [[conv(e) for e in row] for row in rows]
return DomainMatrix(domain_rows, (nrows, ncols), domain)
@classmethod
def from_list_sympy(cls, nrows, ncols, rows, **kwargs):
r"""
Convert a list of lists of Expr into a DomainMatrix using construct_domain
Parameters
==========
nrows: number of rows
ncols: number of columns
rows: list of lists
Returns
=======
DomainMatrix containing elements of rows
Examples
========
>>> from sympy.polys.matrices import DomainMatrix
>>> from sympy.abc import x, y, z
>>> A = DomainMatrix.from_list_sympy(1, 3, [[x, y, z]])
>>> A
DomainMatrix([[x, y, z]], (1, 3), ZZ[x,y,z])
See Also
========
sympy.polys.constructor.construct_domain, from_dict_sympy
"""
assert len(rows) == nrows
assert all(len(row) == ncols for row in rows)
items_sympy = [_sympify(item) for row in rows for item in row]
domain, items_domain = cls.get_domain(items_sympy, **kwargs)
domain_rows = [[items_domain[ncols*r + c] for c in range(ncols)] for r in range(nrows)]
return DomainMatrix(domain_rows, (nrows, ncols), domain)
@classmethod
def from_dict_sympy(cls, nrows, ncols, elemsdict, **kwargs):
"""
Parameters
==========
nrows: number of rows
ncols: number of cols
elemsdict: dict of dicts containing non-zero elements of the DomainMatrix
Returns
=======
DomainMatrix containing elements of elemsdict
Examples
========
>>> from sympy.polys.matrices import DomainMatrix
>>> from sympy.abc import x,y,z
>>> elemsdict = {0: {0:x}, 1:{1: y}, 2: {2: z}}
>>> A = DomainMatrix.from_dict_sympy(3, 3, elemsdict)
>>> A
DomainMatrix({0: {0: x}, 1: {1: y}, 2: {2: z}}, (3, 3), ZZ[x,y,z])
See Also
========
from_list_sympy
"""
if not all(0 <= r < nrows for r in elemsdict):
raise DMBadInputError("Row out of range")
if not all(0 <= c < ncols for row in elemsdict.values() for c in row):
raise DMBadInputError("Column out of range")
items_sympy = [_sympify(item) for row in elemsdict.values() for item in row.values()]
domain, items_domain = cls.get_domain(items_sympy, **kwargs)
idx = 0
items_dict = {}
for i, row in elemsdict.items():
items_dict[i] = {}
for j in row:
items_dict[i][j] = items_domain[idx]
idx += 1
return DomainMatrix(items_dict, (nrows, ncols), domain)
@classmethod
def from_Matrix(cls, M, fmt='sparse',**kwargs):
r"""
Convert Matrix to DomainMatrix
Parameters
==========
M: Matrix
Returns
=======
Returns DomainMatrix with identical elements as M
Examples
========
>>> from sympy import Matrix
>>> from sympy.polys.matrices import DomainMatrix
>>> M = Matrix([
... [1.0, 3.4],
... [2.4, 1]])
>>> A = DomainMatrix.from_Matrix(M)
>>> A
DomainMatrix({0: {0: 1.0, 1: 3.4}, 1: {0: 2.4, 1: 1.0}}, (2, 2), RR)
We can keep internal representation as ddm using fmt='dense'
>>> from sympy import Matrix, QQ
>>> from sympy.polys.matrices import DomainMatrix
>>> A = DomainMatrix.from_Matrix(Matrix([[QQ(1, 2), QQ(3, 4)], [QQ(0, 1), QQ(0, 1)]]), fmt='dense')
>>> A.rep
[[1/2, 3/4], [0, 0]]
See Also
========
Matrix
"""
if fmt == 'dense':
return cls.from_list_sympy(*M.shape, M.tolist(), **kwargs)
return cls.from_dict_sympy(*M.shape, M.todod(), **kwargs)
@classmethod
def get_domain(cls, items_sympy, **kwargs):
K, items_K = construct_domain(items_sympy, **kwargs)
return K, items_K
def choose_domain(self, **opts):
"""Convert to a domain found by :func:`~.construct_domain`.
Examples
========
>>> from sympy import ZZ
>>> from sympy.polys.matrices import DM
>>> M = DM([[1, 2], [3, 4]], ZZ)
>>> M
DomainMatrix([[1, 2], [3, 4]], (2, 2), ZZ)
>>> M.choose_domain(field=True)
DomainMatrix([[1, 2], [3, 4]], (2, 2), QQ)
>>> from sympy.abc import x
>>> M = DM([[1, x], [x**2, x**3]], ZZ[x])
>>> M.choose_domain(field=True).domain
ZZ(x)
Keyword arguments are passed to :func:`~.construct_domain`.
See Also
========
construct_domain
convert_to
"""
elements, data = self.to_sympy().to_flat_nz()
dom, elements_dom = construct_domain(elements, **opts)
return self.from_flat_nz(elements_dom, data, dom)
    def copy(self):
        # New DomainMatrix wrapping a copy of the internal rep.
        return self.from_rep(self.rep.copy())
def convert_to(self, K):
r"""
Change the domain of DomainMatrix to desired domain or field
Parameters
==========
K : Represents the desired domain or field.
Alternatively, ``None`` may be passed, in which case this method
just returns a copy of this DomainMatrix.
Returns
=======
DomainMatrix
DomainMatrix with the desired domain or field
Examples
========
>>> from sympy import ZZ, ZZ_I
>>> from sympy.polys.matrices import DomainMatrix
>>> A = DomainMatrix([
... [ZZ(1), ZZ(2)],
... [ZZ(3), ZZ(4)]], (2, 2), ZZ)
>>> A.convert_to(ZZ_I)
DomainMatrix([[1, 2], [3, 4]], (2, 2), ZZ_I)
"""
if K == self.domain:
return self.copy()
rep = self.rep
# The DFM, DDM and SDM types do not do any implicit conversions so we
# manage switching between DDM and DFM here.
if rep.is_DFM and not DFM._supports_domain(K):
rep_K = rep.to_ddm().convert_to(K)
elif rep.is_DDM and DFM._supports_domain(K):
rep_K = rep.convert_to(K).to_dfm()
else:
rep_K = rep.convert_to(K)
return self.from_rep(rep_K)
    def to_sympy(self):
        # Convert all elements to the EXRAW domain (raw SymPy expressions).
        return self.convert_to(EXRAW)
def to_field(self):
r"""
Returns a DomainMatrix with the appropriate field
Returns
=======
DomainMatrix
DomainMatrix with the appropriate field
Examples
========
>>> from sympy import ZZ
>>> from sympy.polys.matrices import DomainMatrix
>>> A = DomainMatrix([
... [ZZ(1), ZZ(2)],
... [ZZ(3), ZZ(4)]], (2, 2), ZZ)
>>> A.to_field()
DomainMatrix([[1, 2], [3, 4]], (2, 2), QQ)
"""
K = self.domain.get_field()
return self.convert_to(K)
def to_sparse(self):
"""
Return a sparse DomainMatrix representation of *self*.
Examples
========
>>> from sympy.polys.matrices import DomainMatrix
>>> from sympy import QQ
>>> A = DomainMatrix([[1, 0],[0, 2]], (2, 2), QQ)
>>> A.rep
[[1, 0], [0, 2]]
>>> B = A.to_sparse()
>>> B.rep
{0: {0: 1}, 1: {1: 2}}
"""
if self.rep.fmt == 'sparse':
return self
return self.from_rep(self.rep.to_sdm())
def to_dense(self):
"""
Return a dense DomainMatrix representation of *self*.
Examples
========
>>> from sympy.polys.matrices import DomainMatrix
>>> from sympy import QQ
>>> A = DomainMatrix({0: {0: 1}, 1: {1: 2}}, (2, 2), QQ)
>>> A.rep
{0: {0: 1}, 1: {1: 2}}
>>> B = A.to_dense()
>>> B.rep
[[1, 0], [0, 2]]
"""
rep = self.rep
if rep.fmt == 'dense':
return self
return self.from_rep(rep.to_dfm_or_ddm())
def to_ddm(self):
"""
Return a :class:`~.DDM` representation of *self*.
Examples
========
>>> from sympy.polys.matrices import DomainMatrix
>>> from sympy import QQ
>>> A = DomainMatrix({0: {0: 1}, 1: {1: 2}}, (2, 2), QQ)
>>> ddm = A.to_ddm()
>>> ddm
[[1, 0], [0, 2]]
>>> type(ddm)
<class 'sympy.polys.matrices.ddm.DDM'>
See Also
========
to_sdm
to_dense
sympy.polys.matrices.ddm.DDM.to_sdm
"""
return self.rep.to_ddm()
def to_sdm(self):
"""
Return a :class:`~.SDM` representation of *self*.
Examples
========
>>> from sympy.polys.matrices import DomainMatrix
>>> from sympy import QQ
>>> A = DomainMatrix([[1, 0],[0, 2]], (2, 2), QQ)
>>> sdm = A.to_sdm()
>>> sdm
{0: {0: 1}, 1: {1: 2}}
>>> type(sdm)
<class 'sympy.polys.matrices.sdm.SDM'>
See Also
========
to_ddm
to_sparse
sympy.polys.matrices.sdm.SDM.to_ddm
"""
return self.rep.to_sdm()
@doctest_depends_on(ground_types=['flint'])
def to_dfm(self):
"""
Return a :class:`~.DFM` representation of *self*.
Examples
========
>>> from sympy.polys.matrices import DomainMatrix
>>> from sympy import QQ
>>> A = DomainMatrix([[1, 0],[0, 2]], (2, 2), QQ)
>>> dfm = A.to_dfm()
>>> dfm
[[1, 0], [0, 2]]
>>> type(dfm)
<class 'sympy.polys.matrices._dfm.DFM'>
See Also
========
to_ddm
to_dense
DFM
"""
return self.rep.to_dfm()
@doctest_depends_on(ground_types=['flint'])
def to_dfm_or_ddm(self):
"""
Return a :class:`~.DFM` or :class:`~.DDM` representation of *self*.
Explanation
===========
The :class:`~.DFM` representation can only be used if the ground types
are ``flint`` and the ground domain is supported by ``python-flint``.
This method will return a :class:`~.DFM` representation if possible,
but will return a :class:`~.DDM` representation otherwise.
Examples
========
>>> from sympy.polys.matrices import DomainMatrix
>>> from sympy import QQ
>>> A = DomainMatrix([[1, 0],[0, 2]], (2, 2), QQ)
>>> dfm = A.to_dfm_or_ddm()
>>> dfm
[[1, 0], [0, 2]]
>>> type(dfm) # Depends on the ground domain and ground types
<class 'sympy.polys.matrices._dfm.DFM'>
See Also
========
to_ddm: Always return a :class:`~.DDM` representation.
to_dfm: Returns a :class:`~.DFM` representation or raise an error.
to_dense: Convert internally to a :class:`~.DFM` or :class:`~.DDM`
DFM: The :class:`~.DFM` dense FLINT matrix representation.
DDM: The Python :class:`~.DDM` dense domain matrix representation.
"""
return self.rep.to_dfm_or_ddm()
@classmethod
def _unify_domain(cls, *matrices):
"""Convert matrices to a common domain"""
domains = {matrix.domain for matrix in matrices}
if len(domains) == 1:
return matrices
domain = reduce(lambda x, y: x.unify(y), domains)
return tuple(matrix.convert_to(domain) for matrix in matrices)
@classmethod
def _unify_fmt(cls, *matrices, fmt=None):
"""Convert matrices to the same format.
If all matrices have the same format, then return unmodified.
Otherwise convert both to the preferred format given as *fmt* which
should be 'dense' or 'sparse'.
"""
formats = {matrix.rep.fmt for matrix in matrices}
if len(formats) == 1:
return matrices
if fmt == 'sparse':
return tuple(matrix.to_sparse() for matrix in matrices)
elif fmt == 'dense':
return tuple(matrix.to_dense() for matrix in matrices)
else:
raise ValueError("fmt should be 'sparse' or 'dense'")
def unify(self, *others, fmt=None):
"""
Unifies the domains and the format of self and other
matrices.
Parameters
==========
others : DomainMatrix
fmt: string 'dense', 'sparse' or `None` (default)
The preferred format to convert to if self and other are not
already in the same format. If `None` or not specified then no
conversion if performed.
Returns
=======
Tuple[DomainMatrix]
Matrices with unified domain and format
Examples
========
Unify the domain of DomainMatrix that have different domains:
>>> from sympy import ZZ, QQ
>>> from sympy.polys.matrices import DomainMatrix
>>> A = DomainMatrix([[ZZ(1), ZZ(2)]], (1, 2), ZZ)
>>> B = DomainMatrix([[QQ(1, 2), QQ(2)]], (1, 2), QQ)
>>> Aq, Bq = A.unify(B)
>>> Aq
DomainMatrix([[1, 2]], (1, 2), QQ)
>>> Bq
DomainMatrix([[1/2, 2]], (1, 2), QQ)
Unify the format (dense or sparse):
>>> A = DomainMatrix([[ZZ(1), ZZ(2)]], (1, 2), ZZ)
>>> B = DomainMatrix({0:{0: ZZ(1)}}, (2, 2), ZZ)
>>> B.rep
{0: {0: 1}}
>>> A2, B2 = A.unify(B, fmt='dense')
>>> B2.rep
[[1, 0], [0, 0]]
See Also
========
convert_to, to_dense, to_sparse
"""
matrices = (self,) + others
matrices = DomainMatrix._unify_domain(*matrices)
if fmt is not None:
matrices = DomainMatrix._unify_fmt(*matrices, fmt=fmt)
return matrices
def to_Matrix(self):
r"""
Convert DomainMatrix to Matrix
Returns
=======
Matrix
MutableDenseMatrix for the DomainMatrix
Examples
========
>>> from sympy import ZZ
>>> from sympy.polys.matrices import DomainMatrix
>>> A = DomainMatrix([
... [ZZ(1), ZZ(2)],
... [ZZ(3), ZZ(4)]], (2, 2), ZZ)
>>> A.to_Matrix()
Matrix([
[1, 2],
[3, 4]])
See Also
========
from_Matrix
"""
from sympy.matrices.dense import MutableDenseMatrix
# XXX: If the internal representation of RepMatrix changes then this
# might need to be changed also.
if self.domain in (ZZ, QQ, EXRAW):
if self.rep.fmt == "sparse":
rep = self.copy()
else:
rep = self.to_sparse()
else:
rep = self.convert_to(EXRAW).to_sparse()
return MutableDenseMatrix._fromrep(rep)
def to_list(self):
"""
Convert :class:`DomainMatrix` to list of lists.
See Also
========
from_list
to_list_flat
to_flat_nz
to_dok
"""
return self.rep.to_list()
def to_list_flat(self):
"""
Convert :class:`DomainMatrix` to flat list.
Examples
========
>>> from sympy import ZZ
>>> from sympy.polys.matrices import DomainMatrix
>>> A = DomainMatrix([[ZZ(1), ZZ(2)], [ZZ(3), ZZ(4)]], (2, 2), ZZ)
>>> A.to_list_flat()
[1, 2, 3, 4]
See Also
========
from_list_flat
to_list
to_flat_nz
to_dok
"""
return self.rep.to_list_flat()
@classmethod
def from_list_flat(cls, elements, shape, domain):
"""
Create :class:`DomainMatrix` from flat list.
Examples
========
>>> from sympy import ZZ
>>> from sympy.polys.matrices import DomainMatrix
>>> element_list = [ZZ(1), ZZ(2), ZZ(3), ZZ(4)]
>>> A = DomainMatrix.from_list_flat(element_list, (2, 2), ZZ)
>>> A
DomainMatrix([[1, 2], [3, 4]], (2, 2), ZZ)
>>> A == A.from_list_flat(A.to_list_flat(), A.shape, A.domain)
True
See Also
========
to_list_flat
"""
ddm = DDM.from_list_flat(elements, shape, domain)
return cls.from_rep(ddm.to_dfm_or_ddm())
def to_flat_nz(self):
"""
Convert :class:`DomainMatrix` to list of nonzero elements and data.
Explanation
===========
Returns a tuple ``(elements, data)`` where ``elements`` is a list of
elements of the matrix with zeros possibly excluded. The matrix can be
reconstructed by passing these to :meth:`from_flat_nz`. The idea is to
be able to modify a flat list of the elements and then create a new
matrix of the same shape with the modified elements in the same
positions.
The format of ``data`` differs depending on whether the underlying
representation is dense or sparse but either way it represents the
positions of the elements in the list in a way that
:meth:`from_flat_nz` can use to reconstruct the matrix. The
:meth:`from_flat_nz` method should be called on the same
:class:`DomainMatrix` that was used to call :meth:`to_flat_nz`.
Examples
========
>>> from sympy import ZZ
>>> from sympy.polys.matrices import DomainMatrix
>>> A = DomainMatrix([
... [ZZ(1), ZZ(2)],
... [ZZ(3), ZZ(4)]], (2, 2), ZZ)
>>> elements, data = A.to_flat_nz()
>>> elements
[1, 2, 3, 4]
>>> A == A.from_flat_nz(elements, data, A.domain)
True
Create a matrix with the elements doubled:
>>> elements_doubled = [2*x for x in elements]
>>> A2 = A.from_flat_nz(elements_doubled, data, A.domain)
>>> A2 == 2*A
True
See Also
========
from_flat_nz
"""
return self.rep.to_flat_nz()
def from_flat_nz(self, elements, data, domain):
"""
Reconstruct :class:`DomainMatrix` after calling :meth:`to_flat_nz`.
See :meth:`to_flat_nz` for explanation.
See Also
========
to_flat_nz
"""
rep = self.rep.from_flat_nz(elements, data, domain)
return self.from_rep(rep)
def to_dok(self):
"""
Convert :class:`DomainMatrix` to dictionary of keys (dok) format.
Examples
========
>>> from sympy import ZZ
>>> from sympy.polys.matrices import DomainMatrix
>>> A = DomainMatrix([
... [ZZ(1), ZZ(0)],
... [ZZ(0), ZZ(4)]], (2, 2), ZZ)
>>> A.to_dok()
{(0, 0): 1, (1, 1): 4}
The matrix can be reconstructed by calling :meth:`from_dok` although
the reconstructed matrix will always be in sparse format:
>>> A.to_sparse() == A.from_dok(A.to_dok(), A.shape, A.domain)
True
See Also
========
from_dok
to_list
to_list_flat
to_flat_nz
"""
return self.rep.to_dok()
@classmethod
def from_dok(cls, dok, shape, domain):
"""
Create :class:`DomainMatrix` from dictionary of keys (dok) format.
See :meth:`to_dok` for explanation.
See Also
========
to_dok
"""
return cls.from_rep(SDM.from_dok(dok, shape, domain))
def nnz(self):
"""
Number of nonzero elements in the matrix.
Examples
========
>>> from sympy import ZZ
>>> from sympy.polys.matrices import DM
>>> A = DM([[1, 0], [0, 4]], ZZ)
>>> A.nnz()
2
"""
return self.rep.nnz()
    def __repr__(self):
        # e.g. DomainMatrix([[1, 2], [3, 4]], (2, 2), ZZ)
        return 'DomainMatrix(%s, %r, %r)' % (str(self.rep), self.shape, self.domain)
    def transpose(self):
        """Matrix transpose of ``self``"""
        return self.from_rep(self.rep.transpose())
def flat(self):
rows, cols = self.shape
return [self[i,j].element for i in range(rows) for j in range(cols)]
    @property
    def is_zero_matrix(self):
        # True if every entry is zero (delegates to the rep).
        return self.rep.is_zero_matrix()
    @property
    def is_upper(self):
        """
        Says whether this matrix is upper-triangular. True can be returned
        even if the matrix is not square.
        """
        return self.rep.is_upper()
    @property
    def is_lower(self):
        """
        Says whether this matrix is lower-triangular. True can be returned
        even if the matrix is not square.
        """
        return self.rep.is_lower()
@property
def is_diagonal(self):
"""
True if the matrix is diagonal.
Can return true for non-square matrices. A matrix is diagonal if
``M[i,j] == 0`` whenever ``i != j``.
Examples
========
>>> from sympy import ZZ
>>> from sympy.polys.matrices import DM
>>> M = DM([[ZZ(1), ZZ(0)], [ZZ(0), ZZ(1)]], ZZ)
>>> M.is_diagonal
True
See Also
========
is_upper
is_lower
is_square
diagonal
"""
return self.rep.is_diagonal()
def diagonal(self):
"""
Get the diagonal entries of the matrix as a list.
Examples
========
>>> from sympy import ZZ
>>> from sympy.polys.matrices import DM
>>> M = DM([[ZZ(1), ZZ(2)], [ZZ(3), ZZ(4)]], ZZ)
>>> M.diagonal()
[1, 4]
See Also
========
is_diagonal
diag
"""
return self.rep.diagonal()
    @property
    def is_square(self):
        """
        True if the matrix is square.
        """
        return self.shape[0] == self.shape[1]
    def rank(self):
        """Return the rank, computed as the number of pivots in the RREF."""
        rref, pivots = self.rref()
        return len(pivots)
def hstack(A, *B):
r"""Horizontally stack the given matrices.
Parameters
==========
B: DomainMatrix
Matrices to stack horizontally.
Returns
=======
DomainMatrix
DomainMatrix by stacking horizontally.
Examples
========
>>> from sympy import ZZ
>>> from sympy.polys.matrices import DomainMatrix
>>> A = DomainMatrix([[ZZ(1), ZZ(2)], [ZZ(3), ZZ(4)]], (2, 2), ZZ)
>>> B = DomainMatrix([[ZZ(5), ZZ(6)], [ZZ(7), ZZ(8)]], (2, 2), ZZ)
>>> A.hstack(B)
DomainMatrix([[1, 2, 5, 6], [3, 4, 7, 8]], (2, 4), ZZ)
>>> C = DomainMatrix([[ZZ(9), ZZ(10)], [ZZ(11), ZZ(12)]], (2, 2), ZZ)
>>> A.hstack(B, C)
DomainMatrix([[1, 2, 5, 6, 9, 10], [3, 4, 7, 8, 11, 12]], (2, 6), ZZ)
See Also
========
unify
"""
A, *B = A.unify(*B, fmt=A.rep.fmt)
return DomainMatrix.from_rep(A.rep.hstack(*(Bk.rep for Bk in B)))
def vstack(A, *B):
r"""Vertically stack the given matrices.
Parameters
==========
B: DomainMatrix
Matrices to stack vertically.
Returns
=======
DomainMatrix
DomainMatrix by stacking vertically.
Examples
========
>>> from sympy import ZZ
>>> from sympy.polys.matrices import DomainMatrix
>>> A = DomainMatrix([[ZZ(1), ZZ(2)], [ZZ(3), ZZ(4)]], (2, 2), ZZ)
>>> B = DomainMatrix([[ZZ(5), ZZ(6)], [ZZ(7), ZZ(8)]], (2, 2), ZZ)
>>> A.vstack(B)
DomainMatrix([[1, 2], [3, 4], [5, 6], [7, 8]], (4, 2), ZZ)
>>> C = DomainMatrix([[ZZ(9), ZZ(10)], [ZZ(11), ZZ(12)]], (2, 2), ZZ)
>>> A.vstack(B, C)
DomainMatrix([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]], (6, 2), ZZ)
See Also
========
unify
"""
A, *B = A.unify(*B, fmt='dense')
return DomainMatrix.from_rep(A.rep.vstack(*(Bk.rep for Bk in B)))
    def applyfunc(self, func, domain=None):
        # Apply *func* elementwise; *domain* is the codomain of the result
        # (defaults to the current domain).
        if domain is None:
            domain = self.domain
        return self.from_rep(self.rep.applyfunc(func, domain))
    def __add__(A, B):
        # Operator wrapper: unify domains/formats (densifying on mismatch)
        # before delegating to add().
        if not isinstance(B, DomainMatrix):
            return NotImplemented
        A, B = A.unify(B, fmt='dense')
        return A.add(B)
    def __sub__(A, B):
        # Same unification as __add__, delegating to sub().
        if not isinstance(B, DomainMatrix):
            return NotImplemented
        A, B = A.unify(B, fmt='dense')
        return A.sub(B)
    def __neg__(A):
        # Elementwise negation.
        return A.neg()
    def __mul__(A, B):
        """A * B"""
        if isinstance(B, DomainMatrix):
            # Matrix * matrix: unify then matrix-multiply.
            A, B = A.unify(B, fmt='dense')
            return A.matmul(B)
        elif B in A.domain:
            # Raw ground-domain element: scalar multiplication.
            return A.scalarmul(B)
        elif isinstance(B, DomainScalar):
            # Unify the scalar's domain with the matrix domain first.
            A, B = A.unify(B)
            return A.scalarmul(B.element)
        else:
            return NotImplemented
    def __rmul__(A, B):
        # B * A with B a scalar (matrix * matrix is handled by __mul__).
        if B in A.domain:
            return A.rscalarmul(B)
        elif isinstance(B, DomainScalar):
            A, B = A.unify(B)
            return A.rscalarmul(B.element)
        else:
            return NotImplemented
    def __pow__(A, n):
        """A ** n"""
        # Only integer exponents are supported; pow() rejects negatives.
        if not isinstance(n, int):
            return NotImplemented
        return A.pow(n)
def _check(a, op, b, ashape, bshape):
if a.domain != b.domain:
msg = "Domain mismatch: %s %s %s" % (a.domain, op, b.domain)
raise DMDomainError(msg)
if ashape != bshape:
msg = "Shape mismatch: %s %s %s" % (a.shape, op, b.shape)
raise DMShapeError(msg)
if a.rep.fmt != b.rep.fmt:
msg = "Format mismatch: %s %s %s" % (a.rep.fmt, op, b.rep.fmt)
raise DMFormatError(msg)
if type(a.rep) != type(b.rep):
msg = "Type mismatch: %s %s %s" % (type(a.rep), op, type(b.rep))
raise DMFormatError(msg)
def add(A, B):
r"""
Adds two DomainMatrix matrices of the same Domain
Parameters
==========
A, B: DomainMatrix
matrices to add
Returns
=======
DomainMatrix
DomainMatrix after Addition
Raises
======
DMShapeError
If the dimensions of the two DomainMatrix are not equal
ValueError
If the domain of the two DomainMatrix are not same
Examples
========
>>> from sympy import ZZ
>>> from sympy.polys.matrices import DomainMatrix
>>> A = DomainMatrix([
... [ZZ(1), ZZ(2)],
... [ZZ(3), ZZ(4)]], (2, 2), ZZ)
>>> B = DomainMatrix([
... [ZZ(4), ZZ(3)],
... [ZZ(2), ZZ(1)]], (2, 2), ZZ)
>>> A.add(B)
DomainMatrix([[5, 5], [5, 5]], (2, 2), ZZ)
See Also
========
sub, matmul
"""
A._check('+', B, A.shape, B.shape)
return A.from_rep(A.rep.add(B.rep))
def sub(A, B):
r"""
Subtracts two DomainMatrix matrices of the same Domain
Parameters
==========
A, B: DomainMatrix
matrices to subtract
Returns
=======
DomainMatrix
DomainMatrix after Subtraction
Raises
======
DMShapeError
If the dimensions of the two DomainMatrix are not equal
ValueError
If the domain of the two DomainMatrix are not same
Examples
========
>>> from sympy import ZZ
>>> from sympy.polys.matrices import DomainMatrix
>>> A = DomainMatrix([
... [ZZ(1), ZZ(2)],
... [ZZ(3), ZZ(4)]], (2, 2), ZZ)
>>> B = DomainMatrix([
... [ZZ(4), ZZ(3)],
... [ZZ(2), ZZ(1)]], (2, 2), ZZ)
>>> A.sub(B)
DomainMatrix([[-3, -1], [1, 3]], (2, 2), ZZ)
See Also
========
add, matmul
"""
A._check('-', B, A.shape, B.shape)
return A.from_rep(A.rep.sub(B.rep))
def neg(A):
r"""
Returns the negative of DomainMatrix
Parameters
==========
A : Represents a DomainMatrix
Returns
=======
DomainMatrix
DomainMatrix after Negation
Examples
========
>>> from sympy import ZZ
>>> from sympy.polys.matrices import DomainMatrix
>>> A = DomainMatrix([
... [ZZ(1), ZZ(2)],
... [ZZ(3), ZZ(4)]], (2, 2), ZZ)
>>> A.neg()
DomainMatrix([[-1, -2], [-3, -4]], (2, 2), ZZ)
"""
return A.from_rep(A.rep.neg())
def mul(A, b):
r"""
Performs term by term multiplication for the second DomainMatrix
w.r.t first DomainMatrix. Returns a DomainMatrix whose rows are
list of DomainMatrix matrices created after term by term multiplication.
Parameters
==========
A, B: DomainMatrix
matrices to multiply term-wise
Returns
=======
DomainMatrix
DomainMatrix after term by term multiplication
Examples
========
>>> from sympy import ZZ
>>> from sympy.polys.matrices import DomainMatrix
>>> A = DomainMatrix([
... [ZZ(1), ZZ(2)],
... [ZZ(3), ZZ(4)]], (2, 2), ZZ)
>>> b = ZZ(2)
>>> A.mul(b)
DomainMatrix([[2, 4], [6, 8]], (2, 2), ZZ)
See Also
========
matmul
"""
return A.from_rep(A.rep.mul(b))
def rmul(A, b):
return A.from_rep(A.rep.rmul(b))
def matmul(A, B):
r"""
Performs matrix multiplication of two DomainMatrix matrices
Parameters
==========
A, B: DomainMatrix
to multiply
Returns
=======
DomainMatrix
DomainMatrix after multiplication
Examples
========
>>> from sympy import ZZ
>>> from sympy.polys.matrices import DomainMatrix
>>> A = DomainMatrix([
... [ZZ(1), ZZ(2)],
... [ZZ(3), ZZ(4)]], (2, 2), ZZ)
>>> B = DomainMatrix([
... [ZZ(1), ZZ(1)],
... [ZZ(0), ZZ(1)]], (2, 2), ZZ)
>>> A.matmul(B)
DomainMatrix([[1, 3], [3, 7]], (2, 2), ZZ)
See Also
========
mul, pow, add, sub
"""
A._check('*', B, A.shape[1], B.shape[0])
return A.from_rep(A.rep.matmul(B.rep))
    def _scalarmul(A, lamda, reverse):
        # Fast paths: 0 yields a zero matrix, 1 yields a copy; otherwise
        # delegate to elementwise mul/rmul. *reverse* selects lamda*A versus
        # A*lamda — presumably relevant for noncommutative ground domains.
        if lamda == A.domain.zero:
            return DomainMatrix.zeros(A.shape, A.domain)
        elif lamda == A.domain.one:
            return A.copy()
        elif reverse:
            return A.rmul(lamda)
        else:
            return A.mul(lamda)
    def scalarmul(A, lamda):
        # A * lamda
        return A._scalarmul(lamda, reverse=False)
    def rscalarmul(A, lamda):
        # lamda * A
        return A._scalarmul(lamda, reverse=True)
    def mul_elementwise(A, B):
        # Hadamard (elementwise) product; the domains must already match.
        # NOTE(review): assert is stripped under -O; raising DMDomainError may
        # be preferable — confirm against callers before changing.
        assert A.domain == B.domain
        return A.from_rep(A.rep.mul_elementwise(B.rep))
    def __truediv__(A, lamda):
        """ Method for Scalar Division"""
        # Coerce plain ints/ZZ elements, then field elements of A's domain,
        # into a DomainScalar before dividing; anything else is unsupported.
        if isinstance(lamda, int) or ZZ.of_type(lamda):
            lamda = DomainScalar(ZZ(lamda), ZZ)
        elif A.domain.is_Field and lamda in A.domain:
            K = A.domain
            lamda = DomainScalar(K.convert(lamda), K)
        if not isinstance(lamda, DomainScalar):
            return NotImplemented
        # Work over a field so the reciprocal below is well defined.
        A, lamda = A.to_field().unify(lamda)
        if lamda.element == lamda.domain.zero:
            raise ZeroDivisionError
        if lamda.element == lamda.domain.one:
            return A
        return A.mul(1 / lamda.element)
def pow(A, n):
r"""
Computes A**n
Parameters
==========
A : DomainMatrix
n : exponent for A
Returns
=======
DomainMatrix
DomainMatrix on computing A**n
Raises
======
NotImplementedError
if n is negative.
Examples
========
>>> from sympy import ZZ
>>> from sympy.polys.matrices import DomainMatrix
>>> A = DomainMatrix([
... [ZZ(1), ZZ(1)],
... [ZZ(0), ZZ(1)]], (2, 2), ZZ)
>>> A.pow(2)
DomainMatrix([[1, 2], [0, 1]], (2, 2), ZZ)
See Also
========
matmul
"""
nrows, ncols = A.shape
if nrows != ncols:
raise DMNonSquareMatrixError('Power of a nonsquare matrix')
if n < 0:
raise NotImplementedError('Negative powers')
elif n == 0:
return A.eye(nrows, A.domain)
elif n == 1:
return A
elif n % 2 == 1:
return A * A**(n - 1)
else:
sqrtAn = A ** (n // 2)
return sqrtAn * sqrtAn
def scc(self):
    """Strongly connected components of a square :class:`DomainMatrix`.

    The matrix is viewed as the adjacency matrix of a directed graph in
    which an edge ``i -> j`` exists when ``M[i, j]`` is nonzero.  The
    components are returned as lists of row/column indices in reverse
    topological order; permuting rows and columns by the concatenated
    components brings the matrix into block lower-triangular form, which
    is useful e.g. for computing determinants blockwise.

    Raises ``DMNonSquareMatrixError`` for a non-square matrix.

    See also
    ========
    sympy.matrices.matrices.MatrixBase.strongly_connected_components
    sympy.utilities.iterables.strongly_connected_components
    """
    if self.is_square:
        return self.rep.scc()
    raise DMNonSquareMatrixError('Matrix must be square for scc')
def clear_denoms(self, convert=False):
    """Clear denominators, keeping the domain unchanged by default.

    Returns ``(den, num)`` where ``den`` is a :class:`DomainScalar` over
    the associated ring and ``num`` is a matrix satisfying
    ``den * self == num``.  The numerator stays in the original domain
    unless ``convert=True``, in which case it is converted to the
    associated ring.

    See Also
    ========
    sympy.polys.polytools.Poly.clear_denoms
    """
    K0 = self.domain
    # Work in the associated ring when one exists (e.g. ZZ for QQ).
    K1 = K0.get_ring() if K0.has_assoc_Ring else K0
    flat, data = self.to_flat_nz()
    den, cleared = dup_clear_denoms(flat, K0, K1, convert=convert)
    # The denominator always lives in the ring; the numerator domain
    # depends on the ``convert`` flag.
    Knum = K1 if convert else K0
    num = self.from_flat_nz(cleared, data, Knum)
    return DomainScalar(den, K1), num
def cancel_denom(self, denom):
    """
    Cancel factors between a matrix and a denominator.

    Returns a matrix and denominator on lowest terms (any factor common
    to *all* elements and the denominator is cancelled; factors shared
    with only *some* elements are not — see
    :meth:`cancel_denom_elementwise` for that).  The denominator is made
    canonical with respect to units (e.g. a negative denominator is made
    positive).  Requires ``gcd`` in the ground domain.

    Methods like :meth:`solve_den`, :meth:`inv_den` and :meth:`rref_den`
    return a matrix and denominator but not necessarily on lowest terms;
    this method reduces such results without introducing fractions.

    Raises ``ZeroDivisionError`` when ``denom`` is zero.

    See Also
    ========
    solve_den
    inv_den
    rref_den
    cancel_denom_elementwise
    """
    M = self
    K = self.domain
    if K.is_zero(denom):
        raise ZeroDivisionError('denominator is zero')
    elif K.is_one(denom):
        return (M.copy(), denom)
    elements, data = M.to_flat_nz()
    # First canonicalize the denominator (e.g. multiply by -1).
    if K.is_negative(denom):
        u = -K.one
    else:
        u = K.canonical_unit(denom)
    # Often after e.g. solve_den the denominator will be much more
    # complicated than the elements of the numerator. Hopefully it will
    # be quicker to find the gcd of the numerator and if there is no
    # content then we do not need to look at the denominator at all.
    #
    # Fix: the gcd was previously computed twice (once unconditionally
    # and again inside a redundant ``is_one(content)`` guard); compute
    # it exactly once — gcd(1, denom) == 1 makes the guard unnecessary.
    content = dup_content(elements, K)
    common = K.gcd(content, denom)
    if not K.is_one(common):
        elements = dup_quo_ground(elements, common, K)
        denom = K.quo(denom, common)
    if not K.is_one(u):
        elements = dup_mul_ground(elements, u, K)
        denom = u * denom
    elif K.is_one(common):
        # Nothing cancelled and no unit adjustment: return a plain copy.
        return (M.copy(), denom)
    M_cancelled = M.from_flat_nz(elements, data, K)
    return M_cancelled, denom
def cancel_denom_elementwise(self, denom):
    """Cancel common factors between each element and ``denom``.

    Returns ``(numers, denoms)``, matrices of per-element numerators and
    denominators such that ``numers[i, j] / denoms[i, j]`` equals
    ``self[i, j] / denom`` in lowest terms.  Requires ``gcd`` in the
    ground domain and raises ``ZeroDivisionError`` when ``denom`` is
    zero.  Use :meth:`cancel_denom` instead to keep the form of a matrix
    with a single scalar denominator.

    See Also
    ========
    cancel_denom
    """
    K = self.domain
    if K.is_zero(denom):
        raise ZeroDivisionError('denominator is zero')
    if K.is_one(denom):
        # Trivial denominator: numerators unchanged, denominators all one.
        return (self.copy(), self.ones(self.shape, self.domain))
    flat, data = self.to_flat_nz()
    numers = []
    denoms = []
    for elem in flat:
        # cofactors gives (gcd, elem/gcd, denom/gcd).
        _, num, den = K.cofactors(elem, denom)
        numers.append(num)
        denoms.append(den)
    M_numers = self.from_flat_nz(numers, data, K)
    M_denoms = self.from_flat_nz(denoms, data, K)
    return (M_numers, M_denoms)
def content(self):
    """Return the gcd of the elements of the matrix.

    Requires ``gcd`` in the ground domain.

    See Also
    ========
    primitive
    cancel_denom
    """
    flat, _ = self.to_flat_nz()
    return dup_content(flat, self.domain)
def primitive(self):
    """Factor the matrix as ``content * primitive_part``.

    Returns ``(content, M_primitive)`` where ``content`` is the gcd of
    the elements and ``M_primitive`` has content one, so that
    ``content * M_primitive == self``.  Requires ``gcd`` in the ground
    domain.

    See Also
    ========
    content
    cancel_denom
    """
    K = self.domain
    flat, data = self.to_flat_nz()
    cont, reduced = dup_primitive(flat, K)
    return cont, self.from_flat_nz(reduced, data, K)
def rref(self, *, method='auto'):
    r"""Return the reduced row echelon form and the pivot columns.

    The result ``(rref_matrix, pivots)`` is over the field of fractions
    of the current domain: non-field domains are converted with
    :meth:`to_field` first, so the domain must be a field or have an
    associated fraction field.  The returned matrix keeps the same
    format (sparse or dense) as the input.  See :meth:`rref_den` for the
    fraction-free variant that returns a denominator instead.

    Parameters
    ==========
    method : str, optional (default: 'auto')
        Algorithm selector:

        - ``'GJ'``: Gauss-Jordan elimination with division; best for
          very sparse matrices or elements with complex denominators.
        - ``'FF'``: fraction-free elimination using exact division
          (``exquo``); best for dense matrices or simple denominators.
        - ``'CD'``: clear denominators first, then fraction-free
          elimination in the associated ring; best for dense matrices
          with very simple denominators.
        - ``'GJ_dense'``/``'FF_dense'``/``'CD_dense'``: force the dense
          implementations of the above.
        - ``'auto'``: choose automatically (usually a sparse method).

    Returns
    =======
    (DomainMatrix, list)
        Reduced row echelon form and tuple of pivot column indices.

    See Also
    ========
    rref_den
    sympy.polys.matrices.sdm.sdm_irref
    sympy.polys.matrices.sdm.sdm_rref_den
    sympy.polys.matrices.dense.ddm_irref
    sympy.polys.matrices.dense.ddm_irref_den
    clear_denoms
    """
    return _dm_rref(self, method=method)
def rref_den(self, *, method='auto', keep_domain=True):
    r"""Return RREF with a common denominator and the pivot columns.

    The result ``(rref_matrix, denom, pivots)`` satisfies
    ``rref_matrix / denom == self.rref()[0]`` (after conversion to the
    fraction field).  Requires exact division (``exquo``) in the ground
    domain.  The returned matrix keeps the same format (sparse or
    dense) as the input.

    Parameters
    ==========
    method : str, optional (default: 'auto')
        Algorithm selector; the same options as :meth:`rref`
        (``'FF'``, ``'CD'``, ``'GJ'``, their ``'*_dense'`` variants, or
        ``'auto'``).  ``'FF'`` works in the current domain throughout;
        ``'CD'`` clears denominators and eliminates in the associated
        ring; ``'GJ'`` eliminates with division in the fraction field.
    keep_domain : bool, optional
        If True (the default) the returned matrix and denominator are in
        the same domain as the input.  If False they may be left in the
        ring or field the algorithm used internally, which can be more
        efficient (e.g. it avoids clearing denominators after ``'GJ'``).

    Returns
    =======
    (DomainMatrix, scalar, list)
        Reduced row echelon form, denominator and pivot column indices.

    See Also
    ========
    rref
    sympy.polys.matrices.sdm.sdm_irref
    sympy.polys.matrices.sdm.sdm_rref_den
    sympy.polys.matrices.dense.ddm_irref
    sympy.polys.matrices.dense.ddm_irref_den
    clear_denoms
    """
    return _dm_rref_den(self, method=method, keep_domain=keep_domain)
def columnspace(self):
    r"""Return a basis for the columnspace as the columns of a matrix.

    The pivot columns of the matrix (found via :meth:`rref`) form the
    basis.  Raises ``DMNotAField`` if the domain is not a field.

    Returns
    =======
    DomainMatrix
        The columns of this matrix form a basis for the columnspace.
    """
    if not self.domain.is_Field:
        raise DMNotAField('Not a field')
    # Only the pivot indices are needed; the RREF matrix itself and the
    # column count were previously computed and discarded unused.
    pivots = self.rref()[1]
    rows = self.shape[0]
    return self.extract(range(rows), pivots)
def rowspace(self):
    r"""Return a basis for the rowspace as the rows of a matrix.

    The first ``rank`` rows of the matrix are returned, where ``rank``
    is the number of pivots found via :meth:`rref`.  Raises
    ``DMNotAField`` if the domain is not a field.

    Returns
    =======
    DomainMatrix
        The rows of this matrix form a basis for the rowspace.
    """
    if not self.domain.is_Field:
        raise DMNotAField('Not a field')
    # Only the pivot count is needed; the RREF matrix itself and the
    # row count were previously computed and discarded unused.
    pivots = self.rref()[1]
    cols = self.shape[1]
    return self.extract(range(len(pivots)), range(cols))
def nullspace(self, divide_last=False):
    r"""Return a basis for the nullspace as the rows of a matrix.

    The transpose of the result ``N`` satisfies ``self * N.T == 0`` and
    ``N`` has ``cols - rank`` rows.  Works over non-field domains too,
    in which case no division is performed; over a ring with ``gcd`` the
    result can be further reduced with :meth:`primitive`.

    Parameters
    ==========
    divide_last : bool, optional
        If False (the default), the vectors are not normalized: the RREF
        is computed with :meth:`rref_den` and the denominator discarded
        (after canonicalizing its sign so that
        ``M.nullspace().primitive()`` is canonical).  If True, each row
        is divided by its final element; the domain must then be a field
        or ``DMNotAField`` is raised.

    See Also
    ========
    nullspace_from_rref
    rref
    rref_den
    rowspace
    """
    K = self.domain
    if divide_last:
        if not K.is_Field:
            raise DMNotAField("Cannot normalize vectors over a non-field")
        reduced, pivots = self.rref()
    else:
        reduced, den, pivots = self.rref_den()
        # Canonicalize the sign before discarding the denominator so
        # that M.nullspace().primitive() is canonical.
        unit = K.canonical_unit(den)
        if unit != K.one:
            reduced *= unit
    return reduced.nullspace_from_rref(pivots)
def nullspace_from_rref(self, pivots=None):
    """Compute the nullspace of a matrix already in RREF.

    The matrix must already be in reduced row echelon form (use
    :meth:`rref` or :meth:`rref_den` first, or :meth:`nullspace`
    instead); otherwise the result will be incorrect.  Works over any
    domain.  ``pivots`` may be supplied to avoid recomputation.

    See Also
    ========
    nullspace
    rref
    rref_den
    sympy.polys.matrices.sdm.SDM.nullspace_from_rref
    sympy.polys.matrices.ddm.DDM.nullspace_from_rref
    """
    # The lower-level routine also reports non-pivot columns; only the
    # nullspace representation is needed here.
    rep, _ = self.rep.nullspace_from_rref(pivots)
    return self.from_rep(rep)
def inv(self):
    r"""Return the inverse of the matrix.

    Raises
    ======
    DMNotAField
        If the domain is not a field.
    DMNonSquareMatrixError
        If the matrix is not square.

    See Also
    ========
    neg
    inv_den
    """
    if not self.domain.is_Field:
        raise DMNotAField('Not a field')
    rows, cols = self.shape
    if rows != cols:
        raise DMNonSquareMatrixError
    return self.from_rep(self.rep.inv())
def det(self):
    r"""Return the determinant of a square :class:`DomainMatrix`.

    Returns
    =======
    DomainElement
        Determinant of the matrix.

    Raises
    ======
    DMNonSquareMatrixError
        If the matrix is not square.
    """
    rows, cols = self.shape
    if rows != cols:
        raise DMNonSquareMatrixError
    return self.rep.det()
def adj_det(self):
    """Return the adjugate matrix and determinant of a square matrix.

    Computed by solving ``A*X == det(A)*I`` via the characteristic
    polynomial (:meth:`solve_den_charpoly` with ``check=False``, so a
    singular matrix yields determinant zero rather than an error).

    Returns
    =======
    (adjugate, determinant) : (DomainMatrix, DomainScalar)

    See Also
    ========
    adjugate
    det
    inv_den
    """
    m = self.shape[0]
    identity = self.eye((m, m), self.domain)
    adjA, detA = self.solve_den_charpoly(identity, check=False)
    # Keep the representation format consistent with self.
    if self.rep.fmt == "dense":
        adjA = adjA.to_dense()
    return adjA, detA
def adjugate(self):
    """Return the adjugate of a square :class:`DomainMatrix`.

    The adjugate is the transpose of the cofactor matrix and satisfies
    ``adj(A) == det(A) * A.inv()``; unlike the inverse it can be
    computed without division or fractions in the ground domain.

    Returns
    =======
    DomainMatrix
        The adjugate matrix, over the same domain.

    See Also
    ========
    adj_det
    """
    return self.adj_det()[0]
def inv_den(self, method=None):
    """Return the inverse as a matrix/denominator pair.

    Returns ``(inv, den)`` with ``self * inv == den * I``.  This is
    equivalent to :meth:`adj_det` up to cancellation of common factors
    (which is not guaranteed to be complete), so ``inv`` and ``den``
    are not necessarily the adjugate and determinant; ``den`` is zero
    if and only if the determinant is zero.  Use :meth:`adj_det` when
    the actual adjugate/determinant are required; :meth:`inv_den` is
    more efficient for inversion or equation solving.

    Parameters
    ==========
    method : str, optional
        ``None`` (automatic), ``'rref'`` or ``'charpoly'``; forwarded to
        :meth:`solve_den`.

    See Also
    ========
    inv
    det
    adj_det
    solve_den
    """
    identity = self.eye(self.shape, self.domain)
    return self.solve_den(identity, method=method)
def solve_den(self, b, method=None):
    """Solve ``A*x == b`` without fractions in the ground domain.

    Returns ``(xnum, xden)`` such that ``A * xnum == xden * b``; the
    solution is ``x = xnum / xden``.  If ``A`` is square, ``xden``
    divides ``det(A)``.  Underdetermined systems (more columns than
    rows) are not supported.  When the exact-fraction solution is
    wanted, convert with ``xnum.to_field() / xden`` (this cancels gcds
    and can be slow for large systems).

    Parameters
    ==========
    b : DomainMatrix
        Right-hand side with the same number of rows as ``self``.
    method : str, optional
        ``'rref'``: fraction-free RREF; requires exact division
        (``exquo``) and supports overdetermined systems.
        ``'charpoly'``: characteristic-polynomial method; division free
        but square matrices only.
        ``None`` (the default): choose automatically.

    Raises
    ======
    DMShapeError, DMNonSquareMatrixError, DMBadInputError,
    DMNonInvertibleMatrixError

    See Also
    ========
    solve_den_charpoly
    solve_den_rref
    inv_den
    """
    m, n = self.shape
    if m != b.shape[0]:
        raise DMShapeError("Matrix equation shape mismatch.")
    if method is None:
        method = 'rref'
    elif method == 'charpoly' and m != n:
        raise DMNonSquareMatrixError("method='charpoly' requires a square matrix.")
    if method == 'charpoly':
        return self.solve_den_charpoly(b)
    if method == 'rref':
        return self.solve_den_rref(b)
    raise DMBadInputError("method should be 'rref' or 'charpoly'")
def solve_den_rref(self, b):
    """Solve ``A*x == b`` using fraction-free RREF.

    Returns ``(xnum, xden)`` with ``A * xnum == xden * b``, obtained by
    row-reducing the augmented matrix ``[A | b]`` with :meth:`rref_den`.

    Raises
    ======
    DMShapeError
        If the shapes mismatch or the system is underdetermined.
    DMNonInvertibleMatrixError
        If the system does not have a unique solution.

    See Also
    ========
    solve_den
    solve_den_charpoly
    """
    m, n = self.shape
    if m != b.shape[0]:
        raise DMShapeError("Matrix equation shape mismatch.")
    if m < n:
        raise DMShapeError("Underdetermined matrix equation.")
    augmented = self.hstack(b)
    reduced, denom, pivots = augmented.rref_den()
    # XXX: A pivot at or beyond column n, or fewer than n pivots, means
    # there is no unique solution. (A pivot past column n may also mean
    # rref_den performed unnecessary elimination; it would be better if
    # rref methods took a parameter limiting elimination columns.)
    if len(pivots) != n or pivots and pivots[-1] >= n:
        raise DMNonInvertibleMatrixError("Non-unique solution.")
    return reduced[:n, n:], denom
def solve_den_charpoly(self, b, cp=None, check=True):
    """Solve ``A*x == b`` via the characteristic polynomial.

    Division free: returns ``(xnum, detA)`` with ``A * xnum == detA * b``
    where ``xnum == adj(A)*b`` and ``detA == det(A)``.  In particular,
    when ``b`` is the identity, ``xnum`` is the adjugate and
    ``A * adj(A) == detA * I``.

    Parameters
    ==========
    b : DomainMatrix
        Right-hand side with the same number of rows as ``self``.
    cp : list, optional
        Characteristic polynomial of ``self``, if already known;
        computed with :meth:`charpoly` otherwise.
    check : bool, optional
        If True (the default), raise ``DMNonInvertibleMatrixError`` when
        the determinant is zero; if False, the result then equals
        ``(A.adjugate()*b, 0)``.

    Raises
    ======
    DMNonSquareMatrixError, DMShapeError, DMNonInvertibleMatrixError

    See Also
    ========
    solve_den
    solve_den_rref
    inv_den
    """
    A, b = self.unify(b)
    m, n = self.shape
    if m != n:
        raise DMNonSquareMatrixError("Matrix must be square")
    if b.shape[0] != m:
        raise DMShapeError("Matrix and vector must have the same number of rows")
    f, detA = self.adj_poly_det(cp=cp)
    if check and not detA:
        raise DMNonInvertibleMatrixError("Matrix is not invertible")
    # Compute adj(A)*b = det(A)*inv(A)*b using Horner's method without
    # constructing inv(A) explicitly.
    return (self.eval_poly_mul(f, b), detA)
def adj_poly_det(self, cp=None):
    """
    Return the polynomial $p$ such that $p(A) = adj(A)$ and also the
    determinant of $A$.

    Parameters
    ==========
    cp : list, optional
        The characteristic polynomial of the matrix, highest degree
        first, if already known; computed with :meth:`charpoly`
        otherwise.

    Examples
    ========
    >>> from sympy import QQ
    >>> from sympy.polys.matrices import DM
    >>> A = DM([[QQ(1), QQ(2)], [QQ(3), QQ(4)]], QQ)
    >>> p, detA = A.adj_poly_det()
    >>> p
    [-1, 5]
    >>> p_A = A.eval_poly(p)
    >>> p_A
    DomainMatrix([[4, -2], [-3, 1]], (2, 2), QQ)
    >>> p[0]*A**1 + p[1]*A**0 == p_A
    True
    >>> p_A == A.adjugate()
    True
    >>> A * A.adjugate() == detA * A.eye(A.shape, A.domain).to_dense()
    True

    See Also
    ========
    adjugate
    eval_poly
    adj_det
    """
    # Cayley-Hamilton says that a matrix satisfies its own minimal
    # polynomial
    #
    #   p[0]*A^n + p[1]*A^(n-1) + ... + p[n]*I = 0
    #
    # with p[0]=1 and p[n]=(-1)^n*det(A) or
    #
    #   det(A)*I = -(-1)^n*(p[0]*A^(n-1) + p[1]*A^(n-2) + ... + p[n-1]*A).
    #
    # Define a new polynomial f with f[i] = -(-1)^n*p[i] for i=0..n-1. Then
    #
    #   det(A)*I = f[0]*A^n + f[1]*A^(n-1) + ... + f[n-1]*A.
    #
    # Multiplying on the right by inv(A) gives
    #
    #   det(A)*inv(A) = f[0]*A^(n-1) + f[1]*A^(n-2) + ... + f[n-1].
    #
    # So adj(A) = det(A)*inv(A) = f(A)
    A = self
    m, n = self.shape
    if m != n:
        raise DMNonSquareMatrixError("Matrix must be square")
    if cp is None:
        cp = A.charpoly()
    # The charpoly list has n+1 coefficients, so an odd length means n
    # is even; the sign of det(A) and of f depends on this parity (see
    # the derivation above).
    if len(cp) % 2:
        # n is even
        detA = cp[-1]
        f = [-cpi for cpi in cp[:-1]]
    else:
        # n is odd
        detA = -cp[-1]
        f = cp[:-1]
    return f, detA
def eval_poly(self, p):
    """
    Evaluate the matrix polynomial ``p(A)`` for ``A = self``.

    ``p`` is a list of coefficients, highest degree first, so that
    ``p(A) = p[0]*A**(len(p)-1) + ... + p[-1]*I``. An empty list gives the
    zero matrix and a single coefficient gives ``p[0]*I``.

    Raises
    ======

    DMNonSquareMatrixError
        If the matrix is not square.

    See Also
    ========

    eval_poly_mul
    """
    A = self
    nrows, ncols = A.shape
    if nrows != ncols:
        raise DMNonSquareMatrixError("Matrix must be square")
    if not p:
        return self.zeros(self.shape, self.domain)
    if len(p) == 1:
        return p[0] * self.eye(self.shape, self.domain)
    # Horner's scheme: ((p[0]*I)*A + p[1]*I)*A + ...
    # XXX: Use Paterson-Stockmeyer method?
    identity = A.eye(A.shape, A.domain)
    acc = p[0] * identity
    for coeff in p[1:]:
        acc = A*acc + coeff*identity
    return acc
def eval_poly_mul(self, p, B):
    r"""
    Evaluate the product $p(A) \times B$ without forming $p(A)$.

    Uses Horner's method, so when $B$ is a column matrix only
    matrix-vector products are needed. If $B$ is square or wide, or $A$
    lives in a simpler domain than $B$, evaluating $p(A)$ explicitly via
    :func:`eval_poly` and multiplying may be faster.

    Raises
    ======

    DMNonSquareMatrixError
        If ``self`` is not square.
    DMShapeError
        If the shapes of ``self`` and ``B`` are not aligned.
    DMDomainError
        If the two matrices are over different domains.

    See Also
    ========

    eval_poly
    solve_den_charpoly
    """
    A = self
    m, n = A.shape
    rows_B, _cols_B = B.shape
    if m != n:
        raise DMNonSquareMatrixError("Matrix must be square")
    if rows_B != n:
        raise DMShapeError("Matrices are not aligned")
    if A.domain != B.domain:
        raise DMDomainError("Matrices must have the same domain")
    # Factoring A out term by term:
    #
    #   p(A)*B = A*(...A*(A*(p[0]*B) + p[1]*B) + ...) + p[n-1]*B
    #
    # Each loop iteration below is one pair of brackets, starting from the
    # innermost p[0]*B. For a column B every A*(...) is a matrix-vector
    # multiply and every p[i]*B a scalar-vector multiply, so no
    # matrix-matrix products occur.
    if not p:
        return B.zeros(B.shape, B.domain, fmt=B.rep.fmt)
    result = p[0]*B
    for coeff in p[1:]:
        result = A*result + coeff*B
    return result
def lu(self):
    r"""
    Return an LU decomposition ``(L, U, exchange)`` of this matrix.

    ``L`` and ``U`` are the lower and upper factors as DomainMatrix and
    ``exchange`` lists the row-index swaps performed during the
    decomposition.

    Raises
    ======

    DMNotAField
        If the ground domain is not a field.

    See Also
    ========

    lu_solve
    """
    if not self.domain.is_Field:
        raise DMNotAField('Not a field')
    # Delegate to the underlying representation and wrap the factors back up.
    lower, upper, row_swaps = self.rep.lu()
    return self.from_rep(lower), self.from_rep(upper), row_swaps
def lu_solve(self, rhs):
    r"""
    Solve ``A*x = rhs`` for ``x`` where ``A`` is this matrix.

    Parameters
    ==========

    rhs : DomainMatrix
        The right-hand side ``B`` of ``A*x = B``.

    Returns
    =======

    DomainMatrix
        The solution ``x``.

    Raises
    ======

    DMShapeError
        If ``self`` and ``rhs`` have different numbers of rows.
    DMNotAField
        If the ground domain is not a field.

    See Also
    ========

    lu
    """
    if self.shape[0] != rhs.shape[0]:
        raise DMShapeError("Shape")
    if not self.domain.is_Field:
        raise DMNotAField('Not a field')
    solution_rep = self.rep.lu_solve(rhs.rep)
    return self.from_rep(solution_rep)
def _solve(A, b):
    # XXX: Not sure about this method or its signature. It is just created
    # because it is needed by the holonomic module.
    #
    # Returns (particular, nullspace): a particular solution of A*x = b and
    # a basis for the nullspace of A, both as DomainMatrix over a field.
    if A.shape[0] != b.shape[0]:
        raise DMShapeError("Shape")
    if A.domain != b.domain or not A.domain.is_Field:
        raise DMNotAField('Not a field')
    # Row-reduce the augmented matrix [A | b].
    Aaug = A.hstack(b)
    Arref, pivots = Aaug.rref()
    particular = Arref.from_rep(Arref.rep.particular())
    # The nullspace of A is read off the reduced matrix with the b column
    # dropped; nonpivots is unused here.
    nullspace_rep, nonpivots = Arref[:,:-1].rep.nullspace()
    nullspace = Arref.from_rep(nullspace_rep)
    return particular, nullspace
def charpoly(self):
    r"""
    Return the fully expanded characteristic polynomial as a coefficient
    list (highest degree first), computed with division-free arithmetic.

    If a factorisation of the characteristic polynomial is wanted, call
    :meth:`charpoly_factor_list` directly rather than factorising this
    result.

    Returns
    =======

    list: list of DomainElement
        Coefficients of the characteristic polynomial.

    See Also
    ========

    charpoly_factor_list
    charpoly_factor_blocks
    """
    M = self
    K = M.domain
    # Multiply together the block charpolys, each repeated according to
    # its multiplicity.
    expanded = [K.one]
    repeated = (f for f, mult in M.charpoly_factor_blocks() for _ in range(mult))
    for factor in repeated:
        expanded = dup_mul(expanded, factor, K)
    return expanded
def charpoly_factor_list(self):
    """
    Return a full factorisation of the characteristic polynomial.

    The result is a list of ``(factor, multiplicity)`` pairs where each
    factor is irreducible and given as a coefficient list (highest degree
    first). Use :meth:`charpoly` for the unfactorised polynomial.

    Returns
    =======

    list: list of pairs (factor, multiplicity)
        A full factorization of the characteristic polynomial.

    See Also
    ========

    charpoly
    charpoly_factor_blocks
    """
    M = self
    K = M.domain
    # Start from the free partial factorisation given by the block
    # structure rather than the expanded charpoly, then refine each block
    # factor into irreducibles.
    irreducible = []
    for block_factor, block_mult in M.charpoly_factor_blocks():
        _, sub_factors = dup_factor_list(block_factor, K)
        irreducible.extend((g, block_mult * e) for g, e in sub_factors)
    return _collect_factors(irreducible)
def charpoly_factor_blocks(self):
    """
    Return a partial factorisation of the characteristic polynomial.

    The factorisation comes from the block (strongly connected component)
    structure of the matrix, so the factors are not guaranteed to be
    irreducible. This is the cheapest representation of the characteristic
    polynomial: neither fully expanded (:meth:`charpoly`) nor fully
    factored (:meth:`charpoly_factor_list`).

    Returns
    =======

    list: list of pairs (factor, multiplicity)
        A partial factorization of the characteristic polynomial.

    Raises
    ======

    DMNonSquareMatrixError
        If the matrix is not square.

    See Also
    ========

    charpoly
    charpoly_factor_list
    """
    M = self
    if not M.is_square:
        raise DMNonSquareMatrixError("not square")
    # scc() gives index sets that permute M to block triangular form; the
    # charpoly of M is the product of the diagonal blocks' charpolys.
    block_factors = [(M.extract(idx, idx).charpoly_base(), 1) for idx in M.scc()]
    return _collect_factors(block_factors)
def charpoly_base(self):
    """
    Base case for :meth:`charpoly_factor_blocks` after block decomposition.

    This method is used internally by :meth:`charpoly_factor_blocks` as the
    base case for computing the characteristic polynomial of a block. It is
    more efficient to call :meth:`charpoly_factor_blocks`, :meth:`charpoly`
    or :meth:`charpoly_factor_list` rather than call this method directly.

    This will use either the dense or the sparse implementation depending
    on the sparsity of the matrix and will clear denominators if possible
    before calling :meth:`charpoly_berk` to compute the characteristic
    polynomial using the Berkowitz algorithm.

    See Also
    ========

    charpoly
    charpoly_factor_list
    charpoly_factor_blocks
    charpoly_berk
    """
    M = self
    K = M.domain
    # It seems that the sparse implementation is always faster for random
    # matrices with fewer than 50% non-zero entries. This does not seem to
    # depend on domain, size, bit count etc.
    density = self.nnz() / self.shape[0]**2
    if density < 0.5:
        M = M.to_sparse()
    else:
        M = M.to_dense()
    # Clearing denominators is always more efficient if it can be done.
    # Doing it here after block decomposition is good because each block
    # might have a smaller denominator. However it might be better for
    # charpoly and charpoly_factor_list to restore the denominators only at
    # the very end so that they can call e.g. dup_factor_list before
    # restoring the denominators. The methods would need to be changed to
    # return (poly, denom) pairs to make that work though.
    clear_denoms = K.is_Field and K.has_assoc_Ring
    if clear_denoms:
        # (The original re-assigned clear_denoms = True here, which was
        # redundant: the flag is already truthy inside this branch.)
        d, M = M.clear_denoms(convert=True)
        d = d.element
        K_f = K
        K_r = M.domain
    # Berkowitz algorithm over K_r (or over K when denominators were not
    # cleared).
    cp = M.charpoly_berk()
    if clear_denoms:
        # Restore the denominator in the charpoly over K_f.
        #
        # If M = N/d then p_M(x) = p_N(x*d)/d^n.
        cp = dup_convert(cp, K_r, K_f)
        p = [K_f.one, K_f.zero]
        q = [K_f.one/d]
        cp = dup_transform(cp, p, q, K_f)
    return cp
def charpoly_berk(self):
    """Compute the characteristic polynomial using the Berkowitz algorithm.

    This method directly calls the underlying implementation of the
    Berkowitz algorithm (:meth:`sympy.polys.matrices.dense.ddm_berk` or
    :meth:`sympy.polys.matrices.sdm.sdm_berk`).

    This is used by :meth:`charpoly` and other methods as the base case
    for computing the characteristic polynomial. However those methods will
    apply other optimizations such as block decomposition, clearing
    denominators and converting between dense and sparse representations
    before calling this method. It is more efficient to call those methods
    instead of this one but this method is provided for direct access to
    the Berkowitz algorithm.

    Examples
    ========

    >>> from sympy.polys.matrices import DM
    >>> from sympy import QQ
    >>> M = DM([[6, -1, 0, 0],
    ...         [9, 12, 0, 0],
    ...         [0, 0, 1, 2],
    ...         [0, 0, 5, 6]], QQ)
    >>> M.charpoly_berk()
    [1, -25, 203, -495, -324]

    See Also
    ========

    charpoly
    charpoly_base
    charpoly_factor_list
    charpoly_factor_blocks
    sympy.polys.matrices.dense.ddm_berk
    sympy.polys.matrices.sdm.sdm_berk
    """
    # Delegate straight to the representation's Berkowitz routine with no
    # preprocessing.
    return self.rep.charpoly()
@classmethod
def eye(cls, shape, domain):
    r"""
    Return an identity matrix over *domain*.

    ``shape`` may be an int ``n`` (giving an ``n x n`` matrix) or a pair
    ``(m, n)``. The result uses the sparse representation.
    """
    dims = (shape, shape) if isinstance(shape, int) else shape
    return cls.from_rep(SDM.eye(dims, domain))
@classmethod
def diag(cls, diagonal, domain, shape=None):
    r"""
    Return a diagonal matrix whose diagonal entries come from *diagonal*.

    When ``shape`` is not given it defaults to the square shape
    ``(len(diagonal), len(diagonal))``.
    """
    if shape is None:
        shape = (len(diagonal),) * 2
    return cls.from_rep(SDM.diag(diagonal, domain, shape))
@classmethod
def zeros(cls, shape, domain, *, fmt='sparse'):
    """Returns a zero DomainMatrix of size shape, belonging to the specified domain

    Examples
    ========

    >>> from sympy.polys.matrices import DomainMatrix
    >>> from sympy import QQ
    >>> DomainMatrix.zeros((2, 3), QQ)
    DomainMatrix({}, (2, 3), QQ)
    """
    # NOTE: ``fmt`` is accepted for interface compatibility but is not
    # used here; the zero matrix is always built on the sparse (SDM)
    # representation.
    return cls.from_rep(SDM.zeros(shape, domain))
@classmethod
def ones(cls, shape, domain):
    """Return a DomainMatrix of the given shape with every entry equal to one,
    over the specified domain.
    """
    # Build densely (every entry is non-zero) and convert to the preferred
    # dense representation.
    dense_ones = DDM.ones(shape, domain)
    return cls.from_rep(dense_ones.to_dfm_or_ddm())
def __eq__(A, B):
    r"""
    Return ``True`` when *A* and *B* are equal DomainMatrix instances.

    Two matrices compare equal when they have the same domain and the same
    internal representation. No domain unification is attempted (see
    :meth:`unify_eq` for that). Returns ``NotImplemented`` for objects of
    an unrelated type so Python can try the reflected comparison.
    """
    if not isinstance(A, type(B)):
        return NotImplemented
    if A.domain != B.domain:
        return False
    return A.rep == B.rep
def unify_eq(A, B):
    """Compare *A* and *B* for equality after unifying their domains.

    Matrices of different shapes are never equal; matrices over different
    domains are first converted to a common domain before comparing.
    """
    if A.shape != B.shape:
        return False
    lhs, rhs = (A, B) if A.domain == B.domain else A.unify(B)
    return lhs == rhs
def lll(A, delta=QQ(3, 4)):
    """
    Perform the Lenstra-Lenstra-Lovasz (LLL) basis reduction algorithm
    on the rows of *A* and return the reduced basis as a DomainMatrix
    over ZZ.

    Parameters
    ==========

    delta : QQ, optional
        The Lovasz parameter. Must be in the interval (0.25, 1), with
        larger values producing a more reduced basis. The default is 0.75
        for historical reasons.

    Raises
    ======

    DMValueError: if delta is not in the range (0.25, 1)
    DMShapeError: if the matrix is not of shape (m, n) with m <= n
    DMDomainError: if the matrix domain is not ZZ
    DMRankError: if the matrix contains linearly dependent rows

    See also
    ========

    lll_transform

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Lenstra%E2%80%93Lenstra%E2%80%93Lov%C3%A1sz_lattice_basis_reduction_algorithm
    .. [2] Murray R. Bremner, "Lattice Basis Reduction: An Introduction to the LLL Algorithm and Its Applications"
    """
    reduced_rep = A.rep.lll(delta=delta)
    return DomainMatrix.from_rep(reduced_rep)
def lll_transform(A, delta=QQ(3, 4)):
    """
    Perform LLL basis reduction and also return the transformation matrix.

    Parameters, algorithm and basis are the same as for :meth:`lll` except
    that the return value is a pair ``(B, T)`` where ``B`` is the reduced
    basis and ``T`` the transformation matrix with ``T*A == B``. If only
    ``B`` is needed, :meth:`lll` is slightly faster.

    See also
    ========

    lll
    """
    basis_rep, transform_rep = A.rep.lll_transform(delta=delta)
    basis = DomainMatrix.from_rep(basis_rep)
    transform = DomainMatrix.from_rep(transform_rep)
    return basis, transform
def _collect_factors(factors_list):
    """
    Collect repeating factors and sort.

    >>> from sympy.polys.matrices.domainmatrix import _collect_factors
    >>> _collect_factors([([1, 2], 2), ([1, 4], 3), ([1, 2], 5)])
    [([1, 4], 3), ([1, 2], 7)]
    """
    # Sum multiplicities of identical factors, keyed by an immutable copy
    # of the coefficient list.
    totals = {}
    for factor, exponent in factors_list:
        key = tuple(factor)
        totals[key] = totals.get(key, 0) + exponent
    merged = [(list(key), exp) for key, exp in totals.items()]
    return _sort_factors(merged)
|
13,780 | f5575a3ffb30d538355df4c8c6139eada5868a1e | from pymagnitude import *
import fetch
from fetch import db, Article
from tfidf import DFTable
import os
from annoy import AnnoyIndex
import logging
import nltk
from nltk import sent_tokenize
from nltk.tokenize import word_tokenize
import re
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
import numpy
import random
class Seacher:
    """Semantic article search.

    Sentences are embedded as TF-IDF-weighted averages of fastText word
    vectors (via pymagnitude) and indexed with Annoy for approximate
    nearest-neighbour retrieval over articles stored in the database.

    NOTE(review): the class name keeps its original (misspelt) form because
    external callers may reference it.
    """

    def __init__(self, df_path='df.npy', annoy_path='index.ann', vec_model_path='wiki-news-300d-1M-subword.magnitude'):
        """Load (or build on first use) the document-frequency table and the
        Annoy index, and open the word-vector model."""
        logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
        self.stop_words = stopwords.words('english')
        self.porter = PorterStemmer()
        self.dftable = DFTable()
        self.annoy = AnnoyIndex(300)  # 300-dim vectors, matching the fastText model
        self.vec_model = Magnitude(vec_model_path)
        if not os.path.isfile(df_path):
            logging.info("first use, building tf-idf table")
            self._build_tfidf()
            self.dftable.save(df_path)
        # FIX: the original assigned this twice on consecutive lines; one
        # reload of the persisted table is enough.
        self.dftable = DFTable(df_path)
        if not os.path.isfile(annoy_path):
            logging.info("for the first use, build annoy index")
            self._build_annoy()
            self.annoy.save(annoy_path)
        # Re-create the index object and memory-map the saved index file.
        self.annoy = AnnoyIndex(300)
        self.annoy.load(annoy_path)

    def _tokenize_and_normalize(self, text):
        """Tokenize *text*, drop non-alphabetic tokens and stopwords, and
        Porter-stem the remainder."""
        tokens = word_tokenize(text)
        words = list(filter(lambda word: word.isalpha() and word not in self.stop_words, tokens))
        stemmed = [self.porter.stem(word) for word in words]
        return stemmed

    def _get_sentence_vec(self, sentence):
        """Return the IDF-weighted mean word vector for *sentence*.

        NOTE(review): if *sentence* normalizes to zero tokens the mean of an
        empty array is NaN -- confirm callers never pass such input.
        """
        words = self._tokenize_and_normalize(sentence)
        _vec = self.vec_model.query(words)
        _weight = [self.dftable.getDF(word) for word in words]
        for i in range(len(words)):
            # Smoothed IDF: log(N / (1 + df)).
            _vec[i] *= numpy.log(self.dftable.doc_count / (1 + _weight[i]))
        return numpy.mean(_vec, axis=0)

    def _build_tfidf(self):
        """Scan every article once to accumulate document frequencies."""
        with db.atomic():
            for article in Article.select():
                self.dftable.scanArticle(self._tokenize_and_normalize(article.content))

    def _build_annoy(self):
        """Embed every article (title + content + themes + nearby station)
        and build the Annoy index with 100 trees."""
        with db.atomic():
            for article in Article.select():
                _vecs = self._get_sentence_vec("{}\n{}\n{}\n{}".format(article.title, article.content, article.themes, article.nearby_station))
                self.annoy.add_item(article.id, _vecs)
        self.annoy.build(100)

    def search(self, text, step=0.02, limit=0.9, cascading=True):
        """Return the ids of the nearest articles to *text*.

        Fetches the 50 approximate nearest neighbours, then returns the
        smallest non-empty set within an expanding distance threshold
        (multiples of *step* up to *limit*), or None when nothing matches.

        NOTE(review): ``cascading`` is accepted but currently unused.
        """
        question_vec = self._get_sentence_vec(text)
        ids, ds = self.annoy.get_nns_by_vector(question_vec, 50, include_distances=True)
        ids = numpy.array(ids)
        ds = numpy.array(ds)
        for i in range(int(limit/step)+1):
            choices = ids[ds < (i * step)]
            if len(choices) > 0:
                return list(choices)
        return None

    def searchArticle(self, text):
        """Return a list of {'content', 'url'} dicts for articles matching
        *text*, or None when the search found nothing."""
        ids = self.search(text)
        if ids:
            _list = [{'content': "{} is locate at {}, near {} station. {}".format(article.title, article.address, article.nearby_station, article.content.replace("\n", ' '), ), 'url': article.url} for article in Article.select().where(Article.id.in_(ids))]
            return _list
        else:
            return None
if __name__ == '__main__':
    # Smoke test: run three sample queries and print one random matching
    # article for each. Raises TypeError if a query returns no results
    # (searchArticle returns None in that case).
    finder = Seacher()
    print('>> Where is the best seafood restaurant?')
    print(random.choice(finder.searchArticle('Where is the best seafood restaurant?')))
    print('>> Where is the best restaurant?')
    print(random.choice(finder.searchArticle('Where is the best restaurant?')))
    print('>> Where is a park I can take some rest?')
    print(random.choice(finder.searchArticle('Where is a park I can take some rest?')))
|
13,781 | 5333761a37790598914d832495f963ef0c562ec1 | AdminApp.install('/work/app/sample.war', '[ -nopreCompileJSPs -distributeApp -nouseMetaDataFromBinary -nodeployejb -appname sample -createMBeansForResources -noreloadEnabled -nodeployws -validateinstall warn -noprocessEmbeddedConfig -filepermission .*\.dll=755#.*\.so=755#.*\.a=755#.*\.sl=755 -noallowDispatchRemoteInclude -noallowServiceRemoteInclude -asyncRequestDispatchType DISABLED -nouseAutoLink -noenableClientModule -clientMode isolated -novalidateSchema -contextroot /sample -MapModulesToServers [[ "Hello, World Application" sample.war,WEB-INF/web.xml WebSphere:cell=DefaultCell01,node=DefaultNode01,server=server1 ]] -MapWebModToVH [[ "Hello, World Application" sample.war,WEB-INF/web.xml default_host ]] -CtxRootForWebMod [[ "Hello, World Application" sample.war,WEB-INF/web.xml /sample ]]]' )
# Persist the deployment performed by the AdminApp.install call above to the
# WebSphere configuration repository (wsadmin/Jython scripting object).
AdminConfig.save()
|
13,782 | de864e095a88d1b8aaa7338e212f919064cf070b | import logging
import os
from . import questions
def get_datasets(prob, questions_dir, batch_size, cache_dir=None, cache_mem=False):
    """Build a batched question dataset for every entry of ``prob``.

    Args:
        prob: mapping from a dataset key to a problem spec ``p`` understood
            by ``questions.individual.get_dataset`` (project module).
        questions_dir: directory the individual questions are read from.
        batch_size: number of questions per batch.
        cache_dir: when given, each dataset is cached on disk under
            ``cache_dir/<key>``.
        cache_mem: when True, the batched dataset is additionally cached
            in memory.

    Returns:
        dict mapping each key of ``prob`` to its batched dataset.
    """
    def get_question_batches_dataset(p, k):
        # Build, optionally cache, batch and optionally preload one dataset.
        q = questions.individual.get_dataset(questions_dir, p)
        if cache_dir is not None:
            cache_path = os.path.join(cache_dir, k)
            os.makedirs(os.path.dirname(cache_path), exist_ok=True)
            logging.info('Caching into: %s', cache_path)
            q = q.cache(cache_path)
        batches = questions.batch.batch(q, batch_size)
        if cache_mem:
            batches = batches.cache()
        # Only preload when some form of caching will retain the data.
        if cache_dir is not None or cache_mem:
            questions.batch.preload(batches, k)
        return batches
    return {k: get_question_batches_dataset(p, k) for k, p in prob.items()}
13,783 | 607ca90d292373cdb7ba13f1ae508f4d01b38490 | import redis # 导入redis模块,通过python操作redis 也可以直接在redis主机的服务端操作缓存数据库
r = redis.Redis(host='localhost', port=6379, db=0)  # host is the redis host; both the redis server and client must be running. redis's default port is 6379
# r.set('tasklist', 'junxi')  # key is "foo", value is "bar" -- store the key/value pair in the redis cache
# print(r['tasklist'])
# print(r.get('tasklist'))  # fetch the value stored under the key
# print(type(r.get('tasklist')))
# print(r.keys())
print(r.dbsize())
# hash (dict)
# r.hset('dict', 'url3', 'wwww123')
# print(r.hgetall('dict'))
# list
r.lpush('list', "{url:123555}")
r.lpush('list', "{url:1235554}")
print(r.lrange('list', 0, -1))
13,784 | 9255b313fef951168b7bc511372b9a53d7bdadd6 |
# Python 2 script: bare ``print`` statements.
print 100
print "the next literal is actually -2**30 (weird quirk alert!)"
# NOTE(review): in standard CPython 1073741824 == 2**30, a positive int;
# the message above presumably refers to an implementation with 32-bit
# wrapping integer literals -- confirm the target interpreter.
print 1073741824
13,785 | 3b37da634881b96aa4c9baec303d2779fa0a0711 | '''
Created on 2019年7月5日
@author: geqiuli
'''
import sys
import os
import importlib
import inspect
import re
from public import mysql_opr
from public import files
import time
#import platform
#print('当前操作系统:',platform.platform())
# Collection timestamp in milliseconds; stamped onto every record written
# during this run (update_version column).
current_time=int(time.time()*1000)
print(current_time)
def get_funcs(module, package):
    """Import *module* (relative to *package*) and return its public attributes.

    The original looped over the attributes but never collected or returned
    them, so it always produced None; it now returns a dict mapping each
    non-dunder attribute name to the attribute object (backward compatible:
    no previous caller could use the old None result).

    @param module: module name, absolute or relative (e.g. '.mod')
    @param package: anchor package for relative imports, or None
    """
    mod = importlib.import_module(module, package)
    return {attr: getattr(mod, attr)
            for attr in dir(mod) if not attr.startswith("__")}
def get_packages(repodir,projname,package,coll_id,py_project,platform,depth=1,conn=None):
    '''
    Scan ``package`` under ``repodir/projname`` for sub-packages and, when a
    DB connection is given, register each one in auto_ui_businessmodule
    using the package's ``__init__`` docstring as its display name.

    @param depth: directory depth under ``package``; currently supports 0, 1, 2
    '''
    # NOTE(review): SQL below is built with str.format -- vulnerable to SQL
    # injection if package names are not trusted; consider parameterized
    # queries.
    except_dir=['__pycache__','Archive','.pytest_cache']
    except_file=['__init__.py']
    pack_path=os.sep.join([repodir,projname,package])
    print('pack_path: ',pack_path)
    all_dirs = {}   # nested dict mirroring the directory tree
    all_pack = {}   # dotted package name -> __init__ docstring (or None)
    def get_sub_dirs(dir_path,pack_name,sub_dir,depth):
        # Recursive walk: at depth 0 read the package __init__ docstring and
        # register the package; at depth > 0 recurse into sub-directories.
        print('sub_dir: ',sub_dir)
        sub_list = os.listdir(dir_path)
        print('sub_list: ',sub_list)
        for item in sub_list:
            #print('item: ',item)
            item_path=os.sep.join([dir_path,item])
            if depth==0:
                if os.path.isfile(item_path) and item=='__init__.py':
                    mname, ext = os.path.splitext(item)
                    module = "." + mname
                    #print('type(module): ',type(module))
                    module = importlib.import_module(module, pack_name)
                    #print('inspect.getfile(module):',inspect.getfile(module))
                    #print('module.__name__: ',module.__name__)
                    # Collapse the docstring to one line; may be None.
                    if module.__doc__:
                        all_pack[pack_name]=module.__doc__.replace('\n','')
                    else:
                        all_pack[pack_name]=module.__doc__
                    if conn:
                        # Insert only if the package is not yet registered.
                        sql1='''select * from auto_ui_businessmodule where py_package='{}'
                        and py_project={}'''.format(pack_name,py_project)
                        res1=mysql_opr.select_from_mysql(conn, sql1)
                        if res1['code']==0 and len(res1['data'])>0:
                            print('已经存在:',pack_name)
                        else:
                            print('新增子包:',pack_name)
                            sql2='''INSERT INTO auto_ui_businessmodule(name,platform,collection,
                            sub_package,py_package,py_project,
                            run_env,ui_sys)
                            VALUES('{0}','{1}','{2}','{3}','{4}','{5}','{6}','{7}')
                            '''.format(all_pack[pack_name],platform,coll_id,
                                       1,pack_name,py_project,
                                       0,1)
                            mysql_opr.query_mysql2(conn,sql2)
            if depth>0:
                if os.path.isdir(item_path) and item not in except_dir:
                    sub_dir[item]={}
                    #print('dirs: ',sub_dir[item])
                    pack_name1 = ('.').join([pack_name,item])
                    get_sub_dirs(item_path,pack_name1,sub_dir[item],depth-1)
    get_sub_dirs(pack_path,package,all_dirs,depth)
    print('------- all_dirs: -----')
    print(all_dirs)
    print(all_pack)
    return all_dirs
def get_moudles(repodir,projname,package,depth=0):
    '''
    Collect test files (``test_*.py``) under ``repodir/projname/package``
    into a nested {'files': [...], 'dirs': {...}} structure.

    NOTE(review): the name keeps its original spelling ("moudles") because
    external callers may reference it.

    @param depth: directory depth under package; currently supports 0, 1, 2'''
    except_dir=['__pycache__','Archive','.pytest_cache']
    except_file=['__init__.py']
    pack_path=os.sep.join([repodir,projname,package])
    print('pack_path: ',pack_path)
    all_files = {'files':[],'dirs':{}}
    #sub_list = os.listdir(pack_path)
    def get_sub_dirs_and_files(dir_path,sub_obj,depth):
        # Recursive walk; recursion into sub-directories happens only while
        # depth > 0. The file-collection branch is duplicated for both
        # depth cases (same logic either way).
        print('sub_obj: ',sub_obj)
        sub_list = os.listdir(dir_path)
        for item in sub_list:
            #print('-'*depth+'item: ',item)
            #item_path=os.sep.join([repodir,projname,package,item])
            item_path=os.sep.join([dir_path,item])
            if depth==0:
                if os.path.isfile(item_path) and item not in except_file:
                    print('file path: ',item_path)
                    filename=item.lower()
                    if filename.startswith("test_"):
                        print('test file')
                        sub_obj['files'].append(item_path)
            if depth>0:
                if os.path.isfile(item_path) and item not in except_file:
                    print('file path: ',item_path)
                    filename=item.lower()
                    if filename.startswith("test_"):
                        print('test file')
                        sub_obj['files'].append(item_path)
                elif os.path.isdir(item_path) and item not in except_dir:
                    sub_obj['dirs'][item]={'files':[],'dirs':{}}
                    #sub_list1 = os.listdir(item_path)
                    get_sub_dirs_and_files(item_path,sub_obj['dirs'][item],depth-1)
    get_sub_dirs_and_files(pack_path,all_files,depth)
    print('------- all_files: -----')
    print(all_files)
    return all_files
def import_source(module_name, module_file_path):
    """Load a Python module from an explicit file path and return it.

    Fixes the original, which called ``module_name.name`` on what the rest
    of this file passes as a plain string module name -- a guaranteed
    AttributeError. Also returns the loaded module (the original returned
    None and only printed ``dir(module)``).

    @param module_name: name to register the module under (string)
    @param module_file_path: filesystem path to the ``.py`` file
    """
    import importlib.util  # local import: module-level code only imports importlib
    module_spec = importlib.util.spec_from_file_location(module_name, module_file_path)
    module = importlib.util.module_from_spec(module_spec)
    module_spec.loader.exec_module(module)
    return module
def get_module_testcase(package,pyfile,run_env,platform,coll_id,conn=None,module_id=None):
    """Import test module ``pyfile`` from ``package``, inspect every
    ``test*`` function (pytest marks, tapd ids, skip reasons, assert count)
    and upsert one row per test into auto_ui_testcase when ``conn`` is given.

    NOTE(review): SQL below is assembled with str.format -- vulnerable to
    SQL injection if docstrings/names contain quotes beyond the single
    ``'`` -> ``"`` replacement; consider parameterized queries.
    NOTE(review): ``func.__doc__.replace(...)`` raises AttributeError for a
    test function without a docstring -- confirm all tests are documented.
    """
    #filename=pyfile.lower()
    mname, ext = os.path.splitext(pyfile)
    module="." + mname
    #print('type(module): ',type(module))
    module = importlib.import_module(module, package)
    #testcases=[]
    #print('module.__dict__: ',module.__dict__)
    #print('inspect.getfile(module):',inspect.getfile(module))
    #print('pyfile: ',pyfile)
    print('module.__name__: ',module.__name__)
    # Repo-relative path of the module, e.g. prdenv_case/b2b_pc/test_x.py
    file_path=module.__name__.replace('.','/')+'.py'
    #print('file_path: ',file_path)
    #print('module: ',module.__dict__)
    #print('module: ',help(module))
    #file_path=package.replace('.',os.sep)+os.sep+module.__name__
    #print('sourcecode:')
    #print(inspect.getsource(module))
    print('dir(module): ',dir(module))
    for attr in dir(module):
        if attr.startswith("test"):
            func = getattr(module, attr)
            print(' testcase:',func.__name__)
            pytestmark=[]
            py_skip_reason=''
            tapd_id=''
            tapd_proj=''
            cart_order_oprs=1
            if 'pytestmark' in func.__dict__:
                #print(' pytestmarks:',func.__dict__['pytestmark'])
                #print(' ',func.__dict__['pytestmark'])
                for item in func.__dict__['pytestmark']:
                    # 'tapd' marks carry the issue id ("proj_id" or "id");
                    # every other mark name is recorded verbatim.
                    if item.__dict__['name']=='tapd':
                        if item.__dict__['args']:
                            tapd_str=str(item.__dict__['args'][0]).split('_')
                            if len(tapd_str)>1:
                                tapd_proj=tapd_str[0]
                                tapd_id=tapd_str[1]
                            else:
                                tapd_id=tapd_str[0]
                    else:
                        pytestmark.append(item.__dict__['name'])
                    if item.__dict__['name']=='skip':
                        #print(' py_skip_reason:',item.__dict__)
                        if item.__dict__['args']:
                            py_skip_reason=item.__dict__['args'][0]
                    # Marks containing '_nocart' flag tests with no
                    # cart/order operations.
                    if '_nocart' in item.__dict__['name']:
                        cart_order_oprs=0
            #print(' ',func.__doc__)
            #testcases.append(func.__name__)
            py_name=file_path+" ::"+func.__name__
            print(' func content:')
            #print(inspect.getsource(func))
            # Rough assert count: occurrences of 'assert ' in the source.
            assert_search=re.findall('assert ',inspect.getsource(func))
            assert_num=len(assert_search)
            print(' func assert_num:',assert_num)
            if conn:
                print('------------------------------')
                if module_id:
                    business_module=module_id
                else:
                    business_module=0
                # Upsert: update the row when (py_name, collection) exists,
                # otherwise insert a fresh one.
                sql0="""SELECT id FROM auto_ui_testcase
                WHERE py_name='{0}'
                AND collection={1}""".format(py_name,coll_id)
                res0=mysql_opr.select_from_mysql(conn, sql0, total=0, close=False)
                if res0['code']==0 and len(res0['data'])>0:
                    print('已有此用例,更新:',py_name)
                    sql1='''UPDATE auto_ui_testcase SET py_desc='{0}',cart_order_oprs='{1}',
                    py_marks='{2}',run_env='{3}',platform='{4}',py_skip_reason='{5}',
                    business_module='{6}',tapd_id='{7}',tapd_proj='{8}',update_version='{9}',
                    assert_num={12}
                    WHERE py_name='{10}' AND collection={11}
                    '''.format(func.__doc__.replace('\'','"'), cart_order_oprs,
                               ';'.join(pytestmark), run_env, platform, py_skip_reason,
                               business_module, tapd_id, tapd_proj, current_time,
                               py_name,coll_id,assert_num)
                    mysql_opr.query_mysql2(conn,sql1)
                else:
                    print('无此用例,新增:',py_name)
                    sql2='''INSERT INTO auto_ui_testcase(py_name,py_desc,py_module,cart_order_oprs,
                    py_marks,py_file,run_env,platform,collection,py_skip_reason,
                    business_module,tapd_id,tapd_proj,update_version,assert_num)
                    VALUES('{0}','{1}','{2}','{3}','{4}','{5}','{6}',{7},'{8}','{9}','{10}','{11}','{12}','{13}','{14}')
                    '''.format(py_name, func.__doc__.replace('\'','"'), module.__name__, cart_order_oprs,
                               ';'.join(pytestmark), file_path, run_env,
                               platform, coll_id,py_skip_reason,
                               business_module, tapd_id, tapd_proj, current_time,assert_num)
                    #print('sql2: ',sql2)
                    mysql_opr.query_mysql2(conn,sql2)
def get_pyfiles_in_packages(root_dir,package,run_env,platform,coll_id,conn=None,module_id=None,find_sub=True):
    '''
    Walk ``package`` under ``root_dir`` and register every ``test_*.py``
    file's test cases via :func:`get_module_testcase`. When ``find_sub`` is
    True, immediate sub-directories are scanned one level deep as well.

    @param root_dir: eg. E:\\test-autotest\\AutoTest
    @param package: eg. prdenv_case.b2b_pc
    '''
    except_dir=['__pycache__','Archive','archive','Archived','archived','.pytest_cache']
    except_file=['__init__.py']
    root_dir=root_dir.replace('/','\\') #windows
    #root_dir=root_dir.replace('/',os.sep) # portable variant
    print('package:',package)
    path_items=[root_dir]+package.split('.')
    pack_path=os.sep.join(path_items)
    print('pack_path: ',pack_path)
    all_files = []     # unused accumulators kept for compatibility
    all_modules = []
    sub_list = os.listdir(pack_path)
    def sub_list_cases(sub_list,package,pack_path,find_sub):
        # Register every test file in this directory; recurse once into
        # sub-directories when find_sub is True (recursion passes False).
        print('sub_list: ',sub_list)
        for item in sub_list:
            #print('-'*depth+'item: ',item)
            item_path=os.sep.join([pack_path,item])
            print('item_path: ',item_path)
            if os.path.isfile(item_path) and item not in except_file:
                #print('file path: ',item_path)
                filename=item.lower()
                if filename.startswith("test_"):
                    print('----------------')
                    print('filename: ',filename)
                    #all_files.append(filename)
                    get_module_testcase(package,item,run_env,platform,coll_id,conn=conn,module_id=module_id)
            elif os.path.isdir(item_path) and item not in except_dir:
                if find_sub==True:
                    sub_list1 = os.listdir(item_path)
                    sub_package = package+'.'+item
                    sub_list_cases(sub_list1,sub_package,item_path,False)
    sub_list_cases(sub_list,package,pack_path,find_sub)
def store_all_testcase_to_db(proj='AutoTest'):
    '''Collect every test_*.py case under the project's configured packages
    and persist the case metadata into the qateam MySQL database.

    @param proj: project name as stored in auto_ui_project.name.
    NOTE(review): depends on module-level names `files`, `mysql_opr` and
    `current_time` defined elsewhere in this file -- confirm before reuse.
    '''
    datainfo=files.read_json('yyw-0345', 'mysql')
    conn=mysql_opr.get_connection(serverip = datainfo['host'],
                                  port = int(datainfo['port']),
                                  account = datainfo['user'],
                                  password = datainfo['password'],
                                  db_name='qateam')
    # Stamp this collection run with a version row first.
    sql0="insert into auto_ui_case_updateversion(update_version,proj_name) VALUES('{0}','{1}')".format(current_time,proj)
    mysql_opr.query_mysql2(conn,sql0)
    # All collections of the project, joined with the project root directory.
    sql1="""SELECT * FROM
    (SELECT a.id,a.py_package,a.run_env,a.platform,a.sub_package,c.root_dir,c.name from auto_ui_collection a
    LEFT JOIN auto_ui_project c ON a.py_project=c.id) b
    WHERE b.name='{}'""".format(proj)
    package_list=mysql_opr.select_from_mysql(conn, sql1, 0)['data']
    for pk in package_list:
        if pk['sub_package']==0:
            # Flat collection: scan the package itself (one level deep).
            #get_pyfiles_in_packages('E:\\test-autotest\\AutoTest','prdenv_case.b2b_pc',conn)
            get_pyfiles_in_packages(pk['root_dir'],pk['py_package'],pk['run_env'],pk['platform'],pk['id'],conn=conn,find_sub=True)
        else:
            # Collection split into business modules: scan each module's
            # package without descending further.
            sql2="""SELECT * FROM
            (SELECT a.id,a.run_env,a.platform,a.sub_package,c.root_dir,c.name,d.py_package,d.id AS moduleid
            FROM auto_ui_collection a
            LEFT JOIN auto_ui_project c ON a.py_project=c.id
            LEFT JOIN auto_ui_businessmodule d ON a.id=d.collection
            ) b
            WHERE b.name='{}'
            AND b.sub_package=1""".format(proj)
            package_list1=mysql_opr.select_from_mysql(conn, sql2, 0)['data']
            print('package_list1: ',package_list1)
            for sub_pk in package_list1:
                get_pyfiles_in_packages(sub_pk['root_dir'],sub_pk['py_package'],
                                        sub_pk['run_env'],sub_pk['platform'],sub_pk['id'],
                                        conn=conn,module_id=sub_pk['moduleid'],find_sub=False)
import pytest
@pytest.mark.collect_test
def test_update_testcases():
    '''Pytest hook so the case-inventory refresh can be run as a test.'''
    store_all_testcase_to_db(proj='AutoTest')
if __name__ == '__main__':
    pass
    # Refresh the whole case inventory for the AutoTest project.
    store_all_testcase_to_db(proj='AutoTest')
    # Earlier manual invocations kept for reference:
    #get_pyfiles_in_packages('E:/test-androidapp/yaoex_app_auto_android','prdenv_case',2,1,3,conn=None,module_id=None,find_sub=True)
    #get_module_testcase('prdenv_case.b2b_pc','test_check_order_invoice_info.py',1,1,1,conn=None)
    # datainfo=files.read_json('yyw-0345', 'mysql')
    # conn=mysql_opr.get_connection(serverip = datainfo['host'],
    #                               port = int(datainfo['port']),
    #                               account = datainfo['user'],
    #                               password = datainfo['password'],
    #                               db_name='qateam')
    # get_packages('E:\\test-androidapp','yaoex_app_auto_android','regressioncase',
    #              coll_id=9,py_project=3,platform=1,depth=1,conn=conn)
|
13,786 | 7e4a05feebf05632d593d10e5802165d591ad133 | # -*- coding: utf-8 -*-
'''
deal列表
'''
from lxml import etree
import time
import random
from selenium.webdriver.chrome.options import Options
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import json
import redis
class Matchlist(object):
    """Scrapes Amazon deal-listing pages with Selenium and queues every
    discovered product URL into Redis (and a local text file)."""

    def __init__(self):
        self.timeout = 15
        self.chrome_options = Options()
        # Headless / UA / proxy options kept for reference:
        # self.chrome_options.add_argument('--headless')
        # self.chrome_options.add_argument('--disable-gpu')
        # self.chrome_options.add_argument('--no-sandbox')
        # self.chrome_options.add_argument('user-agent=' + self.ua)  # your own UA
        # self.chrome_options.add_argument('--proxy-server=http://%s' % self.proxy)  # company proxy IP
        self.driver = webdriver.Chrome(executable_path=r'chromedriver', chrome_options=self.chrome_options)
        # self.driver.binary_location = r'/usr/bin/google-chrome'
        # BUG FIX: the original assigned the client to a local name via the
        # non-existent attribute `self.redis`; the queue helpers below read
        # `self.conn`, so store the client there.
        # NOTE(review): `self.settings` is never defined in this class --
        # REDIS_HOST / REDIS_PARAMS must be injected before construction;
        # TODO confirm where settings come from.
        self.conn = redis.StrictRedis(host=self.settings['REDIS_HOST'], port=6379, decode_responses=True,
                                      password=self.settings['REDIS_PARAMS']['password'])

    def set_tiktok(self, value):
        """Push `value` onto the tiktok queue."""
        self.conn.rpush(self.settings['REDIS_KEYS']['tiktok'], value)

    def set_tiktok_video(self, value):
        """Push `value` onto the tiktok_video queue."""
        self.conn.rpush(self.settings['REDIS_KEYS']['tiktok_video'], value)

    def set_url_detail(self, value):
        """Push a product-detail URL payload onto the url_detail queue."""
        self.conn.rpush(self.settings['REDIS_KEYS']['url_detail'], value)

    def page_spider(self):
        """Extract product links from the current page and enqueue them."""
        pag = self.driver.page_source
        tree = etree.HTML(pag)
        body = tree.xpath('//div[@class="a-row"]//a[@class="a-link-normal"]/@href')
        print(body)
        if len(body) > 0:
            for href in body:
                if href and '/dp/' in href:
                    # Strip the tracking suffix (everything from '/ref=').
                    str_end = href.find('/ref=')
                    # BUG FIX: find() returns -1 when absent; the original
                    # truthiness test then sliced off the last character.
                    if str_end != -1:
                        href = href[0:str_end]
                    # BUG FIX: `red` was undefined -- enqueue via self.
                    self.set_url_detail(json.dumps({'url': href}))
                    # Also persist to a local file.
                    with open('1.text', 'a', encoding='utf8')as f:
                        f.write(href.strip() + '\n')

    def startone(self):
        '''Entry point: walk deal pages 1-9, scraping each one.'''
        try:
            for i in range(1, 10):
                print('第%s页>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>' % i)
                url = f'https://www.amazon.com/b/ref=gbps_ftr_m-9_475e_page_{i}?node=15529609011&pf_rd_r=96J4CJ1CECKQ3JZVTY1N&pf_rd_p=5d86def2-ec10-4364-9008-8fbccf30475e&gb_f_deals1=dealStates:AVAILABLE%252CWAITLIST%252CWAITLISTFULL%252CEXPIRED%252CSOLDOUT%252CUPCOMING,page:{i},sortOrder:BY_SCORE,MARKETING_ID:ship_export,dealsPerPage:40&pf_rd_s=merchandised-search-9&pf_rd_t=101&pf_rd_i=15529609011&pf_rd_m=ATVPDKIKX0DER&ie=UTF8'
                self.driver.get(url)
                self.driver.maximize_window()
                time.sleep(random.uniform(1, 1.5))
                self.page_spider()
        except Exception as e:
            # Best-effort scrape: swallow failures and fall through to cleanup.
            pass
        finally:
            print('''完事.....''')
            self.driver.quit()
            return
if __name__ == '__main__':
    # Script entry point: scrape deal pages 1-9 once, then quit the browser.
    mclist = Matchlist()
    mclist.startone()
|
13,787 | 000aa4c7b9c9ee66062ac5bc9706003c8583067a | import numpy as np
import pandas as pd
from .. import utils
from sklearn.preprocessing import scale
from sklearn.impute import SimpleImputer
class BaseModel():
    """Base class for cross-validated models sharing fold management,
    preprocessing, evaluation bookkeeping and plotting.

    Args:
        X_train, y_train, X_test: pandas DataFrame/Series inputs.
        params_file: path to a yaml hyperparameter file, or None.
        folds_lookup: pd.Series mapping each X_train index label to a
            1-based fold number, an int count of random folds to generate,
            or None (defaults to 5 random folds).
        prefix: string prefix used when naming outputs.
        logger: a logging.Logger instance, a logger name (str), or None
            for the default 'mltools' logger.
    """

    def __init__(self, X_train, y_train, X_test, params_file, folds_lookup,
                 prefix, logger):
        self.X_train = X_train
        self.y_train = y_train
        self.X_test = X_test
        self.params_file = params_file
        self.params = None
        self.evals_out = {}  # for GBMModel and DNN only
        self.cv_grid = {}
        self.cv_results = None
        self.sample_preds = {'pred': {}, 'actual': {}}
        self.prefix = prefix
        # generate default logger if none is passed
        if logger is None:
            self.logger = utils.get_logger('mltools')
        elif isinstance(logger, str):
            self.logger = utils.get_logger(logger)
        else:
            self.logger = logger
        # BUG FIX: the original used two independent `if` statements here, so
        # when folds_lookup was an int the trailing `else` clobbered the
        # freshly generated folds with the raw int (and int has no .astype).
        # A single if/elif/else chain handles all three accepted types.
        if isinstance(folds_lookup, int):
            self.logger.info(f'generating {folds_lookup} random folds...')
            self.folds_lookup = self.generate_folds(folds_lookup)
        elif folds_lookup is None:
            self.logger.info(f'generating 5 random folds...')
            self.folds_lookup = self.generate_folds(5)
        else:
            self.folds_lookup = folds_lookup
        self.folds_lookup = self.folds_lookup.astype(int)
        self.n_folds = len(self.folds_lookup.unique())

    def load_hparams(self):
        '''Load hyperparameters and other settings from the yaml file.'''
        if self.params_file is not None:
            self.params = utils.load_hparams(self.params_file)
        else: print('params_file not set.')

    def generate_folds(self, n_folds=5):
        '''Return a random fold assignment (1-based) aligned to X_train.'''
        n_samples = self.X_train.shape[0]
        folds = []
        for i in range(n_folds):
            folds.extend([i+1] * (n_samples // n_folds))
        # Distribute the remainder one sample per fold (folds 1..remainder).
        folds.extend(range(1, n_samples % n_folds + 1))
        return pd.Series(np.random.permutation(folds), index=self.X_train.index)

    def log_hparams(self):
        '''Send a list of hyperparameters to the logger.'''
        base_params = ''
        for k, v in self.params.items():
            base_params += f' - {k}: {v}\n'
        self.logger.info(f'base params:\n{base_params}')

    def preprocess(self, fillna=True, fill_with='mean', standardize=True,
                   clip_outliers=None, *kargs):
        '''Fill NaN, standardize inputs, and apply min/max clip for outliers.

        Train and test are stacked so imputation/scaling statistics are
        computed over both, then split back apart.
        '''
        self.logger.info('preprocessing inputs:')
        train_idx = self.X_train.index
        test_idx = self.X_test.index
        cols_in = self.X_train.columns
        train_len = self.X_train.shape[0]
        X = np.concatenate([self.X_train.values, self.X_test.values], axis=0)
        if fillna:
            # NOTE(review): `verbose` was removed from SimpleImputer in newer
            # scikit-learn releases -- confirm the pinned version.
            imputer = SimpleImputer(strategy=fill_with, verbose=1)
            self.logger.info('   filling NaN...')
            # +/-inf would poison the imputer statistics; treat as missing.
            X[X == np.inf] = np.nan
            X[X == -np.inf] = np.nan
            X = imputer.fit_transform(X)
        if standardize:
            self.logger.info('   standardizing inputs...')
            X = scale(X)
        if clip_outliers is not None:
            X = np.where(X>clip_outliers, clip_outliers, X)
            X = np.where(X<-clip_outliers, -clip_outliers, X)
        self.X_train = pd.DataFrame(X[:train_len, :], index=train_idx, columns=cols_in)
        self.X_test = pd.DataFrame(X[train_len:, :], index=test_idx, columns=cols_in)
        self.logger.info('   finished.')

    def plot_results(self, filename='evals_plot.png'):
        '''Plot train (blue) vs val (red) curves for each metric and save.'''
        import matplotlib.pyplot as plt
        titles = list(self.evals_out['train'].keys())
        n_plots = len(titles)
        fig = plt.figure(figsize=(n_plots * 4, 3))
        for i, title in enumerate(titles):
            ax = fig.add_subplot(1, n_plots, i + 1)
            plt.plot(self.evals_out['val'][title], color='r')
            plt.plot(self.evals_out['train'][title], color='b')
            plt.title(title);
        plt.savefig(filename)

    def plot_regression_preds(self):
        '''Scatter predicted vs actual per CV fold, with a y=x reference.'''
        import matplotlib.pyplot as plt
        colors = ['r', 'm', 'g', 'c', 'b']
        all_act = []
        figure = plt.figure(figsize=(5, 5))
        for i, k in enumerate(self.sample_preds['pred'].keys()):
            all_act.extend(self.sample_preds['actual'][k])
            plt.scatter(self.sample_preds['actual'][k],
                        self.sample_preds['pred'][k],
                        color=colors[i], alpha=0.5, s=4)
        min_, max_ = min(all_act), max(all_act)
        plt.plot((min_, max_), (min_, max_), color='k')
        plt.xlabel('actual value', fontsize=12)
        plt.ylabel('predicted value', fontsize=12);
        plt.savefig('CV_preds_plot.png')

    def best_eval(self, eval_name, type='max'):
        '''Return (best validation score, 1-based round) for one metric.'''
        eval_list = self.evals_out['val'][eval_name]
        if type == 'max': best_eval = max(eval_list)
        elif type == 'min': best_eval = min(eval_list)
        if 'round' in self.evals_out:
            best_round = self.evals_out['round'][eval_list.index(best_eval)]
        else: best_round = eval_list.index(best_eval) + 1
        return best_eval, best_round

    def best_eval_multi(self, type):
        '''best_eval over every metric in self.metrics.

        NOTE(review): self.metrics is expected to be set by a subclass --
        it is not initialized in this base class.
        '''
        best_out = []
        if self.metrics is not None:
            for metric in self.metrics:
                best_, round_ = self.best_eval(metric, type)
                best_out.append((metric, best_, round_))
        else:
            raise ValueError('self.metrics must not be empty')
        return best_out

    def evals_df(self):
        '''Transform round-by-round metrics from dict to pd.DataFrame with
        columns named train_<metric> / val_<metric>.'''
        parts = []
        for pref in ['train', 'val']:
            metrics = list(self.evals_out[pref].keys())
            # BUG FIX: the original referenced undefined names `tag` (in the
            # rename map) and `dict_in` (as the data source).
            columns = {metric: pref + '_' + metric for metric in metrics}
            df = pd.DataFrame.from_dict(self.evals_out[pref]).rename(columns=columns)
            parts.append(df)
        return pd.concat(parts, axis=1)

    def parse_summ_df(self, df_in):
        '''Args: df_in, pd.DataFrame with summary CV results
        Returns: fixed-width string with metrics for the logger
        '''
        # BUG FIX: the original advanced a throwaway iterator twice, which
        # crashed on single-row frames and measured the *second* row's index.
        # Inspect only the first index entry to count grouping levels.
        first_index = next(df_in.iterrows())[0]
        if isinstance(first_index, tuple):
            n_groups = len(first_index)
        else:
            n_groups = 1
        outstr = 'CV results summary, validation scores:\n'
        header = ''
        for v in df_in.index.names:
            header += f'{v:<14}'
        for v in df_in.columns.values:
            header += f'{v:>12}'
        outstr += header + '\n'
        rows = df_in.iterrows()
        for row in rows:
            rowstr = ''
            if n_groups == 1:
                rowstr += f'{str(row[0]):<14}'
            elif n_groups > 1:
                for v in row[0]:
                    rowstr += f'{str(v):<14}'
            for v in row[1].values:
                rowstr += f'{v:>12.4f}'
            outstr += rowstr + '\n'
        return outstr

    def _get_fold_indices(self, i):
        '''Return (train_idx, test_idx) for 1-based fold number `i`.'''
        fold_idx = []
        for j in range(1, self.n_folds + 1):
            fold_idx = fold_idx + [self.folds_lookup[self.folds_lookup == j].index]
        test_idx = fold_idx.pop(i-1)
        train_idx = []
        for idx in fold_idx:
            train_idx.extend(idx.values)
        return pd.Index(train_idx), test_idx

    def _get_fold_data(self, fold_no):
        '''Return (X_train, y_train, X_val, y_val) for one fold.'''
        train_idx, test_idx = self._get_fold_indices(fold_no)
        X_train = self.X_train.reindex(train_idx)
        X_val = self.X_train.reindex(test_idx)
        y_train = self.y_train.reindex(train_idx)
        y_val = self.y_train.reindex(test_idx)
        return X_train, y_train, X_val, y_val

    def _get_cv_params_grid(self):
        '''Transforms self.cv_grid (dict) into permuted parameter sets
        for the CV routine.
        Returns:
            params_grid: list of permuted param values.  Each item is a
                list of param values, ordered by key (param name).
            keys: list, param names
        '''
        keys = list(self.cv_grid.keys())
        params_grid = []
        for v in self.cv_grid.values():
            if params_grid == []:
                for setting in v:
                    params_grid.append([setting])
            else:
                # Cross each existing combination with the new values.
                params_grid_ = params_grid.copy()
                params_grid = []
                for setting in v:
                    for j in params_grid_:
                        params_grid.append(j + [setting])
        return params_grid, keys
|
13,788 | cd7ba469569110cb2a9a20be23839348b779f0ed | from django.shortcuts import render
from rest_framework import generics
from rest_framework.views import APIView
from django.views.decorators.csrf import csrf_exempt
from rest_framework.decorators import api_view
from rest_framework.response import Response
from django.http import HttpResponse
from demo.models import Funds,Investment
from demo.serializers import FundsSerializer,InvestSerializer
from datetime import datetime
class FundsList(generics.ListCreateAPIView):
    """List all Funds (GET) or create a new Fund (POST)."""
    queryset = Funds.objects.all()
    serializer_class = FundsSerializer
class FundsChange(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single Fund, keyed by the `fund_id`
    URL kwarg."""
    queryset = Funds.objects.all()
    serializer_class = FundsSerializer
    lookup_url_kwarg = 'fund_id'
class InvestList(generics.ListCreateAPIView):
    """List all Investments (GET) or create a new Investment (POST)."""
    queryset = Investment.objects.all()
    serializer_class = InvestSerializer
@csrf_exempt
@api_view(['POST'])
def calculateCurrentValue(request):
    """Return the current value of an investment as a plain-text response.

    Expects POST data with `scheme_code` and `fund_id`.  Finds the fund's
    NAV row dated today, multiplies it by the units purchased for the
    matching investment, and responds with that product -- or 0 when
    either record is missing.
    NOTE(review): a response of 0 is ambiguous between "no data" and a
    genuinely zero value -- confirm callers tolerate this.
    """
    data = request.data
    scheme_code = data['scheme_code']
    date = datetime.today().date()
    units = 0
    current_value = 0
    # Only a NAV row dated *today* qualifies.
    fund_obj = Funds.objects.filter(scheme_code=scheme_code,date=date)
    if fund_obj.exists():
        investObj = Investment.objects.filter(fundId_id=data['fund_id'])
        if investObj.exists():
            units = investObj[0].units_purchased
            current_value = units * fund_obj[0].net_asset_value
    if current_value:
        return HttpResponse(current_value)
    else:
        return HttpResponse(0)
|
13,789 | 273774f2075956e7ddd5f75a6ab1ce7debacff89 | #Palidnrome
# Palindrome check: ignore spaces and letter case, then compare the
# normalized text against its reverse.
word = input("Enter the word")
normalized = word.replace(" ", "").casefold()
if normalized == normalized[::-1]:
    print("Yes, the String is a palindrome")
else:
    print("No, the String is not a palindrome")
|
13,790 | ea16dcdff3aa930a66276dbd0fa990defff9fc9d | import numpy as np
from scipy.ndimage import minimum_filter1d
def cumulative_minimum_energy_map(energyImage, seamDirection):
    """Compute the cumulative minimum energy map used for seam carving.

    Args:
        energyImage: 2-D numpy array of per-pixel energies.
        seamDirection: 'VERTICAL' or 'HORIZONTAL' -- direction of the seam.

    Returns:
        Array of the same shape where each entry holds the minimum total
        energy of any 8-connected seam ending at that pixel.

    Raises:
        ValueError: if seamDirection is neither accepted value (the
            original code fell through to a NameError instead).
    """
    if seamDirection == 'VERTICAL':
        cumulativeMinimumEnergyMap = np.copy(energyImage)
    elif seamDirection == 'HORIZONTAL':
        # Transpose so the DP below always runs top-to-bottom.
        cumulativeMinimumEnergyMap = np.transpose(np.copy(energyImage))
        energyImage = np.transpose(energyImage)
    else:
        raise ValueError("seamDirection must be 'VERTICAL' or 'HORIZONTAL', "
                         "got %r" % (seamDirection,))
    m, n = cumulativeMinimumEnergyMap.shape
    for row in range(1, m):
        prev_row = cumulativeMinimumEnergyMap[row-1]
        # Min over each pixel's 3 connected predecessors, vectorized per row.
        cumulativeMinimumEnergyMap[row] = energyImage[row] + minimum_filter1d(prev_row, 3)
    if seamDirection == 'HORIZONTAL':
        cumulativeMinimumEnergyMap = np.transpose(cumulativeMinimumEnergyMap)
    return cumulativeMinimumEnergyMap
13,791 | ee4c4ed2ef3f55e133e29d619dbe77319b2c4a33 | from django.apps import AppConfig
class StudentableConfig(AppConfig):
    """Django application configuration for the `studentable` app."""
    name = 'studentable'
13,792 | 6cc725f7d0add04a6ef644c2e249a29954112b5a |
# Python Imports
import argparse
import copy
# Torch Imports
import torch
#from torch import LongTensor
#from torchtext.vocab import load_word_vectors
from torch.autograd import Variable
#from torch.nn.utils.rnn import pack_padded_sequence#, pad_packed_sequence
import torch.optim as optim
from torch.utils.data import DataLoader
from torch import cuda, FloatTensor
from torch import nn
from torch.utils.data.sampler import SubsetRandomSampler
import numpy as np
# Our modules
from models import *
from utils import *
############
## CONFIG ##
############
class Config:
    """Bundle of parsed CLI arguments for a training run.

    Copies the terse argparse flags onto descriptive attribute names and
    derives the tensor dtype from the GPU flag.
    """

    # (argparse flag attribute, config attribute) pairs copied verbatim,
    # in the order the attributes should appear on the instance.
    _ARG_MAP = [
        ('e', 'epochs'),
        ('bs', 'batch_size'),
        ('lr', 'lr'),
        ('nt', 'nt'),
        ('nv', 'nv'),
        ('pe', 'print_every'),
        ('hs', 'hidden_size'),
        ('feats', 'feats'),
        ('labels', 'labels'),
        ('length', 'max_length'),
    ]

    def __init__(self, args):
        for src, dest in self._ARG_MAP:
            setattr(self, dest, getattr(args, src))
        #self.eval_every = args.ee
        self.use_gpu = args.gpu
        self.dtype = cuda.FloatTensor if self.use_gpu else FloatTensor
        self.num_classes = 2
        self.finetuning_lr = args.ftlr
        self.finetuning_epochs = args.fte

    def __str__(self):
        lines = ["{} : {}".format(name, str(value))
                 for name, value in vars(self).items()]
        return "--- Config --- \n" + "\n".join(lines) + "\n"
def parseConfig(description="Default Model Description"):
    """Build the CLI parser and return the parsed arguments namespace.

    Option order, help strings and defaults match the historical parser
    exactly; they are only expressed table-driven here.
    """
    parser = argparse.ArgumentParser(description=description)
    typed_options = [
        ('--feats', str, 'input features path', "../data/features/extracted_features_0.1_0.05.json"),
        ('--labels', str, 'input labels', "../data/features/labels_0.1_0.05.json"),
        ('--length', int, 'length of sequence', 300),
        ('--bs', int, 'batch size for training', 20),
        ('--e', int, 'number of epochs', 10),
        ('--nt', int, 'number of training examples', 3000),
        ('--nv', int, 'number of validation examples', None),
        ('--hs', int, 'hidden size', 100),
        ('--lr', float, 'learning rate', 1e-3),
    ]
    for flag, kind, msg, dflt in typed_options:
        parser.add_argument(flag, type=kind, help=msg, default=dflt)
    parser.add_argument('--gpu', action='store_true', help='use gpu', default=False)
    int_options = [
        ('--pe', 'print frequency', None),
        ('--ee', 'eval frequency', None),
        ('--fte', 'number of finetuning epochs', 5),
        ('--ftlr', 'finetuning learning rate', 2e-3),
    ]
    for flag, msg, dflt in int_options:
        parser.add_argument(flag, type=int, help=msg, default=dflt)
    return parser.parse_args()
############
# TRAINING #
############
def train(model, loss_fn, optimizer, num_epochs = 1, logger = None, hold_out = -1):
    """Train `model` on model.config.train_loader, evaluating every epoch.

    Keeps a deep copy of the model at its best validation accuracy and
    returns that copy.  `logger` and `hold_out` are forwarded to the final
    check_accuracy calls for per-example result logging.
    NOTE(review): written against the pre-0.4 PyTorch API (Variable,
    loss.data[0]) -- will not run unmodified on modern torch.
    """
    best_model = None
    best_val_acc = 0
    for epoch in range(num_epochs):
        print('Starting epoch %d / %d' % (epoch + 1, num_epochs))
        model.train()
        loss_total = 0
        for t, (x, y, _) in enumerate(model.config.train_loader):
            x_var = Variable(x)
            y_var = Variable(y.type(model.config.dtype).long())
            scores = model(x_var)
            loss = loss_fn(scores, y_var)
            loss_total += loss.data[0]
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if ((t+1) % 10) == 0:
                # Every 10 batches, report running loss plus a crude
                # gradient-magnitude probe (sum of |grad sums|).
                grad_magnitude = [(x.grad.data.sum(), torch.numel(x.grad.data)) for x in model.parameters() if x.grad.data.sum() != 0.0]
                grad_magnitude = sum([abs(x[0]) for x in grad_magnitude]) #/ sum([x[1] for x in grad_magnitude])
                print('t = %d, avg_loss = %.4f, grad_mag = %.2f' % (t + 1, loss_total / (t+1), grad_magnitude))
        print("--- Evaluating ---")
        check_accuracy(model, model.config.train_loader, type = "train")
        val_acc = check_accuracy(model, model.config.val_loader, type = "val")
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            best_model = copy.deepcopy(model)
        print("\n")
    print("\n--- Final Evaluation ---")
    check_accuracy(model, model.config.train_loader, type = "train", logger = logger, hold_out = hold_out)
    check_accuracy(model, model.config.val_loader, type = "val", logger = logger, hold_out = hold_out)
    # Return model with best validation accuracy
    return best_model
    #check_accuracy(model, model.config.test_loader, type = "test")
def check_accuracy(model, loader, type="", logger = None, hold_out = -1):
    """Evaluate classification accuracy of `model` over `loader`.

    When `logger` is truthy, writes one CSV row per example:
    set-name, example key, label, prediction, hold_out id.
    Returns the accuracy as a float in [0, 1].
    """
    print("Checking accuracy on {} set".format(type))
    num_correct = 0
    num_samples = 0
    examples, all_labels, all_predicted = [], [], []
    model.eval() # Put the model in test mode (the opposite of model.train(), essentially)
    for t, (x, y, keys) in enumerate(loader):
        x_var = Variable(x)
        #y_var = Variable(y.type(model.config.dtype).long())
        scores = model(x_var)
        # argmax over class scores -> predicted labels (on CPU).
        _, preds = scores.data.cpu().max(1)
        num_correct += (preds == y).sum()
        num_samples += preds.size(0)
        examples.extend(keys)
        all_labels.extend(list(y))
        all_predicted.extend(list(np.ndarray.flatten(preds.numpy())))
        #print("Completed evaluating {} examples".format(t*model.config.batch_size))
    acc = float(num_correct) / num_samples
    print('Got %d / %d correct (%.2f)' % (num_correct, num_samples, 100 * acc))
    if logger:
        for i in range(len(examples)):
            row = "{},{},{},{},{}".format(type, examples[i], all_labels[i], all_predicted[i], hold_out)
            logger.logResult(row)
    return acc
def eval_on_test_set(model,loss_fn,num_epochs=1, logger = None, hold_out = -1):
    """Evaluate the trained model on the full held-out test loader.

    The finetuning experiment below is deliberately disabled (kept as a
    string literal); only the plain accuracy check actually runs.
    `num_epochs` and `loss_fn` are only used by the disabled code path.
    """
    #first check the accuracy of the model on all of the data
    print("Trained model on all test data:")
    check_accuracy(model,model.config.test_loader_all,type="test", logger = logger, hold_out = hold_out)
    """
    #Finetuning doesn't work
    print "Now trying finetuning"
    #Freeze the layers, replace the final layer with a fully connected layer
    num_ft = model.fc.in_features
    for param in model.parameters():
        param.requires_grad = False
    model.fc = nn.Linear(num_ft, 2)
    optimizer = optim.Adam(model.fc.parameters(), lr = model.config.finetuning_lr)
    #try to train
    for epoch in range(num_epochs):
        print('Starting epoch %d / %d' % (epoch + 1, num_epochs))
        model.train()
        loss_total = 0
        for t, (x, y) in enumerate(model.config.test_loader_finetuning):
            x_var = Variable(x)
            y_var = Variable(y.type(model.config.dtype).long())
            scores = model(x_var)
            loss = loss_fn(scores, y_var)
            loss_total += loss.data[0]
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if ((t+1) % 10) == 0:
                grad_magnitude = [(x.grad.data.sum(), torch.numel(x.grad.data)) for x in model.parameters() if x.grad.data.sum() != 0.0]
                grad_magnitude = sum([abs(x[0]) for x in grad_magnitude]) #/ sum([x[1] for x in grad_magnitude])
                print('t = %d, avg_loss = %.4f, grad_mag = %.2f' % (t + 1, loss_total / (t+1), grad_magnitude))
        print("--- Evaluating ---")
        check_accuracy(model, model.config.train_loader, type = "Finetuning training")
        check_accuracy(model, model.config.val_loader, type = "Finetuning held out")
    print("\n--- Final Evaluation after finetuning ---")
    check_accuracy(model, model.config.test_loader_finetuning, type = "Finetuning training")
    check_accuracy(model, model.config.test_loader_holdout, type = "Finetuning held out")
    """
########
# MAIN #
########
def main():
    """Wire up config, model, data loaders and run training + evaluation."""
    # Config
    args = parseConfig()
    config = Config(args)
    print(config)
    logger = Logger()
    print("Logging destination: ", logger)
    # Load Embeddings
    #vocab, embeddings, embedding_dim = load_word_vectors('.', 'glove.6B', 100)
    # Model
    model = ComplexAudioRNN_2(config, audio_dim = 34)
    model.apply(initialize_weights)
    if config.use_gpu:
        model = model.cuda()
    # Load Data
    #new getAudioDatasets util lets you specify subjects to hold out from training and val datasets.
    #train_dataset = AudioDataset(config)
    train_dataset, test_dataset = getAudioDatasets(config,hold_out={15})
    train_idx, val_idx = splitIndices(train_dataset, config.nt, config.nv, shuffle = True)
    test_finetuning_idx, test_holdout_idx = splitIndices(test_dataset, len(test_dataset), shuffle = True)
    train_sampler, val_sampler = SubsetRandomSampler(train_idx), SubsetRandomSampler(val_idx)
    test_finetuning_sampler, test_holdout_sampler = SubsetRandomSampler(test_finetuning_idx), SubsetRandomSampler(test_holdout_idx)
    train_loader = DataLoader(train_dataset, batch_size = config.batch_size, num_workers = 3, sampler = train_sampler)
    val_loader = DataLoader(train_dataset, batch_size = config.batch_size, num_workers = 1, sampler = val_sampler)
    # NOTE(review): config.batch_size/2 is a float under Python 3 and
    # DataLoader expects an int batch_size -- `//` is presumably intended.
    test_loader_finetuning = DataLoader(test_dataset, batch_size = config.batch_size/2, num_workers = 1, sampler = test_finetuning_sampler)
    test_loader_holdout = DataLoader(test_dataset, batch_size = config.batch_size/2, num_workers = 1, sampler = test_holdout_sampler)
    test_loader_all = DataLoader(test_dataset, batch_size=config.batch_size)
    train_dataset.printDistributions(train_idx, msg = "Training", logger= logger, hold_out = -1)
    train_dataset.printDistributions(val_idx, msg = "Val", logger= logger, hold_out = -1)
    test_dataset.printDistributions(range(len(test_dataset)), msg="Test", logger= logger, hold_out = -1)
    # Stash the loaders on the config so train()/eval helpers can reach them.
    config.train_loader = train_loader
    config.val_loader = val_loader
    config.test_loader_all = test_loader_all
    config.test_loader_finetuning = test_loader_finetuning
    config.test_loader_holdout = test_loader_holdout
    optimizer = optim.Adam(model.parameters(), lr = config.lr)
    loss_fn = nn.CrossEntropyLoss().type(config.dtype)
    best_model = train(model, loss_fn, optimizer, config.epochs, logger = logger, hold_out = -1)
    #test on the held out speaker
    eval_on_test_set(best_model, loss_fn, config.finetuning_epochs, logger = logger, hold_out = -1)
if __name__ == '__main__':
    main()
13,793 | d4e4ffe6ce060099ff360bbf7db6f24a63527a98 | from __future__ import print_function
from Crypto.Cipher import AES
input = raw_input
import socket, sys, getpass, pwd, os, stat, time, base64
try:
    # NOTE(review): Python 2 source (`except Exception, e`, raw_input,
    # long) -- do not run under Python 3 without porting.
    # --- CLI argument parsing ---
    try:
        host = sys.argv[1]
        fil = sys.argv[2]
        mode = sys.argv[3]
    except Exception, e:
        print("Usage:")
        print("\tQuirky-c <ip> <filename> [--option]\n")
        print("\t--download Download file from server")
        print("\t--upload Upload file to server")
        print("\t--shell Request a limited shell from the server\n\t (run without filename)\n")
        exit(-1)
    port = 554
    s = socket.socket()
    print("[\033[1;94m+\033[00m] Opening Socket")
    try:
        s.connect((host, port))
        BLOCK_SIZE = 32
        # NOTE(review): the AES key arrives over the socket in the clear,
        # and AES.new(key) with no mode argument uses ECB -- confirm this
        # matches the server and is acceptable.
        key = s.recv(1024)
        PADDING = '{'
        pad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * PADDING
        cipher = AES.new(key)
        EncodeAES = lambda c, s: base64.b64encode(c.encrypt(pad(s)))
        DecodeAES = lambda c, e: c.decrypt(base64.b64decode(e)).rstrip(PADDING)
    except socket.error:
        print("[\033[1;31m!\033[00m] Connection was Refused... is the server running?")
        exit()
    # Authenticate: send the AES-encrypted password.
    i_passwd = getpass.getpass("[\033[1;94m+\033[00m] Password: ")
    e_passwd = EncodeAES(cipher, i_passwd)
    s.send(e_passwd)
    time.sleep(1)
    # Tell the server which mode was requested (d/u/s).
    if mode == '--download':
        s.send("d")
        time.sleep(0.10)
    elif mode == '--upload':
        s.send("u")
        time.sleep(0.10)
    elif mode == '--shell':
        time.sleep(0.10)
        s.send("s")
    def shell(host):
        # Interactive remote shell loop; 'q' exits.
        time.sleep(1)
        cmd = ''
        print("[\033[1;94m+\033[00m] Serving Shell")
        user = getpass.getuser()
        while cmd != 'q':
            cmd = input("\033[1;31m{}{}{}\033[00m \033[1;94m{}\033[00m".format(user, '@', host, '~$ '))
            s.send(cmd)
            print(s.recv(9999))
    def upload():
        # Send file name, size, then the contents in 1 KiB chunks,
        # printing a running percentage.
        s.send(fil)
        time.sleep(1)
        s.send(str(os.path.getsize(fil)))
        filesize = long(os.path.getsize(fil))
        with open(fil, 'rb') as f:
            bytesToSend = f.read(1024)
            bytecount = len(bytesToSend)
            s.send(bytesToSend)
            while bytesToSend != "":
                bytesToSend = f.read(1024)
                bytecount += len(bytesToSend)
                s.send(bytesToSend)
                sys.stdout.write("\r[\033[1;94m+\033[00m] {0:2f}".format((bytecount/float(filesize))*100)+"%")
                sys.stdout.flush()
        print("\n[\033[1;94m+\033[00m] Upload Complete")
        s.close()
    def download():
        # Request a file by name; server replies 'EXISTS<size>' when found.
        print("[\033[1;94m+\033[00m] Connecting to {}:{}".format(host,port))
        filename = fil
        if filename != 'q':
            s.send(filename)
            data = s.recv(1024) #(" + str(filesize)+" Bytes)
            if data[:6] == 'EXISTS':
                filesize = long(data[6:])
                message = input("[\033[1;94m+\033[00m] Download "+ filename+ " (" + str(filesize)+" Bytes) [y/n]: ").lower()
                if message == 'y':
                    s.send("OK")
                    # Receive into "new<filename>" until the announced size
                    # has arrived, printing a running percentage.
                    f = open("new" + filename, 'wb')
                    data = s.recv(1024)
                    totalrecv = len(data)
                    f.write(data)
                    while totalrecv < filesize:
                        data = s.recv(1024)
                        totalrecv += len(data)
                        f.write(data)
                        sys.stdout.write("\r[\033[1;94m+\033[00m] {0:2f}".format((totalrecv/float(filesize))*100)+"%")
                        sys.stdout.flush()
                    print("\n[\033[1;94m+\033[00m] Download Complete")
            else:
                print("[\033[1;31m!\033[00m] File Does not Exist!!")
            s.close()
except KeyboardInterrupt:
    pass
if __name__ == '__main__':
    # Dispatch on the mode flag; Ctrl-C exits quietly.
    try:
        if str(sys.argv[3]) == '--download':
            download()
        elif str(sys.argv[3]) == '--upload':
            upload()
        elif str(sys.argv[3]) == '--shell':
            shell(host)
    except KeyboardInterrupt:
        pass
13,794 | 19754ea08fadb0cebe7c7bcdec2649abdfc2b2c2 | from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
class Main_page():
    def main_page(request):
        """Authenticate a user and return a placeholder HttpResponse.

        NOTE(review): `username` and `password` are never defined in this
        scope, so authenticate() raises NameError -- they presumably should
        come from request.POST; confirm and fix before use.
        NOTE(review): defined without `self`, so this only works when
        called as an unbound function (e.g. Main_page.main_page(request)).
        """
        url = ''
        url_string = ''
        user = authenticate(username=username, password=password)
        # return HttpResponse(f"User: {user}")
        if user:
            return HttpResponse(f"User passed if")
        else:
            return HttpResponse(f"Error")
        # url = users.create_logout_uri(self.request.uri)
        # url_string = 'Logout'
        # else:
        # url = users.create_login_uri(self.request.uri)
        # url_string = 'login'
        # template_values = {
        # 'url': url,
        # 'url_string': url_string,
        # 'user': user
        # }
        # return render(request, 'main_page.html', template_values)
|
13,795 | 3f250e1614f792ee309d22105ff743aaf594e3b4 | import numpy as np
import cv2
class UIColor:
    """Accumulates colored scribble strokes into an npx x npx RGB image
    plus a single-channel mask of the touched pixels.

    `scale` maps UI (widget) coordinates down to pixel coordinates.
    """

    def __init__(self, npx, scale):
        self.npx = npx
        self.scale = scale
        self.img = np.zeros((npx, npx, 3), np.uint8)
        self.mask = np.zeros((npx, npx, 1), np.uint8)
        self.width = int(2 * scale)

    def update(self, points, color):
        """Rasterize the polyline `points` (UI coords, objects with .x()/.y())
        in `color` onto the image, marking the same pixels in the mask."""
        stroke = (color.red(), color.green(), color.blue())
        thickness = int(max(1, self.width / self.scale))

        def to_pixel(p):
            # UI coordinates -> integer pixel coordinates.
            return (int(p.x() / self.scale), int(p.y() / self.scale))

        for start, end in zip(points, points[1:]):
            a, b = to_pixel(start), to_pixel(end)
            cv2.line(self.img, a, b, stroke, thickness)
            cv2.line(self.mask, a, b, 255, thickness)
        # utils.CVShow(self.img, 'color input image')
        # utils.CVShow(self.mask, 'color image mask')

    def get_constraints(self):
        """Return the (image, mask) pair accumulated so far."""
        return self.img, self.mask

    def update_width(self, d):
        """Adjust the brush width by `d`, clamped to [1, 20]; return it."""
        self.width = min(20, max(1, self.width + d))
        return self.width

    def reset(self):
        """Clear both the image and the mask back to all zeros."""
        self.img = np.zeros((self.npx, self.npx, 3), np.uint8)
        self.mask = np.zeros((self.npx, self.npx, 1), np.uint8)
|
13,796 | 03881abc0d2a314d574954e268b30c2dd2e113d9 | from django.core.management.base import BaseCommand
from dojo.models import Finding
import logging
deduplicationLogger = logging.getLogger("dojo.specific-loggers.deduplication")
"""
Author: Marian Gawron
This script will identify loop dependencies in findings
"""
class Command(BaseCommand):
    help = 'No input commands for fixing Loop findings.'

    def handle(self, *args, **options):
        # manage.py entry point: delegate to the module-level fixer.
        fix_loop_duplicates()
def fix_loop_duplicates():
    """Find findings that are simultaneously a duplicate and an original
    (a loop), untangle each loop, then promote loop-free duplicates.
    """
    candidates = Finding.objects.filter(duplicate_finding__isnull=False, original_finding__isnull=False).all().order_by("-id")
    deduplicationLogger.info("Identified %d Findings with Loops" % len(candidates))
    for find_id in candidates.values_list('id', flat=True):
        # 5 = maximum recursion depth allowed when unwinding one loop.
        removeLoop(find_id, 5)
    # Findings still flagged duplicate but left without an original become
    # originals themselves.
    new_originals = Finding.objects.filter(duplicate_finding__isnull=True, duplicate=True)
    for f in new_originals:
        deduplicationLogger.info("New Original: %d " % f.id)
        f.duplicate = False
        # Call models.Model.save directly, skipping Finding.save overrides.
        super(Finding, f).save()
    loop_count = Finding.objects.filter(duplicate_finding__isnull=False, original_finding__isnull=False).count()
    deduplicationLogger.info("%d Finding found with Loops" % loop_count)
def removeLoop(finding_id, counter):
    """Break one duplicate-marking cycle rooted at `finding_id`.

    Recursively re-points every duplicate of the finding at the true
    original (the finding with the lowest id), removing self-references
    along the way.  `counter` bounds the recursion depth.
    """
    # get latest status
    finding = Finding.objects.get(id=finding_id)
    real_original = finding.duplicate_finding
    if not real_original or real_original is None:
        return
    if finding_id == real_original.id:
        # Self-loop: the finding claims to be its own duplicate.
        finding.duplicate_finding = None
        super(Finding, finding).save()
        return
    # Only modify the findings if the original ID is lower to get the oldest finding as original
    if (real_original.id > finding_id) and (real_original.duplicate_finding is not None):
        # Swap roles so the oldest (lowest-id) finding acts as original.
        tmp = finding_id
        finding_id = real_original.id
        real_original = Finding.objects.get(id=tmp)
        finding = Finding.objects.get(id=finding_id)
    if real_original in finding.original_finding.all():
        # remove the original from the duplicate list if it is there
        finding.original_finding.remove(real_original)
        super(Finding, finding).save()
    if counter <= 0:
        # Maximum recursion depth as safety method to circumvent recursion here
        return
    for f in finding.original_finding.all():
        # for all duplicates set the original as their original, get rid of self in between
        f.duplicate_finding = real_original
        super(Finding, f).save()
        super(Finding, real_original).save()
        removeLoop(f.id, counter - 1)
|
13,797 | af7bf9e60a9ce82985669beb4c0a452d5b464e0b | import socket
# Single-threaded TCP file-download server: each client sends a file name,
# receives the file contents (or "404" on failure), then is disconnected.
tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Allow quick restarts: mark the listening address reusable.
tcp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
addr = ("127.0.0.1", 4399)
tcp_socket.bind(addr)
tcp_socket.listen(128)
while True:
    tcp_request, ip_port = tcp_socket.accept()
    print("客户端加入")
    file_name = tcp_request.recv(1024).decode()
    # Resolve the requested name one directory up and stream it back.
    try:
        with open("../"+file_name, "rb") as file:
            # Send the contents to the client in 1 KiB chunks.
            while True:
                file_data = file.read(1024)
                if file_data:
                    # forward this chunk
                    tcp_request.send(file_data)
                else:
                    print("读取完毕!已发送")
                    break
    except Exception as e:
        # Any failure (missing file, read error) is reported as "404".
        tcp_request.send("404".encode())
        print("文件下载失败,错误为:{}".format(e))
    tcp_request.close()
# NOTE(review): unreachable -- the accept loop above never breaks.
tcp_socket.close()
13,798 | 6704852a593f3bd7b7bcaae59eb09802f588dde3 | from numpy import array, linalg, matmul
# Demo: invert two 3x3 matrices and multiply the originals, printing each
# matrix, both inverses, and the product in turn.
a = array([[1, -1, 0], [3, 0, -1], [1, 3, -2]])
print(a)
b = array([[2, -1, 1], [0, 3, -1], [-1, 2, 0]])
print(b)
inv = linalg.inv(a)
i = linalg.inv(b)
ab = matmul(a, b)
for result in (inv, i, ab):
    print(result)
13,799 | db82ff4b9cf28123fe8bb276d8bcb1a81fa822bb | import math
n1 = 255
n2 = 1000
print(hex(n1))
print(hex(n2))
print('Another lab start here..........')
def quadratic(a, b, c):
    """Solve a*x**2 + b*x + c = 0 over the reals.

    Returns the string "No result" when the discriminant is negative, a
    single root for a zero discriminant, and a (root1, root2) tuple
    (larger root first) otherwise.
    """
    discriminant = b ** 2 - 4 * a * c
    if discriminant < 0:
        return "No result"
    if discriminant == 0:
        return -b / (2 * a)
    root_term = math.sqrt(discriminant)
    return (-b + root_term) / (2 * a), (-b - root_term) / (2 * a)

print(quadratic(2, 3, 1))
print(quadratic(1, 3, -4))
print('Another lab start here..........')
# All ordered pairs of distinct letters from "ABC", rendered as "X=>Y".
L = [first + '=>' + second
     for first in "ABC"
     for second in "ABC"
     if first != second]
print(L)
print('Another lab start here..........')
# Lower-case only the string entries of a mixed-type list.
L1 = ['Hello', 'World', 18, 'Apple']
L2 = [item.lower() for item in L1 if isinstance(item, str)]
print(L2)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.