content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from django.shortcuts import redirect
from django.http import HttpResponse
from django.http import HttpResponseBadRequest
from django.http import Http404
import datetime as dt
from shared.Monte import Monte
def simulate(request):
    """
    Build a Monte object from the user-supplied GET parameters and respond
    with the rendered plots as an HTML string.

    Expected GET parameters: ``ticker``, ``sim_amount``, ``time_steps``,
    ``start`` / ``end`` (``YYYY-MM-DD``), ``width``, ``height``.

    Returns
    -------
    HttpResponse with the plot markup on success, or
    HttpResponseBadRequest for non-GET requests and for missing or
    malformed parameters (instead of an unhandled 500 error).
    """
    if request.method != 'GET':
        return HttpResponseBadRequest()
    try:
        ticker = str(request.GET.get("ticker", ''))
        sim_amount = int(request.GET.get("sim_amount", ''))
        time_steps = int(request.GET.get("time_steps", ''))
        start = dt.datetime.strptime(request.GET.get("start", ''), '%Y-%m-%d')
        end = dt.datetime.strptime(request.GET.get("end", ''), '%Y-%m-%d')
        width = int(float(request.GET.get('width')))
        height = int(float(request.GET.get('height')))
    except (TypeError, ValueError):
        # Missing or malformed query parameters would otherwise raise here
        # (e.g. int('') -> ValueError, float(None) -> TypeError).
        return HttpResponseBadRequest()
    dpi = 100  # fixed plot resolution
    monte_object = Monte(ticker=ticker, sim_amount=sim_amount, time_steps=time_steps, start=start,
                         end=end, width=width, height=height, dpi=dpi)
    monte_object.create_DataFrame()
    monte_object.simulate()
    # Render each figure to an HTML fragment, then bundle them into one
    # response payload via get_json().
    html_str1 = monte_object.plot_history()
    html_str2 = monte_object.plot_pdf()
    html_str3 = monte_object.plot_single()
    html_str4 = monte_object.plot_multi()
    html_str = monte_object.get_json(html_str1, html_str2, html_str3, html_str4)
    # Release the matplotlib figures so repeated requests do not leak memory.
    monte_object.clear_figures()
    return HttpResponse(html_str)
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
18941,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
22069,
18453,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
... | 2.321078 | 816 |
from .core import Variable, Model
# NOTE(review): the ddt/spike_when strings below are presumably parsed and
# integrated by the Model class in .core — confirm the expected expression
# grammar there before editing any of them.

# Hodgkin-Huxley Model
# Four-variable conductance-based neuron: membrane voltage V plus the K+
# activation (n), Na+ activation (m) and Na+ inactivation (h) gating
# variables.  Voltage is in mV (see unit='mV'); alpha_*/beta_* are the
# standard Hodgkin-Huxley rate-function fits.
V = Variable(name='V', init_value=-60, unit='mV',
             ddt='(1/Cm)*(-gk*n**4*(V-Vk)-gna*m**3*h*(V-Vna)-gl*(V-Vl)+Iapp)')
n = Variable(name='n', ddt='alpha_n*(1-n)-beta_n*n', init_value=1/3)
m = Variable(name='m', ddt='alpha_m*(1-m)-beta_m*m', init_value=0)
h = Variable(name='h', ddt='alpha_h*(1-h)-beta_h*h', init_value=2/3)
# Integrated with 4th-order Runge-Kutta (simul_method='rk4'); a spike is
# registered whenever V crosses 30 mV.
HH = Model(V, n, m, h, Cm=1, gk=36, gna=120, spike_when='V>=30',
           gl=0.3, Vk=-77, Vna=50, Vl=-54.4,
           alpha_n='0.01*(-V-55)/(exp((-V-55)/10) -1)',
           beta_n='0.125*exp((-V-65)/80)',
           alpha_m='0.1*(-V-40)/(exp((-V-40)/10) -1)',
           beta_m='4*exp((-V-65)/18)',
           alpha_h='0.07*exp((-V-65)/20)',
           beta_h='1/(1+exp((-V-36)/10))',
           Iapp=0,
           simul_method='rk4')

# FitzHugh-Nagumo Model
# Two-variable reduction of HH: fast voltage-like variable v and slow
# recovery variable w.
v = Variable(name='v', ddt='-w+v-(1/3)*v**3+I', init_value=-1.25, unit='mV')
w = Variable(name='w', ddt='(1/tau)*(v+a-b*w)', init_value=-1)
FHN = Model(v, w, spike_when='v>=0', a=0.7, b=1.2, tau=12.5, I=0)

# Izhikievich Model
# Quadratic integrate model with a hard reset: on a spike (v >= 30) v is
# reset to c and u jumps by d (reset_value='u+d'); max_spike_value clips
# the recorded spike peak to 30.
v = Variable(name='v', ddt='+0.04*v**2+5*v+140-u+I', init_value=-65,
             reset_value='c', unit='mV')
u = Variable(name='u', ddt='a*(b*v-u)', init_value=-15, reset_value='u+d')
IZHIKIEVICH = Model(v, u, spike_when='v>=30', max_spike_value=30,
                    a=0.02, b=0.2, c=-65, d=8, I=0)

# Leaky integrate-and-fire model
# Single membrane variable u; fires when u >= 1 and resets to ur.
u = Variable(name='u', init_value=0, unit='mV',
             ddt='(1/tau_m)*(-u+R*I)', reset_value='ur')
LEAKY_INTEGRATE_AND_FIRE = Model(u, spike_when='u>=1', max_spike_value=1,
                                 tau_m=10, R=1, ur=0, I=0)
| [
6738,
764,
7295,
1330,
35748,
11,
9104,
201,
198,
201,
198,
2,
37480,
5116,
12,
39,
2821,
1636,
9104,
201,
198,
53,
796,
35748,
7,
3672,
11639,
53,
3256,
2315,
62,
8367,
10779,
1899,
11,
4326,
11639,
76,
53,
3256,
201,
198,
220,
2... | 1.688933 | 1,003 |
import pandas as pd
import numpy as np
from .utils import calculate_ao, calculate_sma, calculate_smma, mad
# Public package version string.
__version__ = '1.9.0'
class Indicators:
    """
    Add technical indicators data to a pandas data frame

    Example:
    ~~~~~~~~
        >>> import pandas as pd
        >>> from tapy import Indicators
        >>> df = pd.read_csv('EURUSD60.csv')
        >>> indicators = Indicators(df)
        >>> indicators.accelerator_oscillator(column_name='AC')
        >>> indicators.sma()
        >>> df = indicators.df
        >>> df.tail()
                    Date   Time     Open     High      Low    Close  Volume        AC       sma
        3723  2019.09.20  16:00  1.10022  1.10105  1.10010  1.10070    2888 -0.001155  1.101296
        3724  2019.09.20  17:00  1.10068  1.10193  1.10054  1.10184    6116 -0.000820  1.101158
        3725  2019.09.20  18:00  1.10186  1.10194  1.10095  1.10144    3757 -0.000400  1.101056
        3726  2019.09.20  19:00  1.10146  1.10215  1.10121  1.10188    3069  0.000022  1.101216
        3727  2019.09.20  20:00  1.10184  1.10215  1.10147  1.10167    1224  0.000388  1.101506
    """

    def __init__(
            self,
            df,
            open_col='Open',
            high_col='High',
            low_col='Low',
            close_col='Close',
            volume_col='Volume'
    ):
        """
        Initiate Indicators object

        :param pandas data frame df: Should contain OHLC columns and Volume column
        :param str open_col: Name of Open column in df
        :param str high_col: Name of High column in df
        :param str low_col: Name of Low column in df
        :param str close_col: Name of Close column in df
        :param str volume_col: Name of Volume column in df. This column is optional
            and require only if indicator use this data.
        """
        self.df = df
        # Maps the logical OHLCV names used internally to the actual column
        # names of the supplied data frame.
        self._columns = {
            'Open': open_col,
            'High': high_col,
            'Low': low_col,
            'Close': close_col,
            'Volume': volume_col
        }

    def sma(self, period=5, column_name='sma', apply_to='Close'):
        """
        Simple Moving Average (SMA)
        ---------------------
            https://www.metatrader4.com/en/trading-platform/help/analytics/tech_indicators/moving_average#simple_moving_average

            >>> Indicators.sma(period=5, column_name='sma', apply_to='Close')

            :param int period: the number of calculation periods, default: 5
            :param str column_name: Column name, default: sma
            :param str apply_to: Which column use for calculation.
                Can be *"Open"*, *"High"*, *"Low"* and *"Close"*.
                **Default**: Close
            :return: None
        """
        # calculate_sma mutates self.df in place.
        calculate_sma(self.df, period, column_name, apply_to)

    def smma(self, period=5, column_name='smma', apply_to='Close'):
        """
        Smoothed Moving Average (SMMA)
        ---------------------
            https://www.metatrader4.com/ru/trading-platform/help/analytics/tech_indicators/moving_average#smoothed_moving_average

            >>> Indicators.smma(period=5, column_name='smma', apply_to='Close')

            :param int period: the number of calculation periods, default: 5
            :param str column_name: Column name, default: smma
            :param str apply_to: Which column use for calculation.
                Can be *"Open"*, *"High"*, *"Low"* and *"Close"*.
                **Default**: Close
            :return: None
        """
        df_smma = calculate_smma(self.df, period, column_name, apply_to)
        self.df = self.df.merge(df_smma, left_index=True, right_index=True)

    def ema(self, period=5, column_name='ema', apply_to='Close'):
        """
        Exponential Moving Average (EMA)
        ---------------------
            https://www.metatrader4.com/en/trading-platform/help/analytics/tech_indicators/moving_average#exponential_moving_average

            >>> Indicators.ema(period=5, column_name='ema', apply_to='Close')

            :param int period: the number of calculation periods, default: 5
            :param str column_name: Column name, default: ema
            :param str apply_to: Which column use for calculation.
                Can be *"Open"*, *"High"*, *"Low"* and *"Close"*.
                **Default**: Close
            :return: None
        """
        self.df[column_name] = self.df[self._columns[apply_to]].ewm(
            span=period, adjust=False).mean()

    def awesome_oscillator(self, column_name='ao'):
        """
        Awesome Oscillator (AO)
        -----------------------
            https://www.metatrader4.com/en/trading-platform/help/analytics/tech_indicators/awesome_oscillator

            >>> Indicators.awesome_oscillator(column_name='ao')

            :param str column_name: Column name, default: ao
            :return: None
        """
        # Data frame for storing temporary data
        df_tmp = pd.DataFrame()
        df_tmp['High'] = self.df[self._columns['High']]
        df_tmp['Low'] = self.df[self._columns['Low']]

        # Calculate Awesome Oscillator
        calculate_ao(df_tmp, column_name)
        df_tmp = df_tmp[[column_name]]
        self.df = self.df.merge(df_tmp, left_index=True, right_index=True)

    def accelerator_oscillator(self, column_name='ac'):
        """
        Accelerator Oscillator (AC)
        -----------------------
            https://www.metatrader4.com/en/trading-platform/help/analytics/tech_indicators/accelerator_decelerator

            >>> Indicators.accelerator_oscillator(column_name='ac')

            :param str column_name: Column name, default: ac
            :return: None
        """
        # Data frame for storing temporary data
        df_tmp = pd.DataFrame()
        df_tmp['High'] = self.df[self._columns['High']]
        df_tmp['Low'] = self.df[self._columns['Low']]

        # Calculate Awesome Oscillator
        calculate_ao(df_tmp, 'ao')

        # Calculate SMA for Awesome Oscillator
        calculate_sma(df_tmp, 5, 'sma_ao', 'ao')

        # AC = AO - SMA(AO, 5)
        df_tmp[column_name] = df_tmp['ao'] - df_tmp['sma_ao']
        df_tmp = df_tmp[[column_name]]
        self.df = self.df.merge(df_tmp, left_index=True, right_index=True)

    def accumulation_distribution(self, column_name='a/d'):
        """
        Accumulation/Distribution (A/D)
        ---------------------
            https://www.metatrader4.com/en/trading-platform/help/analytics/tech_indicators/accumulation_distribution

            >>> Indicators.accumulation_distribution(column_name='a/d')

            :param str column_name: Column name, default: a/d
            :return: None
        """
        # Temporary df
        df_tmp = pd.DataFrame()
        df_tmp['close'] = self.df[self._columns['Close']]
        df_tmp['high'] = self.df[self._columns['High']]
        df_tmp['low'] = self.df[self._columns['Low']]
        df_tmp['volume'] = self.df[self._columns['Volume']]
        # Money-flow volume of each bar.
        # NOTE(review): bars with high == low divide by zero and produce
        # inf/NaN — verify upstream data has no such bars.
        df_tmp['calc'] = (
            (df_tmp['close'] - df_tmp['low']) - (df_tmp['high'] - df_tmp['close'])
        ) * df_tmp['volume'] / (df_tmp['high'] - df_tmp['low'])
        # A/D is the running (cumulative) total of the money-flow volume,
        # not the grand total broadcast to every row.
        df_tmp[column_name] = df_tmp['calc'].cumsum()
        df_tmp = df_tmp[[column_name]]
        self.df = self.df.merge(df_tmp, left_index=True, right_index=True)

    def alligator(self,
                  period_jaws=13,
                  period_teeth=8,
                  period_lips=5,
                  shift_jaws=8,
                  shift_teeth=5,
                  shift_lips=3,
                  column_name_jaws='alligator_jaws',
                  column_name_teeth='alligator_teeth',
                  column_name_lips='alligator_lips'):
        """
        Alligator
        ------------------
            https://www.metatrader4.com/en/trading-platform/help/analytics/tech_indicators/alligator

            >>> Indicators.alligator(period_jaws=13, period_teeth=8, period_lips=5, shift_jaws=8, shift_teeth=5, shift_lips=3, column_name_jaws='alligator_jaw', column_name_teeth='alligator_teeth', column_name_lips='alligator_lips')

            :param int period_jaws: Period for Alligator' Jaws, default: 13
            :param int period_teeth: Period for Alligator' Teeth, default: 8
            :param int period_lips: Period for Alligator' Lips, default: 5
            :param int shift_jaws: Period for Alligator' Jaws, default: 8
            :param int shift_teeth: Period for Alligator' Teeth, default: 5
            :param int shift_lips: Period for Alligator' Lips, default: 3
            :param str column_name_jaws: Column Name for Alligator' Jaws, default: alligator_jaws
            :param str column_name_teeth: Column Name for Alligator' Teeth, default: alligator_teeth
            :param str column_name_lips: Column Name for Alligator' Lips, default: alligator_lips
            :return: None
        """
        # All three lines are SMMAs of the bar's median price (H+L)/2.
        df_median = self.df[[self._columns['High'], self._columns['Low']]]
        median_col = 'median_col'
        df_median = df_median.assign(
            median_col=lambda x: (x[self._columns['High']] + x[self._columns['Low']]) / 2
        )
        df_j = calculate_smma(df_median, period_jaws, column_name_jaws, median_col)
        df_t = calculate_smma(df_median, period_teeth, column_name_teeth, median_col)
        df_l = calculate_smma(df_median, period_lips, column_name_lips, median_col)

        # Shift SMMAs
        df_j[column_name_jaws] = df_j[column_name_jaws].shift(shift_jaws)
        df_t[column_name_teeth] = df_t[column_name_teeth].shift(shift_teeth)
        df_l[column_name_lips] = df_l[column_name_lips].shift(shift_lips)

        self.df = self.df.merge(df_j, left_index=True, right_index=True)
        self.df = self.df.merge(df_t, left_index=True, right_index=True)
        self.df = self.df.merge(df_l, left_index=True, right_index=True)

    def atr(self, period=14, column_name='atr'):
        """
        Average True Range (ATR)
        ------------------------
            https://www.metatrader4.com/en/trading-platform/help/analytics/tech_indicators/average_true_range

            >>> Indicators.atr(period=14, column_name='atr')

            :param int period: Period, default: 14
            :param str column_name: Column name, default: atr
            :return: None
        """
        df_tmp = self.df[[self._columns['High'], self._columns['Low'], self._columns['Close']]]
        # True range = max of (H-L, prev_close-H, prev_close-L) per bar.
        df_tmp = df_tmp.assign(max_min=df_tmp[self._columns['High']] - df_tmp[self._columns['Low']])
        df_tmp['prev_close-high'] = df_tmp[self._columns['Close']].shift(1) - df_tmp[self._columns['High']]
        df_tmp['prev_close-min'] = df_tmp[self._columns['Close']].shift(1) - df_tmp[self._columns['Low']]
        df_tmp['max_val'] = df_tmp.apply(lambda x: max([x['max_min'], x['prev_close-high'], x['prev_close-min']]),
                                         axis=1)
        calculate_sma(df_tmp, period, column_name, 'max_val')
        df_tmp = df_tmp[[column_name]]
        self.df = self.df.merge(df_tmp, left_index=True, right_index=True)

    def bears_power(self, period=13, column_name='bears_power'):
        """
        Bears Power
        ------------------------
            https://www.metatrader4.com/en/trading-platform/help/analytics/tech_indicators/bears_power

            >>> Indicators.bears_power(period=13, column_name='bears_power')

            :param int period: Period, default: 13
            :param str column_name: Column name, default: bears_power
            :return: None
        """
        df_tmp = self.df[[self._columns['Close'], self._columns['Low']]]
        df_tmp = df_tmp.assign(ema=df_tmp[self._columns['Close']].ewm(span=period, adjust=False).mean())
        df_tmp[column_name] = df_tmp['ema'] - df_tmp[self._columns['Low']]
        df_tmp = df_tmp[[column_name]]
        self.df = self.df.merge(df_tmp, left_index=True, right_index=True)

    def bollinger_bands(self, period=20, deviation=2, column_name_top='bollinger_top',
                        column_name_mid='bollinger_mid', column_name_bottom='bollinger_bottom'):
        """
        Bollinger Bands
        ---------------
            https://www.metatrader4.com/en/trading-platform/help/analytics/tech_indicators/bollinger_bands

            >>> Indicators.bollinger_bands(self, period=20, deviation=2, column_name_top='bollinger_top', column_name_mid='bollinger_mid', column_name_bottom='bollinger_bottom')

            :param int period: Period, default 20
            :param int deviation: Number of Standard Deviations, default 2
            :param str column_name_top: default bollinger_top
            :param str column_name_mid: default bollinger_mid
            :param str column_name_bottom: default bollinger_bottom
            :return: None
        """
        df_tmp = self.df[[self._columns['Close']]]
        # Middle line is an SMA; bands are +/- deviation population std devs.
        df_tmp = df_tmp.assign(mid=df_tmp[self._columns['Close']].rolling(window=period).mean())
        df_tmp = df_tmp.assign(stdev=df_tmp[self._columns['Close']].rolling(window=period).std(ddof=0))
        df_tmp = df_tmp.assign(tl=df_tmp.mid + deviation * df_tmp.stdev)
        df_tmp = df_tmp.assign(bl=df_tmp.mid - deviation * df_tmp.stdev)
        df_tmp = df_tmp[['mid', 'tl', 'bl']]
        df_tmp = df_tmp.rename(columns={'mid': column_name_mid, 'tl': column_name_top, 'bl': column_name_bottom})
        self.df = self.df.merge(df_tmp, left_index=True, right_index=True)

    def bulls_power(self, period=13, column_name='bulls_power'):
        """
        Bulls Power
        ------------------------
            https://www.metatrader4.com/en/trading-platform/help/analytics/tech_indicators/bulls_power

            >>> Indicators.bulls_power(period=13, column_name='bulls_power')

            :param int period: Period, default: 13
            :param str column_name: Column name, default: bulls_power
            :return: None
        """
        df_tmp = self.df[[self._columns['Close'], self._columns['High']]]
        df_tmp = df_tmp.assign(ema=df_tmp[self._columns['Close']].ewm(span=period, adjust=False).mean())
        df_tmp[column_name] = df_tmp[self._columns['High']] - df_tmp['ema']
        df_tmp = df_tmp[[column_name]]
        self.df = self.df.merge(df_tmp, left_index=True, right_index=True)

    def cci(self, period=14, column_name='cci'):
        """
        Commodity Channel Index (CCI)
        -----------------------------
            https://www.metatrader4.com/en/trading-platform/help/analytics/tech_indicators/commodity_channel_index

            >>> Indicators.cci(period=14, column_name='cci')

            :param int period: Period, default: 14
            :param str column_name: Column name, default: cci
            :return: None
        """
        df_tmp = self.df[[self._columns['High'], self._columns['Low'], self._columns['Close']]]
        # Typical price and its rolling SMA / mean absolute deviation.
        df_tmp = df_tmp.assign(tp=(df_tmp[self._columns['High']]
                                   + df_tmp[self._columns['Low']]
                                   + df_tmp[self._columns['Close']]) / 3)
        df_tmp = df_tmp.assign(tp_sma=df_tmp.tp.rolling(window=period).mean())
        df_tmp = df_tmp.assign(tp_mad=df_tmp.tp.rolling(window=period).apply(mad, raw=False))
        df_tmp = df_tmp.assign(tp_min_sma=df_tmp.tp - df_tmp.tp_sma)
        # 0.015 is Lambert's scaling constant for CCI.
        df_tmp = df_tmp.assign(cci=(1 / 0.015) * (df_tmp.tp_min_sma / df_tmp.tp_mad))
        df_tmp = df_tmp[['cci']]
        df_tmp = df_tmp.rename(columns={'cci': column_name})
        self.df = self.df.merge(df_tmp, left_index=True, right_index=True)

    def de_marker(self, period=14, column_name='dem'):
        """
        DeMarker (DeM)
        --------------
            https://www.metatrader4.com/en/trading-platform/help/analytics/tech_indicators/demarker

            >>> Indicators.de_marker(period=14, column_name='dem')

            :param int period: Period, default: 14
            :param str column_name: Column name, default: dem
            :return: None
        """
        df_tmp = self.df[[self._columns['High'], self._columns['Low']]]
        # DeMax: positive part of the high-to-high increase.
        df_tmp = df_tmp.assign(
            hdif=(df_tmp[self._columns['High']] > df_tmp[self._columns['High']].shift(1)).astype(int))
        df_tmp = df_tmp.assign(hsub=df_tmp[self._columns['High']] - df_tmp[self._columns['High']].shift(1))
        df_tmp = df_tmp.assign(demax=np.where(df_tmp.hdif == 0, 0, df_tmp.hsub))
        # DeMin: positive part of the low-to-low decrease.
        df_tmp = df_tmp.assign(ldif=(df_tmp[self._columns['Low']] < df_tmp[self._columns['Low']].shift(1)).astype(int))
        df_tmp = df_tmp.assign(lsub=df_tmp[self._columns['Low']].shift(1) - df_tmp[self._columns['Low']])
        df_tmp = df_tmp.assign(demin=np.where(df_tmp.ldif == 0, 0, df_tmp.lsub))
        df_tmp['sma_demax'] = df_tmp['demax'].rolling(window=period).mean()
        df_tmp['sma_demin'] = df_tmp['demin'].rolling(window=period).mean()
        df_tmp = df_tmp.assign(dem=df_tmp.sma_demax / (df_tmp.sma_demax + df_tmp.sma_demin))
        df_tmp = df_tmp[['dem']]
        df_tmp = df_tmp.rename(columns={'dem': column_name})
        self.df = self.df.merge(df_tmp, left_index=True, right_index=True)

    def force_index(self, period=13, method='sma', apply_to='Close', column_name='frc'):
        """
        Force Index (FRC)
        ------------------
            https://www.metatrader4.com/en/trading-platform/help/analytics/tech_indicators/force_index

            >>> Indicators.force_index(period=13, method='sma', apply_to='Close', column_name='frc')

            :param int period: Period, default: 13
            :param str method: Moving average method. Can be 'sma', 'smma' or 'ema'. Default: sma
            :param str apply_to: Apply indicator to column, default: Close
            :param str column_name: Column name, default: frc
            :return: None
        """
        # Map the logical column name through self._columns, consistent with
        # every other indicator (previously apply_to was used verbatim and
        # broke when the data frame used custom column names).
        price_col = self._columns[apply_to]
        df_tmp = self.df[[price_col, self._columns['Volume']]]
        if method == 'sma':
            df_tmp = df_tmp.assign(ma=df_tmp[price_col].rolling(window=period).mean())
        elif method == 'smma':
            df_tmp_smma = calculate_smma(df_tmp, period, 'ma', price_col)
            df_tmp = df_tmp.merge(df_tmp_smma, left_index=True, right_index=True)
        elif method == 'ema':
            df_tmp = df_tmp.assign(ma=df_tmp[price_col].ewm(span=period, adjust=False).mean())
        else:
            raise ValueError('The "method" can be only "sma", "ema" or "smma"')
        df_tmp = df_tmp.assign(frc=(df_tmp.ma - df_tmp.ma.shift(1)) * df_tmp[self._columns['Volume']])
        df_tmp = df_tmp[['frc']]
        df_tmp = df_tmp.rename(columns={'frc': column_name})
        self.df = self.df.merge(df_tmp, left_index=True, right_index=True)

    def fractals(self, column_name_high='fractals_high', column_name_low='fractals_low'):
        """
        Fractals
        ---------
            https://www.metatrader4.com/en/trading-platform/help/analytics/tech_indicators/fractals

            >>> Indicators.fractals(column_name_high='fractals_high', column_name_low='fractals_low')

            :param str column_name_high: Column name for High values, default: fractals_high
            :param str column_name_low: Column name for Low values, default: fractals_low
            :return: None
        """
        df_tmp = self.df[[self._columns['High'], self._columns['Low']]]
        # Up fractal: a high strictly above the two bars on each side.
        df_tmp = df_tmp.assign(fh=np.where(
            (df_tmp[self._columns['High']] > df_tmp[self._columns['High']].shift(1)) &
            (df_tmp[self._columns['High']] > df_tmp[self._columns['High']].shift(2)) &
            (df_tmp[self._columns['High']] > df_tmp[self._columns['High']].shift(-1)) &
            (df_tmp[self._columns['High']] > df_tmp[self._columns['High']].shift(-2)),
            True, False
        ))
        # Down fractal: a low strictly below the two bars on each side.
        df_tmp = df_tmp.assign(fl=np.where(
            (df_tmp[self._columns['Low']] < df_tmp[self._columns['Low']].shift(1)) &
            (df_tmp[self._columns['Low']] < df_tmp[self._columns['Low']].shift(2)) &
            (df_tmp[self._columns['Low']] < df_tmp[self._columns['Low']].shift(-1)) &
            (df_tmp[self._columns['Low']] < df_tmp[self._columns['Low']].shift(-2)),
            True, False
        ))
        df_tmp = df_tmp[['fh', 'fl']]
        df_tmp = df_tmp.rename(columns={'fh': column_name_high, 'fl': column_name_low})
        self.df = self.df.merge(df_tmp, left_index=True, right_index=True)

    def gator(self,
              period_jaws=13,
              period_teeth=8,
              period_lips=5,
              shift_jaws=8,
              shift_teeth=5,
              shift_lips=3,
              column_name_val1='value1',
              column_name_val2='value2'):
        """
        Gator Oscillator
        -----------------
            https://www.metatrader4.com/en/trading-platform/help/analytics/tech_indicators/gator_oscillator

            >>> Indicators.gator(period_jaws=13, period_teeth=8, period_lips=5, shift_jaws=8, shift_teeth=5, shift_lips=3, column_name_val1='value1', column_name_val2='value2')

            :param int period_jaws: Jaws period, default: 13
            :param int period_teeth: Teeth period, default: 8
            :param int period_lips: Lips period, default: 5
            :param int shift_jaws: Jaws shift, default: 8
            :param int shift_teeth: Teeth shift, default: 5
            :param int shift_lips: Lips shift, default: 3
            :param str column_name_val1: Column name for Value1, default value1
            :param str column_name_val2: Column name for Value2, default value2
            :return: None
        """
        df_tmp = self.df[[self._columns['High'], self._columns['Low']]]
        df_tmp = df_tmp.assign(hc=(df_tmp[self._columns['High']] + df_tmp[self._columns['Low']]) / 2)
        df_j = calculate_smma(df_tmp, period_jaws, 'jaws', 'hc')
        df_t = calculate_smma(df_tmp, period_teeth, 'teeth', 'hc')
        df_l = calculate_smma(df_tmp, period_lips, 'lips', 'hc')

        # Shift SMMAs
        df_j['jaws'] = df_j['jaws'].shift(shift_jaws)
        df_t['teeth'] = df_t['teeth'].shift(shift_teeth)
        df_l['lips'] = df_l['lips'].shift(shift_lips)

        df_tmp = df_tmp.merge(df_j, left_index=True, right_index=True)
        df_tmp = df_tmp.merge(df_t, left_index=True, right_index=True)
        df_tmp = df_tmp.merge(df_l, left_index=True, right_index=True)

        # Value1 = Jaws - Teeth (plotted up); Value2 = -(Teeth - Lips) (down).
        df_tmp = df_tmp.assign(val1=df_tmp['jaws'] - df_tmp['teeth'])
        df_tmp = df_tmp.assign(val2=-(df_tmp['teeth'] - df_tmp['lips']))
        df_tmp = df_tmp[['val1', 'val2']]
        df_tmp = df_tmp.rename(columns={'val1': column_name_val1, 'val2': column_name_val2})
        self.df = self.df.merge(df_tmp, left_index=True, right_index=True)

    def ichimoku_kinko_hyo(
            self,
            period_tenkan_sen=9,
            period_kijun_sen=26,
            period_senkou_span_b=52,
            column_name_chikou_span='chikou_span',
            column_name_tenkan_sen='tenkan_sen',
            column_name_kijun_sen='kijun_sen',
            column_name_senkou_span_a='senkou_span_a',
            column_name_senkou_span_b='senkou_span_b'
    ):
        """
        Ichimoku Kinko Hyo
        ------------------
            https://www.metatrader4.com/en/trading-platform/help/analytics/tech_indicators/ichimoku

            >>> Indicators.ichimoku_kinko_hyo(period_tenkan_sen=9, period_kijun_sen=26, period_senkou_span_b=52, column_name_chikou_span='chikou_span', column_name_tenkan_sen='tenkan_sen', column_name_kijun_sen='kijun_sen', column_name_senkou_span_a='senkou_span_a', column_name_senkou_span_b='senkou_span_b')

            :param int period_tenkan_sen: Period for Tenkan-sen, default: 9
            :param int period_kijun_sen: Period for Kijun-sen, default: 26
            :param int period_senkou_span_b: Period for Senkou-span, default: 52
            :param str column_name_chikou_span: Column name for Chikou-span, default: chikou_span
            :param str column_name_tenkan_sen: Column name for Tenkan-sen, default: tenkan_sen
            :param str column_name_kijun_sen: Column name for Kijun-sen, default: kijun_sen
            :param str column_name_senkou_span_a: Column name for Senkou Span A, default: senkou_span_a
            :param str column_name_senkou_span_b: Column name for Senkou Span B, default: senkou_span_b
            :return: None
        """
        df_tmp = self.df[[self._columns['High'], self._columns['Low'], self._columns['Close']]]
        # Tenkan-sen: midpoint of the highest high / lowest low of the period.
        df_tmp = df_tmp.assign(tenkan_h=df_tmp[self._columns['High']].rolling(window=period_tenkan_sen).max())
        df_tmp = df_tmp.assign(tenkan_l=df_tmp[self._columns['Low']].rolling(window=period_tenkan_sen).min())
        df_tmp = df_tmp.assign(tenkan=(df_tmp.tenkan_h + df_tmp.tenkan_l) / 2)
        del df_tmp['tenkan_h']
        del df_tmp['tenkan_l']
        # Kijun-sen: same midpoint over the longer period.
        df_tmp = df_tmp.assign(kijun_h=df_tmp[self._columns['High']].rolling(window=period_kijun_sen).max())
        df_tmp = df_tmp.assign(kijun_l=df_tmp[self._columns['Low']].rolling(window=period_kijun_sen).min())
        df_tmp = df_tmp.assign(kijun=(df_tmp.kijun_h + df_tmp.kijun_l) / 2)
        del df_tmp['kijun_h']
        del df_tmp['kijun_l']
        # Senkou spans are projected forward by the Kijun period.
        df_tmp = df_tmp.assign(ssa=((df_tmp.tenkan + df_tmp.kijun) / 2).shift(period_kijun_sen))
        df_tmp = df_tmp.assign(ssb_h=df_tmp[self._columns['High']].rolling(window=period_senkou_span_b).max())
        df_tmp = df_tmp.assign(ssb_l=df_tmp[self._columns['Low']].rolling(window=period_senkou_span_b).min())
        df_tmp = df_tmp.assign(ssb=((df_tmp.ssb_h + df_tmp.ssb_l) / 2).shift(period_kijun_sen))
        del df_tmp['ssb_h']
        del df_tmp['ssb_l']
        # Chikou span: close shifted backwards by the Kijun period.
        df_tmp = df_tmp.assign(chikou=df_tmp[self._columns['Close']].shift(-period_kijun_sen))
        df_tmp = df_tmp[['tenkan', 'kijun', 'ssa', 'ssb', 'chikou']]
        df_tmp = df_tmp.rename(columns={
            'tenkan': column_name_tenkan_sen,
            'kijun': column_name_kijun_sen,
            'ssa': column_name_senkou_span_a,
            'ssb': column_name_senkou_span_b,
            'chikou': column_name_chikou_span
        })
        self.df = self.df.merge(df_tmp, left_index=True, right_index=True)

    def bw_mfi(self, column_name='bw_mfi'):
        """
        Market Facilitation Index (BW MFI)
        ----------------------------------
            https://www.metatrader4.com/en/trading-platform/help/analytics/tech_indicators/market_facilitation_index

            >>> Indicators.bw_mfi(column_name='bw_mfi')

            :param str column_name: Column name, default: bw_mfi
            :return: None
        """
        df_tmp = self.df[[self._columns['High'], self._columns['Low'], self._columns['Volume']]]
        df_tmp = df_tmp.assign(
            bw=(df_tmp[self._columns['High']] - df_tmp[self._columns['Low']]) / df_tmp[self._columns['Volume']] * 100000
        )
        df_tmp = df_tmp[['bw']]
        df_tmp = df_tmp.rename(columns={'bw': column_name})
        self.df = self.df.merge(df_tmp, left_index=True, right_index=True)

    def momentum(self, period=14, column_name='momentum'):
        """
        Momentum
        --------
            https://www.metatrader4.com/ru/trading-platform/help/analytics/tech_indicators/momentum

            >>> Indicators.momentum(period=14, column_name='momentum')

            :param int period: Period, default: 14
            :param str column_name: Column name, default: momentum
            :return: None
        """
        close = self._columns['Close']
        df_tmp = self.df[[close]]
        df_tmp = df_tmp.assign(
            m=df_tmp[close] / df_tmp[close].shift(period) * 100
        )
        df_tmp = df_tmp[['m']]
        df_tmp = df_tmp.rename(columns={'m': column_name})
        self.df = self.df.merge(df_tmp, left_index=True, right_index=True)

    def mfi(self, period=5, column_name='mfi'):
        """
        Money Flow Index (MFI)
        -----------------------
            https://www.metatrader4.com/en/trading-platform/help/analytics/tech_indicators/money_flow_index

            >>> Indicators.mfi(period=5, column_name='mfi')

            :param int period: Period, default: 5
            :param str column_name: Column name, default: mfi
            :return: None
        """
        high, low, close, volume = self._columns['High'], self._columns['Low'], self._columns['Close'], self._columns[
            'Volume']
        df_tmp = self.df[[high, low, close, volume]]
        # Typical price and raw money flow.
        df_tmp = df_tmp.assign(tp=(df_tmp[high] + df_tmp[low] + df_tmp[close]) / 3)
        df_tmp = df_tmp.assign(mf=df_tmp['tp'] * df_tmp[volume])
        # Split money flow into positive / negative depending on tp direction.
        df_tmp = df_tmp.assign(
            pmf=np.where(
                df_tmp['tp'] > df_tmp['tp'].shift(1),
                df_tmp['mf'],
                0.0
            )
        )
        df_tmp = df_tmp.assign(
            nmf=np.where(
                df_tmp['tp'] < df_tmp['tp'].shift(1),
                df_tmp['mf'],
                0.0
            )
        )
        df_tmp['pmfs'] = df_tmp.pmf.rolling(window=period).sum()
        df_tmp['nmfs'] = df_tmp.nmf.rolling(window=period).sum()
        del df_tmp['tp']
        del df_tmp['mf']
        del df_tmp['pmf']
        del df_tmp['nmf']
        # Round to avoid float noise in the money ratio.
        df_tmp = df_tmp.round(decimals=10)
        df_tmp = df_tmp.assign(mr=df_tmp.pmfs / df_tmp.nmfs)
        df_tmp = df_tmp.assign(mfi=100 - (100 / (1 + df_tmp.mr)))
        df_tmp = df_tmp[['mfi']]
        df_tmp = df_tmp.rename(columns={'mfi': column_name})
        self.df = self.df.merge(df_tmp, left_index=True, right_index=True)

    def macd(
            self,
            period_fast=12,
            period_slow=26,
            period_signal=9,
            column_name_value='macd_value',
            column_name_signal='macd_signal'
    ):
        """
        Moving Average Convergence/Divergence (MACD)
        --------------------------------------------
            https://www.metatrader4.com/en/trading-platform/help/analytics/tech_indicators/macd

            >>> Indicators.macd(self, period_fast=12, period_slow=26, period_signal=9, column_name_value='macd_value', column_name_signal='macd_signal')

            :param int period_fast: Period for Fast EMA, default: 12
            :param int period_slow: Period for Slow EMA, default: 26
            :param int period_signal: Period for Signal Line, default 9
            :param str column_name_value: Column name for MACD Value, default macd_value
            :param str column_name_signal: Column name for MACD Signal, default macd_signal
            :return: None
        """
        close = self._columns['Close']
        df_tmp = self.df[[close]]
        df_tmp = df_tmp.assign(fast=df_tmp[close].ewm(span=period_fast, adjust=False).mean())
        df_tmp = df_tmp.assign(slow=df_tmp[close].ewm(span=period_slow, adjust=False).mean())
        df_tmp = df_tmp.assign(value=df_tmp['fast'] - df_tmp['slow'])
        # Signal line is an SMA of the MACD value.
        df_tmp = df_tmp.assign(signal=df_tmp['value'].rolling(window=period_signal).mean())
        df_tmp = df_tmp[['value', 'signal']]
        df_tmp = df_tmp.rename(columns={'value': column_name_value, 'signal': column_name_signal})
        self.df = self.df.merge(df_tmp, left_index=True, right_index=True)
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
764,
26791,
1330,
15284,
62,
5488,
11,
15284,
62,
82,
2611,
11,
15284,
62,
5796,
2611,
11,
8805,
198,
198,
834,
9641,
834,
796,
705,
16,
13,
24,
... | 2.07911 | 15,017 |
# Points Django at the AppConfig subclass to use for this app.
# NOTE(review): `default_app_config` is deprecated in Django 3.2+ (the
# AppConfig is auto-discovered) — confirm the project's Django version
# before removing this line.
default_app_config = 'info_transportation.apps.InfoTransportationConfig'
| [
12286,
62,
1324,
62,
11250,
796,
705,
10951,
62,
7645,
10189,
13,
18211,
13,
12360,
8291,
10189,
16934,
6,
198
] | 3.65 | 20 |
# Copyright 2019 Jeremy Schulman, nwkautomaniac@gmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file provides the pytest framework and test functions used to dynamically
load NRFU optic-inventory test-cases based on the --nrfu-testcasedir command
line option. This file retrieves the device specific "show" directly from the
device.
"""
import json
import pytest
# import the EOS specific NRFU optic inventory module so the functions in this
# file can generate the test-case names and invoke the actual NRFU validation
# function.
import nrfupytesteos.nrfu_optic_inventory as nrfu
@pytest.fixture(scope='module')
def device_inventory(device):
    """
    Module-scoped fixture that snapshots the device's "show inventory"
    output once and shares it with every test in this module.

    Parameters
    ----------
    device : Device instance

    Returns
    -------
    dict
        Structured data for the "show inventory" command, as produced by
        `nrfu.snapshot_testdata`.
    """
    inventory = nrfu.snapshot_testdata(device)
    return inventory
def pytest_generate_tests(metafunc):
    """
    pytest will invoke this hook allowing us to dynamically load the device
    specific optic status test cases based on the directory the User provided
    as the --nrfu-testcasedir command line argument.

    Parameters
    ----------
    metafunc : Metafunc instance used to parametrize the test function
    """
    testcases_file = metafunc.config._nrfu.testcases_dir.joinpath(
        f'{nrfu.TEST_CASE_NAME}.json')

    # Read inside a context manager; `json.load(path.open())` leaks the
    # file handle until garbage collection.
    with testcases_file.open() as infile:
        testcases = json.load(infile)

    metafunc.parametrize('testcase', testcases, ids=nrfu.name_test)
def test_optic_inventory(device, device_inventory, testcase):
"""
pytest will call this function for each test-case item loaded via the
pytest_generate_tests hook function. This function will in turn call the
actual NRFU EOS specific test function `nrfu.test_optic_inventory` that
will validate the specific `testcase` against the actual data
`device_inventory`. If `nrfu.test_optic_inventory` detects a failure it
will raise a specific NRFU exception. The pytest framework will catch that
exception and report the test as failed.
Parameters
----------
device : Device instance
device_inventory : dict
The EOS structured output of the "show inventory" command
testcase : dict
A specific test-case from the list of all test-cases loaded.
Raises
------
See `nrfu.test_optic_inventory` docs for details
"""
nrfu.test_optic_inventory(
device=device,
actual=device_inventory,
testcase=testcase)
| [
2,
220,
15069,
13130,
11753,
3059,
377,
805,
11,
299,
43021,
2306,
296,
3216,
330,
31,
14816,
13,
785,
198,
2,
198,
2,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
345,
743... | 3.097222 | 1,008 |
# Copyright (c) 2021, Moritz E. Beber.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provide an abstract interface for a sequence alignment service."""
from abc import ABC, abstractmethod
from Bio.Seq import Seq
from ..model import SequenceAlignment
class SequenceAlignmentService(ABC):
"""Define the abstract interface for a sequence alignment service."""
@classmethod
@abstractmethod
def align(
cls,
query: Seq,
target: Seq,
gap_open_penalty: float = 2.0,
gap_extension_penalty: float = 10.0,
**kwargs,
) -> SequenceAlignment:
"""Return a local alignment of two given sequences."""
| [
2,
15069,
357,
66,
8,
33448,
11,
3461,
4224,
412,
13,
1355,
527,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
3... | 3.161725 | 371 |
import sys
input_path = './partial_lihkg.txt'
output_path = './partial_lihkg_no_dup.txt'
seen = set()
with open(input_path,encoding='utf-8',mode='r') as fin, open(output_path,mode='w',encoding='utf-8') as fout:
while True:
line = str(fin.readline())
if len(line) == 0:
break
line_hash = hash(line)
if line_hash not in seen:
fout.write(line)
seen.add(line_hash)
print(sys.getsizeof(seen))
| [
11748,
25064,
198,
198,
15414,
62,
6978,
796,
705,
19571,
47172,
62,
75,
4449,
10025,
13,
14116,
6,
198,
22915,
62,
6978,
796,
705,
19571,
47172,
62,
75,
4449,
10025,
62,
3919,
62,
646,
79,
13,
14116,
6,
198,
198,
15898,
796,
900,
... | 2.066964 | 224 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
###Author
#Nathaniel Watson
#2017-09-18
#nathankw@stanford.edu
###
"""
Clones the specified CrisprModification onto the desired biosamples.
"""
import argparse
import pdb
import pulsarpy.models as models
import pulsarpy.utils
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
21017,
13838,
198,
2,
45,
776,
6321,
14959,
198,
2,
5539,
12,
2931,
12,
1507,
198,
2,
77,
776,
962,
86,
... | 2.700855 | 117 |
import json
from os import path
from django.conf import settings
from web3 import Web3
from web3.middleware import geth_poa_middleware
from fluctua_nft_backend.nfts import models
# load abi
abi_path = path.join(
path.dirname(__file__), "..", "..", "contracts", "RumiaNFT.json"
)
with open(abi_path) as f:
abi = json.load(f)["abi"]
# new mint tx
w3 = Web3(Web3.HTTPProvider(settings.ETHEREUM_NODE_URL, request_kwargs={'timeout': 60}))
w3.middleware_onion.inject(geth_poa_middleware, layer=0)
nft_contract = w3.eth.contract(address=settings.NFT_ADDRESS, abi=abi)
nft_total_supply = nft_contract.functions.totalSupply().call()
# iterate all nfts
for contract_id in range(nft_total_supply):
token_uri = nft_contract.functions.tokenURI(contract_id).call()
# update the nft with the same token URI with the contract_id, we assume only one token uri per nft
# is used in the contract level, eventhough that's not enforced
nfts = models.Nft.objects.get(metadata_ipfs_uri=token_uri).update(contract_id=contract_id)
| [
11748,
33918,
198,
6738,
28686,
1330,
3108,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
3992,
18,
1330,
5313,
18,
198,
6738,
3992,
18,
13,
27171,
1574,
1330,
651,
71,
62,
7501,
64,
62,
27171,
1574,
198,
198,
6738,
... | 2.686528 | 386 |
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst, Gtk
# Initializing threads used by the Gst various elements
GObject.threads_init()
#Initializes the GStreamer library, setting up internal path lists, registering built-in elements, and loading standard plugins.
Gst.init(None)
#handler taking care of linking the decoder's newly created source pad to the sink
#running the shit
start=Main()
start.run()
| [
11748,
308,
72,
198,
12397,
13,
46115,
62,
9641,
10786,
38,
301,
3256,
705,
16,
13,
15,
11537,
198,
6738,
308,
72,
13,
260,
1930,
37765,
1330,
402,
10267,
11,
402,
301,
11,
402,
30488,
198,
198,
2,
20768,
2890,
14390,
973,
416,
26... | 3.038961 | 154 |
from decimal import Decimal
import pytest
import dask.dataframe as dd
from dask.dataframe.utils import assert_eq, PANDAS_VERSION
pd = pytest.importorskip("pandas", minversion="0.23.4")
from pandas.tests.extension.decimal.array import DecimalArray, DecimalDtype
from dask.dataframe.extensions import make_array_nonempty, make_scalar
@make_array_nonempty.register(DecimalDtype)
@make_scalar.register(Decimal)
| [
6738,
32465,
1330,
4280,
4402,
198,
11748,
12972,
9288,
198,
198,
11748,
288,
2093,
13,
7890,
14535,
355,
49427,
198,
6738,
288,
2093,
13,
7890,
14535,
13,
26791,
1330,
6818,
62,
27363,
11,
350,
6981,
1921,
62,
43717,
198,
198,
30094,
... | 2.916084 | 143 |
from setuptools import setup, find_packages
setup(
name="phyllo",
version="0.1",
description="PHilologicallY Linguistic LegwOrk.",
author="Jordan Nguyen, Christan Grant",
author_email="Jordan.Nguyen-1@ou.edu, cgrant@ou.edu",
url="https://github.com/oudalab/phyllo",
download_url="https://github.com/oudalab/phyllo",
license='GPLv3',
#packages=["phyllo", "phyllo.extractors"],
packages=find_packages(exclude=('tests', 'docs')),
keywords=["latin", "search"],
install_requires=[
"cltk",
"beautifulsoup4",
"html5lib"
],
setup_requires=['pytest-runner'],
tests_require=['pytest'],
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Topic :: Text Processing :: Linguistic",
],
long_description="""\
TBD...
"""
)
| [
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
2625,
6883,
18798,
1600,
198,
220,
220,
220,
2196,
2625,
15,
13,
16,
1600,
198,
220,
220,
220,
6764,
2625,
11909,
346,
2078... | 2.416201 | 358 |
import pygame as pg
import random
from settings import *
from main import *
import os
vec = pg.math.Vector2
Game = Game()
Game.show_start_screen()
while Game.running:
Game.level1()
Game.show_go_screen()
pg.quit()
| [
11748,
12972,
6057,
355,
23241,
198,
11748,
4738,
198,
6738,
6460,
1330,
1635,
198,
6738,
1388,
1330,
1635,
198,
11748,
28686,
198,
35138,
796,
23241,
13,
11018,
13,
38469,
17,
628,
198,
8777,
796,
3776,
3419,
198,
8777,
13,
12860,
62,
... | 2.848101 | 79 |
# -*- coding: utf-8 -*-
from .database import *
from .lessons import *
from .news import *
from .organization import *
from .topics import *
from .users import * | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
764,
48806,
1330,
1635,
198,
6738,
764,
1203,
684,
1330,
1635,
198,
6738,
764,
10827,
1330,
1635,
198,
6738,
764,
9971,
1634,
1330,
1635,
198,
6738,
764,
4852... | 3.056604 | 53 |
from unittest import TestCase
from fireant import (
Dimension,
Metric,
)
from fireant.slicer.dimensions import DisplayDimension
from .mocks import slicer
| [
6738,
555,
715,
395,
1330,
6208,
20448,
198,
198,
6738,
2046,
415,
1330,
357,
198,
220,
220,
220,
34024,
11,
198,
220,
220,
220,
3395,
1173,
11,
198,
8,
198,
6738,
2046,
415,
13,
82,
677,
263,
13,
27740,
5736,
1330,
16531,
29271,
... | 3.09434 | 53 |
# Divide and Conquer Algorithm for finding the maximum sub array sum
arr = [-2,-5,6,-2,-3,1,5,-6]
print("Maximum Sub Array Sum")
print(maxSubArraySum(arr,0,len(arr)-1))
print()
# Similar problem: Given a sum find the pair of numbers which add upto the sum
arr = [6,8,2,3,10,11]
print("Two sum problem")
print(twoSumProblemSort(arr,10))
print()
'''
1. Highly depends on the pivot element i.e. the middle element.
2. If the middle element is smaller than both its neighbours, it will tend to finding the element in the left sub half
3. Otherwise right half's left part will get pre-dominance.
'''
arr = [2,20,19,21,23,90,67]
n = len(arr)
print("Find peak element")
print(findPeakEle(arr,0,n-1,n))
print() | [
2,
46894,
290,
40963,
978,
42289,
329,
4917,
262,
5415,
850,
7177,
2160,
198,
198,
3258,
796,
25915,
17,
12095,
20,
11,
21,
12095,
17,
12095,
18,
11,
16,
11,
20,
12095,
21,
60,
198,
4798,
7203,
40541,
3834,
15690,
5060,
4943,
198,
... | 2.898374 | 246 |
from threeML.plugins.OGIPLike import OGIPLike
__instrument_name = "Swift XRT"
# At the moment this is just another name for the GenericOGIPLike spectrum
| [
6738,
1115,
5805,
13,
37390,
13,
7730,
4061,
7594,
1330,
34498,
4061,
7594,
198,
198,
834,
259,
43872,
62,
3672,
796,
366,
10462,
2135,
1395,
14181,
1,
628,
198,
2,
1629,
262,
2589,
428,
318,
655,
1194,
1438,
329,
262,
42044,
7730,
... | 3.391304 | 46 |
import unittest
from pyalink.alink import *
import numpy as np
import pandas as pd | [
11748,
555,
715,
395,
198,
6738,
12972,
282,
676,
13,
282,
676,
1330,
1635,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67
] | 3.037037 | 27 |
#!/usr/bin/env python
'''
TLE propagation with SGP4
============================
This shows that TLEs really should be propagated as TLEs, not as states.
'''
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sorts.propagator import SGP4
from sgp4.api import Satrec
# Uncomment this to see what is actually recovered as mean elements from just one point
# def print_args(func):
# def pfunc(*args, **kwargs):
# #print the arguments, except the "self"
# print(args[1:])
# return func(*args, **kwargs)
# return pfunc
# #hook the sgp4init to print its input elements
# Satrec.sgp4init = print_args(Satrec.sgp4init)
prop = SGP4(
settings = dict(
out_frame='ITRS',
tle_input=True,
),
)
print(prop)
l1 = '1 5U 58002B 20251.29381767 +.00000045 +00000-0 +68424-4 0 9990'
l2 = '2 5 034.2510 336.1746 1845948 000.5952 359.6376 10.84867629214144'
#JD epoch calculated from lines
epoch = 2459099.79381767
t = np.linspace(0,3600*24.0,num=5000)
states_tle = prop.propagate(t, [l1, l2])
prop.set(
tle_input=False,
in_frame='ITRS',
epoch_format = 'jd',
)
states_teme = prop.propagate(t, states_tle[:,0], epoch=epoch, A=1.0, C_R = 1.0, C_D = 1.0)
fig = plt.figure(figsize=(15,15))
ax = fig.add_subplot(111, projection='3d')
ax.plot(states_tle[0,:], states_tle[1,:], states_tle[2,:],"-b")
ax.plot(states_teme[0,:], states_teme[1,:], states_teme[2,:],"-r")
ax.set_title('TLE versus State propagation /w SGP4')
ax2 = fig.add_axes([0.1, 0.1, 0.6, 0.3])
ax2.plot(t/3600.0, np.linalg.norm(states_tle[:3,:] - states_teme[:3,:], axis=0)*1e-3)
ax2.set_ylabel('Absolute position difference [km]')
ax2.set_xlabel('Time [h]')
plt.show() | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
7061,
6,
198,
51,
2538,
43594,
351,
311,
16960,
19,
198,
4770,
25609,
198,
198,
1212,
2523,
326,
309,
2538,
82,
1107,
815,
307,
8928,
515,
355,
309,
2538,
82,
11,
407,
355,
25... | 2.236271 | 783 |
#!/usr/bin/env python3
import database
import http
import subprocess
db = database.Database()
# Delete old tokens and qr images from the server
# Command for testing:
# SELECT token, UNIX_TIMESTAMP(CURRENT_TIMESTAMP) - UNIX_TIMESTAMP(creation_time) FROM join_token WHERE(UNIX_TIMESTAMP(CURRENT_TIMESTAMP) - UNIX_TIMESTAMP(creation_time) > 300);
del_condition = "UNIX_TIMESTAMP(CURRENT_TIMESTAMP) - UNIX_TIMESTAMP(creation_time) > 300"
sql = "SELECT token, creation_time FROM join_token WHERE(" + del_condition + ");"
db.cur.execute(sql)
result = db.cur.fetchall()
# Delete images and database entries
# This is a bit inefficient, but I didn't to end up in a situation where a database entry was removed
# and an image left behind
for row in result:
path = "/var/www/html/img/" + row[0] + ".png"
subprocess.call(["rm", path])
sql = "DELETE FROM join_token WHERE(token = '" + row[0] + "')";
db.cur.execute(sql)
# Delete database entries
#sql = "DELETE FROM join_token WHERE(" + del_condition + ");"
#database.cur.execute(sql);
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
6831,
198,
11748,
2638,
198,
11748,
850,
14681,
198,
198,
9945,
796,
6831,
13,
38105,
3419,
198,
198,
2,
23520,
1468,
16326,
290,
10662,
81,
4263,
422,
262,
4382,
198,
... | 2.883978 | 362 |
# 题意:剪绳子,给你一根长度为 n 的绳子,请把绳子剪成整数长度的 m 段(m、n都是整数,n>1并且m>1),每段绳子的长度记为 k[0],k[1]...k[m-1] 。请问 k[0]*k[1]*...*k[m-1] 可能的最大乘积是多少?
# 例如,当绳子的长度是8时,我们把它剪成长度分别为2、3、3的三段,此时得到的最大乘积是18。
# 题解1: 动态规划。注意到我们所求的乘积可以当作动态规划中的状态。设置1-n的状态数组dp[n]在存,动态归化的状态方程是:dp[i] = max(dp[i-j]*j, (i-j)*j)即对于一段长度i的绳子,其最大乘积dp[i]
# 可以分为对这一段进行裁剪和不裁剪两种情形,对于裁剪情形,我们可以设置另一个指针去循环,得到的中间结果存入dp对以后的判别都有用。dp实际上也是一个查表算数的过程。
# 题解2: 数学方法,通过不等式原理可以知道一段绳子分成尽可能等长时乘积最大,通过求导证明得当这个等长为3的时候最优。所以接下来需要考虑的就是若不能整除3余数分别为12时候的情形。
| [
2,
16268,
95,
246,
35707,
237,
171,
120,
248,
30298,
103,
163,
119,
111,
36310,
171,
120,
234,
163,
119,
247,
19526,
254,
31660,
43718,
117,
165,
243,
123,
41753,
99,
10310,
118,
299,
13328,
248,
226,
163,
119,
111,
36310,
171,
120,... | 0.588308 | 804 |
# Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the tf interface"""
import pytest
tf = pytest.importorskip("tensorflow", minversion="2.1")
import numpy as np
import pennylane as qml
from pennylane.qnode_old import qnode, QNode
from pennylane.tape import JacobianTape
@pytest.mark.parametrize(
"dev_name,diff_method",
[
["default.qubit", "finite-diff"],
["default.qubit", "parameter-shift"],
["default.qubit", "backprop"],
["default.qubit", "adjoint"],
],
)
class TestQNode:
"""Tests the tensorflow interface used with a QNode."""
def test_import_error(self, dev_name, diff_method, mocker):
"""Test that an exception is caught on import error"""
if diff_method == "backprop":
pytest.skip("Test does not support backprop")
mock = mocker.patch("pennylane.interfaces.tf.TFInterface.apply")
mock.side_effect = ImportError()
dev = qml.device(dev_name, wires=2)
qn = QNode(func, dev, interface="tf", diff_method=diff_method)
with pytest.raises(
qml.QuantumFunctionError,
match="TensorFlow not found. Please install the latest version of TensorFlow to enable the 'tf' interface",
):
qn(0.1, 0.1)
def test_execution_no_interface(self, dev_name, diff_method):
"""Test execution works without an interface, and that trainable parameters
are correctly inferred within a gradient tape."""
if diff_method == "backprop":
pytest.skip("Test does not support backprop")
dev = qml.device(dev_name, wires=1)
@qnode(dev, diff_method=diff_method)
a = tf.Variable(0.1)
with tf.GradientTape() as tape:
res = circuit(a)
assert circuit.qtape.interface == "autograd"
# without the interface, the tape simply returns an array of results
assert isinstance(res, np.ndarray)
assert res.shape == tuple()
# without the interface, the tape is unable to deduce
# trainable parameters
assert circuit.qtape.trainable_params == [0]
# gradients should cause an error
with pytest.raises(AttributeError, match="has no attribute '_id'"):
assert tape.gradient(res, a) is None
def test_execution_with_interface(self, dev_name, diff_method):
"""Test execution works with the interface"""
if diff_method == "backprop":
pytest.skip("Test does not support backprop")
dev = qml.device(dev_name, wires=1)
@qnode(dev, interface="tf", diff_method=diff_method)
a = tf.Variable(0.1)
circuit(a)
# if executing outside a gradient tape, the number of trainable parameters
# cannot be determined by TensorFlow
assert circuit.qtape.trainable_params == []
with tf.GradientTape() as tape:
res = circuit(a)
assert circuit.qtape.interface == "tf"
# with the interface, the tape returns tensorflow tensors
assert isinstance(res, tf.Tensor)
assert res.shape == tuple()
# the tape is able to deduce trainable parameters
assert circuit.qtape.trainable_params == [0]
# gradients should work
grad = tape.gradient(res, a)
assert isinstance(grad, tf.Tensor)
assert grad.shape == tuple()
def test_interface_swap(self, dev_name, diff_method, tol):
"""Test that the TF interface can be applied to a QNode
with a pre-existing interface"""
if diff_method == "backprop":
pytest.skip("Test does not support backprop")
dev = qml.device(dev_name, wires=1)
@qnode(dev, interface="autograd", diff_method=diff_method)
from pennylane import numpy as anp
a = anp.array(0.1, requires_grad=True)
res1 = circuit(a)
grad_fn = qml.grad(circuit)
grad1 = grad_fn(a)
# switch to TF interface
circuit.to_tf()
a = tf.Variable(0.1, dtype=tf.float64)
with tf.GradientTape() as tape:
res2 = circuit(a)
grad2 = tape.gradient(res2, a)
assert np.allclose(res1, res2, atol=tol, rtol=0)
assert np.allclose(grad1, grad2, atol=tol, rtol=0)
def test_drawing(self, dev_name, diff_method):
"""Test circuit drawing when using the TF interface"""
x = tf.Variable(0.1, dtype=tf.float64)
y = tf.Variable([0.2, 0.3], dtype=tf.float64)
z = tf.Variable(0.4, dtype=tf.float64)
dev = qml.device(dev_name, wires=2)
@qnode(dev, interface="tf", diff_method=diff_method)
circuit(p1=x, p3=z)
result = circuit.draw()
expected = """\
0: ──RX(0.1)───RX(0.4)──╭C──╭┤ State
1: ──RY(0.06)───────────╰X──╰┤ State
"""
assert result == expected
def test_jacobian(self, dev_name, diff_method, mocker, tol):
"""Test jacobian calculation"""
spy = mocker.spy(JacobianTape, "jacobian")
a = tf.Variable(0.1, dtype=tf.float64)
b = tf.Variable(0.2, dtype=tf.float64)
dev = qml.device(dev_name, wires=2)
@qnode(dev, diff_method=diff_method, interface="tf")
with tf.GradientTape() as tape:
res = circuit(a, b)
assert circuit.qtape.trainable_params == [0, 1]
assert isinstance(res, tf.Tensor)
assert res.shape == (2,)
expected = [tf.cos(a), -tf.cos(a) * tf.sin(b)]
assert np.allclose(res, expected, atol=tol, rtol=0)
res = tape.jacobian(res, [a, b])
expected = [[-tf.sin(a), tf.sin(a) * tf.sin(b)], [0, -tf.cos(a) * tf.cos(b)]]
assert np.allclose(res, expected, atol=tol, rtol=0)
if diff_method == "finite-diff":
spy.assert_called()
elif diff_method == "backprop":
spy.assert_not_called()
def test_jacobian_dtype(self, dev_name, diff_method, tol):
"""Test calculating the jacobian with a different datatype"""
if diff_method == "backprop":
pytest.skip("Test does not support backprop")
a = tf.Variable(0.1, dtype=tf.float32)
b = tf.Variable(0.2, dtype=tf.float32)
dev = qml.device("default.qubit", wires=2)
@qnode(dev, diff_method=diff_method)
circuit.to_tf(dtype=tf.float32)
assert circuit.dtype is tf.float32
with tf.GradientTape() as tape:
res = circuit(a, b)
assert circuit.qtape.interface == "tf"
assert circuit.qtape.trainable_params == [0, 1]
assert isinstance(res, tf.Tensor)
assert res.shape == (2,)
assert res.dtype is tf.float32
res = tape.jacobian(res, [a, b])
assert [r.dtype is tf.float32 for r in res]
def test_jacobian_options(self, dev_name, diff_method, mocker, tol):
"""Test setting finite-difference jacobian options"""
if diff_method != "finite-diff":
pytest.skip("Test only works with finite diff")
spy = mocker.spy(JacobianTape, "numeric_pd")
a = tf.Variable([0.1, 0.2])
dev = qml.device("default.qubit", wires=1)
@qnode(dev, interface="tf", h=1e-8, order=2, diff_method=diff_method)
with tf.GradientTape() as tape:
res = circuit(a)
tape.jacobian(res, a)
for args in spy.call_args_list:
assert args[1]["order"] == 2
assert args[1]["h"] == 1e-8
def test_changing_trainability(self, dev_name, diff_method, mocker, tol):
"""Test changing the trainability of parameters changes the
number of differentiation requests made"""
if diff_method == "backprop":
pytest.skip("Test does not support backprop")
a = tf.Variable(0.1, dtype=tf.float64)
b = tf.Variable(0.2, dtype=tf.float64)
dev = qml.device("default.qubit", wires=2)
@qnode(dev, interface="tf", diff_method="finite-diff")
with tf.GradientTape() as tape:
res = circuit(a, b)
# the tape has reported both gate arguments as trainable
assert circuit.qtape.trainable_params == [0, 1]
expected = [tf.cos(a), -tf.cos(a) * tf.sin(b)]
assert np.allclose(res, expected, atol=tol, rtol=0)
spy = mocker.spy(JacobianTape, "numeric_pd")
jac = tape.jacobian(res, [a, b])
expected = [
[-tf.sin(a), tf.sin(a) * tf.sin(b)],
[0, -tf.cos(a) * tf.cos(b)],
]
assert np.allclose(jac, expected, atol=tol, rtol=0)
# JacobianTape.numeric_pd has been called for each argument
assert len(spy.call_args_list) == 2
# make the second QNode argument a constant
a = tf.Variable(0.54, dtype=tf.float64)
b = tf.constant(0.8, dtype=tf.float64)
with tf.GradientTape() as tape:
res = circuit(a, b)
# the tape has reported only the first argument as trainable
assert circuit.qtape.trainable_params == [0]
expected = [tf.cos(a), -tf.cos(a) * tf.sin(b)]
assert np.allclose(res, expected, atol=tol, rtol=0)
spy.call_args_list = []
jac = tape.jacobian(res, a)
expected = [-tf.sin(a), tf.sin(a) * tf.sin(b)]
assert np.allclose(jac, expected, atol=tol, rtol=0)
# JacobianTape.numeric_pd has been called only once
assert len(spy.call_args_list) == 1
def test_classical_processing(self, dev_name, diff_method, tol):
"""Test classical processing within the quantum tape"""
a = tf.Variable(0.1, dtype=tf.float64)
b = tf.constant(0.2, dtype=tf.float64)
c = tf.Variable(0.3, dtype=tf.float64)
dev = qml.device(dev_name, wires=1)
@qnode(dev, diff_method=diff_method, interface="tf")
with tf.GradientTape() as tape:
res = circuit(a, b, c)
if diff_method == "finite-diff":
assert circuit.qtape.trainable_params == [0, 2]
assert circuit.qtape.get_parameters() == [a * c, c + c**2 + tf.sin(a)]
res = tape.jacobian(res, [a, b, c])
assert isinstance(res[0], tf.Tensor)
assert res[1] is None
assert isinstance(res[2], tf.Tensor)
def test_no_trainable_parameters(self, dev_name, diff_method, tol):
"""Test evaluation if there are no trainable parameters"""
dev = qml.device(dev_name, wires=2)
@qnode(dev, diff_method=diff_method, interface="tf")
a = 0.1
b = tf.constant(0.2, dtype=tf.float64)
with tf.GradientTape() as tape:
res = circuit(a, b)
if diff_method == "finite-diff":
assert circuit.qtape.trainable_params == []
assert res.shape == (2,)
assert isinstance(res, tf.Tensor)
@pytest.mark.parametrize("U", [tf.constant([[0, 1], [1, 0]]), np.array([[0, 1], [1, 0]])])
def test_matrix_parameter(self, dev_name, diff_method, U, tol):
"""Test that the TF interface works correctly
with a matrix parameter"""
a = tf.Variable(0.1, dtype=tf.float64)
dev = qml.device(dev_name, wires=2)
@qnode(dev, diff_method=diff_method, interface="tf")
with tf.GradientTape() as tape:
res = circuit(U, a)
if diff_method == "finite-diff":
assert circuit.qtape.trainable_params == [1]
assert np.allclose(res, -tf.cos(a), atol=tol, rtol=0)
res = tape.jacobian(res, a)
assert np.allclose(res, tf.sin(a), atol=tol, rtol=0)
def test_differentiable_expand(self, dev_name, diff_method, tol):
"""Test that operation and nested tapes expansion
is differentiable"""
dev = qml.device(dev_name, wires=1)
a = np.array(0.1)
p = tf.Variable([0.1, 0.2, 0.3], dtype=tf.float64)
@qnode(dev, diff_method=diff_method, interface="tf")
with tf.GradientTape() as tape:
res = circuit(a, p)
assert circuit.qtape.trainable_params == [1, 2, 3, 4]
assert [i.name for i in circuit.qtape.operations] == ["RX", "Rot", "PhaseShift"]
assert np.all(circuit.qtape.get_parameters() == [p[2], p[0], -p[2], p[1] + p[2]])
expected = tf.cos(a) * tf.cos(p[1]) * tf.sin(p[0]) + tf.sin(a) * (
tf.cos(p[2]) * tf.sin(p[1]) + tf.cos(p[0]) * tf.cos(p[1]) * tf.sin(p[2])
)
assert np.allclose(res, expected, atol=tol, rtol=0)
res = tape.jacobian(res, p)
expected = np.array(
[
tf.cos(p[1]) * (tf.cos(a) * tf.cos(p[0]) - tf.sin(a) * tf.sin(p[0]) * tf.sin(p[2])),
tf.cos(p[1]) * tf.cos(p[2]) * tf.sin(a)
- tf.sin(p[1])
* (tf.cos(a) * tf.sin(p[0]) + tf.cos(p[0]) * tf.sin(a) * tf.sin(p[2])),
tf.sin(a)
* (tf.cos(p[0]) * tf.cos(p[1]) * tf.cos(p[2]) - tf.sin(p[1]) * tf.sin(p[2])),
]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_probability_differentiation(self, dev_name, diff_method, tol):
"""Tests correct output shape and evaluation for a tape
with multiple probs outputs"""
if diff_method == "adjoint":
pytest.skip("The adjoint method does not currently support returning probabilities")
dev = qml.device(dev_name, wires=2)
x = tf.Variable(0.543, dtype=tf.float64)
y = tf.Variable(-0.654, dtype=tf.float64)
@qnode(dev, diff_method=diff_method, interface="tf")
with tf.GradientTape() as tape:
res = circuit(x, y)
expected = np.array(
[
[tf.cos(x / 2) ** 2, tf.sin(x / 2) ** 2],
[(1 + tf.cos(x) * tf.cos(y)) / 2, (1 - tf.cos(x) * tf.cos(y)) / 2],
]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
res = tape.jacobian(res, [x, y])
expected = np.array(
[
[
[-tf.sin(x) / 2, tf.sin(x) / 2],
[-tf.sin(x) * tf.cos(y) / 2, tf.cos(y) * tf.sin(x) / 2],
],
[
[0, 0],
[-tf.cos(x) * tf.sin(y) / 2, tf.cos(x) * tf.sin(y) / 2],
],
]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_ragged_differentiation(self, dev_name, diff_method, tol):
"""Tests correct output shape and evaluation for a tape
with prob and expval outputs"""
if diff_method == "adjoint":
pytest.skip("The adjoint method does not currently support returning probabilities")
dev = qml.device(dev_name, wires=2)
x = tf.Variable(0.543, dtype=tf.float64)
y = tf.Variable(-0.654, dtype=tf.float64)
@qnode(dev, diff_method=diff_method, interface="tf")
with tf.GradientTape() as tape:
res = circuit(x, y)
expected = np.array(
[
tf.cos(x),
(1 + tf.cos(x) * tf.cos(y)) / 2,
(1 - tf.cos(x) * tf.cos(y)) / 2,
]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
res = tape.jacobian(res, [x, y])
expected = np.array(
[
[-tf.sin(x), -tf.sin(x) * tf.cos(y) / 2, tf.cos(y) * tf.sin(x) / 2],
[0, -tf.cos(x) * tf.sin(y) / 2, tf.cos(x) * tf.sin(y) / 2],
]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_sampling(self, dev_name, diff_method):
"""Test sampling works as expected"""
if diff_method == "backprop":
pytest.skip("Sampling not possible with backprop differentiation.")
dev = qml.device(dev_name, wires=2, shots=10)
@qnode(dev, diff_method=diff_method, interface="tf")
with tf.GradientTape() as tape:
res = circuit()
assert res.shape == (2, 10)
assert isinstance(res, tf.Tensor)
def test_second_derivative(self, dev_name, diff_method, mocker, tol):
"""Test second derivative calculation of a scalar valued QNode"""
if diff_method not in {"parameter-shift", "backprop"}:
pytest.skip("Test only supports parameter-shift or backprop")
dev = qml.device(dev_name, wires=1)
@qnode(dev, diff_method=diff_method, interface="tf")
x = tf.Variable([1.0, 2.0], dtype=tf.float64)
with tf.GradientTape() as tape1:
with tf.GradientTape() as tape2:
res = circuit(x)
g = tape2.gradient(res, x)
res2 = tf.reduce_sum(g)
spy = mocker.spy(JacobianTape, "hessian")
g2 = tape1.gradient(res2, x)
if diff_method == "parameter-shift":
spy.assert_called_once()
elif diff_method == "backprop":
spy.assert_not_called()
a, b = x * 1.0
expected_res = tf.cos(a) * tf.cos(b)
assert np.allclose(res, expected_res, atol=tol, rtol=0)
expected_g = [-tf.sin(a) * tf.cos(b), -tf.cos(a) * tf.sin(b)]
assert np.allclose(g, expected_g, atol=tol, rtol=0)
expected_g2 = [
-tf.cos(a) * tf.cos(b) + tf.sin(a) * tf.sin(b),
tf.sin(a) * tf.sin(b) - tf.cos(a) * tf.cos(b),
]
assert np.allclose(g2, expected_g2, atol=tol, rtol=0)
def test_hessian(self, dev_name, diff_method, mocker, tol):
"""Test hessian calculation of a scalar valued QNode"""
if diff_method not in {"parameter-shift", "backprop"}:
pytest.skip("Test only supports parameter-shift or backprop")
dev = qml.device(dev_name, wires=1)
@qnode(dev, diff_method=diff_method, interface="tf")
x = tf.Variable([1.0, 2.0], dtype=tf.float64)
with tf.GradientTape() as tape1:
with tf.GradientTape() as tape2:
res = circuit(x)
g = tape2.gradient(res, x)
spy = mocker.spy(JacobianTape, "hessian")
hess = tape1.jacobian(g, x)
if diff_method == "parameter-shift":
spy.assert_called_once()
elif diff_method == "backprop":
spy.assert_not_called()
a, b = x * 1.0
expected_res = tf.cos(a) * tf.cos(b)
assert np.allclose(res, expected_res, atol=tol, rtol=0)
expected_g = [-tf.sin(a) * tf.cos(b), -tf.cos(a) * tf.sin(b)]
assert np.allclose(g, expected_g, atol=tol, rtol=0)
expected_hess = [
[-tf.cos(a) * tf.cos(b), tf.sin(a) * tf.sin(b)],
[tf.sin(a) * tf.sin(b), -tf.cos(a) * tf.cos(b)],
]
assert np.allclose(hess, expected_hess, atol=tol, rtol=0)
def test_hessian_vector_valued(self, dev_name, diff_method, mocker, tol):
"""Test hessian calculation of a vector valued QNode"""
if diff_method not in {"parameter-shift", "backprop"}:
pytest.skip("Test only supports parameter-shift or backprop")
dev = qml.device(dev_name, wires=1)
@qnode(dev, diff_method=diff_method, interface="tf")
x = tf.Variable([1.0, 2.0], dtype=tf.float64)
with tf.GradientTape(persistent=True) as tape1:
with tf.GradientTape(persistent=True) as tape2:
res = circuit(x)
spy = mocker.spy(JacobianTape, "hessian")
g = tape2.jacobian(res, x, experimental_use_pfor=False)
hess = tape1.jacobian(g, x, experimental_use_pfor=False)
if diff_method == "parameter-shift":
spy.assert_called_once()
elif diff_method == "backprop":
spy.assert_not_called()
a, b = x * 1.0
expected_res = [
0.5 + 0.5 * tf.cos(a) * tf.cos(b),
0.5 - 0.5 * tf.cos(a) * tf.cos(b),
]
assert np.allclose(res, expected_res, atol=tol, rtol=0)
expected_g = [
[-0.5 * tf.sin(a) * tf.cos(b), -0.5 * tf.cos(a) * tf.sin(b)],
[0.5 * tf.sin(a) * tf.cos(b), 0.5 * tf.cos(a) * tf.sin(b)],
]
assert np.allclose(g, expected_g, atol=tol, rtol=0)
expected_hess = [
[
[-0.5 * tf.cos(a) * tf.cos(b), 0.5 * tf.sin(a) * tf.sin(b)],
[0.5 * tf.sin(a) * tf.sin(b), -0.5 * tf.cos(a) * tf.cos(b)],
],
[
[0.5 * tf.cos(a) * tf.cos(b), -0.5 * tf.sin(a) * tf.sin(b)],
[-0.5 * tf.sin(a) * tf.sin(b), 0.5 * tf.cos(a) * tf.cos(b)],
],
]
np.testing.assert_allclose(hess, expected_hess, atol=tol, rtol=0, verbose=True)
def test_hessian_vector_valued_postprocessing(self, dev_name, diff_method, mocker, tol):
"""Test hessian calculation of a vector valued QNode with post-processing"""
if diff_method not in {"parameter-shift", "backprop"}:
pytest.skip("Test only supports parameter-shift or backprop")
dev = qml.device(dev_name, wires=1)
@qnode(dev, diff_method=diff_method, interface="tf")
x = tf.Variable([0.76, -0.87], dtype=tf.float64)
with tf.GradientTape(persistent=True) as tape1:
with tf.GradientTape(persistent=True) as tape2:
res = tf.tensordot(x, circuit(x), axes=[0, 0])
spy = mocker.spy(JacobianTape, "hessian")
g = tape2.jacobian(res, x, experimental_use_pfor=False)
hess = tape1.jacobian(g, x, experimental_use_pfor=False)
if diff_method == "parameter-shift":
spy.assert_called_once()
elif diff_method == "backprop":
spy.assert_not_called()
a, b = x * 1.0
expected_res = a * tf.cos(a) * tf.cos(b) + b * tf.cos(a) * tf.cos(b)
assert np.allclose(res, expected_res, atol=tol, rtol=0)
expected_g = [
tf.cos(b) * (tf.cos(a) - (a + b) * tf.sin(a)),
tf.cos(a) * (tf.cos(b) - (a + b) * tf.sin(b)),
]
assert np.allclose(g, expected_g, atol=tol, rtol=0)
expected_hess = [
[
-(tf.cos(b) * ((a + b) * tf.cos(a) + 2 * tf.sin(a))),
-(tf.cos(b) * tf.sin(a)) + (-tf.cos(a) + (a + b) * tf.sin(a)) * tf.sin(b),
],
[
-(tf.cos(b) * tf.sin(a)) + (-tf.cos(a) + (a + b) * tf.sin(a)) * tf.sin(b),
-(tf.cos(a) * ((a + b) * tf.cos(b) + 2 * tf.sin(b))),
],
]
assert np.allclose(hess, expected_hess, atol=tol, rtol=0)
def test_hessian_ragged(self, dev_name, diff_method, mocker, tol):
"""Test hessian calculation of a ragged QNode"""
if diff_method not in {"parameter-shift", "backprop"}:
pytest.skip("Test only supports parameter-shift or backprop")
dev = qml.device(dev_name, wires=2)
@qnode(dev, diff_method=diff_method, interface="tf")
x = tf.Variable([1.0, 2.0], dtype=tf.float64)
res = circuit(x)
with tf.GradientTape(persistent=True) as tape1:
with tf.GradientTape(persistent=True) as tape2:
res = circuit(x)
spy = mocker.spy(JacobianTape, "hessian")
g = tape2.jacobian(res, x, experimental_use_pfor=False)
hess = tape1.jacobian(g, x, experimental_use_pfor=False)
if diff_method == "parameter-shift":
spy.assert_called_once()
elif diff_method == "backprop":
spy.assert_not_called()
a, b = x * 1.0
expected_res = [
tf.cos(a) * tf.cos(b),
0.5 + 0.5 * tf.cos(a) * tf.cos(b),
0.5 - 0.5 * tf.cos(a) * tf.cos(b),
]
assert np.allclose(res, expected_res, atol=tol, rtol=0)
expected_g = [
[-tf.sin(a) * tf.cos(b), -tf.cos(a) * tf.sin(b)],
[-0.5 * tf.sin(a) * tf.cos(b), -0.5 * tf.cos(a) * tf.sin(b)],
[0.5 * tf.sin(a) * tf.cos(b), 0.5 * tf.cos(a) * tf.sin(b)],
]
assert np.allclose(g, expected_g, atol=tol, rtol=0)
expected_hess = [
[
[-tf.cos(a) * tf.cos(b), tf.sin(a) * tf.sin(b)],
[tf.sin(a) * tf.sin(b), -tf.cos(a) * tf.cos(b)],
],
[
[-0.5 * tf.cos(a) * tf.cos(b), 0.5 * tf.sin(a) * tf.sin(b)],
[0.5 * tf.sin(a) * tf.sin(b), -0.5 * tf.cos(a) * tf.cos(b)],
],
[
[0.5 * tf.cos(a) * tf.cos(b), -0.5 * tf.sin(a) * tf.sin(b)],
[-0.5 * tf.sin(a) * tf.sin(b), 0.5 * tf.cos(a) * tf.cos(b)],
],
]
np.testing.assert_allclose(hess, expected_hess, atol=tol, rtol=0, verbose=True)
def qtransform(qnode, a, framework=tf):
"""Transforms every RY(y) gate in a circuit to RX(-a*cos(y))"""
def construct(self, args, kwargs):
"""New quantum tape construct method, that performs
the transform on the tape in a define-by-run manner"""
# the following global variable is defined simply for testing
# purposes, so that we can easily extract the transformed operations
# for verification.
global t_op
t_op = []
QNode.construct(self, args, kwargs)
new_ops = []
for o in self.qtape.operations:
# here, we loop through all tape operations, and make
# the transformation if a RY gate is encountered.
if isinstance(o, qml.RY):
t_op.append(qml.RX(-a * framework.cos(o.data[0]), wires=o.wires))
new_ops.append(t_op[-1])
else:
new_ops.append(o)
self.qtape._ops = new_ops
self.qtape._update()
import copy
new_qnode = copy.deepcopy(qnode)
new_qnode.construct = construct.__get__(new_qnode, QNode)
return new_qnode
@pytest.mark.parametrize(
"dev_name,diff_method",
[("default.qubit", "finite-diff"), ("default.qubit.tf", "backprop")],
)
def test_transform(dev_name, diff_method, tol):
"""Test an example transform"""
dev = qml.device(dev_name, wires=1)
@qnode(dev, interface="tf", diff_method=diff_method)
weights = tf.Variable([0.32, 0.543], dtype=tf.float64)
a = tf.Variable(0.5, dtype=tf.float64)
with tf.GradientTape(persistent=True) as tape:
# transform the circuit QNode with trainable weight 'a'
new_qnode = qtransform(circuit, a)
# evaluate the transformed QNode
res = new_qnode(weights)
# evaluate the original QNode with pre-processed parameters
res2 = circuit(tf.sin(weights))
# the loss is the sum of the two QNode evaluations
loss = res + res2
# verify that the transformed QNode has the expected operations
assert circuit.qtape.operations == [op1, op2]
assert new_qnode.qtape.operations[0] == t_op[0]
assert new_qnode.qtape.operations[1].name == op2.name
assert new_qnode.qtape.operations[1].wires == op2.wires
# check that the incident gate arguments of both QNode tapes are correct
assert np.all(circuit.qtape.get_parameters() == tf.sin(weights))
assert np.all(new_qnode.qtape.get_parameters() == [-a * tf.cos(weights[0]), weights[1]])
# verify that the gradient has the correct shape
grad = tape.gradient(loss, [weights, a])
assert len(grad) == 2
assert grad[0].shape == weights.shape
assert grad[1].shape == a.shape
# compare against the expected values
assert np.allclose(loss, 1.8244501889992706, atol=tol, rtol=0)
assert np.allclose(grad[0], [-0.26610258, -0.47053553], atol=tol, rtol=0)
assert np.allclose(grad[1], 0.06486032, atol=tol, rtol=0)
| [
2,
15069,
2864,
12,
42334,
47482,
324,
84,
29082,
21852,
3457,
13,
201,
198,
201,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
201,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
... | 1.974771 | 14,626 |
# -*- coding: utf-8 -*-
"""
Created by Pierluigi on 2020-02-01
project: elasticizefiles
"""
import logging
import os
import re
from datetime import datetime
from hashlib import sha256
from multiprocessing import cpu_count
from time import time
from joblib import Parallel
from joblib import delayed
from elasticizefiles.base import Elastic
from elasticizefiles.utils.files import explore_path
from elasticizefiles.utils.files import filestat
from elasticizefiles.utils.files import get_hash
from elasticizefiles.utils.files import get_machine_info
class ElasticizeEngine(object):
""" The main engine to scan, process and store files
:param path: the path to be scanned
:param rules: the rules to be applied
:param elastic_hosts: list of nodes ['{host}:{port}', ...]
:param elastic_index: an index name
:param elastic_doc_type: the elastic doc_type name
:param index_create_if_not_exists: if True create the index if not exists
:param index_drop_if_exists: if True drop the existing index
:param index_config: additional config params to create the index
:param index_mapping: the data type mapping for the current index
:param index_alias_name: an alias name for this index
"""
def crawl_and_process(self, n_jobs=-1):
""" Crawl files and apply extractor on them.
:param n_jobs: number of parallel job, if -1 will be automatically set
to the number of cpus available
"""
if n_jobs < 1:
n_jobs = cpu_count()
tot = 0
buffer = []
tik = time()
for dirname, filename in explore_path(self._path):
full_filename = os.path.abspath(os.path.join(dirname, filename)).replace('\\', '/')
exts = []
for rule_name, rule in self._rules.items():
for pattern in rule['pattern']:
if re.match(pattern, full_filename):
logging.debug(f'matched: {full_filename}')
for extractor in rule['extractor']:
for n, e in extractor.items():
exts.append({f'{rule_name}.{n}': e})
if len(exts) > 0:
buffer.append((full_filename, exts))
if len(buffer) >= 5 * n_jobs:
Parallel(n_jobs=n_jobs, verbose=1, backend='threading')(map(delayed(self._applier), buffer))
tot += len(buffer)
logging.info(f'completed: {tot} files ({(time() - tik) / tot:.2f}s per file)')
buffer = []
if len(buffer) > 0:
Parallel(n_jobs=n_jobs, verbose=1, backend='threading')(map(delayed(self._applier), buffer))
tot += len(buffer)
logging.info(f'completed: {tot} files ({(time() - tik) / tot:.2f}s per file)')
logging.info(f'completed {tot} in {(time() - tik):.2f}s')
@staticmethod
def _check_rules(rules):
""" Check the `rules` to ensure patterns and extractors.
:raise: exceptions if `patterns` are not regex or `extractors` are not
derived from :class:`Extractor`.
"""
c = {
'rules': 0,
'patterns': 0,
'extractors': 0,
}
for _, r in rules.items():
c['rules'] += 1
for p in r['pattern']:
c['patterns'] += 1
re.compile(p)
for e in r['extractor']:
c['extractors'] += 1
for _, obj in e.items():
if not (hasattr(obj, 'extract') and callable(obj.extract)):
logging.warning(f'{type(obj)} does not implement extract seems not to be an Extractor')
raise Exception(f'{type(obj)} seems not to be an Extractor')
for k, v in c.items():
logging.info(f'checked: {v} {k}')
@staticmethod
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
416,
13762,
2290,
25754,
319,
12131,
12,
2999,
12,
486,
198,
16302,
25,
27468,
528,
891,
2915,
198,
37811,
198,
11748,
18931,
198,
11748,
28686,
198,
... | 2.222476 | 1,753 |
"""
TODO:
問題ごとに階層クラスタリングをlinkage()で実行。
fcluster()でクたスター番号の索引付けを行う。
"""
import os
import mysql.connector as cn
from pyquery import PyQuery as pq
from enum import Enum
import re
import subprocess
import sys
import csv
import pandas as pd
import copy
import matplotlib.pyplot as plt
import sys
from pandas.plotting import scatter_matrix
from scipy.cluster.hierarchy import linkage, dendrogram
import numpy as np
from scipy.cluster.hierarchy import fcluster
sys.setrecursionlimit(10000)
# グローバル変数
cnx = cn.connect(
host='127.0.0.1',
user='kosuke',
password='localhost',
port='3306',
database='codeforces'
)
cur = cnx.cursor(buffered=True, dictionary=True)
path_src = r'./../../Database/src_original/src_original/'
path_input_files = r'./../input_files/'
path_output_files = r'./../output_files/'
path_vector_files = r'./../vector_files/'
path_plot_results = r'./../plot_results/'
num_clusters = 8
# problem_idを引数にして、クラスタリングを実行する
# 改善の余地あり
if __name__ == "__main__":
main() | [
37811,
201,
198,
51,
3727,
46,
25,
201,
198,
161,
243,
237,
165,
94,
234,
2515,
242,
30201,
28618,
49694,
236,
161,
109,
97,
14099,
9263,
8943,
23376,
12675,
6527,
26095,
31758,
8726,
496,
3419,
30640,
22522,
253,
26193,
234,
16764,
2... | 2.063953 | 516 |
# Define a function that helps to calculate the area of circle with different radius and print the area only when the area is divisible by 4
# 23.48, 56.78, 45.67, 78.28 Please consider the unit of these values as cm
# Define a function using Python that helps to find the first five character of string in all the possible ways
string = HAPPY CODING
string[9:len(string)]
string[9:]
string[-3:]
# Define a function using Python that helps to find the last 3 character of string in all the possible ways
for i in range(5, 51):
print(1)
i = 5
while i <= 50:
print(i)
i = i + 5
#Design a function that accept 6 values & find the average of those values which are only divisible by 6.
avg = sum / count
print(avg)
| [
2,
2896,
500,
257,
2163,
326,
5419,
284,
15284,
262,
1989,
286,
9197,
351,
1180,
16874,
290,
3601,
262,
1989,
691,
618,
262,
1989,
318,
2659,
12843,
416,
604,
198,
198,
2,
2242,
13,
2780,
11,
7265,
13,
3695,
11,
4153,
13,
3134,
11... | 3.276316 | 228 |
#!/usr/bin/env python
# coding: utf-8
#
# This script converts xrdb (X11) color scheme format to PuTTY registry
# file which creates a new session with proper colors.
#
# Usage:
# xrdb2putty.py path/to/xrdb/files -d path/to/putty/files
#
# Author: Caesar Kabalan <caesar.kabalan@gmail.com>
# Adapted from xrdb2konsole by Stéphane Travostino
# Adapted from xrdb2terminator by Xabier Larrakoetxea
import os
import sys
import re
import argparse
# Takes #000A0B and returns (0, 10, 11)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Translate X color schemes to termiantor format')
parser.add_argument('xrdb_path', type=str, help='path to xrdb files')
parser.add_argument('-d', '--out-directory', type=str, dest='output_path',
help='path where putty config files will be' +
' created, if not provided then will be printed')
args = parser.parse_args()
main(args.xrdb_path, args.output_path)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
2,
198,
2,
770,
4226,
26161,
2124,
4372,
65,
357,
55,
1157,
8,
3124,
7791,
5794,
284,
11634,
51,
9936,
20478,
220,
198,
2,
2393,
543,
8075,
2... | 2.798799 | 333 |
from django.apps import AppConfig
from django.conf import settings
from prometheus_client import start_http_server
import django_prometheus_metrics
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
1552,
36916,
62,
16366,
1330,
923,
62,
4023,
62,
15388,
198,
11748,
42625,
14208,
62,
16963,
36916,
62,
4164,
10466,
198
] | 3.794872 | 39 |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from appsec import APPSEC
from model import ContactInfo
if __name__ == '__main__':
application = _CreateApplication()
main()
| [
2,
15069,
3717,
3012,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
... | 3.675105 | 237 |
try:
from matplotlib import pyplot as plt
import matplotlib
except:
import matplotlib
matplotlib.rcParams['backend'] = 'TkAgg'
from matplotlib import pyplot as plt
import numpy as np
import pdb
class DataPlt():
'''
Dynamic plot context, intended for displaying geometries.
like removing axes, equal axis, dynamically tune your figure and save it.
Args:
figsize (tuple, default=(6,4)): figure size.
filename (filename, str): filename to store generated figure, if None, it will not save a figure.
Attributes:
figsize (tuple, default=(6,4)): figure size.
filename (filename, str): filename to store generated figure, if None, it will not save a figure.
ax (Axes): matplotlib Axes instance.
Examples:
with DynamicShow() as ds:
c = Circle([2, 2], radius=1.0)
ds.ax.add_patch(c)
'''
class NoBoxPlt():
'''
Dynamic plot context, intended for displaying geometries.
like removing axes, equal axis, dynamically tune your figure and save it.
Args:
figsize (tuple, default=(6,4)): figure size.
filename (filename, str): filename to store generated figure, if None, it will not save a figure.
Attributes:
figsize (tuple, default=(6,4)): figure size.
graph_layout (tuple|None): number of graphs, None for single graph.
filename (filename, str): filename to store generated figure, if None, it will not save a figure.
ax (Axes): matplotlib Axes instance.
Examples:
with DynamicShow() as ds:
c = Circle([2, 2], radius=1.0)
ds.ax.add_patch(c)
'''
def _setup_mpl():
'''customize matplotlib.'''
plt.rcParams['lines.linewidth'] = 2
plt.rcParams['axes.labelsize'] = 16
plt.rcParams['axes.titlesize'] = 18
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Ubuntu'
plt.rcParams["mathtext.fontset"] = "dejavuserif"
plt.rcParams['font.monospace'] = 'Ubuntu Mono'
plt.rcParams['axes.labelweight'] = 'bold'
plt.rcParams['xtick.labelsize'] = 16
plt.rcParams['ytick.labelsize'] = 16
plt.rcParams['legend.fontsize'] = 14
plt.rcParams['figure.titlesize'] = 18
| [
28311,
25,
198,
220,
220,
220,
422,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
198,
220,
220,
220,
1330,
2603,
29487,
8019,
198,
16341,
25,
198,
220,
220,
220,
1330,
2603,
29487,
8019,
198,
220,
220,
220,
2603,
29487,
8019,
... | 2.471885 | 907 |
from unittest.mock import patch
import pytest
from pinga.config import BadConfigException, get_kafka_config, get_pg_uri, get_sites_list
@pytest.mark.parametrize(
"sites_file, error_msg", [
("", "Configured sites_list file '' does not exist"),
("notexists.json", "Configured sites_list file 'notexists.json' does not exist"),
("tests/config/sites-test-bad.json",
"Required configuration is missing or malformed: 'sites' is a required property"),
("tests/config/sites-test-bad2.json",
"Configured file tests/config/sites-test-bad2.json is not a valid JSON")
]
)
@patch("pinga.config.ConfigParser.get")
| [
6738,
555,
715,
395,
13,
76,
735,
1330,
8529,
198,
198,
11748,
12972,
9288,
198,
6738,
29400,
64,
13,
11250,
1330,
7772,
16934,
16922,
11,
651,
62,
74,
1878,
4914,
62,
11250,
11,
651,
62,
6024,
62,
9900,
11,
651,
62,
49315,
62,
48... | 2.618677 | 257 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.TuitionISVAgentInfoDTO import TuitionISVAgentInfoDTO
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
33918,
198,
198,
6738,
435,
541,
323,
13,
64,
404,
13,
15042,
13,
9979,
415,
13,
22973,
34184,
1187,
1330,
163... | 2.532468 | 77 |
# Encoding: UTF-8
import argparse
import numpy as np
import torch
import cv2
import os
from src.model import SketchKeras
import src.image_cutter as img_ctr
import src.image_stitcher as img_str
import random
from tqdm import tqdm
# import copy
# import glob
# import time
device = "cuda" if torch.cuda.is_available() else "cpu"
# def cv_imread(file_path):
# cv_img = cv2.imdecode(np.fromfile(file_path,dtype=np.uint8),-1)
# return cv_img
#
#
# def cv_imwrite(filename, src):
# cv2.imencode('.jpg', src)[1].tofile(filename)
if __name__ == "__main__":
args = parse_args()
model = SketchKeras().to(device)
dir_check()
names = os.listdir(args.input)
names2 = os.listdir(args.output + 'train/sketch/') + os.listdir(args.output + 'val/sketch/')
total_number = len(names)
total_train = 0
total_val = 0
# random.shuffle(names)
# names.sort()
# names2.sort()
if len(args.weight) > 0:
model.load_state_dict(torch.load(args.weight))
print(f"{args.weight} loaded..")
for num_of_img in tqdm(range(len(names))):
name = names[num_of_img]
if name not in names2:
try:
img = cv2.imread(args.input + '/' + name)
if (args.size_limit < img.shape[0]) or (args.size_limit < img.shape[1]):
img = resize_img(img, args.size_limit)
img_height = img.shape[0]
img_width = img.shape[1]
except AttributeError:
print(name + ' 读取失败')
else:
train_not_val = random.random() < args.ratio
# 彩图处理部分
if train_not_val:
cv2.imwrite(
args.output + '/train/color/' + name,
img
)
else:
cv2.imwrite(
args.output + '/val/color/' + name,
img
)
# 黑白处理部分
if train_not_val:
cv2.imwrite(
args.output + '/train/gray/' + name,
cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
)
else:
cv2.imwrite(
args.output + '/val/gray/' + name,
cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
)
# 线稿处理部分
part, num_width, num_height = img_ctr.main(img)
new_part = np.zeros((num_height - 1, num_width - 1, 512, 512), dtype=np.uint8)
for i in range(num_height - 1):
for j in range(num_width - 1):
img = part[i][j]
# preprocess
img = preprocess(img)
x = img.reshape(1, *img.shape).transpose(3, 0, 1, 2)
x = torch.tensor(x).float()
# feed into the network
with torch.no_grad():
pred = model(x.to(device))
pred = pred.squeeze()
# postprocess
output = pred.cpu().detach().numpy()
output = postprocess(output, thresh=0.1, smooth=False)
new_part[i][j] = output
'''# cv2.namedWindow('test', cv2.WINDOW_NORMAL)
# cv2.imshow('test', new_part[i][j])
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# input()
# cv2.imwrite(
# args.output + '/' + '%s%s_' % (i, j) + name,
# new_part[i][j]
# )'''
new_img = np.zeros((256 * num_height, 256 * num_width), dtype=np.uint8)
'''下面逻辑比较复杂,调好了就别瞎鸡儿动了 之所以i是-1,j是-2是因为组合rows比组合每一行的part晚一步,
因为part已经全有了,但是跑一轮才能有第一个row。所以处理part和row的逻辑不统一,这里可以改但没必要'''
# 从上到下合并每行
for i in range(num_height - 1):
new_row = np.zeros((512, 256 * num_width), dtype=np.uint8)
# 先把这一行中的第一幅图片粘贴到应有的位置准备就绪
# new_row[0:512, 0:512] = new_part[i][0]
# 从左到右合并一行中的每张图片
for j in range(num_width - 2):
if j == 0:
new_row[0:512, 0:((j + 3) * 256)] = \
img_str.main(
new_part[i][0],
new_part[i][j + 1],
True
)
'''cv2.namedWindow('0+1', cv2.WINDOW_NORMAL)
cv2.imshow('0+1', new_row[0:512, 0:((j + 3) * 256)])
cv2.waitKey(0)
cv2.destroyAllWindows()
input()'''
else:
# 很坑爹,必须用deepcopy不然不是复制一份数据而是复制的索引!!!(然而之后发现问题似乎并不是出在这里
# tmp = copy.deepcopy(new_row[0:512, 0:((j + 2) * 256)])
new_row[0:512, 0:((j + 3) * 256)] = \
img_str.main(
new_row[0:512, 0:((j + 2) * 256)],
new_part[i][j + 1],
True
)
'''# cv2.imwrite(
# args.output + '/' + '%s' % j + name,
# new_row[0:512, 0:((j + 3) * 256)]
# )
# cv2.namedWindow('a full row', cv2.WINDOW_NORMAL)
# cv2.imshow('a full row', new_row)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# input()'''
if i == 0:
new_img[0:512, 0:256 * num_width] = new_row
else:
# tmp = copy.deepcopy(new_img[0:512 + 256 * (i - 1), 0:256 * num_width])
new_img[0:512 + 256 * i, 0:256 * num_width] = \
img_str.main(
new_img[0:512 + 256 * (i - 1), 0:256 * num_width],
new_row,
False
)
'''# cv2.namedWindow('a full img', cv2.WINDOW_NORMAL)
# cv2.imshow('a full img', new_img[256:256 + img_height, 256:256 + img_width])
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# input()'''
if train_not_val:
cv2.imwrite(
args.output + '/train/sketch/' + name,
new_img[256:256 + img_height, 256:256 + img_width]
)
print(name + '处理完成,为train')
total_train += 1
else:
cv2.imwrite(
args.output + '/val/sketch/' + name,
new_img[256:256 + img_height, 256:256 + img_width]
)
print(name + '处理完成,为val')
total_val += 1
print('全部', total_number, '张图片处理完成,其中', total_train, '张为train,', total_val, '张为val')
| [
2,
14711,
7656,
25,
41002,
12,
23,
198,
11748,
1822,
29572,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
269,
85,
17,
198,
11748,
28686,
198,
6738,
12351,
13,
19849,
1330,
17001,
42,
263,
292,
198,
11748,
12351,
... | 1.43768 | 5,207 |
from __future__ import absolute_import, print_function, with_statement
import Crypto.Hash.MD5
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
3601,
62,
8818,
11,
351,
62,
26090,
198,
198,
11748,
36579,
13,
26257,
13,
12740,
20,
198
] | 3.653846 | 26 |
#!/usr/bin/env python
import sys
if(sys.stdin.isatty()):
print("Usage: cat <annotation file> | ./countannots.py")
quit()
ids_to_counts = dict()
# Parse input
for line in sys.stdin:
line_list = line.strip().split("\t")
seqid = line_list[0]
if ids_to_counts.has_key(seqid):
ids_to_counts[seqid] = ids_to_counts[seqid] + 1
else:
ids_to_counts[seqid] = 1
# Print dict contents
ids_list = ids_to_counts.keys()
for seqid in ids_list:
count = ids_to_counts[seqid]
print(str(count) + "\t" + seqid)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
25064,
198,
198,
361,
7,
17597,
13,
19282,
259,
13,
271,
265,
774,
3419,
2599,
198,
220,
220,
220,
3601,
7203,
28350,
25,
3797,
1279,
1236,
14221,
2393,
29,
930,
24457,
9127,
... | 2.116732 | 257 |
from __future__ import absolute_import
import unittest
from main.maincontroller import MainController
from tests.xroad_local_group import xroad_local_group
class XroadRemoveLocalGroupMembers(unittest.TestCase):
"""
UC SERVICE_27 Remove Members from a Local Group
RIA URL: https://jira.ria.ee/browse/XT-284, https://jira.ria.ee/browse/XTKB-154
Depends on finishing other test(s): None
Requires helper scenarios:
xroad_client_registration_in_ss_221\XroadSecurityServerClientRegistration.py
xroad_configure_service_222\XroadConfigureService.py
X-Road version: 6.16.0
"""
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
11748,
555,
715,
395,
198,
198,
6738,
1388,
13,
12417,
36500,
1330,
8774,
22130,
198,
6738,
5254,
13,
87,
6344,
62,
12001,
62,
8094,
1330,
2124,
6344,
62,
12001,
62,
8094,
628,... | 2.951456 | 206 |
import os
import json
import datetime
import logging
import pathlib
import jinja2
import networkx
import importlib.resources as pkg_resources
from collections import Counter
from . import templates, format_logger
from .objects import force_partial_parse
from .parsers import fullprogram
class sasProgram(object):
"""
Abstracted SAS program class.
This class represents a .sas program file. Initialised with a valid file path
to the .sas file, the parser will then parse any valid SAS object it can find
within the file and return them to a list in the contents attribute.
The percentage of complete parsing will also be stored in the parsedRate attribute.
Attributes
----------
path : pathlib.Path
File path to the source of the SAS program
contents : list
List of parsed sasdocs.objects found in the program
failedLoad : int
Flag if there was a failure to load/parse the program file
raw : str
Raw string version of the program file
parsedRate : float
Percentage of the program file successfully parsed
"""
def load_file(self, path):
"""
load_file(path)
Attempt to load the given path and parse into a sasProgram object. Errors logged on failure
to resolve path, read file and parse.
Sets values of path, raw, contents and parsed rate if successful.
Parameters
----------
path : str
Filepath to the SAS file to be parsed.
"""
try:
self.path = pathlib.Path(path).resolve(strict=True)
except Exception as e:
self.path = pathlib.Path(path)
self.logger.error("Unable to resolve path: {}".format(e))
return False
try:
with open(self.path,'r') as f :
self.raw = f.read()
except Exception as e:
self.logger.exception("Unable to read file: {}".format(e))
return False
try:
self.contents, self.parsedRate = force_partial_parse(fullprogram, self.raw, stats=True, mark=True)
except Exception as e:
self.logger.exception("Unable to parse file: {}".format(e))
return False
def get_objects(self, object=None, objectType=None):
"""
get_objects(object=None, objectType=None)
Recursively loop through parsed objects in the programs contents, yielding each object. If the object
is a macro object, enter and yield sas objects found in the macro's contents.
This function will never return a macro object.
If passed with optional objectType, this function will only yield objects of type equal to objectType.
Parameters
----------
object : None, macro
Recursion parameter, if none loop through self.contents else loop through object.contents
objectType : str
If not none, only yield objects where the object is of type objectType.
Yields
------
sasdocs.object
"""
if object is None:
object = self
for obj in object.contents:
if type(obj).__name__ == 'macro':
if objectType == 'macro':
yield obj
yield from self.get_objects(obj, objectType=objectType)
elif objectType is not None:
if type(obj).__name__ == objectType:
yield obj
else:
yield obj
def get_data_objects(self):
"""
get_data_objects
Loop through all datasteps and procedures and add any valid dataobjects
to a list self.dataObjects
"""
self.dataObjects = {}
for validObject in ('dataStep', 'procedure'):
for proc in self.get_objects(objectType=validObject):
for dataset in proc.inputs + proc.outputs:
if dataset.UID not in self.dataObjects.keys():
self.dataObjects[dataset.UID] = [{'obj':dataset, 'start':proc.start, 'end':proc.end}]
else:
self.dataObjects[dataset.UID].append({'obj':dataset, 'start':proc.start, 'end':proc.end})
def build_network(self):
"""
build_network
Generate a JSON containing the network diagram for the SAS code and add to class variable self.networkJSON
Add class varaible self.hasNodes containing a bool as to whether this code contains any valid data objects.
"""
self.networkGraph = networkx.DiGraph()
for validObject in ('dataStep','procedure'):
for obj in self.get_objects(objectType=validObject):
for input in obj.inputs:
if self.networkGraph.has_node(input.UID) is False:
self.networkGraph.add_node(input.UID, library=input._lib, dataset=input._ds, line=obj.start[0])
for output in obj.outputs:
if self.networkGraph.has_node(output.UID) is False:
self.networkGraph.add_node(output.UID, library=output._lib, dataset=output._ds, line=obj.start[0])
if input.UID != output.UID:
if hasattr(obj,'type'):
self.networkGraph.add_edge(input.UID, output.UID, label=f'proc {obj.type}')
else:
self.networkGraph.add_edge(input.UID, output.UID)
network = networkx.readwrite.json_graph.node_link_data(self.networkGraph)
self.hasNodes = len(network['nodes']) > 0
self.networkJSON = json.dumps(network)
def summarise_objects(self, object=None):
"""
summarise_objects(object=None)
Recursively loop through parsed objects in the programs contents, counting each object by object type.
This function will count macros and the contents of said macros.
Parameters
----------
object : None, macro
Recursion parameter, if none loop through self.contents else loop through object.contents
Returns
-------
Counter
Collections Counter object for all sasdoc.object types found in program.
"""
if object is None:
object = self
counter = Counter(type(obj).__name__ for obj in object.contents)
for obj in object.contents:
if type(obj).__name__ == 'macro':
counter += self.summarise_objects(obj)
return counter
def get_extended_info(self):
"""
get_extended_info()
Creates class attributes for extended information about the parsed SAS code.
.. code-block:: rst
name : Filename of the SAS code,
path : Full path to the SAS code,
lines : Number of lines in the SAS code,
lastEdit : Timestamp for the last edit of the SAS code,
summary : Counter object returned by summarise_objects,
parsed : Percentage of the SAS code succesfully parsed
"""
self.name = self.path.stem
self.nameURL = self.name.replace(' ','%20')
self.lines = self.raw.count('\n')
self.lastEdit = "{:%Y-%m-%d %H:%M}".format(datetime.datetime.fromtimestamp(os.stat(self.path).st_mtime))
self.summary = dict(self.summarise_objects())
self.parsed = "{:.2%}".format(self.parsedRate)
def parse_code_documentation(self):
"""
parse_code_documentation
Generate class variables self.documentation and self.documented containing the first set of
comments in the SAS program.
self.documentation: str
The first parsed comments.
self.documented: bool
True if the first object parsed in the SAS code is a comment.
"""
cmnts = []
for obj in self.contents:
if type(obj).__name__ == 'comment':
cmnts.append(obj)
else:
break
if len(cmnts) == 0:
self.documentation = 'No documentation found.'
self.documented = False
else:
self.documentation = '\n'.join([comment.text for comment in cmnts])
self.documented = True
def generate_documentation(self, template='program.md'):
"""
generate_documentation
Generate documentation for the program using the jinja2 template
Returns
-------
str
jinja2 templated version of this program
"""
template = jinja2.Template(pkg_resources.read_text(templates, template))
return template.render(program=self)
| [
11748,
28686,
201,
198,
11748,
33918,
201,
198,
11748,
4818,
8079,
220,
201,
198,
11748,
18931,
201,
198,
11748,
3108,
8019,
201,
198,
11748,
474,
259,
6592,
17,
201,
198,
11748,
3127,
87,
201,
198,
11748,
1330,
8019,
13,
37540,
355,
... | 2.201299 | 4,158 |
import unittest
from collections import Counter
from libs.TfidfModel import TFIDFModel
from libs.nlp_length_functions import NLPLengths
from libs.review_db import ReviewDB
| [
11748,
555,
715,
395,
198,
6738,
17268,
1330,
15034,
198,
6738,
9195,
82,
13,
51,
69,
312,
69,
17633,
1330,
24958,
2389,
37,
17633,
198,
6738,
9195,
82,
13,
21283,
79,
62,
13664,
62,
12543,
2733,
1330,
22879,
6489,
3286,
82,
198,
67... | 3.222222 | 54 |
from Player import Player
from ClassicCardGame import ClassicCardGame
| [
6738,
7853,
1330,
7853,
198,
6738,
13449,
16962,
8777,
1330,
13449,
16962,
8777,
198
] | 5 | 14 |
from django.urls import path, include
#importamos de la primer app lo relacionado
#a las vistas
from pagina1app import views
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.auth import views as auth_views #Password reset
urlpatterns = [
path('',views.home, name="Home"),
path('servicios/',views.servicios, name="Servicios"),
path('albergue/',views.albergue, name="Albergue"),
#path('iniciosesion',views.iniciosesion, name="Inicio de sesión"),
path('contacto/',views.contacto, name="Contacto"),
path('registro_y_adopcion/',views.registro_y_adopcion, name="Formularios registro y adopcion"),
path('formulario_registro_mascota/',views.formulario_registro_mascota, name="Formulario registro"),
path('formulario_adopcion/',views.formulario_adopcion, name="Formulario adopcion"),
path('registro/',views.registro, name="Formulario Registro"),
#Configurando restablecer contraseña
#Video: https://www.youtube.com/watch?v=sFPcd6myZrY&ab_channel=DennisIvy
#Documentación actualizada: https://docs.djangoproject.com/en/4.0/topics/auth/default/
path('accounts/password_reset/',
auth_views.PasswordResetView.as_view(template_name="accounts/password_reset.html"),
name = "password_reset"),
path('accounts/password_reset/done/',
auth_views.PasswordResetDoneView.as_view(template_name="accounts/password_reset_sent.html"),
name = "password_reset_done"),
path('accounts/reset/<uidb64>/<token>/',
auth_views.PasswordResetConfirmView.as_view(template_name="accounts/password_reset_form.html"),
name = "password_reset_confirm"),
path('accounts/reset/done/',
auth_views.PasswordResetCompleteView.as_view(template_name="accounts/password_reset_done.html"),
name = "password_reset_complete"),
#Personalizando tema en admin
#path('jet/', include('jet.urls', 'jet')),
#path('admin/', include(admin.site.urls)),
#path('jet/dashboard/', include('jet.dashboard.urls', 'jet-dashboard'))
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
'''
1 - Submit email form //PasswordResetView.as_view()
2 - Email sent succes massage //PasswordResetDoneView.as_view()
3 - Link to password Reset form in email //PasswordResetConfirmView.as_view()
4 - Password succesfully changed message //PasswordResetCompleteVies.as_view()
'''
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
11,
2291,
198,
2,
11748,
321,
418,
390,
8591,
33270,
598,
2376,
823,
49443,
4533,
198,
2,
64,
39990,
410,
37503,
198,
6738,
42208,
1437,
16,
1324,
1330,
5009,
198,
6738,
42625,
14208,
13,
... | 2.592784 | 970 |
from microbit import *
from ultrasonic import *
rf = Rangefinder(pin1)
display.show(Image.YES)
while True:
dist = rf.distance_cm()
print((dist,))
sleep(10)
| [
6738,
4580,
2545,
1330,
1635,
198,
6738,
23212,
30189,
1330,
1635,
198,
198,
41871,
796,
13667,
22805,
7,
11635,
16,
8,
198,
198,
13812,
13,
12860,
7,
5159,
13,
43335,
8,
198,
198,
4514,
6407,
25,
198,
220,
220,
220,
1233,
796,
374,... | 2.552239 | 67 |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Functional API for applying algorithms in your own training loop.
.. code-block:: python
from composer import functional as cf
from torchvision import models
model = models.resnet50()
# replace some layers with blurpool
cf.apply_blurpool(model)
# replace some layers with squeeze-excite
cf.apply_squeeze_excite(model, latent_channels=64, min_channels=128)
"""
from composer.algorithms.agc import apply_agc
from composer.algorithms.alibi.alibi import apply_alibi
from composer.algorithms.augmix import augmix_image
from composer.algorithms.blurpool import apply_blurpool
from composer.algorithms.channels_last import apply_channels_last
from composer.algorithms.colout import colout_batch
from composer.algorithms.cutmix import cutmix_batch
from composer.algorithms.cutout import cutout_batch
from composer.algorithms.ema import compute_ema
from composer.algorithms.factorize import apply_factorization
from composer.algorithms.ghost_batchnorm.ghost_batchnorm import apply_ghost_batchnorm
from composer.algorithms.label_smoothing import smooth_labels
from composer.algorithms.layer_freezing import freeze_layers
from composer.algorithms.mixup import mixup_batch
from composer.algorithms.progressive_resizing import resize_batch
from composer.algorithms.randaugment import randaugment_image
from composer.algorithms.selective_backprop import select_using_loss, should_selective_backprop
from composer.algorithms.seq_length_warmup import set_batch_sequence_length
from composer.algorithms.squeeze_excite import apply_squeeze_excite
from composer.algorithms.stochastic_depth import apply_stochastic_depth
# All must be manually defined so sphinx automodule will work properly
__all__ = [
'apply_agc',
'apply_alibi',
'augmix_image',
'apply_blurpool',
'apply_channels_last',
'colout_batch',
'compute_ema',
'cutmix_batch',
'cutout_batch',
'apply_factorization',
'apply_ghost_batchnorm',
'smooth_labels',
'freeze_layers',
'mixup_batch',
'resize_batch',
'randaugment_image',
'should_selective_backprop',
'select_using_loss',
'set_batch_sequence_length',
'apply_squeeze_excite',
'apply_stochastic_depth',
]
| [
2,
15069,
33160,
5826,
18452,
5805,
29936,
263,
7035,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
198,
37811,
22203,
282,
7824,
329,
11524,
16113,
287,
534,
898,
3047,
9052,
13,
198,
198,
492,
2438... | 3.001305 | 766 |
"""
Скрипт, отправляющий хэш-сумму файла для проверки на VirusTotal.
Для работы необходим ключ доступа к API сервиса VirusTotal. Ключ можно получить
после регистрации на VirusTotal. Для корректной работы скрипта ключ необходимо
прописать в переменную среды VT_API_KEY. В качестве параметра скрипту передается
md5, sha1 или sha256 хэш файла, который необходимо отправить.
Пример использования:
python get_virustotal_info.py 44d88612fea8a8f36de82e1278abb02f
"""
import sys
import os
import requests
if 'VT_API_KEY' in os.environ:
vt_api_key = os.environ['VT_API_KEY']
else:
print('Не задан ключ доступа к API VirusTotal.')
sys.exit(0)
api_url = 'https://www.virustotal.com/vtapi/v2/file/report'
try:
params = dict(apikey=vt_api_key, resource=str(sys.argv[1]))
except IndexError:
print('Неверные аргументы.')
sys.exit(0)
response = requests.get(api_url, params=params)
if response.status_code == 200:
result=response.json()
if result['response_code'] == 1:
print('Обнаружено:', result['positives'], '/', result['total'])
print('Результаты сканирования:')
for key in result['scans']:
print('\t' + key, '==>', result['scans'][key]['result'])
elif result['response_code'] == -2:
print('Запрашиваемый объект находится в очереди на анализ.')
elif result['response_code'] == 0:
print('Запрашиваемый объект отсутствует в базе VirusTotal.')
else:
print('Ошибка ответа VirusTotal.')
elif response.status_code == 204:
print('Превышено максимально допустимое количество запросов.')
elif response.status_code == 400:
print('Неверный формат запроса.')
elif response.status_code == 403:
print('Неверный ключ доступа к API VirusTotal.')
else:
print('Ошибка ответа VirusTotal.')
| [
37811,
201,
198,
140,
94,
31583,
21169,
18849,
140,
123,
20375,
11,
12466,
122,
20375,
140,
123,
21169,
16142,
38857,
30143,
40623,
141,
236,
141,
231,
18849,
140,
117,
220,
141,
227,
141,
235,
141,
230,
12,
21727,
35072,
43108,
43108,
... | 1.465339 | 1,255 |
#!/usr/bin/python
from functools import total_ordering
@total_ordering
class Event:
"""The class defining an event."""
LEFT = 0 # BentleyOttmann, ShamosHoey
CROSSING = 1 # BentleyOttmann
RIGHT = 2 # BentleyOttmann, ShamosHoey
HORIZONTAL = 3 # HorizontalVertical
VERTICAL = 4 # HorizontalVertical
BOTTOM = 5 # HorizontalVertical
TOP = 6 # HorizontalVertical
def __eq__(self, other):
"""Comparison of events (event1 == event2)."""
return self.point == other.point
def __ne__(self, other):
"""Comparison of events (event1 != event2)."""
return self.point != other.point
def __lt__(self, other):
"""Comparison of events (event1 < event2)."""
return (self.point.x, self.point.y) < (other.point.x, other.point.y)
def __hash__(self):
"""Hashable events."""
return hash((self.point.x, self.point.y, self.type)) # hash based on tuple
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
6738,
1257,
310,
10141,
1330,
2472,
62,
34555,
198,
198,
31,
23350,
62,
34555,
198,
4871,
8558,
25,
198,
220,
220,
220,
37227,
464,
1398,
16215,
281,
1785,
526,
15931,
198,
220,
220,
2... | 2.469231 | 390 |
# Containing API to load the count matrix data
import anndata
import numpy as np
import pandas as pd
from scipy.sparse import csc_matrix
from anndata import read_h5ad
from .gtf_utils import load_genes as read_gff
def convert_to_annData(Rmat_dict, effLen_tensor, cell_note, gene_note,
fill_missing=True):
"""Convert matrices and annotation to annData
"""
Rmat = {}
for _key in Rmat_dict:
Rmat[_key] = Rmat_dict[_key].astype(np.float32)#.toarray()
Rmat.keys()
if fill_missing:
_input_keys = list(Rmat.keys())
_shape = Rmat[_input_keys[0]].shape
for _key in ['0', '1', '2', '3']:
if _key not in _input_keys:
print("key %s not exist in .mtx file, fill with zeros." %(_key))
Rmat[_key] = np.zeros(_shape, dtype=np.float32)
X = Rmat['1'] + Rmat['2'] + Rmat['3']
layers = {}
layers['isoform1'] = Rmat['1']
layers['isoform2'] = Rmat['2']
layers['ambiguous'] = Rmat['3']
layers['poorQual'] = Rmat['0']
obs = pd.DataFrame(cell_note[1:, :],
index = cell_note[1:, 0],
columns = cell_note[0, :])
var = pd.DataFrame(gene_note[1:, :],
index = gene_note[1:, 0],
columns = gene_note[0, :])
Prob_tensor = effLen_tensor / effLen_tensor.sum(2, keepdims=True)
varm = {}
varm['effLen'] = np.append(effLen_tensor[:, 0, :],
effLen_tensor[:, 1, :], axis=1)
varm['p_ambiguous'] = Prob_tensor[:, :, 2]
adata = anndata.AnnData(X=X, obs=obs, var=var, varm=varm,
layers=layers, dtype='float32')
return adata
def read_npz(path):
"""Read count data in the npz format into anaData
"""
brie_dat = np.load(path, allow_pickle=True)
cell_note = brie_dat['cell_note']
gene_note = brie_dat['gene_note']
Rmat_dict = brie_dat['Rmat_dict'].item()
effLen_tensor = brie_dat['effLen_tensor']
adata = convert_to_annData(Rmat_dict, effLen_tensor, cell_note, gene_note)
return adata
def read_brieMM(path):
"""Read brie count generated Market martrix: dictionary-format
sparse count matrix
"""
fid = open(path, 'r')
lines = fid.readlines()
fid.close()
# check mtx file format
n_gene, n_cell, size = lines[1].strip().split("\t")
n_gene, n_cell, size = int(n_gene), int(n_cell), int(size)
dat_dict = {}
for _line in lines[2:]:
i, j, _str = _line.strip().split("\t")
_dat = eval(_str)
for _key in _dat:
if _key not in dat_dict:
dat_dict[_key] = []
dat_dict[_key].append([i, j, _dat[_key]])
mat_dict = {}
for _key in dat_dict:
_mat = np.array(dat_dict[_key], dtype='int')
_mat[:, :2] -= 1 # 0-based index
mat_dict[_key] = csc_matrix(
(_mat[:, 2], (_mat[:, 0], _mat[:, 1])),
shape=(n_gene, n_cell)
)
return mat_dict
def fetch_gene_info(genes, fraglen=None, out_file=None):
"""
Extract the isoform information from a list of Gene
"""
out_all = []
for g in genes:
tran_ids, tran_lens = [], []
for t in g.trans:
tran_ids.append(t.tranID)
tran_lens.append(str(t.tranL))
out_list = [g.geneID, g.geneName, ",".join(tran_lens),
",".join(tran_ids)]
out_all.append(out_list)
if out_file is not None:
fid = open(out_dir + "/gene_note.tsv", "w")
fid.writelines("GeneID\tGeneName\tTranLens\tTranIDs\n")
for _line_val in out_all:
fid.writelines("\t".join(_line_val) + "\n")
fid.close()
return out_all
def dump_results(adata):
"""Dump splicing phenotype detection results to pandas.DataFrame
"""
df = adata.var[['n_counts', 'n_counts_uniq']].copy()
df['n_counts'] = df['n_counts'].astype(int)
df['n_counts_uniq'] = df['n_counts_uniq'].astype(int)
df['cdr'] = np.array((adata.X > 0).mean(0))[0, :]
cdr = np.array((adata.X > 0).mean(0))[0, :]
if 'intercept' in adata.varm:
df['intercept'] = adata.varm['intercept'][:, 0]
else:
df['intercept'] = [None] * adata.shape[1]
if 'sigma' in adata.varm:
df['sigma'] = adata.varm['sigma'][:, 0]
else:
df['sigma'] = [None] * adata.shape[1]
if 'brie_param' in adata.uns:
LRT_index = adata.uns['brie_param']['LRT_index']
else:
LRT_index = []
## feature columns
for i in range(len(LRT_index)):
_idx = LRT_index[i]
if 'Xc_ids' in adata.uns and adata.uns['Xc_ids'] is not None:
_Xc_ids = adata.uns['Xc_ids'][_idx]
else:
_Xc_ids = 'X%d' %i
df[_Xc_ids + '_ceoff'] = adata.varm['cell_coeff'][:, i]
df[_Xc_ids + '_ELBO_gain'] = adata.varm['ELBO_gain'][:, i]
df[_Xc_ids + '_pval'] = adata.varm['pval'][:, i]
df[_Xc_ids + '_FDR'] = adata.varm['fdr'][:, i]
return df
| [
2,
2345,
1397,
7824,
284,
3440,
262,
954,
17593,
1366,
198,
198,
11748,
281,
358,
1045,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
629,
541,
88,
13,
82,
29572,
1330,
269,
1416,
62,
6759,
860... | 1.93959 | 2,632 |
import argparse
import os
import time
import sqlite3
from tqdm.auto import tqdm
import funcx
from coffea.processor.funcx.detail import MappedFuncXFuture
funcx.set_file_logger('funcx.log')
client = funcx.sdk.client.FuncXClient(funcx_service_address='https://dev.funcx.org/api/v1')
parser = argparse.ArgumentParser()
parser.add_argument("--tasks_per_core", default=10, help="number of cores per task")
parser.add_argument("--sleep", default=60, help="number of cores per task")
parser.add_argument("--tag", default='after yadu updates (ndcrc)', help="any extra info to save to DB")
parser.add_argument("--cores_per_manager", default=16)
# parser.add_argument("--endpoint", default='07ad6996-3505-4b86-b95a-aa33acf842d8')
parser.add_argument("--endpoint", default='8bd5cb36-1eec-4769-b001-6b34fa8f9dc7') # ndcrc
parser.add_argument("--batch_size", default=5000)
args = parser.parse_args()
db = sqlite3.connect('data.db')
db.execute("""create table if not exists analyses(
tag text,
start_submit,
end_submit,
returned int,
tasks int,
connected_managers int,
cores_per_manager int,
sleep_seconds int
)""")
db.commit()
db.close()
if not os.path.isfile('sleep_uuid.txt'):
sleep_uuid = client.register_function(sleep)
with open('sleep_uuid.txt', 'w') as f:
print(sleep_uuid, file=f)
else:
with open('sleep_uuid.txt', 'r') as f:
sleep_uuid = f.read().strip()
fake_args = [
('VBFHToBB_M_125_13TeV_powheg_pythia8_weightfix',
'root://cmseos.fnal.gov//eos/uscms/store/user/lpcbacon/dazsle/zprimebits-v15.01/skim/VBFHToBB_M_125_13TeV_powheg_pythia8_weightfix_0.root',
'otree',
59309,
0),
'file:///scratch365/awoodard/funcx'
]
with open(os.path.expanduser('~/connected_managers'), 'r') as f:
connected_managers = int(f.read())
cores = connected_managers * args.cores_per_manager
tasks = int(args.tasks_per_core * cores)
task_args = [fake_args for _ in range(tasks)]
batched_args = [task_args[i:i + args.batch_size] for i in range(0, len(task_args), args.batch_size)]
start_submit = time.time()
futures = []
for batch in batched_args:
futures += [MappedFuncXFuture(
batch,
args.endpoint,
sleep_uuid
)
]
print('submitted batch of {} tasks'.format(len(batch)))
end_submit = time.time()
print([f.result() for f in futures])
returned = time.time()
print('finished in {:.0f}s'.format(returned - start_submit))
db = sqlite3.connect('data.db')
db.execute("""insert into analyses(
tag,
start_submit,
end_submit,
returned,
tasks,
connected_managers,
cores_per_manager,
sleep_seconds
)
values (?, ?, ?, ?, ?, ?, ?, ?)""", (
args.tag,
start_submit,
end_submit,
returned,
tasks,
connected_managers,
args.cores_per_manager,
args.sleep
)
)
db.commit()
db.close()
| [
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
640,
198,
11748,
44161,
578,
18,
198,
6738,
256,
80,
36020,
13,
23736,
1330,
256,
80,
36020,
198,
198,
11748,
25439,
87,
198,
6738,
763,
16658,
64,
13,
41341,
13,
20786,
87,
13,
49170... | 2.328275 | 1,252 |
[
{
'date': '2017-01-01',
'description': 'Nieuwjaarsdag',
'locale': 'nl-NL',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2017-04-14',
'description': 'Goede Vrijdag',
'locale': 'nl-NL',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2017-04-16',
'description': 'Eerste Paasdag',
'locale': 'nl-NL',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2017-04-17',
'description': 'Tweede Paasdag',
'locale': 'nl-NL',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2017-04-27',
'description': 'Koningsdag',
'locale': 'nl-NL',
'notes': '',
'region': '',
'type': 'NV'
},
{
'date': '2017-05-04',
'description': 'Dodenherdenking',
'locale': 'nl-NL',
'notes': '',
'region': '',
'type': 'F'
},
{
'date': '2017-05-05',
'description': 'Bevrijdingsdag',
'locale': 'nl-NL',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2017-05-25',
'description': 'Hemelvaartsdag',
'locale': 'nl-NL',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2017-06-04',
'description': 'Eerste Pinksterdag',
'locale': 'nl-NL',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2017-06-05',
'description': 'Tweede Pinksterdag',
'locale': 'nl-NL',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2017-12-05',
'description': 'Sinterklaas',
'locale': 'nl-NL',
'notes': '',
'region': '',
'type': 'RF'
},
{
'date': '2017-12-15',
'description': 'Koninkrijksdag',
'locale': 'nl-NL',
'notes': '',
'region': '',
'type': 'NV'
},
{
'date': '2017-12-25',
'description': 'Eerste Kerstdag',
'locale': 'nl-NL',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2017-12-26',
'description': 'Tweede Kerstdag',
'locale': 'nl-NL',
'notes': '',
'region': '',
'type': 'NRF'
}
] | [
58,
198,
220,
220,
220,
1391,
198,
220,
220,
220,
220,
220,
220,
220,
705,
4475,
10354,
705,
5539,
12,
486,
12,
486,
3256,
198,
220,
220,
220,
220,
220,
220,
220,
705,
11213,
10354,
705,
45,
22304,
86,
6592,
945,
67,
363,
3256,
... | 1.67074 | 1,473 |
# Faça um programa que leia três números e mostre qual é o maior e qual é o menor.
a = int(input('Digite primeiro valor: '))
b = int(input('Digite segundo valor: '))
c = int(input('Digite terceiro valor: '))
#Verificar quem é o menor número
menor = a
if b < a and b < c:
menor = b
if c < a and c < b:
menor = c
# verificar quem é o maior numero
maior = a
if b > a and b > c:
maior = b
if c > a and c > b:
maior = c
print ('O menor valor digitado foi {}'.format(menor))
print ('O maior valor digitado foi {}'.format(maior)) | [
2,
18350,
50041,
23781,
1430,
64,
8358,
443,
544,
491,
25792,
82,
299,
21356,
647,
418,
304,
749,
260,
4140,
38251,
267,
17266,
1504,
304,
4140,
38251,
267,
1450,
273,
13,
198,
198,
64,
796,
493,
7,
15414,
10786,
19511,
578,
6994,
7... | 2.343478 | 230 |
from random import randint
from time import sleep
jogo = dict()
print('-=' * 25)
print(f'{"Jogo de dados":^50}')
print('-=' * 25)
n = input('\nJogador 1 aperte [Q] e [ENTER] para jogar o dado: ')
jogo['jogador1'] = randint(1, 6)
rolar_dados()
print(f'O dado caiu no {jogo["jogador1"]}')
print('-' * 50)
n = input('\nJogador 2 aperte [Q] e [ENTER] para jogar o dado: ')
jogo['jogador2'] = randint(1, 6)
rolar_dados()
print(f'O dado caiu no {jogo["jogador2"]}')
print('-' * 50)
n = input('\nJogador 3 aperte [Q] e [ENTER] para jogar o dado: ')
jogo['jogador3'] = randint(1, 6)
rolar_dados()
print(f'O dado caiu no {jogo["jogador3"]}')
print('-' * 50)
n = input('\nJogador 4 aperte [Q] e [ENTER] para jogar o dado: ')
jogo['jogador4'] = randint(1, 6)
rolar_dados()
print(f'O dado caiu no {jogo["jogador4"]}')
print('-' * 50)
print(f'{"Resultados":^50}')
print('-' * 50)
cont = 0
for k, v in sorted(jogo.items(), key=lambda item: item[1], reverse=True):
cont += 1
print(f' {cont}º lugar - O {k} tirou o numero {v}')
print('-' * 50) | [
6738,
4738,
1330,
43720,
600,
198,
6738,
640,
1330,
3993,
198,
198,
73,
24076,
796,
8633,
3419,
198,
198,
4798,
10786,
12,
11639,
1635,
1679,
8,
198,
4798,
7,
69,
6,
4895,
41,
24076,
390,
9955,
418,
1298,
61,
1120,
92,
11537,
198,
... | 2.145492 | 488 |
"""
Script for aggregation of IDEA events. Aggregates are marked in json IDEA message and send to output kafka topic.
Example of marking: {..., '_aida’:{'Duplicate’: 'true’, 'Continuing’: <first_idea_id>}, ...}
"""
import os
import sys
import argparse
import ujson as json
from idea import Idea
from kafka import KafkaProducer
from pyspark import SparkConf
from pyspark import SparkContext
from pyspark import AccumulatorParam
from pyspark.streaming.kafka import KafkaUtils
from pyspark.streaming import StreamingContext
class IDAccumulatorParam(AccumulatorParam):
"""
Custom accumulator for duplicate/continuous events
- for each key there is a tuple (ID, DetectTime)
"""
def get_args():
"""
Argument parser
"""
argp = argparse.ArgumentParser(
description="Set Kafka topic, zookeeper for input and output")
argp.add_argument("-i", "--input",
default="input",
dest="input",
action="store",
help="set Kafka input topic name")
argp.add_argument("-o", "--output",
default="aggregated",
dest="output",
action="store",
help="set Kafka output topic name")
argp.add_argument("-zi", "--zookeeper-in",
default="localhost:2181",
dest="zookeeper_in",
action="store",
help="set zookeeper of input for Kafka")
argp.add_argument("--kafka-brokers-out",
default="localhost:9092",
dest="kafka_brokers_out",
action="store",
help="set Kafka brokers for output topic")
argp.add_argument("-of", "--offset",
default="offset01",
dest="offset",
action="store",
help="set offset of Kafka topic")
return argp.parse_args()
def sendToKafka(producer, topic_out, list):
"""
Save idea messages to kafka topic
:param producer: producer for output topic
:param topic_out: topic to which will be messages sent
:param list: list with idea messages
"""
for idea in list:
if idea:
producer.send(topic_out, json.dumps(idea).encode('utf-8'))
def leaveOlder(x, y):
"""
In reduceByKey, leave idea with older DetectTime (x[1]/y[1])
:param x: first element in reduce process
:param y: second element in reduce process
"""
if x[1] <= y[1]:
return x
else:
return y
def markDuplicate(key, idea, oldest_idea_id):
"""
Mark duplicate. Mark is for statistics purpose.
:return: marked key, IDEA
"""
# If idea is present
if idea:
# Equality of ID's in tuple and idea, if true mark will be added
if oldest_idea_id != idea.id:
# Add True mark for duplicate event
idea.aida_duplicate='True'
# Return tuple: key for next deduplication phase and IDEA
return (key[0:4], idea)
def markContinuing(key, idea, oldest_idea_id, oldest_idea_detect_time, accum):
"""
Mark IDEA as continuing event.
:return: marked key, IDEA
"""
# If idea is present
if idea:
# Equality of ID's in tuple and idea, if true mark will be added
if oldest_idea_id != idea.id:
# Add {key: (ID, DetectTime)} to accumulator
accum.add(dict([(key, (oldest_idea_id, oldest_idea_detect_time))]))
# Add id mark for continuing event
idea.aida_continuing=oldest_idea_id
# Return tuple: key for next deduplication phase and IDEA
return (key[0:3], idea)
def markOverlapp(key, idea, oldest_idea_id, oldest_idea_node_name):
"""
Mark IDEA as overlapping event.
:return: marked key, IDEA
"""
# If idea is present
if idea:
# Node.Name has to be different, if true mark will be added
if oldest_idea_node_name != idea.node_name:
# Add id mark for overlapping event
idea.aida_overlapping=oldest_idea_id
# Return tuple: key for next deduplication phase and IDEA
return (key[0:2], idea)
def markNonoverlapp(idea, oldest_idea_id, oldest_idea_node_name, oldest_idea_target):
"""
Mark IDEA as non-overlapping event.
:return: marked IDEA
"""
# If idea is present
if idea:
# Node.Name and Target has to be different, if true mark will be added
if oldest_idea_node_name != idea.node_name and oldest_idea_target != idea.target_ip4:
# Add id mark for non-overlapping event
idea.aida_non_overlapping=oldest_idea_id
# Return only IDEA
return idea
def getAccumulatorValue(accum):
"""
Make array of objects which will be then parallelized to RDD
:param acc: value of accumulator is key:(ID, DetectTime) of duplicates in last batch
:return rddQueue: list of tuples which will be parallized into RDD and joined with DStream
"""
rddQueue = accum.value.items()
# Content of accum is going to be joined with current DStream - current content is not needed anymore
accum.add('del')
return rddQueue
if __name__ == '__main__':
args = get_args()
# Creating Kafka stuffs
topic_in = args.input
topic_out = args.output
zookeper_in = args.zookeeper_in
kafka_brokers_out = args.kafka_brokers_out
offset = args.offset
# Windows and batch
# Slide windows has to be the same size as bacth, otherwise applying window on first tuples is needed
batch_size = 60
slide_size = 60
window_duplicate = 300
window_continuing = 4200
window_overlapping = 300
window_nonoverlapping = 300
# Creating Spark/Streaming context and conf
sc = SparkContext(appName=" ".join(sys.argv[0:]))
ssc = StreamingContext(sc, batch_size)
# Creating accumulator
accum = sc.accumulator({}, IDAccumulatorParam())
""" Kafka init producer and load messages from topic """
# Producer for topic_out
producer = KafkaProducer(bootstrap_servers=kafka_brokers_out)
# Kafka messages are input, groupId (id8641233) has to be unique - if 2 consumers are present
kafkaStream = KafkaUtils.createStream(ssc, zookeper_in, offset, {topic_in: 1})
""" Mark duplicates """
# Build key:(category, source_ip, target_ip, node_name, detect_time) !Do not change order! and value:idea
tuples = kafkaStream.map(lambda message: Idea(message[1])). \
map(lambda idea: ((idea.category,
idea.source_ip4,
idea.target_ip4,
idea.node_name,
idea.detect_time), idea))
# Reduce current batch
batch_reduced = tuples.mapValues(lambda idea: (idea.id, idea.detect_time)). \
reduceByKey(lambda x, y: leaveOlder(x, y))
# Apply window and leave only needed idea values (id, detect_time) then reduce
reduced = batch_reduced.window(window_duplicate, slide_size). \
reduceByKey(lambda x, y: leaveOlder(x, y))
# Join reduced and raw IDEA with key, apply marking function (see def for tuple members) and filter None
tuples = reduced.leftOuterJoin(tuples). \
map(lambda tuple: markDuplicate(tuple[0], tuple[1][1], tuple[1][0][0])). \
filter(lambda x: x)
# Filter duplicates and map to take only idea (without key)
duplicates = tuples.filter(lambda tuple: tuple[1].aida_duplicate). \
map(lambda tuple: tuple[1])
# Filter non-duplicates and leave key
tuples = tuples.filter(lambda tuple: not tuple[1].aida_duplicate)
""" Mark continuing events """
# Leave only needed idea values and union with keys in accumulator
joined = tuples.mapValues(lambda idea: (idea.id, idea.detect_time)). \
transform(lambda rdd: rdd.union(sc.parallelize(getAccumulatorValue(accum))))
# Reduce current batch
batch_reduced = joined.reduceByKey(lambda x, y: leaveOlder(x, y))
# Apply window and reduce
window_reduced = batch_reduced.window(window_continuing, slide_size). \
reduceByKey(lambda x, y: leaveOlder(x, y))
# Join reduced and raw IDEA with key, apply marking function and filter None
tuples = window_reduced.leftOuterJoin(tuples). \
map(lambda tuple: markContinuing(tuple[0], tuple[1][1], tuple[1][0][0], tuple[1][0][1], accum)). \
filter(lambda x: x)
""" Overlapping sensors """
# Reduce current batch
batch_reduced = tuples.mapValues(lambda idea: (idea.id, idea.detect_time, idea.node_name)). \
reduceByKey(lambda x, y: leaveOlder(x, y))
# Apply window and leave only needed idea values then reduce
reduced = batch_reduced.window(window_overlapping, slide_size). \
reduceByKey(lambda x, y: leaveOlder(x, y))
# Join reduced and raw IDEA with key, apply marking function and filter None
tuples = reduced.leftOuterJoin(tuples). \
map(lambda tuple: markOverlapp(tuple[0], tuple[1][1], tuple[1][0][0], tuple[1][0][2])). \
filter(lambda x: x)
""" Non-overlapping sensors """
# Reduce current batch
batch_reduced = tuples.mapValues(lambda idea: (idea.id, idea.detect_time, idea.node_name, idea.target_ip4)). \
reduceByKey(lambda x, y: leaveOlder(x, y))
# Apply window and leave only needed idea values then reduce
reduced = batch_reduced.window(window_nonoverlapping, slide_size). \
reduceByKey(lambda x, y: leaveOlder(x, y))
# Join reduced and raw IDEA with key, apply marking,filter None, union with duplicates, make json and send to Kafka
reduced.leftOuterJoin(tuples). \
map(lambda tuple: markNonoverlapp(tuple[1][1], tuple[1][0][0], tuple[1][0][2], tuple[1][0][3])). \
filter(lambda x: x). \
union(duplicates). \
map(lambda idea: idea.json). \
foreachRDD(lambda rdd: sendToKafka(producer, topic_out, rdd.collect()))
ssc.start()
ssc.awaitTermination()
| [
37811,
201,
198,
12327,
329,
46500,
286,
4522,
16412,
2995,
13,
19015,
2301,
689,
389,
7498,
287,
33918,
4522,
16412,
3275,
290,
3758,
284,
5072,
479,
1878,
4914,
7243,
13,
201,
198,
17934,
286,
18730,
25,
1391,
986,
11,
705,
62,
3054... | 2.422985 | 4,168 |
import os
import os.path as op
import shutil
import tempfile
from bento.compat.api.moves \
import \
unittest
from bento.core.node \
import \
create_root_with_source_tree
from bento.core.testing \
import \
create_fake_package_from_bento_infos
from bento.commands.command_contexts \
import \
ConfigureContext
from bento.commands.hooks \
import \
create_hook_module, find_pre_hooks, find_post_hooks
from bento.commands.tests.utils \
import \
prepare_configure
| [
11748,
28686,
198,
11748,
28686,
13,
6978,
355,
1034,
198,
11748,
4423,
346,
198,
11748,
20218,
7753,
198,
198,
6738,
17157,
78,
13,
5589,
265,
13,
15042,
13,
76,
5241,
3467,
198,
220,
220,
220,
1330,
3467,
198,
220,
220,
220,
220,
... | 2.38565 | 223 |
"""
Test `sinethesizer.effects` package.
Author: Nikolay Lysenko
"""
| [
37811,
198,
14402,
4600,
31369,
316,
956,
7509,
13,
34435,
63,
5301,
13,
198,
198,
13838,
25,
48127,
323,
46749,
32720,
198,
37811,
198
] | 2.916667 | 24 |
import textwrap
import tokenize
import ast
import io
import unittest
import flake8_intsights
from . import texts
| [
11748,
2420,
37150,
198,
11748,
11241,
1096,
198,
11748,
6468,
198,
11748,
33245,
198,
11748,
555,
715,
395,
198,
198,
11748,
781,
539,
23,
62,
29503,
2337,
198,
198,
6738,
764,
1330,
13399,
628
] | 3.411765 | 34 |
# usando generator, ele comsome menos memoria
generator = (i ** 2 for i in range(10) if i % 2 == 0)
#aqui diferente das vercoes anteriores ele faz o sistema de stremer.
#ele vai carregando sobre a nessecidade!
for numero in generator:
print(numero) | [
2,
514,
25440,
17301,
11,
9766,
401,
11246,
1450,
418,
1066,
7661,
198,
8612,
1352,
796,
357,
72,
12429,
362,
329,
1312,
287,
2837,
7,
940,
8,
611,
1312,
4064,
362,
6624,
657,
8,
198,
198,
2,
36129,
72,
288,
361,
9100,
68,
288,
... | 2.635417 | 96 |
import typing
from dataclasses import dataclass
import yaml
import attr
@attr.s(slots=True)
@attr.s(slots=True)
@attr.s(slots=True)
param_location_choices = ('query', 'header', 'path', 'cookie')
@attr.s(slots=True)
@attr.s(slots=True)
if __name__ == '__main__':
info = InfoObject(title='SLAP API', version='1.0.0')
| [
11748,
19720,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
198,
11748,
331,
43695,
198,
11748,
708,
81,
628,
198,
198,
31,
35226,
13,
82,
7,
6649,
1747,
28,
17821,
8,
628,
198,
31,
35226,
13,
82,
7,
6649,
1747,
28,
17... | 2.414286 | 140 |
import networkx as nx
from phylo._core.phylogenytree import SASCPhylogeny
import pytest as pt
| [
11748,
3127,
87,
355,
299,
87,
198,
6738,
37763,
78,
13557,
7295,
13,
746,
2645,
6644,
20760,
631,
1330,
35516,
34,
2725,
2645,
6644,
88,
198,
11748,
12972,
9288,
355,
42975,
628,
198,
220,
220,
220,
220,
220,
220,
220,
220,
198
] | 2.5 | 42 |
import hyppo.discrim
import hyppo.independence
import hyppo.ksample
import hyppo.time_series
import hyppo.kgof
import hyppo.tools
import hyppo.d_variate
import hyppo.conditional_independence
__version__ = "0.3.2"
| [
11748,
2537,
16634,
13,
15410,
3036,
198,
11748,
2537,
16634,
13,
39894,
198,
11748,
2537,
16634,
13,
591,
1403,
198,
11748,
2537,
16634,
13,
2435,
62,
25076,
198,
11748,
2537,
16634,
13,
10025,
1659,
198,
11748,
2537,
16634,
13,
31391,
... | 2.972222 | 72 |
import math
import logging
import numpy as np
import torch
from torch.optim import Optimizer
from torch.nn.utils import clip_grad_norm_
logger = logging.getLogger(__name__)
def lr_schedule(step, total_steps, lr, lr_warmup, lr_cooldown):
"""Calculate a scheduled learning rate value."""
if lr_warmup is None:
lr_warmup = 0
else:
if 0 < lr_warmup <= 1:
lr_warmup = math.ceil(lr_warmup * total_steps)
if step <= lr_warmup:
return lr * (step / lr_warmup)
if lr_cooldown == 'linear':
return lr * (1.0 - (step - lr_warmup) /
(total_steps - lr_warmup))
else:
return lr
class Adam(Optimizer):
"""
Implementation of the Adam optimization algorithm including ideas from:
- Adam: A Method for Stochastic Optimization (https://arxiv.org/abs/1412.6980)
- Decoupled Weight Decay Regularization (https://arxiv.org/abs/1711.05101)
- https://github.com/huggingface/pytorch-pretrained-BERT/
"""
def __init__(self, named_parameters, lr=1e-3, beta1=0.9, beta2=0.999,
eps=1e-8, weight_decay=None, max_grad_norm=None,
lr_warmup=None, lr_cooldown=None, total_steps=None):
"""
Arguments:
named_parameters: Iterable yielding both the name of the
parameter as well as the parameter itself.
lr: Initial learning rate.
beta1, beta2: Exponential decay rates for moving average of
gradient values (beta1) and square gradient values (beta2).
eps: Term added to the denominator of the update rule
to improve numerical stability.
weight_decay: Weight decay factor. None means no decay.
max_grad_norm: Maximum norm for gradient clipping. None means no clipping.
lr_warmup: Linearly increase the learning rate for the first steps.
Supported values: None (disabled), int (number of warmup steps),
float in (0,1] (warmup steps as a ratio of total_steps).
lr_cooldown: Schedule followed to reduce the learning rate.
Supported values: None (disabled), 'linear' (decrease it linearly
to zero after total_steps steps are completed).
total_steps: Total number of parameter update steps.
Required for certain lr_cooldown schedules.
"""
if not lr >= 0.0:
raise ValueError('Invalid learning rate: {}'.format(lr))
for beta in (beta1, beta2):
if not 0.0 <= beta <= 1.0:
raise ValueError('Invalid beta value: {}'.format(beta))
if not eps >= 0.0:
raise ValueError('Invalid epsilon value: {}'.format(eps))
if not (weight_decay is None or 0.0 <= weight_decay <= 1.0):
raise ValueError('Invalid weight decay: {}'.format(weight_decay))
if not (max_grad_norm is None or max_grad_norm >= 0.0):
raise ValueError('Invalid maximum norm for gradient clipping: {}'.format(max_grad_norm))
if not (total_steps is None or total_steps > 0):
raise ValueError('Invalid total number of steps: {}'.format(total_steps))
if not (lr_warmup is None or lr_warmup >= 0.0):
raise ValueError('Invalid learning rate warmup: {}'.format(lr_warmup))
if lr_warmup is not None and 0 < lr_warmup <= 1 and total_steps is None:
raise ValueError('total_steps is required if 0 < lr_warmup <= 1')
if lr_cooldown not in (None, 'linear'):
raise ValueError('Invalid learning rate cooldown: {}'.format(lr_cooldown))
if lr_cooldown == 'linear' and total_steps is None:
raise ValueError("total_steps is required if lr_cooldown is 'linear'")
# Collect the parameters
param_count = 0
with_weight_decay, without_weight_decay = [], []
for name, param in named_parameters:
if param.requires_grad:
param_size = np.prod(param.size())
param_count += param_size
if weight_decay is not None and \
name.endswith('.weight') and 'norm' not in name:
with_weight_decay.append(param)
logger.info('Parameter: %s (size = %d, weight decay = %g)',
name, param_size, weight_decay)
else:
without_weight_decay.append(param)
logger.info('Parameter: %s (size = %d, weight decay = None)',
name, param_size)
param_groups = [
{'params': with_weight_decay, 'weight_decay': weight_decay},
{'params': without_weight_decay, 'weight_decay': None},
]
logger.info('Optimizing %d parameters', param_count)
defaults = dict(lr=lr, beta1=beta1, beta2=beta2, eps=eps,
weight_decay=weight_decay, max_grad_norm=max_grad_norm,
lr_warmup=lr_warmup, lr_cooldown=lr_cooldown,
total_steps=total_steps)
super().__init__(param_groups, defaults)
def step(self, closure=None):
"""Perform a single parameter update step."""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
# State initialization
state = self.state[p]
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values (m)
state['grad_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values (v)
state['squared_grad_avg'] = torch.zeros_like(p.data)
beta1, beta2 = group['beta1'], group['beta2']
grad_avg, squared_grad_avg = state['grad_avg'], state['squared_grad_avg']
# Gradient clipping
if group['max_grad_norm'] is not None:
clip_grad_norm_(p, group['max_grad_norm'])
# Decay the first and second moment running average coefficients
grad = p.grad.data
grad_avg.mul_(beta1).add_(1 - beta1, grad)
squared_grad_avg.mul_(beta2).addcmul_(1 - beta2, grad, grad)
# Calculate the effective step size
state['step'] += 1
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
scheduled_lr = lr_schedule(
state['step'], group['total_steps'],
group['lr'], group['lr_warmup'], group['lr_cooldown'])
step_size = scheduled_lr * math.sqrt(bias_correction2) / bias_correction1
# Update the parameters
denom = squared_grad_avg.sqrt().add_(group['eps'])
p.data.addcdiv_(-step_size, grad_avg, denom)
if group['weight_decay'] is not None:
p.data.add_(-group['weight_decay'], p.data)
return loss
| [
11748,
10688,
198,
11748,
18931,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
28034,
198,
6738,
28034,
13,
40085,
1330,
30011,
7509,
198,
6738,
28034,
13,
20471,
13,
26791,
1330,
10651,
62,
9744,
62,
27237,
62,
628,
198,
6404,
1... | 2.111724 | 3,446 |
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from keras.datasets import mnist
import time
(x_train,y_train),(x_test,y_test)= tf.keras.datasets.mnist.load_data()
x_train = tf.keras.utils.normalize(x_train, axis=1)
x_test = tf.keras.utils.normalize(x_test, axis=1)
x_train = x_train.reshape(-1, 28, 28, 1)
x_test = x_test.reshape(-1, 28, 28, 1)
'''
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128,activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(128,activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(10,activation=tf.nn.softmax))
'''
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, kernel_size=3, activation='relu'),
tf.keras.layers.MaxPooling2D(pool_size=2),
tf.keras.layers.Conv2D(64, kernel_size=3, activation='relu'),
tf.keras.layers.MaxPooling2D(pool_size=2),
tf.keras.layers.Conv2D(128, kernel_size=3, activation='relu'),
tf.keras.layers.MaxPooling2D(pool_size=2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(10, activation='softmax')
])
EPOCHS = 10
model.compile(optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=['accuracy'])
history = model.fit(x_train,y_train,epochs=EPOCHS)
plt.plot(history.history['accuracy'], label='accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.ylim([0.5, 1])
plt.legend(loc='lower right')
val_loss, val_acc = model.evaluate(x_test,y_test)
m_name = "mnist_numbers.model"
model.save(m_name)
| [
11748,
11192,
273,
11125,
355,
48700,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
201,
198,
6738,
41927,
292,
13,
19608,
292,
1039,
1330,
285,
77,
396,
201,
198,
11748,
6... | 2.132986 | 767 |
import atexit
import functools
from time import clock
line = "="*40
start = clock()
atexit.register(endlog)
| [
11748,
379,
37023,
198,
11748,
1257,
310,
10141,
198,
6738,
640,
1330,
8801,
198,
198,
1370,
796,
366,
2625,
9,
1821,
198,
198,
9688,
796,
8801,
3419,
198,
378,
10198,
13,
30238,
7,
437,
6404,
8,
628,
220,
220,
220,
220,
220,
220,
... | 2.644444 | 45 |
import pytest
import torch
import inspect
import itertools
from block.models.networks.fusions import fusions
bsize = 2
x_arg = [
[torch.randn(bsize, 10), torch.randn(bsize, 20)],
[torch.randn(bsize, 20), torch.randn(bsize, 10)],
[torch.randn(bsize, 10), torch.randn(bsize, 10)]
]
F_arg = [F for k, F in fusions.__dict__.items() \
if inspect.isclass(F) and k != 'ConcatMLP' and k != 'MCB']
args = [(F, x) for F, x in itertools.product(F_arg, x_arg)]
@pytest.mark.parametrize('F, x', args)
@pytest.mark.parametrize('x', x_arg)
@pytest.mark.mcb
@pytest.mark.parametrize('x', x_arg)
| [
11748,
12972,
9288,
198,
11748,
28034,
198,
11748,
10104,
198,
11748,
340,
861,
10141,
198,
6738,
2512,
13,
27530,
13,
3262,
5225,
13,
69,
15880,
1330,
277,
15880,
198,
198,
1443,
1096,
796,
362,
198,
87,
62,
853,
796,
685,
198,
220,
... | 2.228782 | 271 |
import sqlite3
| [
11748,
44161,
578,
18,
628
] | 3.2 | 5 |
# -*- coding: utf-8 -*-
import itertools
from typing import Iterable, Union
import numpy as np
def get_neighborhood_vectors_axes(radius: Union[float, Iterable[float]]):
"""This neighborhood definition lies on a sphere. Returns a list of 6
positions (up, down, left, right, behind, in front) at exactly `radius`
length. Good for RNN, for example.
If radius is an iterable of floats, returns a multi-radius neighborhood.
Hint: If you know your radius in mm only, use
dwi_ml.data.processing.space.convert_world_to_vox.convert_world_to_vox
Ex: radius_vox = convert_mm2vox(radius_mm, affine_mm_to_vox)
Note: We only support isometric voxels! Adding isometry would also require
remembering where the x,y,z directions are.
Parameters
----------
radius : number (int or float) or list or np.ndarray.
Distance to each neighbor on a sphere (in voxel space).
Returns
-------
neighborhood : np.ndarray[float]
A list of vectors with last dimension = 3 (x,y,z coordinate for each
neighbour per respect to the origin). Hint: You can now interpolate your
DWI data in each direction around your point of interest to get your
neighbourhood.
"""
tmp_axes = np.identity(3)
unit_axes = np.concatenate((tmp_axes, -tmp_axes))
if not isinstance(radius, Iterable):
radius = [radius]
neighborhood = []
for r in radius:
neighborhood.append(unit_axes * r)
neighborhood = np.asarray(neighborhood)
return neighborhood
def get_neighborhood_vectors_grid(radius: int):
"""Returns a list of points similar to the original voxel grid. Ex: with
radius 1, this is 27 points. With radius 2, that's 125 points. Good for
CNN, for example.
Note: We only support isometric voxels! Adding isometry would also require
remembering where the x,y,z directions are.
Parameters
----------
radius : int
Size of the neighborhood in each direction, in voxel space. Final
neighboorhood will be of dimension 2*radius x 2*radius x 2*radius.
Returns
-------
neighborhood : np.ndarray[float]
A list of vectors with last dimension = 3 (x,y,z coordinate for each
neighbour per respect to the origin). Hint: You can now interpolate
your DWI data in each direction around your point of interest to get
your neighbourhood.
"""
assert type(radius) == int
neighborhood = []
the_range = range(-radius, radius + 1)
for x, y, z in itertools.product(the_range, the_range, the_range):
neighborhood.append([x, y, z])
neighborhood = np.asarray(neighborhood)
return neighborhood
def extend_coordinates_with_neighborhood(coords: np.ndarray,
neighborhood_translations: np.ndarray):
"""
From a list of coordinates (e.g. [p1,p2,p3]) and neighborhood translation
vectors (e.g. [up, down, left, right]), get a new list of coordinates
with the product of all translations applied to all coordinates (new length
will be `n_coords + n_coords x n_translations`)
Parameters
------
coords: np.ndarray with shape (N, 3)
An array of [x,y,z] coordinates [p1, p2, ...].
neighborhood_translations: np.ndarray with shape (M, 3)
A list of translation vectors to apply to each point in coords.
Returns
-------
coords: np.ndarray with shape (N x (M+1), 3)
The new coordinates with all translations applied to all
coordinates, including the original coordinates.
"""
n_coords = coords.shape[0]
n_neighbors = neighborhood_translations.shape[0]
# 1. We repeat each coordinate to have the neighborhood size (+ 1 for
# original coordinate) before applying translations.
# coords = [p1 p1... p2 p2 ... ...]'
coords = np.repeat(coords, n_neighbors + 1, axis=0)
# 2. We translate each point based on the translations vector.
# Ex, if neighborhood_translations = [here, up, down, left, right, ...]
# coords = [p1+0 p1+up p1+down ..., p2+0 p2+up, p2+down, ...]'
tiled_vectors = np.tile(np.concatenate((np.zeros((1, 3)),
neighborhood_translations)),
(n_coords, 1))
coords += tiled_vectors
return coords
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
340,
861,
10141,
198,
6738,
19720,
1330,
40806,
540,
11,
4479,
198,
198,
11748,
299,
32152,
355,
45941,
628,
198,
4299,
651,
62,
710,
394,
2865,
2894,
62,
303,
... | 2.704857 | 1,606 |
# proxy module
from pyface.ui.qt4.python_editor import *
| [
2,
15741,
8265,
198,
6738,
12972,
2550,
13,
9019,
13,
39568,
19,
13,
29412,
62,
35352,
1330,
1635,
198
] | 3 | 19 |
#!/usr/bin/env python2.7
'''
Android Activity
Most Used Android Apps
'''
import re
from wrcsv import *
#--activity in HTML format--
a=gethtmldata()
#--Regex Expression--
androidapp=re.findall(r'\">([\w\s]+)</a><br>([\w\s\,\:]+)</div>',a)
#--write data to csv file--
writecsv("Android_Application_Activity",['APPLICATION','DATE'],androidapp)
#--read data from csv file--
readcsv("Android_Application_Activity")
#--print output to terminal-- | [
2,
48443,
14629,
14,
8800,
14,
24330,
220,
197,
29412,
17,
13,
22,
198,
7061,
6,
198,
25934,
24641,
198,
6943,
16718,
5565,
27710,
198,
7061,
6,
198,
11748,
302,
198,
6738,
1319,
40664,
1330,
1635,
198,
198,
2,
438,
21797,
287,
1153... | 2.713415 | 164 |
# Generated by Django 3.2 on 2021-08-11 08:54
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
319,
33448,
12,
2919,
12,
1157,
8487,
25,
4051,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.966667 | 30 |
from django.conf.urls import url
from . import views
from django.contrib.auth.decorators import login_required
urlpatterns = [
url('', views.MapTemplate, name="default"),
url('template2/', views.MapTemplate2, name="default"),
url('template3/', views.MapTemplate3, name="default"),
]
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
6738,
764,
1330,
5009,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
12501,
273,
2024,
1330,
17594,
62,
35827,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,... | 3.041237 | 97 |
# Crichton, Admirable Source Configuration Management
# Copyright 2012 British Broadcasting Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# South introspection rules for django-audit-log
# from http://south.aeracode.org/ticket/693
# can probably removed when south goes 1.0 (using 0.7)
# invoked from models.py
from south.modelsinspector import add_introspection_rules
from django.contrib.auth.models import User
__rules_added = False
| [
2,
3864,
488,
1122,
11,
21177,
540,
8090,
28373,
8549,
198,
2,
15069,
2321,
3517,
32250,
10501,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
345,
743,
407,
779,
... | 3.581481 | 270 |
from .solution import minimumDeletions
from ..utils import io, printer
print('Enter string only consisting of "a"s and "b"s:', end=' ')
s = io.get(str).lower()
violating_characters = set(s) - set('ab')
assert len(violating_characters) == 0, f'Violating characters: {", ".join(violating_characters)}'
deletions = minimumDeletions(s)
print(f'Need at least {printer.pluralise(deletion=deletions)} to make "{s}" balanced.') | [
6738,
764,
82,
2122,
1330,
5288,
5005,
1616,
507,
198,
6738,
11485,
26791,
1330,
33245,
11,
20632,
198,
198,
4798,
10786,
17469,
4731,
691,
17747,
286,
366,
64,
1,
82,
290,
366,
65,
1,
82,
25,
3256,
886,
11639,
705,
8,
198,
82,
79... | 2.910345 | 145 |
from flask import Flask, session
from automlk.print import *
from automlk.folders import has_subfolders
app = Flask(__name__)
SESSION_TYPE = 'redis'
app.config.from_object('config')
app.jinja_env.globals.update(print_summary=print_summary)
app.jinja_env.globals.update(print_summary=print_summary)
app.jinja_env.globals.update(print_list=print_list)
app.jinja_env.globals.update(print_score=print_score)
app.jinja_env.globals.update(print_score_std=print_score_std)
app.jinja_env.globals.update(print_value=print_value)
app.jinja_env.globals.update(print_rounded=print_rounded)
app.jinja_env.globals.update(print_duration=print_duration)
app.jinja_env.globals.update(print_params=print_params)
app.jinja_env.globals.update(print_indent=print_indent)
app.jinja_env.globals.update(print_other_metrics=print_other_metrics)
app.jinja_env.globals.update(has_subfolders=has_subfolders)
from app import views
| [
6738,
42903,
1330,
46947,
11,
6246,
198,
6738,
3557,
75,
74,
13,
4798,
1330,
1635,
198,
6738,
3557,
75,
74,
13,
11379,
364,
1330,
468,
62,
7266,
11379,
364,
198,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
198,
50,
47621,
62,
252... | 2.605187 | 347 |
# Generated by Django 2.1.1 on 2018-10-05 12:39
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
16,
319,
2864,
12,
940,
12,
2713,
1105,
25,
2670,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2016 Daniel Estevez <daniel@destevez.net>.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy
from gnuradio import gr
import collections
import pmt
import array
import hdlc
class hdlc_framer(gr.basic_block):
"""
docstring for block hdlc_framer
"""
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
220,
198,
2,
15069,
1584,
7806,
412,
4169,
33425,
1279,
67,
6321,
31,
16520,
68,
33425,
13,
3262,
28401,
198,
2,
... | 3.494792 | 384 |
import six
from nefertari.utils.dictset import dictset
from nefertari.utils.utils import issequence
class FieldData(object):
""" Keeps field data in a generic format.
Is passed to field processors.
"""
def __init__(self, name, new_value, params=None):
"""
:param name: Name of field.
:param new_value: New value of field.
:param params: Dict containing DB field init params.
E.g. min_length, required.
"""
self.name = name
self.new_value = new_value
self.params = params
@classmethod
def from_dict(cls, data, model):
""" Generate map of `fieldName: clsInstance` from dict.
:param data: Dict where keys are field names and values are
new values of field.
:param model: Model class to which fields from :data: belong.
"""
model_provided = model is not None
result = {}
for name, new_value in data.items():
kwargs = {
'name': name,
'new_value': new_value,
}
if model_provided:
kwargs['params'] = model.get_field_params(name)
result[name] = cls(**kwargs)
return result
| [
11748,
2237,
198,
6738,
497,
69,
861,
2743,
13,
26791,
13,
11600,
2617,
1330,
8633,
2617,
198,
6738,
497,
69,
861,
2743,
13,
26791,
13,
26791,
1330,
318,
43167,
628,
628,
628,
628,
628,
198,
4871,
7663,
6601,
7,
15252,
2599,
198,
22... | 2.256318 | 554 |
import numpy as np
from ._skyproj import _Skyproj
__all__ = ['Skyproj', 'McBrydeSkyproj', 'LaeaSkyproj', 'MollweideSkyproj',
'HammerSkyproj', 'EqualEarthSkyproj', 'GnomonicSkyproj',
'ObliqueMollweideSkyproj']
class _Stadium:
"""Extension class to create a stadium-shaped projection boundary.
"""
class _Ellipse21:
"""Extension class to create an ellipse-shaped projection boundary.
"""
class _Circle:
"""Extension class to create a circular projection boundary.
"""
# The default skyproj is a cylindrical Plate Carree projection.
# docstring inherited
# Plate Carree
# The following skyprojs include the equal-area projections that are tested
# and known to work.
# docstring inherited
# McBryde-Thomas Flat Polar Quartic
# docstring inherited
# Equal Earth
class ObliqueMollweideSkyproj(_Skyproj, _Ellipse21):
"""Oblique Mollweide Projection.
Parameters
----------
lon_0 : `float`, optional
Central longitude of the underlying Mollweide projection.
lat_p : `float`, optional
Latitude of the North Pole of the unrotated coordinate system.
lon_p : `float`, optional
Longitude of the North Pole of the unrotated coordinate system.
**kwargs : `dict`, optional
Additional kwargs for `skyproj._Skyproj`.
"""
# Oblique Mollweide
@property
@property
@property
@property
# The Gnomonic (tangent plane) projection is not equal-area and
# is not available for full-sky plots. It is only for small
# zoomed regions
| [
11748,
299,
32152,
355,
45941,
198,
198,
6738,
47540,
15688,
1676,
73,
1330,
4808,
22308,
1676,
73,
198,
198,
834,
439,
834,
796,
37250,
22308,
1676,
73,
3256,
705,
9742,
33,
563,
2934,
22308,
1676,
73,
3256,
705,
43,
44705,
22308,
16... | 2.80354 | 565 |
import sys
import logging
import argparse
from logging.config import fileConfig
from calculator import ExpressionCalculator
def main():
"""
Driver function that collects user inputs and passes it to the underlying engine
:returns integer indicating success or failure
"""
logger = logging.getLogger(__name__)
logger.debug('Parsing arguments passed to the program')
parser = argparse.ArgumentParser(description='Process a set of expression files in a directory.')
parser.add_argument('source', help='source directory for input files')
parser.add_argument('target', help='destination directory for output files')
parser.add_argument('--extension', default='.xml', help='Extension for spec files')
args = parser.parse_args()
logger.info('Initializing the application engine')
application = ExpressionCalculator(args.source, args.target, args.extension)
logger.info('Initiating processing')
application.process()
return 0
if __name__ == '__main__':
fileConfig('logging.ini')
sys.exit(main())
| [
11748,
25064,
198,
11748,
18931,
198,
11748,
1822,
29572,
198,
198,
6738,
18931,
13,
11250,
1330,
2393,
16934,
198,
198,
6738,
28260,
1330,
41986,
9771,
3129,
1352,
628,
198,
4299,
1388,
33529,
198,
220,
220,
220,
37227,
198,
220,
220,
... | 3.448387 | 310 |
# Generated by Django 3.2.11 on 2022-01-16 01:51
import datetime
from django.conf import settings
import django.contrib.auth.validators
import django.contrib.postgres.fields
import django.core.validators
from django.db import migrations, models
from django.utils.timezone import utc
import django.utils.timezone
import integreat_cms.cms.models.media.media_file
import integreat_cms.cms.models.users.user
class Migration(migrations.Migration):
"""
Initial migration
"""
initial = True
dependencies = [
("auth", "0012_alter_user_first_name_max_length"),
("contenttypes", "0002_remove_content_type_name"),
]
operations = [
migrations.CreateModel(
name="User",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("password", models.CharField(max_length=128, verbose_name="password")),
(
"last_login",
models.DateTimeField(
blank=True, null=True, verbose_name="last login"
),
),
(
"is_superuser",
models.BooleanField(
default=False,
help_text="Designates that this user has all permissions without explicitly assigning them.",
verbose_name="superuser status",
),
),
(
"username",
models.CharField(
error_messages={
"unique": "A user with that username already exists."
},
help_text="Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.",
max_length=150,
unique=True,
validators=[
django.contrib.auth.validators.UnicodeUsernameValidator()
],
verbose_name="username",
),
),
(
"first_name",
models.CharField(
blank=True, max_length=150, verbose_name="first name"
),
),
(
"last_name",
models.CharField(
blank=True, max_length=150, verbose_name="last name"
),
),
(
"email",
models.EmailField(
blank=True, max_length=254, verbose_name="email address"
),
),
(
"is_staff",
models.BooleanField(
default=False,
help_text="Designates whether the user can log into this admin site.",
verbose_name="staff status",
),
),
(
"is_active",
models.BooleanField(
default=True,
help_text="Designates whether this user should be treated as active. Unselect this instead of deleting accounts.",
verbose_name="active",
),
),
(
"date_joined",
models.DateTimeField(
default=django.utils.timezone.now, verbose_name="date joined"
),
),
(
"chat_last_visited",
models.DateTimeField(
default=datetime.datetime(1, 1, 1, 0, 0, tzinfo=utc),
help_text="The date and time when the user did read the chat the last time",
verbose_name="last chat visit date",
),
),
(
"expert_mode",
models.BooleanField(
default=False,
help_text="Enable this option to show up additional features like XLIFF import/export, page filtering, mirrored pages, page-based permissions, Do-Not-Translate-Tag and recurring events",
verbose_name="experienced user",
),
),
(
"page_tree_tutorial_seen",
models.BooleanField(
default=False,
help_text="Will be set to true once the user dismissed the page tree tutorial",
verbose_name="Page tree tutorial seen",
),
),
(
"groups",
models.ManyToManyField(
blank=True,
help_text="The groups this user belongs to. A user will get all permissions granted to each of their groups.",
related_name="user_set",
related_query_name="user",
to="auth.Group",
verbose_name="groups",
),
),
],
options={
"verbose_name": "user",
"verbose_name_plural": "users",
"default_permissions": ("change", "delete", "view"),
},
managers=[
("objects", integreat_cms.cms.models.users.user.CustomUserManager()),
],
),
migrations.CreateModel(
name="Directory",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255, verbose_name="name")),
(
"created_date",
models.DateTimeField(
auto_now_add=True,
help_text="The date and time when the directory was created",
verbose_name="creation date",
),
),
(
"parent",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="subdirectories",
to="cms.directory",
verbose_name="parent directory",
),
),
],
options={
"verbose_name": "media directory",
"verbose_name_plural": "media directories",
"ordering": ["-region", "name"],
},
),
migrations.CreateModel(
name="Event",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"created_date",
models.DateTimeField(
default=django.utils.timezone.now, verbose_name="creation date"
),
),
("start_date", models.DateField(verbose_name="start date")),
("start_time", models.TimeField(blank=True, verbose_name="start time")),
("end_date", models.DateField(verbose_name="end date")),
("end_time", models.TimeField(blank=True, verbose_name="end time")),
(
"archived",
models.BooleanField(default=False, verbose_name="archived"),
),
],
options={
"verbose_name": "event",
"verbose_name_plural": "events",
"ordering": ["start_date", "start_time"],
"permissions": (("publish_event", "Can publish events"),),
"default_permissions": ("change", "delete", "view"),
"default_related_name": "events",
},
),
migrations.CreateModel(
name="Feedback",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"rating",
models.BooleanField(
blank=True,
choices=[
(True, "Positive"),
(False, "Negative"),
(None, "Not stated"),
],
default=None,
help_text="Whether the feedback is positive or negative",
null=True,
verbose_name="rating",
),
),
("comment", models.TextField(blank=True, verbose_name="comment")),
(
"is_technical",
models.BooleanField(
help_text="Whether or not the feedback is targeted at the developers",
verbose_name="technical",
),
),
(
"created_date",
models.DateTimeField(
auto_now_add=True, verbose_name="creation date"
),
),
],
options={
"verbose_name": "feedback",
"verbose_name_plural": "feedback",
"ordering": ["-created_date"],
"default_permissions": ("change", "delete", "view"),
},
),
migrations.CreateModel(
name="ImprintPage",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"created_date",
models.DateTimeField(
default=django.utils.timezone.now, verbose_name="creation date"
),
),
(
"explicitly_archived",
models.BooleanField(
default=False,
help_text="Whether or not the page is explicitly archived",
verbose_name="explicitly archived",
),
),
],
options={
"verbose_name": "imprint",
"verbose_name_plural": "imprints",
"default_permissions": ("change", "delete", "view"),
"default_related_name": "imprints",
},
),
migrations.CreateModel(
name="Language",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"slug",
models.SlugField(
help_text="Unique string identifier used in URLs without spaces and special characters.",
max_length=8,
unique=True,
validators=[django.core.validators.MinLengthValidator(2)],
verbose_name="Language Slug",
),
),
(
"bcp47_tag",
models.SlugField(
help_text="Language identifier without spaces and special characters. This field usually contains a combination of subtags from the IANA Subtag Registry.",
max_length=35,
unique=True,
validators=[django.core.validators.MinLengthValidator(2)],
verbose_name="BCP47 Tag",
),
),
(
"native_name",
models.CharField(
help_text="The name of the language in this language.",
max_length=250,
verbose_name="native name",
),
),
(
"english_name",
models.CharField(
help_text="The name of the language in English.",
max_length=250,
verbose_name="name in English",
),
),
(
"text_direction",
models.CharField(
choices=[
("LEFT_TO_RIGHT", "Left to right"),
("RIGHT_TO_LEFT", "Right to left"),
],
default="LEFT_TO_RIGHT",
max_length=13,
verbose_name="text direction",
),
),
(
"primary_country_code",
models.CharField(
choices=[
("ad", "Andorra"),
("ae", "United Arab Emirates"),
("af", "Afghanistan"),
("al", "Albania"),
("am", "Armenia"),
("ao", "Angola"),
("ar", "Argentina"),
("at", "Austria"),
("au", "Australia"),
("az", "Azerbaijan"),
("ba", "Bosnia and Herzegovina"),
("bd", "Bangladesh"),
("be", "Belgium"),
("bf", "Burkina Faso"),
("bg", "Bulgaria"),
("bh", "Bahrain"),
("bi", "Burundi"),
("bj", "Benin"),
("bm", "Bermuda"),
("bn", "Brunei"),
("bo", "Bolivia"),
("br", "Brazil"),
("bt", "Bhutan"),
("bw", "Botswana"),
("by", "Belarus"),
("ca", "Canada"),
("cd", "Congo (Democratic Republic)"),
("cf", "Central African Republic"),
("cg", "Congo (Republic)"),
("ch", "Switzerland"),
("ci", "Côte d'Ivoire"),
("cl", "Chile"),
("cm", "Cameroon"),
("cn", "China"),
("co", "Colombia"),
("cr", "Costa Rica"),
("cu", "Cuba"),
("cy", "Cyprus"),
("cz", "Czechia"),
("de", "Germany"),
("dj", "Djibouti"),
("dk", "Denmark"),
("dm", "Dominica"),
("do", "Dominican Republic"),
("dz", "Algeria"),
("ec", "Ecuador"),
("ee", "Estonia"),
("eg", "Egypt"),
("er", "Eritrea"),
("es", "Spain"),
("et", "Ethiopia"),
("fi", "Finland"),
("fr", "France"),
("ga", "Gabon"),
(
"gb",
"United Kingdom of Great Britain and Northern Ireland",
),
("gd", "Grenada"),
("ge", "Georgia"),
("gf", "French Guiana"),
("gg", "Guernsey"),
("gh", "Ghana"),
("gi", "Gibraltar"),
("gl", "Greenland"),
("gm", "Gambia"),
("gn", "Guinea"),
("gp", "Guadeloupe"),
("gr", "Greece"),
("gt", "Guatemala"),
("gu", "Guam"),
("gy", "Guyana"),
("hk", "Hong Kong"),
("hn", "Honduras"),
("hr", "Croatia"),
("ht", "Haiti"),
("hu", "Hungary"),
("id", "Indonesia"),
("ie", "Ireland"),
("il", "Israel"),
("in", "India"),
("iq", "Iraq"),
("ir", "Iran"),
("is", "Iceland"),
("it", "Italy"),
("jm", "Jamaica"),
("jo", "Jordan"),
("jp", "Japan"),
("ke", "Kenya"),
("kg", "Kyrgyzstan"),
("kh", "Cambodia"),
("kp", "North Korea"),
("kr", "South Korea"),
("kw", "Kuwait"),
("kz", "Kazakhstan"),
("lb", "Lebanon"),
("li", "Liechtenstein"),
("lr", "Liberia"),
("ls", "Lesotho"),
("lt", "Lithuania"),
("lu", "Luxembourg"),
("lv", "Latvia"),
("ly", "Libya"),
("ma", "Morocco"),
("mc", "Monaco"),
("md", "Moldova"),
("me", "Montenegro"),
("mg", "Madagascar"),
("mk", "North Macedonia"),
("ml", "Mali"),
("mm", "Myanmar"),
("mn", "Mongolia"),
("mr", "Mauritania"),
("mt", "Malta"),
("mu", "Mauritius"),
("mv", "Maldives"),
("mw", "Malawi"),
("mx", "Mexico"),
("my", "Malaysia"),
("mz", "Mozambique"),
("na", "Namibia"),
("ne", "Niger"),
("ng", "Nigeria"),
("ni", "Nicaragua"),
("nl", "Netherlands"),
("no", "Norway"),
("np", "Nepal"),
("nz", "New Zealand"),
("om", "Oman"),
("pa", "Panama"),
("pe", "Peru"),
("pf", "French Polynesia"),
("pg", "Papua New Guinea"),
("ph", "Philippines"),
("pk", "Pakistan"),
("pl", "Poland"),
("ps", "Palestine"),
("pt", "Portugal"),
("py", "Paraguay"),
("qa", "Qatar"),
("ro", "Romania"),
("rs", "Serbia"),
("ru", "Russian Federation"),
("rw", "Rwanda"),
("sa", "Saudi Arabia"),
("sd", "Sudan"),
("se", "Sweden"),
("si", "Slovenia"),
("sk", "Slovakia"),
("sl", "Sierra Leone"),
("sn", "Senegal"),
("so", "Somalia"),
("ss", "South Sudan"),
("sv", "El Salvador"),
("sy", "Syrian Arab Republic"),
("td", "Chad"),
("th", "Thailand"),
("tj", "Tajikistan"),
("tm", "Turkmenistan"),
("tn", "Tunisia"),
("tr", "Turkey"),
("tw", "Taiwan"),
("tz", "Tanzania"),
("ua", "Ukraine"),
("ug", "Uganda"),
("us", "United States of America"),
("uy", "Uruguay"),
("uz", "Uzbekistan"),
("ve", "Venezuela"),
("vn", "Viet Nam"),
("xk", "Kosovo"),
("ye", "Yemen"),
("za", "South Africa"),
("zm", "Zambia"),
("zw", "Zimbabwe"),
],
help_text="The country with which this language is mainly associated. This flag is used to represent the language graphically.",
max_length=2,
verbose_name="primary country flag",
),
),
(
"secondary_country_code",
models.CharField(
blank=True,
choices=[
("ad", "Andorra"),
("ae", "United Arab Emirates"),
("af", "Afghanistan"),
("al", "Albania"),
("am", "Armenia"),
("ao", "Angola"),
("ar", "Argentina"),
("at", "Austria"),
("au", "Australia"),
("az", "Azerbaijan"),
("ba", "Bosnia and Herzegovina"),
("bd", "Bangladesh"),
("be", "Belgium"),
("bf", "Burkina Faso"),
("bg", "Bulgaria"),
("bh", "Bahrain"),
("bi", "Burundi"),
("bj", "Benin"),
("bm", "Bermuda"),
("bn", "Brunei"),
("bo", "Bolivia"),
("br", "Brazil"),
("bt", "Bhutan"),
("bw", "Botswana"),
("by", "Belarus"),
("ca", "Canada"),
("cd", "Congo (Democratic Republic)"),
("cf", "Central African Republic"),
("cg", "Congo (Republic)"),
("ch", "Switzerland"),
("ci", "Côte d'Ivoire"),
("cl", "Chile"),
("cm", "Cameroon"),
("cn", "China"),
("co", "Colombia"),
("cr", "Costa Rica"),
("cu", "Cuba"),
("cy", "Cyprus"),
("cz", "Czechia"),
("de", "Germany"),
("dj", "Djibouti"),
("dk", "Denmark"),
("dm", "Dominica"),
("do", "Dominican Republic"),
("dz", "Algeria"),
("ec", "Ecuador"),
("ee", "Estonia"),
("eg", "Egypt"),
("er", "Eritrea"),
("es", "Spain"),
("et", "Ethiopia"),
("fi", "Finland"),
("fr", "France"),
("ga", "Gabon"),
(
"gb",
"United Kingdom of Great Britain and Northern Ireland",
),
("gd", "Grenada"),
("ge", "Georgia"),
("gf", "French Guiana"),
("gg", "Guernsey"),
("gh", "Ghana"),
("gi", "Gibraltar"),
("gl", "Greenland"),
("gm", "Gambia"),
("gn", "Guinea"),
("gp", "Guadeloupe"),
("gr", "Greece"),
("gt", "Guatemala"),
("gu", "Guam"),
("gy", "Guyana"),
("hk", "Hong Kong"),
("hn", "Honduras"),
("hr", "Croatia"),
("ht", "Haiti"),
("hu", "Hungary"),
("id", "Indonesia"),
("ie", "Ireland"),
("il", "Israel"),
("in", "India"),
("iq", "Iraq"),
("ir", "Iran"),
("is", "Iceland"),
("it", "Italy"),
("jm", "Jamaica"),
("jo", "Jordan"),
("jp", "Japan"),
("ke", "Kenya"),
("kg", "Kyrgyzstan"),
("kh", "Cambodia"),
("kp", "North Korea"),
("kr", "South Korea"),
("kw", "Kuwait"),
("kz", "Kazakhstan"),
("lb", "Lebanon"),
("li", "Liechtenstein"),
("lr", "Liberia"),
("ls", "Lesotho"),
("lt", "Lithuania"),
("lu", "Luxembourg"),
("lv", "Latvia"),
("ly", "Libya"),
("ma", "Morocco"),
("mc", "Monaco"),
("md", "Moldova"),
("me", "Montenegro"),
("mg", "Madagascar"),
("mk", "North Macedonia"),
("ml", "Mali"),
("mm", "Myanmar"),
("mn", "Mongolia"),
("mr", "Mauritania"),
("mt", "Malta"),
("mu", "Mauritius"),
("mv", "Maldives"),
("mw", "Malawi"),
("mx", "Mexico"),
("my", "Malaysia"),
("mz", "Mozambique"),
("na", "Namibia"),
("ne", "Niger"),
("ng", "Nigeria"),
("ni", "Nicaragua"),
("nl", "Netherlands"),
("no", "Norway"),
("np", "Nepal"),
("nz", "New Zealand"),
("om", "Oman"),
("pa", "Panama"),
("pe", "Peru"),
("pf", "French Polynesia"),
("pg", "Papua New Guinea"),
("ph", "Philippines"),
("pk", "Pakistan"),
("pl", "Poland"),
("ps", "Palestine"),
("pt", "Portugal"),
("py", "Paraguay"),
("qa", "Qatar"),
("ro", "Romania"),
("rs", "Serbia"),
("ru", "Russian Federation"),
("rw", "Rwanda"),
("sa", "Saudi Arabia"),
("sd", "Sudan"),
("se", "Sweden"),
("si", "Slovenia"),
("sk", "Slovakia"),
("sl", "Sierra Leone"),
("sn", "Senegal"),
("so", "Somalia"),
("ss", "South Sudan"),
("sv", "El Salvador"),
("sy", "Syrian Arab Republic"),
("td", "Chad"),
("th", "Thailand"),
("tj", "Tajikistan"),
("tm", "Turkmenistan"),
("tn", "Tunisia"),
("tr", "Turkey"),
("tw", "Taiwan"),
("tz", "Tanzania"),
("ua", "Ukraine"),
("ug", "Uganda"),
("us", "United States of America"),
("uy", "Uruguay"),
("uz", "Uzbekistan"),
("ve", "Venezuela"),
("vn", "Viet Nam"),
("xk", "Kosovo"),
("ye", "Yemen"),
("za", "South Africa"),
("zm", "Zambia"),
("zw", "Zimbabwe"),
],
help_text="Another country with which this language is also associated. This flag is used in the language switcher.",
max_length=2,
verbose_name="secondary country flag",
),
),
(
"created_date",
models.DateTimeField(
default=django.utils.timezone.now, verbose_name="creation date"
),
),
(
"last_updated",
models.DateTimeField(
auto_now=True, verbose_name="modification date"
),
),
(
"table_of_contents",
models.CharField(
help_text='The native name for "Table of contents" in this language. This is used in exported PDFs.',
max_length=250,
verbose_name='"Table of contents" in this language',
),
),
],
options={
"verbose_name": "language",
"verbose_name_plural": "languages",
"ordering": ["bcp47_tag"],
"default_permissions": ("change", "delete", "view"),
},
),
migrations.CreateModel(
name="MediaFile",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"file",
models.FileField(
max_length=512,
upload_to=integreat_cms.cms.models.media.media_file.upload_path,
verbose_name="file",
),
),
(
"thumbnail",
models.FileField(
max_length=512,
upload_to=integreat_cms.cms.models.media.media_file.upload_path_thumbnail,
verbose_name="thumbnail file",
),
),
(
"type",
models.CharField(
choices=[
("image/png", "PNG image"),
("image/jpeg", "JPEG image"),
("application/pdf", "PDF document"),
("image/svg+xml", "SVG image"),
("image/gif", "GIF image"),
("application/msword", "DOC document"),
(
"application/vnd.openxmlformats-officedocument.wordprocessingml.document",
"DOCX document",
),
("application/vnd.ms-excel", "XLS document"),
(
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
"XLSX document",
),
("application/vnd.ms-powerpoint", "PPT document"),
(
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
"PPTX document",
),
],
max_length=128,
verbose_name="file type",
),
),
("name", models.CharField(max_length=512, verbose_name="name")),
(
"alt_text",
models.CharField(
blank=True, max_length=512, verbose_name="description"
),
),
(
"uploaded_date",
models.DateTimeField(
auto_now_add=True,
help_text="The date and time when the media file was uploaded",
verbose_name="uploaded date",
),
),
(
"parent_directory",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="files",
to="cms.directory",
verbose_name="parent directory",
),
),
],
options={
"verbose_name": "media file",
"verbose_name_plural": "media files",
"ordering": ["-region", "name"],
"permissions": (
("upload_mediafile", "Can upload media file"),
("replace_mediafile", "Can replace media file"),
),
"default_permissions": ("change", "delete", "view"),
},
),
migrations.CreateModel(
name="OfferTemplate",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=250, verbose_name="name")),
(
"slug",
models.SlugField(
help_text="String identifier without spaces and special characters. Unique per region and language. Leave blank to generate unique parameter from name",
max_length=60,
unique=True,
verbose_name="slug",
),
),
(
"thumbnail",
models.URLField(max_length=250, verbose_name="thumbnail URL"),
),
(
"url",
models.URLField(
help_text="This will be an external API endpoint in most cases.",
max_length=250,
verbose_name="URL",
),
),
(
"post_data",
models.JSONField(
blank=True,
default=dict,
help_text="Additional POST data for retrieving the URL. Specify as JSON.",
max_length=250,
verbose_name="POST parameter",
),
),
(
"use_postal_code",
models.CharField(
choices=[
("NONE", "Do not use postcode"),
("GET", "Append postal code to URL"),
("POST", "Add postal code to post parameters"),
],
default="NONE",
help_text="Whether and how to insert the postcode of the region into the URL or POST data",
max_length=4,
verbose_name="use postal code",
),
),
(
"created_date",
models.DateTimeField(
default=django.utils.timezone.now, verbose_name="creation date"
),
),
(
"last_updated",
models.DateTimeField(
auto_now=True, verbose_name="modification date"
),
),
],
options={
"verbose_name": "offer template",
"verbose_name_plural": "offer templates",
"default_permissions": ("change", "delete", "view"),
},
),
migrations.CreateModel(
name="Organization",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=200, verbose_name="name")),
(
"slug",
models.SlugField(
allow_unicode=True,
help_text="Unique string identifier without spaces and special characters.",
max_length=200,
unique=True,
verbose_name="slug",
),
),
(
"created_date",
models.DateTimeField(
default=django.utils.timezone.now, verbose_name="creation date"
),
),
(
"last_updated",
models.DateTimeField(
auto_now=True, verbose_name="modification date"
),
),
(
"icon",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="icon_organizations",
to="cms.mediafile",
verbose_name="logo",
),
),
],
options={
"verbose_name": "organization",
"verbose_name_plural": "organizations",
"default_permissions": ("change", "delete", "view"),
},
),
migrations.CreateModel(
name="Page",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"created_date",
models.DateTimeField(
default=django.utils.timezone.now, verbose_name="creation date"
),
),
(
"explicitly_archived",
models.BooleanField(
default=False,
help_text="Whether or not the page is explicitly archived",
verbose_name="explicitly archived",
),
),
("lft", models.PositiveIntegerField(db_index=True)),
("rgt", models.PositiveIntegerField(db_index=True)),
("tree_id", models.PositiveIntegerField(db_index=True)),
("depth", models.PositiveIntegerField(db_index=True)),
(
"mirrored_page_first",
models.BooleanField(
blank=True,
default=True,
help_text="If a mirrored page is set, this field determines whether the live content is embedded before the content of this page or after.",
null=True,
verbose_name="Position of mirrored page",
),
),
(
"editors",
models.ManyToManyField(
blank=True,
help_text="A list of users who have the permission to edit this specific page. Only has effect if these users do not have the permission to edit pages anyway.",
related_name="editable_pages",
to=settings.AUTH_USER_MODEL,
verbose_name="editors",
),
),
(
"icon",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="pages",
to="cms.mediafile",
verbose_name="icon",
),
),
(
"mirrored_page",
models.ForeignKey(
blank=True,
help_text="If the page embeds live content from another page, it is referenced here.",
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="mirroring_pages",
to="cms.page",
verbose_name="mirrored page",
),
),
(
"organization",
models.ForeignKey(
blank=True,
help_text="This allows all members of the organization to edit and publish this page.",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="pages",
to="cms.organization",
verbose_name="responsible organization",
),
),
(
"parent",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="children",
to="cms.page",
verbose_name="parent",
),
),
(
"publishers",
models.ManyToManyField(
blank=True,
help_text="A list of users who have the permission to publish this specific page. Only has effect if these users do not have the permission to publish pages anyway.",
related_name="publishable_pages",
to=settings.AUTH_USER_MODEL,
verbose_name="publishers",
),
),
],
options={
"verbose_name": "page",
"verbose_name_plural": "pages",
"permissions": (
("publish_page", "Can publish page"),
("grant_page_permissions", "Can grant page permission"),
),
"default_permissions": ("change", "delete", "view"),
"default_related_name": "pages",
},
),
migrations.CreateModel(
name="POI",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"created_date",
models.DateTimeField(
default=django.utils.timezone.now, verbose_name="creation date"
),
),
(
"address",
models.CharField(
max_length=250, verbose_name="street and house number"
),
),
(
"postcode",
models.CharField(max_length=10, verbose_name="postal code"),
),
("city", models.CharField(max_length=250, verbose_name="city")),
("country", models.CharField(max_length=250, verbose_name="country")),
(
"latitude",
models.FloatField(
help_text="The latitude coordinate", verbose_name="latitude"
),
),
(
"longitude",
models.FloatField(
help_text="The longitude coordinate", verbose_name="longitude"
),
),
(
"location_not_on_map",
models.BooleanField(
default=False,
help_text="Tick if you do not show this location on map",
verbose_name="Do not show this location on map",
),
),
(
"archived",
models.BooleanField(
default=False,
help_text="Whether or not the location is read-only and hidden in the API.",
verbose_name="archived",
),
),
(
"icon",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="pois",
to="cms.mediafile",
verbose_name="icon",
),
),
],
options={
"verbose_name": "location",
"verbose_name_plural": "locations",
"default_permissions": ("change", "delete", "view"),
"default_related_name": "pois",
},
),
migrations.CreateModel(
name="RecurrenceRule",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"frequency",
models.CharField(
choices=[
("DAILY", "Daily"),
("WEEKLY", "Weekly"),
("MONTHLY", "Monthly"),
("YEARLY", "Yearly"),
],
default="WEEKLY",
help_text="How often the event recurs",
max_length=7,
verbose_name="frequency",
),
),
(
"interval",
models.IntegerField(
default=1,
help_text="The interval in which the event recurs.",
validators=[django.core.validators.MinValueValidator(1)],
verbose_name="Repeat every ... time(s)",
),
),
(
"weekdays_for_weekly",
django.contrib.postgres.fields.ArrayField(
base_field=models.IntegerField(
choices=[
(0, "Monday"),
(1, "Tuesday"),
(2, "Wednesday"),
(3, "Thursday"),
(4, "Friday"),
(5, "Saturday"),
(6, "Sunday"),
]
),
blank=True,
help_text="If the frequency is weekly, this field determines on which days the event takes place",
size=None,
verbose_name="weekdays",
),
),
(
"weekday_for_monthly",
models.IntegerField(
blank=True,
choices=[
(0, "Monday"),
(1, "Tuesday"),
(2, "Wednesday"),
(3, "Thursday"),
(4, "Friday"),
(5, "Saturday"),
(6, "Sunday"),
],
help_text="If the frequency is monthly, this field determines on which days the event takes place",
null=True,
verbose_name="weekday",
),
),
(
"week_for_monthly",
models.IntegerField(
blank=True,
choices=[
(1, "First week"),
(2, "Second week"),
(3, "Third week"),
(4, "Fourth week"),
],
help_text="If the frequency is monthly, this field determines on which week of the month the event takes place",
null=True,
verbose_name="week",
),
),
(
"recurrence_end_date",
models.DateField(
blank=True,
help_text="If the recurrence is not for an indefinite period, this field contains the end date",
null=True,
verbose_name="recurrence end date",
),
),
],
options={
"verbose_name": "recurrence rule",
"verbose_name_plural": "recurrence rules",
"default_permissions": (),
},
),
migrations.CreateModel(
name="EventListFeedback",
fields=[
(
"feedback_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="cms.feedback",
),
),
],
options={
"verbose_name": "event list feedback",
"verbose_name_plural": "event list feedback",
"default_permissions": (),
},
bases=("cms.feedback",),
),
migrations.CreateModel(
name="ImprintPageFeedback",
fields=[
(
"feedback_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="cms.feedback",
),
),
],
options={
"verbose_name": "imprint feedback",
"verbose_name_plural": "imprint feedback",
"default_permissions": (),
},
bases=("cms.feedback",),
),
migrations.CreateModel(
name="MapFeedback",
fields=[
(
"feedback_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="cms.feedback",
),
),
],
options={
"verbose_name": "map feedback",
"verbose_name_plural": "map feedback",
"default_permissions": (),
},
bases=("cms.feedback",),
),
migrations.CreateModel(
name="OfferListFeedback",
fields=[
(
"feedback_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="cms.feedback",
),
),
],
options={
"verbose_name": "offer list feedback",
"verbose_name_plural": "offer list feedback",
"default_permissions": (),
},
bases=("cms.feedback",),
),
migrations.CreateModel(
name="RegionFeedback",
fields=[
(
"feedback_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="cms.feedback",
),
),
],
options={
"verbose_name": "region feedback",
"verbose_name_plural": "region feedback",
"default_permissions": (),
},
bases=("cms.feedback",),
),
migrations.CreateModel(
name="SearchResultFeedback",
fields=[
(
"feedback_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="cms.feedback",
),
),
(
"search_query",
models.CharField(max_length=1000, verbose_name="search term"),
),
],
options={
"verbose_name": "search result feedback",
"verbose_name_plural": "search result feedback",
"default_permissions": (),
},
bases=("cms.feedback",),
),
migrations.CreateModel(
name="Role",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"name",
models.CharField(
choices=[
("MANAGEMENT", "Management"),
("EDITOR", "Editor"),
("EVENT_MANAGER", "Event manager"),
("MUNICIPALITY_TEAM", "Municipality team"),
("CMS_TEAM", "CMS team"),
("APP_TEAM", "App team"),
("MARKETING_TEAM", "Marketing team"),
],
max_length=50,
verbose_name="name",
),
),
(
"staff_role",
models.BooleanField(
default=False,
help_text="Whether or not this role is designed for staff members",
verbose_name="staff role",
),
),
(
"group",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
related_name="role",
to="auth.group",
verbose_name="Django auth group",
),
),
],
options={
"verbose_name": "role",
"verbose_name_plural": "roles",
"default_permissions": (),
},
),
migrations.CreateModel(
name="Region",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=200, verbose_name="name")),
(
"common_id",
models.CharField(
blank=True,
help_text="Number sequence for identifying politically independent administrative units",
max_length=48,
verbose_name="community identification number",
),
),
(
"slug",
models.SlugField(
allow_unicode=True,
help_text="Unique string identifier without spaces and special characters. Leave blank to generate unique parameter from name",
max_length=200,
unique=True,
verbose_name="URL parameter",
),
),
(
"status",
models.CharField(
choices=[
("ACTIVE", "Active"),
("HIDDEN", "Hidden"),
("ARCHIVED", "Archived"),
],
default="HIDDEN",
max_length=8,
verbose_name="status",
),
),
(
"administrative_division",
models.CharField(
choices=[
("CITY", "City"),
("DISTRICT", "District"),
("RURAL_DISTRICT", "Rural district"),
("REGION", "Region"),
("CITY_AND_DISTRICT", "City and district"),
("URBAN_DISTRICT", "Urban district"),
("GOVERNMENTAL_DISTRICT", "Governmental district"),
("CITY_STATE", "City state"),
("AREA_STATE", "Area state"),
("FREE_STATE", "Free state"),
("FEDERAL_STATE", "Federal state"),
("MUNICIPALITY", "Municipality"),
("COLLECTIVE_MUNICIPALITY", "Collective municipality"),
("INITIAL_RECEPTION_CENTER", "Initial reception center"),
],
default="RURAL_DISTRICT",
max_length=24,
verbose_name="administrative division",
),
),
(
"aliases",
models.TextField(
blank=True,
help_text="E.g. smaller municipalities in that area. If empty, the CMS will try to fill this automatically. Specify as JSON.",
verbose_name="aliases",
),
),
(
"events_enabled",
models.BooleanField(
default=True,
help_text="Whether or not events are enabled in the region",
verbose_name="activate events",
),
),
(
"push_notifications_enabled",
models.BooleanField(
default=True,
help_text="Whether or not push notifications are enabled in the region",
verbose_name="activate push notifications",
),
),
(
"latitude",
models.FloatField(
help_text="The latitude coordinate of an approximate center of the region",
null=True,
verbose_name="latitude",
),
),
(
"longitude",
models.FloatField(
help_text="The longitude coordinate of an approximate center of the region",
null=True,
verbose_name="longitude",
),
),
(
"postal_code",
models.CharField(max_length=10, verbose_name="postal code"),
),
(
"admin_mail",
models.EmailField(
max_length=254,
verbose_name="email address of the administrator",
),
),
(
"created_date",
models.DateTimeField(
default=django.utils.timezone.now, verbose_name="creation date"
),
),
(
"last_updated",
models.DateTimeField(
auto_now=True, verbose_name="modification date"
),
),
(
"statistics_enabled",
models.BooleanField(
default=False,
help_text="Whether or not statistics are enabled for the region",
verbose_name="activate statistics",
),
),
(
"matomo_id",
models.PositiveSmallIntegerField(
blank=True,
help_text="The Matomo ID of this region. Will be automatically derived from the Matomo access token.",
null=True,
verbose_name="Matomo ID",
),
),
(
"matomo_token",
models.CharField(
blank=True,
default="",
help_text="The secret Matomo access token of the region is used to authenticate in API requests",
max_length=150,
verbose_name="Matomo authentication token",
),
),
(
"page_permissions_enabled",
models.BooleanField(
default=False,
help_text="This allows individual users to be granted the right to edit or publish a specific page.",
verbose_name="activate page-specific permissions",
),
),
(
"chat_enabled",
models.BooleanField(
default=True,
help_text="This gives all users of this region access to the cross-regional author chat.",
verbose_name="activate author chat",
),
),
(
"administrative_division_included",
models.BooleanField(
default=False,
help_text="Determines whether the administrative division is displayed next to the region name. Sorting is always based on the name, independently from the administrative division.",
verbose_name="include administrative division into name",
),
),
(
"short_urls_enabled",
models.BooleanField(
default=False,
help_text="Please check the box if you want to use short urls.",
verbose_name="Activate short urls",
),
),
(
"icon",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="icon_regions",
to="cms.mediafile",
verbose_name="logo",
),
),
(
"offers",
models.ManyToManyField(
blank=True,
help_text="Integreat offers are extended features apart from pages and events and are usually offered by a third party. In most cases, the url is an external API endpoint which the frontend apps can query and render the results inside the Integreat app.",
related_name="regions",
to="cms.OfferTemplate",
verbose_name="offers",
),
),
],
options={
"verbose_name": "region",
"verbose_name_plural": "regions",
"ordering": ["name"],
"default_permissions": ("change", "delete", "view"),
},
),
migrations.CreateModel(
name="PushNotification",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"channel",
models.CharField(
choices=[("News", "News")],
max_length=60,
verbose_name="channel",
),
),
(
"draft",
models.BooleanField(
default=True,
help_text="Whether or not the push notification is a draft (drafts cannot be sent)",
verbose_name="draft",
),
),
(
"sent_date",
models.DateTimeField(
blank=True,
help_text="The date and time when the push notification was sent.",
null=True,
verbose_name="sent date",
),
),
(
"created_date",
models.DateTimeField(
auto_now_add=True, verbose_name="creation date"
),
),
(
"mode",
models.CharField(
choices=[
("ONLY_AVAILABLE", "Only send available translations"),
(
"USE_MAIN_LANGUAGE",
"Use main language if no translation is available",
),
],
help_text="Sets behavior for dealing with not existing push notification translations",
max_length=128,
verbose_name="mode",
),
),
(
"region",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="push_notifications",
to="cms.region",
verbose_name="region",
),
),
],
options={
"verbose_name": "push notification",
"verbose_name_plural": "push notifications",
"permissions": (
("send_push_notification", "Can send push notification"),
),
"default_permissions": ("change", "delete", "view"),
},
),
migrations.CreateModel(
name="POITranslation",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("title", models.CharField(max_length=1024, verbose_name="title")),
(
"slug",
models.SlugField(
allow_unicode=True,
help_text="String identifier without spaces and special characters. Unique per region and language. Leave blank to generate unique parameter from title.",
max_length=1024,
verbose_name="link",
),
),
(
"status",
models.CharField(
choices=[
("DRAFT", "Draft"),
("REVIEW", "Pending Review"),
("PUBLIC", "Published"),
("AUTO_SAVE", "Auto Save"),
],
default="DRAFT",
max_length=9,
verbose_name="status",
),
),
("content", models.TextField(blank=True, verbose_name="content")),
(
"currently_in_translation",
models.BooleanField(
default=False,
help_text="Flag to indicate a translation is being updated by an external translator",
verbose_name="currently in translation",
),
),
(
"version",
models.PositiveIntegerField(default=0, verbose_name="revision"),
),
(
"minor_edit",
models.BooleanField(
default=False,
help_text="Tick if this change does not require an update of translations in other languages.",
verbose_name="minor edit",
),
),
(
"last_updated",
models.DateTimeField(
auto_now=True, verbose_name="modification date"
),
),
(
"short_description",
models.CharField(max_length=2048, verbose_name="short description"),
),
(
"creator",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="poi_translations",
to=settings.AUTH_USER_MODEL,
verbose_name="creator",
),
),
(
"language",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="poi_translations",
to="cms.language",
verbose_name="language",
),
),
(
"poi",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="translations",
to="cms.poi",
verbose_name="location",
),
),
],
options={
"verbose_name": "location translation",
"verbose_name_plural": "location translations",
"ordering": ["poi__pk", "-version"],
"default_permissions": (),
"default_related_name": "poi_translations",
},
),
migrations.AddField(
model_name="poi",
name="region",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="pois",
to="cms.region",
verbose_name="region",
),
),
migrations.CreateModel(
name="PageTranslation",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("title", models.CharField(max_length=1024, verbose_name="title")),
(
"slug",
models.SlugField(
allow_unicode=True,
help_text="String identifier without spaces and special characters. Unique per region and language. Leave blank to generate unique parameter from title.",
max_length=1024,
verbose_name="link",
),
),
(
"status",
models.CharField(
choices=[
("DRAFT", "Draft"),
("REVIEW", "Pending Review"),
("PUBLIC", "Published"),
("AUTO_SAVE", "Auto Save"),
],
default="DRAFT",
max_length=9,
verbose_name="status",
),
),
("content", models.TextField(blank=True, verbose_name="content")),
(
"currently_in_translation",
models.BooleanField(
default=False,
help_text="Flag to indicate a translation is being updated by an external translator",
verbose_name="currently in translation",
),
),
(
"version",
models.PositiveIntegerField(default=0, verbose_name="revision"),
),
(
"minor_edit",
models.BooleanField(
default=False,
help_text="Tick if this change does not require an update of translations in other languages.",
verbose_name="minor edit",
),
),
(
"last_updated",
models.DateTimeField(
auto_now=True, verbose_name="modification date"
),
),
(
"creator",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="page_translations",
to=settings.AUTH_USER_MODEL,
verbose_name="creator",
),
),
(
"language",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="page_translations",
to="cms.language",
verbose_name="language",
),
),
(
"page",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="translations",
to="cms.page",
verbose_name="page",
),
),
],
options={
"verbose_name": "page translation",
"verbose_name_plural": "page translations",
"ordering": ["page__pk", "-version"],
"default_permissions": (),
"default_related_name": "page_translations",
},
),
migrations.AddField(
model_name="page",
name="region",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="pages",
to="cms.region",
verbose_name="region",
),
),
migrations.AddField(
model_name="mediafile",
name="region",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="files",
to="cms.region",
verbose_name="region",
),
),
migrations.CreateModel(
name="ImprintPageTranslation",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("title", models.CharField(max_length=1024, verbose_name="title")),
(
"status",
models.CharField(
choices=[
("DRAFT", "Draft"),
("REVIEW", "Pending Review"),
("PUBLIC", "Published"),
("AUTO_SAVE", "Auto Save"),
],
default="DRAFT",
max_length=9,
verbose_name="status",
),
),
("content", models.TextField(blank=True, verbose_name="content")),
(
"currently_in_translation",
models.BooleanField(
default=False,
help_text="Flag to indicate a translation is being updated by an external translator",
verbose_name="currently in translation",
),
),
(
"version",
models.PositiveIntegerField(default=0, verbose_name="revision"),
),
(
"minor_edit",
models.BooleanField(
default=False,
help_text="Tick if this change does not require an update of translations in other languages.",
verbose_name="minor edit",
),
),
(
"last_updated",
models.DateTimeField(
auto_now=True, verbose_name="modification date"
),
),
(
"creator",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="imprint_translations",
to=settings.AUTH_USER_MODEL,
verbose_name="creator",
),
),
(
"language",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="imprint_translations",
to="cms.language",
verbose_name="language",
),
),
(
"page",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="translations",
to="cms.imprintpage",
verbose_name="imprint",
),
),
],
options={
"verbose_name": "imprint translation",
"verbose_name_plural": "imprint translations",
"ordering": ["page", "-version"],
"default_permissions": (),
"default_related_name": "imprint_translations",
},
),
migrations.AddField(
model_name="imprintpage",
name="region",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="imprints",
to="cms.region",
verbose_name="region",
),
),
migrations.AddField(
model_name="feedback",
name="language",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="feedback",
to="cms.language",
verbose_name="language",
),
),
migrations.AddField(
model_name="feedback",
name="polymorphic_ctype",
field=models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_cms.feedback_set+",
to="contenttypes.contenttype",
),
),
migrations.AddField(
model_name="feedback",
name="read_by",
field=models.ForeignKey(
blank=True,
help_text="The user who marked this feedback as read. If the feedback is unread, this field is empty.",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="feedback",
to=settings.AUTH_USER_MODEL,
verbose_name="marked as read by",
),
),
migrations.AddField(
model_name="feedback",
name="region",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="feedback",
to="cms.region",
verbose_name="region",
),
),
migrations.CreateModel(
name="EventTranslation",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("title", models.CharField(max_length=1024, verbose_name="title")),
(
"slug",
models.SlugField(
allow_unicode=True,
help_text="String identifier without spaces and special characters. Unique per region and language. Leave blank to generate unique parameter from title.",
max_length=1024,
verbose_name="link",
),
),
(
"status",
models.CharField(
choices=[
("DRAFT", "Draft"),
("REVIEW", "Pending Review"),
("PUBLIC", "Published"),
("AUTO_SAVE", "Auto Save"),
],
default="DRAFT",
max_length=9,
verbose_name="status",
),
),
("content", models.TextField(blank=True, verbose_name="content")),
(
"currently_in_translation",
models.BooleanField(
default=False,
help_text="Flag to indicate a translation is being updated by an external translator",
verbose_name="currently in translation",
),
),
(
"version",
models.PositiveIntegerField(default=0, verbose_name="revision"),
),
(
"minor_edit",
models.BooleanField(
default=False,
help_text="Tick if this change does not require an update of translations in other languages.",
verbose_name="minor edit",
),
),
(
"last_updated",
models.DateTimeField(
auto_now=True, verbose_name="modification date"
),
),
(
"creator",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="event_translations",
to=settings.AUTH_USER_MODEL,
verbose_name="creator",
),
),
(
"event",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="translations",
to="cms.event",
verbose_name="event",
),
),
(
"language",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="event_translations",
to="cms.language",
verbose_name="language",
),
),
],
options={
"verbose_name": "event translation",
"verbose_name_plural": "event translations",
"ordering": ["event__pk", "-version"],
"default_permissions": (),
"default_related_name": "event_translations",
},
),
migrations.AddField(
model_name="event",
name="icon",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="events",
to="cms.mediafile",
verbose_name="icon",
),
),
migrations.AddField(
model_name="event",
name="location",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="events",
to="cms.poi",
verbose_name="location",
),
),
migrations.AddField(
model_name="event",
name="recurrence_rule",
field=models.OneToOneField(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="event",
to="cms.recurrencerule",
verbose_name="recurrence rule",
),
),
migrations.AddField(
model_name="event",
name="region",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="events",
to="cms.region",
verbose_name="region",
),
),
migrations.AddField(
model_name="directory",
name="region",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="media_directories",
to="cms.region",
verbose_name="region",
),
),
migrations.CreateModel(
name="ChatMessage",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("text", models.TextField(verbose_name="content")),
(
"sent_datetime",
models.DateTimeField(auto_now_add=True, verbose_name="sent date"),
),
(
"sender",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="chat_messages",
to=settings.AUTH_USER_MODEL,
verbose_name="sender",
),
),
],
options={
"verbose_name": "chat message",
"verbose_name_plural": "chat messages",
"ordering": ["-sent_datetime"],
"default_permissions": ("delete",),
},
),
migrations.AddField(
model_name="user",
name="organization",
field=models.ForeignKey(
blank=True,
help_text="This allows the user to edit and publish all pages for which the organisation is registered as the responsible organisation",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="members",
to="cms.organization",
verbose_name="organization",
),
),
migrations.AddField(
model_name="user",
name="regions",
field=models.ManyToManyField(
blank=True,
help_text="The regions to which the user has access",
related_name="users",
to="cms.Region",
verbose_name="regions",
),
),
migrations.AddField(
model_name="user",
name="user_permissions",
field=models.ManyToManyField(
blank=True,
help_text="Specific permissions for this user.",
related_name="user_set",
related_query_name="user",
to="auth.Permission",
verbose_name="user permissions",
),
),
migrations.CreateModel(
name="UserMfaKey",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=200, verbose_name="key name")),
(
"key_id",
models.BinaryField(max_length=255, verbose_name="WebAuthn ID"),
),
(
"public_key",
models.BinaryField(
max_length=255,
verbose_name="multi-factor-authentication public key",
),
),
(
"sign_count",
models.IntegerField(
help_text="Token to prevent replay attacks.",
verbose_name="sign count",
),
),
(
"last_usage",
models.DateTimeField(null=True, verbose_name="last date of use"),
),
(
"created_at",
models.DateTimeField(
auto_now_add=True, verbose_name="creation date"
),
),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="mfa_keys",
to=settings.AUTH_USER_MODEL,
verbose_name="user",
),
),
],
options={
"verbose_name": "multi-factor authentication key",
"verbose_name_plural": "multi-factor authentication keys",
"default_permissions": (),
"unique_together": {("user", "name")},
},
),
migrations.CreateModel(
name="PushNotificationTranslation",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"title",
models.CharField(blank=True, max_length=250, verbose_name="title"),
),
(
"text",
models.TextField(
blank=True, max_length=250, verbose_name="content"
),
),
(
"last_updated",
models.DateTimeField(
auto_now=True, verbose_name="modification date"
),
),
(
"language",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="push_notification_translations",
to="cms.language",
verbose_name="language",
),
),
(
"push_notification",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="translations",
to="cms.pushnotification",
verbose_name="push notification",
),
),
],
options={
"verbose_name": "push notification translation",
"verbose_name_plural": "push notification translations",
"ordering": ["push_notification", "language"],
"default_permissions": (),
"unique_together": {("push_notification", "language")},
},
),
migrations.CreateModel(
name="POIFeedback",
fields=[
(
"feedback_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="cms.feedback",
),
),
(
"poi_translation",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="feedback",
to="cms.poitranslation",
verbose_name="location translation",
),
),
],
options={
"verbose_name": "location feedback",
"verbose_name_plural": "location feedback",
"default_permissions": (),
},
bases=("cms.feedback",),
),
migrations.CreateModel(
name="PageFeedback",
fields=[
(
"feedback_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="cms.feedback",
),
),
(
"page_translation",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="feedback",
to="cms.pagetranslation",
verbose_name="page translation",
),
),
],
options={
"verbose_name": "page feedback",
"verbose_name_plural": "page feedback",
"default_permissions": (),
},
bases=("cms.feedback",),
),
migrations.CreateModel(
name="OfferFeedback",
fields=[
(
"feedback_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="cms.feedback",
),
),
(
"offer",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="feedback",
to="cms.offertemplate",
verbose_name="offer",
),
),
],
options={
"verbose_name": "offer feedback",
"verbose_name_plural": "offer feedback",
"default_permissions": (),
},
bases=("cms.feedback",),
),
migrations.CreateModel(
name="LanguageTreeNode",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("lft", models.PositiveIntegerField(db_index=True)),
("rgt", models.PositiveIntegerField(db_index=True)),
("tree_id", models.PositiveIntegerField(db_index=True)),
("depth", models.PositiveIntegerField(db_index=True)),
(
"visible",
models.BooleanField(
default=True,
help_text="Defined if this language should be delivered via the API",
verbose_name="visible",
),
),
(
"active",
models.BooleanField(
default=True,
help_text="Defined if content in this language can be created or edited",
verbose_name="active",
),
),
(
"created_date",
models.DateTimeField(
default=django.utils.timezone.now, verbose_name="creation date"
),
),
(
"last_updated",
models.DateTimeField(
auto_now=True, verbose_name="modification date"
),
),
(
"language",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
related_name="language_tree_nodes",
to="cms.language",
verbose_name="language",
),
),
(
"parent",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="children",
to="cms.languagetreenode",
verbose_name="parent",
),
),
(
"region",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="language_tree_nodes",
to="cms.region",
verbose_name="region",
),
),
],
options={
"verbose_name": "language tree node",
"verbose_name_plural": "language tree nodes",
"default_permissions": ("change", "delete", "view"),
"default_related_name": "language_tree_nodes",
"unique_together": {("language", "region")},
},
),
migrations.CreateModel(
name="EventFeedback",
fields=[
(
"feedback_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="cms.feedback",
),
),
(
"event_translation",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="feedback",
to="cms.eventtranslation",
verbose_name="event translation",
),
),
],
options={
"verbose_name": "event feedback",
"verbose_name_plural": "event feedback",
"default_permissions": (),
},
bases=("cms.feedback",),
),
]
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
1157,
319,
33160,
12,
486,
12,
1433,
5534,
25,
4349,
198,
198,
11748,
4818,
8079,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
11748,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
121... | 1.525104 | 71,245 |
'''Test service module
'''
# Copyright 2019 mickybart
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from multiprocessing import Process
import time
import unittest
import requests
from svc.app import App #pylint: disable=E0401
class ServiceTest(unittest.TestCase):
'''Test suite for the whole service
'''
def test_service_started(self):
'''Test that health endpoint responds correctly
'''
response = requests.get("http://localhost:5000/health")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {'status' : True})
| [
7061,
6,
14402,
2139,
8265,
198,
7061,
6,
198,
198,
2,
15069,
13130,
285,
17479,
16575,
198,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
... | 3.277612 | 335 |
import re
from sensors.sensor import Sensor, SensorSchema
from marshmallow import fields
| [
11748,
302,
198,
6738,
15736,
13,
82,
22854,
1330,
35367,
11,
35367,
27054,
2611,
198,
6738,
22397,
42725,
1330,
7032,
628
] | 4.285714 | 21 |
# -*- coding: utf-8 -*-
"""Rule definitions
Rules are the core of the system. They express specifically what we want
to happen as data is processed. The RuleEngine runs in a slightly modified
version of the OPS5 model, but Rules generally follow.
"""
from weakref import proxy
from .memory import WorkingMemory
class Rule(object):
"""Foundational rule object.
A Rule has a name, and it consists of a conditional test, often called
the left-hand side (LHS), and an action, often called the right-hand
side (RHS). The conditional test examines the WorkingMemory, and
decides whether it may need to take an action. The RuleEngine will
then execute the action of the Rule that it selects.
Attributes:
_wm (WorkingMemory): A proxy object to the working memory
"""
__name__ = "Name of the Rule"
def __init__(self, working_memory):
"""Instantiate the rule.
We keep a weak-ref proxy to the WorkingMemory so that we don't create
any non-GCable garbage.
Args:
working_memory (WorkingMemory): In-flight work for this Rule
"""
self._wm = proxy(working_memory)
super(Rule, self).__init__()
def condition(self):
"""Predicate to decide if this rule needs to be applied.
To do this, it should examine the in-flight instance of
WorkingMemory.
Returns:
bool: True if action should be taken, False otherwise
"""
raise NotImplementedError()
def action(self):
"""Take action on the working memory.
Returns:
bool: True if action succeeded and rule should be kept in, False
if the rule should be removed from consideration.
"""
raise NotImplementedError()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
31929,
17336,
198,
198,
37766,
389,
262,
4755,
286,
262,
1080,
13,
1119,
4911,
5734,
644,
356,
765,
198,
1462,
1645,
355,
1366,
318,
13686,
13,
220,
383,
14330,
... | 2.901294 | 618 |
from .raw_update_handler import RawUpdateHandler
from .stream_ended_handler import StreamEndedHandler
| [
6738,
764,
1831,
62,
19119,
62,
30281,
1330,
16089,
10260,
25060,
198,
6738,
764,
5532,
62,
1631,
62,
30281,
1330,
13860,
12915,
276,
25060,
628
] | 4.12 | 25 |
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import uuid
from trove.cluster import models
from trove.cluster import tasks
from trove.common import cfg
from trove.common import exception
from trove.common import remote
from trove.common.strategies.cluster.mongodb import api
from trove.instance import models as inst_models
from trove.instance import tasks as inst_tasks
from trove.tests.unittests import trove_testtools
CONF = cfg.CONF
| [
2,
15069,
1853,
10696,
5799,
3457,
13,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
... | 3.493243 | 296 |
#!/usr/bin/env python
import os
from os import path
from django.conf import settings
from django.conf import settings
from django.http import HttpResponse
from PIL import Image, ImageFilter, ImageDraw, ImageFont, ImageOps
from localground.apps.lib.helpers.units import Units
from django.contrib.gis.geos import Point, LinearRing, Polygon
import cStringIO as StringIO
import logging, mapscript, urllib, json
class OutputFormat():
'''
Look-up object to enumerate possible formats to be returned by print
'''
PNG = 1
HTTP_RESPONSE = 2
class StaticMap():
"""
Creates static map (based on a pretty long set of possible
options). Reads the MapServer configuration file, and renders maps
according to user-specified preferences.
Helpful reference: http://mapserver.org/mapscript/mapscript.html for swig
"""
#http://caladapt/ows/ms.fcgi?request=getMap&layers=coastal_flood,sealevelrise,county_dark&format=aggpng24&version=1.1.1&height=512&width=512&srs=epsg%3A4326&bbox=-124.625,31.875,-113,44
def __init__(self):
"""
Initializes the object with an initial set of default parameters.
"""
self.MAPSERVER_URL = settings.SERVER_URL + '/ows/ms.fcgi?format=aggpng24&version=1.1.1'
self.layer_name = None
self.source_srs = Units.EPSG_4326
self.layers = [] #['hillshade_region', 'hillshade_ca', 'county_dark', 'cities']
self.south = None
self.west = None
self.north = None
self.east = None
@classmethod
@classmethod
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
28686,
198,
6738,
28686,
1330,
3108,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
... | 2.49537 | 648 |
# Copyright (c) 2010 by Cisco Systems, Inc.
"""
Report duplicate actions: same action in same workding dir.
"""
import getopt
import sys
from instmakelib import instmake_log as LOG
def make_key(dir, cmdline):
"""Make a single string, combining multiple fields."""
return dir + "|" + cmdline
def report_duplicate(records, same_wd):
"""Report an instance of duplicate actions."""
# Print info that is the same for all records
rec0 = records[0]
if rec0.tool:
print "Tool:", rec0.tool
if same_wd:
print "Working Directory:", rec0.cwd
print "Command-line:", rec0.cmdline
i = 1
for rec in records:
print "\t%3d. PPID=%s PID=%s" % (i, rec.ppid, rec.pid),
if not same_wd:
print "CWD=%s" % (rec.cwd,),
if rec.make_target:
print "TARGET=%s" % (rec.make_target,),
if rec.makefile_filename:
print "RULE=%s:%s" % (rec.makefile_filename,
rec.makefile_lineno),
print
i += 1
print
description = "Report duplicate jobs in same working directory."
| [
2,
15069,
357,
66,
8,
3050,
416,
28289,
11998,
11,
3457,
13,
198,
37811,
198,
19100,
23418,
4028,
25,
976,
2223,
287,
976,
670,
12083,
26672,
13,
198,
37811,
198,
198,
11748,
651,
8738,
198,
11748,
25064,
198,
6738,
916,
15883,
8019,
... | 2.367742 | 465 |
"""
Port of jUnitPerf to Python
**************************************
* Ported to Python by Grig Gheorghiu *
**************************************
"""
from threading import Thread
| [
37811,
198,
13924,
286,
474,
26453,
5990,
69,
284,
11361,
198,
41906,
2466,
1174,
198,
1635,
4347,
276,
284,
11361,
416,
1902,
328,
402,
258,
273,
456,
16115,
1635,
198,
41906,
2466,
1174,
198,
37811,
198,
198,
6738,
4704,
278,
1330,
... | 4.108696 | 46 |
# initial based on FreeCAD 0.17dev
#last edit: 2019-08
SourceFolder=[
("Base","Foundamental classes for FreeCAD",
"""import as FreeCAD in Python, see detailed description in later section"""),
("App","nonGUI code: Document, Property and DocumentObject",
"""import as FreeCAD in Python, see detailed description in later section"""),
("Gui","Qt-based GUI code: macro-recording, Workbench",
"""import as FreeCADGui in Python, see detailed description in later section"""),
("CXX","modified PyCXX containing both python 2 and python 3"),
("Ext","Source code for all modules with each module in one subfolder",
"""enable module import from FreeCAD to avoid python module name clashing"""),
("Main","main() function for FreeCADCmd.exe and FreeCADGui.exe",
""""Main() of FreeCADCmd.exe (build up CAD model without GUI but python scripting) and FreeCADGui.exe (Interactive mode)"""),
("Mod","Source code for all modules with each module in one subfolder",
"""Source code of ome modules will be explained in later section"""),
("Tools","Tool to build the source code: fcbt.py",
"""fcbt can generate a basic module from _TEMPLATE_ folder, """),
("Doc","Manual and documentation generated by doxygen"),
("CMakeLists.txt","topmost CMake config file, kind of high level cross-platform makefile generator",
"""
Module developer needs not to care about this file, CMakeLists.txt within module will be automatically included.
"""),
("FCConfig.h","preprocessor shared by all source for portability on diff platforms"),
("fc.sh","export environment variable for CASROOT -> OpenCASCADE",
"""
Module developer needs not to care about this file
"""),
("3rdParty","Third party code integration",
"""boost.CMakeLists.txt CxImage Pivy-0.5 zlib.CMakeLists.txt CMakeLists.txt Pivy salomesmesh"""),
("zipios++","source of zipios++ lib"),
("Build","set the version of FreeCAD"),
("MacAppBundle","config file to generate MacOSX bundle (installer)"),
("XDGData","FreeCAD.desktop file for linux package compliant Linux freedesktop standard"),
("WindowsInstaller","config files to generate windows installer"),
] | [
2,
4238,
1912,
319,
3232,
34,
2885,
657,
13,
1558,
7959,
198,
2,
12957,
4370,
25,
13130,
12,
2919,
198,
7416,
41092,
41888,
198,
198,
7203,
14881,
2430,
21077,
6860,
6097,
329,
3232,
34,
2885,
1600,
198,
37811,
11748,
355,
3232,
34,
... | 3.607945 | 579 |
# This file defines messages used by the Astron "Client Protocol"
# See https://astron.github.io/astron/en/master/10-client.html.
CLIENT_HELLO = 1 # Sent to handshake the protocol
CLIENT_HELLO_RESP = 2 #
CLIENT_DISCONNECT = 3 # Sent when client is leaving.
CLIENT_EJECT = 4 # Received when server is booting the client.
CLIENT_HEARTBEAT = 5
CLIENT_OBJECT_SET_FIELD = 120 # Sent and received when a DO field is updated
CLIENT_OBJECT_SET_FIELDS = 121 # Sent and received when multiple DO fields are updated
CLIENT_OBJECT_LOCATION = 140 # Received when DO changes location within clients interests
CLIENT_OBJECT_LEAVING = 132 # Received when DO leaves clients interests
CLIENT_OBJECT_LEAVING_OWNER = 161 # Received when client loses ownership of a DO
CLIENT_ENTER_OBJECT_REQUIRED = 142 # Received when a DO enters the clients interest visibility.
CLIENT_ENTER_OBJECT_REQUIRED_OTHER = 143 # As above; DO has optional fields.
CLIENT_ENTER_OBJECT_REQUIRED_OWNER = 172 # Received when the client gets ownership of a DO.
CLIENT_ENTER_OBJECT_REQUIRED_OTHER_OWNER = 173 # As above; DO has optional fields.
CLIENT_ADD_INTEREST = 200 # Sent to set interest in a location
CLIENT_ADD_INTEREST_MULTIPLE = 201 # Sent to set interest in multiple locations
CLIENT_REMOVE_INTEREST = 203 # Sent to remove interest in a location
CLIENT_DONE_INTEREST_RESP = 204 # Received when setting an interest in a location has been set and all relevant DOs have entered
| [
2,
770,
2393,
15738,
6218,
973,
416,
262,
25398,
366,
11792,
20497,
1,
198,
2,
4091,
3740,
1378,
459,
1313,
13,
12567,
13,
952,
14,
459,
1313,
14,
268,
14,
9866,
14,
940,
12,
16366,
13,
6494,
13,
198,
198,
5097,
28495,
62,
13909,
... | 3.021956 | 501 |
from django.shortcuts import render
from django.http import HttpResponse,HttpResponseRedirect
from django.views.generic import View
from .forms import LoginForm,RegisterForm
from .models import List
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth import login,logout,authenticate
from django.contrib.auth.models import User
# Create your views here.
@csrf_exempt
@csrf_exempt | [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
11,
43481,
31077,
7738,
1060,
198,
6738,
42625,
14208,
13,
33571,
13,
41357,
1330,
3582,
198,
6738,
764,
23914,
1330,
23093,
847... | 3.420168 | 119 |
from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout
from PyQt5.QtGui import QColor, QPainter
from PyQt5.QtCore import Qt
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
w = MyWidget()
w.show()
sys.exit(app.exec_()) | [
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
54,
312,
11407,
1330,
1195,
23416,
11,
1195,
38300,
11,
1195,
53,
14253,
32517,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
8205,
72,
1330,
1195,
10258,
11,
1195,
38490,
353,
198,
6738,
9485,
... | 2.318966 | 116 |
# coding: utf8
import pydot
from .base import Graph
class Pipeline(Graph):
"""Represents a pipeline diagram.
`nodes` is a dictionary of nodes `name` -> `shape`.
`edges` is a list of edges, tuples of the form `(name, name, label)`.
`**kwargs` is a list of args passed to `pydot` directly.
>>> Pipeline(['HULK', 'AST-HULK', 'AST-CIL', 'MIPS'], [
(0, 1, 'lexer/parser'),
(1, 1, 'semántica'),
(1, 2, 'generación'),
(2, 3, 'generación')
])
"""
| [
2,
19617,
25,
3384,
69,
23,
198,
198,
11748,
279,
5173,
313,
198,
198,
6738,
764,
8692,
1330,
29681,
628,
198,
4871,
37709,
7,
37065,
2599,
198,
220,
220,
220,
37227,
6207,
6629,
257,
11523,
16362,
13,
628,
220,
220,
220,
4600,
77,
... | 2.258929 | 224 |
# Generated by Django 3.0.9 on 2020-09-03 10:45
from django.db import migrations, models
from eth_abi.exceptions import DecodingError
from web3.exceptions import BadFunctionCallOutput
from gnosis.eth import EthereumClientProvider
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
24,
319,
12131,
12,
2931,
12,
3070,
838,
25,
2231,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
198,
6738,
4555,
62,
17914,
13,
1069,
11755,
1330,
4280,
7656,
... | 3.405797 | 69 |
from Server.utils.enums import MSG_FROM_MIRROR_KEYS
from Server.utils.enums import MSG_FROM_KINECT_KEYS
from Server.utils.enums import MSG_TO_MIRROR_KEYS
from amqpstorm import Connection
import configparser
import queue
import json
from datetime import datetime
# The format in which messages are shared accross the service
class Messaging:
# Callback for consuming incoming messages from the Mirror
# Callback for consuming incoming messages from the Kinect
''' Threadsafe sending of messages '''
''' HELPERS '''
| [
6738,
9652,
13,
26791,
13,
268,
5700,
1330,
49064,
62,
10913,
2662,
62,
44,
4663,
16411,
62,
7336,
16309,
198,
6738,
9652,
13,
26791,
13,
268,
5700,
1330,
49064,
62,
10913,
2662,
62,
42,
1268,
9782,
62,
7336,
16309,
198,
6738,
9652,
... | 3.421384 | 159 |
r"""
Symmetric Functions
- :class:`Introduction to Symmetric Functions <sage.combinat.sf.sf.SymmetricFunctions>`
- :ref:`sage.combinat.sf.sfa`
- :ref:`sage.combinat.sf.sf`
- :ref:`sage.combinat.sf.classical`
- :ref:`sage.combinat.sf.schur`
- :ref:`sage.combinat.sf.monomial`
- :ref:`sage.combinat.sf.multiplicative`
- :ref:`sage.combinat.sf.elementary`
- :ref:`sage.combinat.sf.homogeneous`
- :ref:`sage.combinat.sf.powersum`
- :ref:`sage.combinat.sf.character`
- :ref:`sage.combinat.sf.orthogonal`
- :ref:`sage.combinat.sf.symplectic`
- :ref:`sage.combinat.sf.dual`
- :ref:`sage.combinat.sf.orthotriang`
- :ref:`sage.combinat.sf.kfpoly`
- :ref:`sage.combinat.sf.hall_littlewood`
- :ref:`sage.combinat.sf.hecke`
- :ref:`sage.combinat.sf.jack`
- :ref:`k-Schur Functions <sage.combinat.sf.new_kschur>`
- :ref:`sage.combinat.sf.k_dual`
- :ref:`sage.combinat.sf.llt`
- :ref:`sage.combinat.sf.macdonald`
- :ref:`sage.combinat.sf.ns_macdonald`
- :ref:`sage.combinat.sf.witt`
"""
# install the docstring of this module to the containing package
from sage.misc.namespace_package import install_doc
install_doc(__package__, __doc__)
from sage.misc.lazy_import import lazy_import
# In the long run, this will be the single entry point
# Nothing else will be exported
lazy_import('sage.combinat.sf.sf', 'SymmetricFunctions')
# Advanced stuff:
lazy_import('sage.combinat.sf.kfpoly', 'KostkaFoulkesPolynomial')
lazy_import('sage.combinat.sf.ns_macdonald', ['NonattackingFillings',
'AugmentedLatticeDiagramFilling',
'LatticeDiagram'])
| [
81,
37811,
198,
13940,
3020,
19482,
40480,
198,
198,
12,
1058,
4871,
25,
63,
21906,
284,
1632,
3020,
19482,
40480,
1279,
82,
496,
13,
785,
8800,
265,
13,
28202,
13,
28202,
13,
13940,
3020,
19482,
24629,
2733,
29,
63,
198,
198,
12,
1... | 2.101167 | 771 |
#!/usr/bin/python
"""
ADE Web API
Copyright (C) 2011-2015 "Sébastien Celles" <s.celles@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
"""
import datetime
import hashlib
import logging
import time
from xml.etree import ElementTree as ET
import pytz
import requests
from sentry_sdk import add_breadcrumb
from .exception import ExceptionFactory
logger = logging.getLogger(__name__)
def hide_string(s, char_replace='*'):
"""Returns a string of same length but with '*'"""
return char_replace * len(s)
def hide_dict_values(d, hidden_keys=['password'], char_replace='*'):
"""Returns a dictionnary with some hidden values (such as password)
when a dict is given.
Characters are replaced with char_replace - default is '*'
Values which need to be hidden are given in a list named hidden_keys"""
d_hidden = d.copy()
for key in hidden_keys:
if key in d_hidden.keys():
d_hidden[key] = hide_string(d_hidden[key], char_replace)
return d_hidden
def replace_dict_values(d, replace_keys):
"""Returns a dictionnary with replaced values
replace_keys is a dictionary
replace_keys = {'key': 'value_to_display'}"""
d_hidden = d.copy()
for key, replace_value in replace_keys.items():
if key in d_hidden.keys():
d_hidden[key] = replace_value
return d_hidden
ENV_VAR_ROOT = 'ADE_WEB_API'
def get_info(key, default_value=None):
"""Returns a value (url, login, password)
using either default_value or using environment variable"""
ENV_VAR_KEY = ENV_VAR_ROOT + "_" + key.upper()
if default_value == '' or default_value is None:
try:
import os
return os.environ[ENV_VAR_KEY]
except:
logging.warning(
f"You should pass {key} using --{key} or using environment variable {ENV_VAR_KEY!r}"
)
return default_value
else:
return default_value
class HiddenDict(dict):
"""Class to manage keys/values like a dict
but that can "hide" some values
"""
class Config(HiddenDict):
"""Config class
password is never displayed but is stored in this class"""
@staticmethod
def timestamp2datetime(ts, tz=pytz.utc):
"""Converts Unix timestamp to Python datetime.datetime"""
return datetime.datetime.fromtimestamp(float(ts) / 1000.0, tz)
class BaseObject:
"""Base object class which can be easily initialize using
keyword parameters
Attributes can be access like a dict obj['myattribute']"""
class Project(BaseObject):
"""Project object
uid is automatically convert to datetime"""
class Resource(BaseObject):
"""Base object for resource (Trainee, Room, Instructor...)"""
pass
class Date(BaseObject):
"""Date object
time is automatically convert to datetime"""
class ObjectFactory:
"""A factory (see pattern factory) which can create Resource, Trainee, Room,
Instructor, Project, Activity, Event, Cost, Caracteristic, Date object"""
class ADEWebAPI:
"""Class to manage ADE Web API (reader only)"""
def _send_request(self, func, **params):
"""Send a request"""
params['function'] = func
if 'sessionId' not in params.keys():
if self.sessionId is not None:
params['sessionId'] = self.sessionId
# self.logger.debug("send %s" % hide_dict_values(params))
start = time.time()
response = requests.get(self.url, params=params)
elapsed_rq = time.time() - start
response.encoding = 'UTF-8'
data = response.text
add_breadcrumb(category='api', message=f"{data}")
element = ET.fromstring(data)
try:
self._parse_error(element)
except Exception as e:
if element.attrib['name'] == 'java.lang.NullPointerException':
raise e
else:
logger.error(f"{params}")
logger.error(f"{data}")
if 'hash' in params and params['hash']:
d_hash = hashlib.md5(data.encode('utf-8')).hexdigest()
return {'element': element, 'hash': d_hash}
return element
def connect(self):
"""Connect to server"""
function = 'connect'
element = self._send_request(function, login=self.login, password=self.password)
returned_sessionId = element.attrib["id"]
self.sessionId = returned_sessionId
return returned_sessionId is not None
def disconnect(self):
"""Disconnect from server"""
function = 'disconnect'
element = self._send_request(function)
returned_sessionId = element.attrib["sessionId"]
return returned_sessionId == self.sessionId
def _test_opt_params(self, given_params, function):
"""Test if kwargs parameters are in allowed optional parameters
of a given method"""
opt_params = self.opt_params[function]
given_params = set(given_params.keys())
msg = (
"One (or many) parameters of '%s' call are not allowed. %s is not in %s"
% ('getResources', given_params - opt_params, opt_params)
)
assert given_params <= opt_params, msg
def _create_list_of_dicts(self, category, lst):
"""Returns a list of dict (attributes of XML element)"""
return map(lambda elt: elt.attrib, lst)
def _create_list_of_objects(self, category, lst):
"""Returns a list of object using factory"""
return map(lambda elt: self.factory.create_object(category, **elt.attrib), lst)
def getProjects(self, **kwargs):
"""Returns (list of) projects"""
function = 'getProjects'
element = self._send_request(function, **kwargs)
lst_projects = element.findall('project')
lst_projects = self._create_list_of('project', lst_projects)
return lst_projects
def setProject(self, projectId):
"""Set current project"""
function = 'setProject'
element = self._send_request(function, projectId=projectId)
returned_projectId = element.attrib["projectId"]
returned_sessionId = element.attrib["sessionId"]
result = returned_sessionId == self.sessionId and returned_projectId == str(
projectId
)
if result:
self.project_init()
return result
def getResources(self, **kwargs):
"""Returns resource(s) from several optional arguments"""
function = 'getResources'
self._test_opt_params(kwargs, function)
response = self._send_request(function, **kwargs)
tree = self._tree(response['element'])
return {'data': tree, 'hash': response['hash']}
def getActivities(self, **kwargs):
"""Returns activity(ies) from several optional arguments"""
function = 'getActivities'
self._test_opt_params(kwargs, function)
typ = 'activity'
element = self._send_request(function, **kwargs)
lst_activities = element.findall(typ)
lst_activities = self._create_list_of(typ, lst_activities)
return lst_activities
def getEvents(self, **kwargs):
"""Returns event(s) from several optional arguments"""
function = 'getEvents'
self._test_opt_params(kwargs, function)
element = self._send_request(function, **kwargs)
tree = self._tree(element, **kwargs)
return {'data': tree}
def getCosts(self, **kwargs):
"""Returns cost(s) from several optional arguments"""
function = 'getCosts'
self._test_opt_params(kwargs, function)
element = self._send_request(function, **kwargs)
typ = 'cost'
lst = element.findall(typ)
lst = self._create_list_of(typ, lst)
return lst
def getCaracteristics(self, **kwargs):
"""Returns caracteristic(s) from several optional arguments"""
function = 'getCaracteristics'
self._test_opt_params(kwargs, function)
element = self._send_request(function, **kwargs)
typ = 'caracteristic'
lst = element.findall(typ)
lst = self._create_list_of(typ, lst)
return lst
def getDate(self, week, day, slot):
"""Returns date object from week, day, slot"""
function = 'getDate'
# self._test_opt_params(kwargs, function) # no keyword arguments (kwargs)
element = self._send_request(function, week=week, day=day, slot=slot)
date = Date(**element.attrib)
return date
# def imageET(self, resources, weeks, days, **kwargs):
def imageET(self, **kwargs):
"""Returns a GIF image (binary)"""
function = 'imageET'
if 'function' not in kwargs.keys():
kwargs['function'] = function
# self._test_opt_params(kwargs, function)
if 'sessionId' not in kwargs.keys():
if self.sessionId is not None:
kwargs['sessionId'] = self.sessionId
self.logger.debug("send %s" % hide_dict_values(kwargs))
response = requests.get(self.url, params=kwargs)
try:
element = ET.fromstring(response.text)
xml_response = True
except:
xml_response = False
if xml_response:
self._parse_error(element)
else: # binary response (gif)
return response.content
def first_date(self):
"""Returns first date of current project"""
self._first_date = self.getDate(0, 0, 0)['time'].date()
return self._first_date
def week_id(self, date=datetime.date.today()):
"""Returns week number for a given date"""
# week = ((date1-date0)/7).days
if self._first_date is None:
self._first_date = self.first_date()
week = int((date - self._first_date).days / 7)
return week
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
37811,
198,
220,
220,
220,
5984,
36,
5313,
7824,
628,
220,
220,
220,
15069,
357,
34,
8,
2813,
12,
4626,
366,
50,
2634,
65,
459,
2013,
12440,
274,
1,
1279,
82,
13,
3846,
274,
31,
14... | 2.527102 | 4,151 |
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: FBOutput
import tdw.flatbuffers
| [
2,
6338,
7560,
416,
262,
21939,
36474,
364,
17050,
11,
466,
407,
13096,
201,
198,
201,
198,
2,
25745,
25,
13186,
26410,
201,
198,
201,
198,
11748,
41560,
86,
13,
38568,
36873,
364,
201,
198
] | 3.457143 | 35 |
__version__ = 0.2
import sys
if sys.argv[0] != 'setup.py':
from ec2_helper import *
from cloudwatch_helper import *
from elb_helper import *
from model import *
from metric_queue import *
from executor import *
try:
from sockets import *
except Exception:
pass
| [
834,
9641,
834,
796,
657,
13,
17,
198,
198,
11748,
25064,
198,
198,
361,
25064,
13,
853,
85,
58,
15,
60,
14512,
705,
40406,
13,
9078,
10354,
628,
220,
220,
220,
422,
9940,
17,
62,
2978,
525,
1330,
1635,
198,
220,
220,
220,
422,
... | 2.504 | 125 |
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 22 09:45:25 2019
@author: Guo
"""
import socket
import sys
import os
import numpy as np
import math
expected_frame = 0
##创建 socket 对象
server_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server_sock.bind(("127.0.0.1", 8888))
file_size_byte, ad = server_sock.recvfrom(1024)
file_size = 0
file_size = int(file_size_byte)
print("recv file size:", file_size)
#setsock(server_sock)
while(1):
data, ad = server_sock.recvfrom(1024)
recv_str = str(data)
#find_error()
print(recv_str)
return_msg = bytes("1", encoding='gbk')
addr = ('127.0.0.1', 8889)
server_sock.sendto(return_msg, addr)
print("server return msg")
break
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
2892,
2758,
2534,
7769,
25,
2231,
25,
1495,
13130,
201,
198,
201,
198,
31,
9800,
25,
1962,
78,
201,
198,
37811,
201,
198,
201,
198,
... | 2.055556 | 378 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..utils import (
js_to_json,
remove_end,
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
33918,
198,
11748,
302,
198,
6738,
764,
11321,
1330,
14151,
11627,
40450,
198,
6738,
11485,
26... | 2.69697 | 66 |
import numpy as np
from scipy.stats import rankdata
import itertools
from PyEMD import EMD
import nolds
"""Complete package for calculating any kind of multiscale entropy features """
"""Coarse graining methods
- Normal
- Moving average (X)
- Volatility series (X)
- Moving average volatility series (X)
- EMD- Coarse to fine series (X) --> undetermind scale --> put limitation on it
- EMD - Fine to coarse series (X) --> undetermind scale --> put limitation on it
- Composite coarse graining (X)
Entropy measurement methods
THe permutation methods mentioned are returned in the same way as mentioned above
- Permutation entropy (X)
- Modified permutation entropy (X)
- Weighted permutation entropy (X)
- Weighted modified PE (X)
- Sample entropy (X)
- Composite variation for above (X)"""
def add_perm(perm):
"""Add extra permutations for modified PE case """
perm.append((1,1,0))
perm.append((0,1,1))
perm.append((1,0,1))
perm.append((0,1,0))
perm.append((0,0,0))
perm.append((0,0,1))
perm.append((1,0,0))
return perm
def get_perms(m,mod_flag=0):
"""get all the permutation for entropy calculation """
perm = (list(itertools.permutations(range(m))))
#adding similar instances
if mod_flag==1:
perm=add_perm(perm)
perm=np.array(perm)
return np.array(perm)
def get_1s_pe(x,m=3,lag=1,mod_flag=0,typ=''):
"""All the combinations of permutation entropy for a single scale """
mot_x, wt_x=make_mot_series(x,m,lag,mod_flag=0)
n_p=len(get_perms(m,mod_flag))
dist=get_mot_dist(mot_x,n_p,wt_x,typ=typ)
pe=perm_ent(dist)
return np.array(pe)
def make_mot_series(time_series,m=3,lag=1,mod_flag=0):
"""Creates a motif series and returns their with the motif distribution
Input:
- time_series
- m: permutaiton degree, lag: permutation lag
- mod_flag: flag to use modfied PE
Output:
- motif time series,
- corrsponding weights
"""
time_series=np.array(time_series).squeeze()
n=len(time_series)
mot_x, wt_x, mod_mot_x=[], [], []
perms=get_perms(m,0)
perms_mod=get_perms(m,1)
for i in range(n - lag * (m - 1)):
smp=time_series[i:i + lag * m:lag]
wt=np.var(smp)
#orginal dense ranking of data
mot_array1 = np.array(rankdata(smp, method='dense')-1)
val=np.where(np.all(perms==mot_array1,axis=1))[0]
val_mod=val
if val.shape[0]==0:
mot_array = np.array(rankdata(smp, method='ordinal')-1)
val=np.where(np.all(perms==mot_array,axis=1))[0]
val_mod=np.where(np.all(perms_mod==mot_array1,axis=1))[0]
mot_x.append(val[0])
mod_mot_x.append(val_mod[0])
wt_x.append(wt)
if mod_flag==1:
return np.array(mod_mot_x), np.array(wt_x)
elif mod_flag==0:
return np.array(mot_x), np.array(wt_x)
def get_mot_dist(mot_x,n_p,wt_x,typ=''):
"""Create the distribution of motifs
Input:
- mot_x: Motif time series,
- n_p: number of permutations,
- wt_x: weight time series
- typ: type of entropy, normal: '', or weighted: 'wt'
Output:
- motif distribution
"""
mot_dist = [0] * n_p
for j in range(n_p):
if typ=='wt':
wts=wt_x[np.where(abs(mot_x-j)==0)]
num_mots=np.ones(len(np.where(abs(mot_x-j)==0)[0]))
mot_dist[j]=sum(np.multiply(num_mots,wts))
else:
mot_dist[j] = len(np.where(abs(mot_x-j)==0)[0])
#removing non occring patterns as it breaks entropy
if len(mot_x)==0:
mot_dist=np.zeros(n_p)*np.nan
return mot_dist
def perm_ent(mot_dist,m=3):
"""Returns permutation entropy for the motif distribution given --> basic function for permutation entropy """
c=mot_dist
c = [element for element in c if element != 0]
p = np.divide(np.array(c), float(sum(c)))
pe = -sum(p * np.log(p))
return pe#/np.log(factorial(m))
def get_mot_ent_dist(RRs,m,lag,typ='',mod_flag=0):
"""
#RR series for all the scales (list of lists)
Returns four kind of motif distributions
--> normal motif distribution ('' + 0)
--> modified motif distribution ('' + 1)
--> weighted motif distribution ('wt' + 0)
--> weighted modified motif distribution ('wt' + 1)
"""
dist=[]
for rr in RRs:
mot_x , wt_x=make_mot_series(rr,m,lag,mod_flag = mod_flag)
n_p=len(get_perms(m,mod_flag))
mot_dist=get_mot_dist(mot_x,n_p,wt_x,typ='')
dist.append(mot_dist) #Contains motif distribution for all the different scales
d_all=[dist]
return d_all
def ord_dist(mot_dist_x,mot_dist_y):
"""Returns ordinal distance between two motif distributions
Not used anywhere in the code """
c_x=mot_dist_x
c_y=mot_dist_y
m=len(c_x)
p_x=np.divide(np.array(c_x), float(sum(c_x)))
p_y=np.divide(np.array(c_y), float(sum(c_y)))
sq_diff=0
for j in range(m):
sq_diff=sq_diff+(p_x[j] -p_y[j])**2
dm=np.sqrt(m/(m-1))*np.sqrt(sq_diff)
return dm
def get_com_mspe(distS,scale,mspe):
"""Calculate center of mass entropy using ordinal distances as weights
NOT USED ANYWHERE IN THE CODE
"""
distS=np.array(distS)
dm_mat=np.zeros((scale,scale))
for i in range(0,scale-1):
for j in range(i,scale):
dm=ord_dist(distS[i],distS[j])
dm_mat[i,j]=dm
dm_mat[j,i]=dm
com_wts=np.zeros(scale)
for i in range(0,scale):
com_wts[i]=np.sum(dm_mat[i,:])/(scale-1)
com_mspe=np.sum(np.multiply(com_wts,mspe))/np.sum(com_wts)
return com_mspe
def calc_mspe(distS):
"""Calculates the scaled permutation entropy and thier oridnal avg and normal average"""
"""Takes an input which is a list of lists where distS[i] is motif dist with scale i """
mspe=[]
scale=len(distS)
for s in range(0,scale):
distm=distS[s]
pe=perm_ent(distm)
mspe.append(pe)
mspe=np.array(mspe)
#com_mspe=get_com_mspe(distS,scale,mspe)
mspe_fin=np.hstack((mspe))
return mspe_fin
def scale_series(x,scale,cg_typ):
"""Get the different scales of the series based on specific scaling type
Except: composite and emd scaling types
Input:
Time series (x), number of scales (scale), scale type (cg_type)
"""
x_scale=[]
if cg_typ=='base':
for i in range(0,len(x),scale):
#not divided by scale
if i+scale<=len(x):
val=np.sum(x[i:i+scale])/len(x[i:i+scale])
x_scale.append(val)
elif cg_typ=='mov_avg':
wts = np.ones(scale) / scale
val=np.convolve(x, wts, mode='valid')
x_scale.append(val)
elif cg_typ=='mom':
for i in range(0,len(x),scale):
#not divided by scale
if i+scale<=len(x):
val=np.std(x[i:i+scale])
x_scale.append(val)
elif cg_typ=='mavg_mom':
for i in range(0,len(x)):
#not divided by scale
if i+scale<=len(x):
val=np.std(x[i:i+scale])
x_scale.append(val)
return np.array(x_scale)
def get_scale_series(xt,scale=5,cg_type='base'):
"""Returns a list of lists with different scaled RR series """
Xs=[]
if cg_type!='f2c' and cg_type!='c2f':
Xs.append(xt)
if cg_type=='comp':
for s in range(2,scale+1):
x_comp=[]
for j in range(0,s):
x_comp.append(scale_series(xt[j:],s,cg_typ='base'))
Xs.append(x_comp)
else:
for s in range(2,scale+1):
Xs.append(scale_series(xt,s,cg_type))
elif cg_type=='f2c' or cg_type=='c2f':
Xs=scale_series_emd(xt,cg_type)
return Xs
def calc_mse(x_cg,ent_typ='mspe',typ='',mod_flag=0,deg=3,lag=1,m=2,r=0.15):
"""
Recieves a scaled series and returns multiscale entropy from it
Possible options:
mspe: multi-scale permutation entropy
- with modifications of modified PE, and weighted PE
msse: multi-scale sample entropy
comp_msse: composite scaled sample entropy
comp_mspe: composite scaled permutation entropy
"""
ent=[]
if ent_typ=='mspe':
d_all= get_mot_ent_dist(x_cg,deg,lag,typ=typ,mod_flag=mod_flag)
for dist in d_all:
#dist will have distribution from given entropy type for all scales
# Squence of values - PE: PE1, PE2,...PEs,
ent.append(calc_mspe(dist))
elif ent_typ=='msse':
for x in x_cg:
x=np.array(x).squeeze()
x_std=np.std(x)
rval=r*x_std
ent.append(nolds.sampen(x,emb_dim=m,tolerance=rval))
elif ent_typ=='comp_msse':
for scl,x in enumerate(x_cg):
cmp=[]
if type(x) is np.ndarray:
x=np.array(x).squeeze()
x_std=np.std(x)
rval=r*x_std
cmp.append(nolds.sampen(x,emb_dim=m,tolerance=rval)/(scl+1))
else:
for x_cp in x:
x_cp=np.array(x_cp).squeeze()
x_std=np.std(x_cp)
rval=r*x_std
cmp.append(nolds.sampen(x_cp,emb_dim=m,tolerance=rval)/(scl+1))
#naking sure infinite doesnt bother the set-up
cmp=np.array(cmp)
cmp=cmp[np.isfinite(cmp)]
ent.append(np.sum(np.array(cmp),axis=0)) #protect against infinities
elif ent_typ=='comp_mspe':
for scl,x in enumerate(x_cg):
cmp=[]
if type(x) is np.ndarray:
cmp.append(get_1s_pe(x,deg,lag,typ=typ ,mod_flag=mod_flag)/(scl+1))
else:
for x_cp in x:
cmp.append(get_1s_pe(x_cp,deg,lag,typ=typ ,mod_flag=mod_flag)/(scl+1))
ent.append(np.sum(np.array(cmp),axis=0)) #protect against infinities
ent=np.array(ent).T
return np.array(ent)
| [
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
13,
34242,
1330,
4279,
7890,
198,
11748,
340,
861,
10141,
198,
6738,
9485,
3620,
35,
1330,
412,
12740,
198,
11748,
645,
335,
82,
198,
198,
37811,
20988,
5301,
329,
26019,
597,
16... | 2.213065 | 3,980 |
'''
Map PDBe residue to SIFTS residue
'''
#PDBDIR = "pdbtest"
#XMLDIR = "xmltest"
PDBDIR = "pdb"
#PDBDIR = "../ligandNet/2013_biounits"
XMLDIR = "xml"
PAIRFILE = "pair.txt"
SAMPLESIZE = 3000
| [
7061,
6,
198,
220,
220,
220,
9347,
14340,
3856,
35186,
284,
311,
5064,
4694,
35186,
198,
7061,
6,
198,
198,
2,
5760,
14529,
4663,
796,
366,
79,
9945,
9288,
1,
198,
2,
55,
5805,
34720,
796,
366,
87,
76,
2528,
395,
1,
198,
5760,
1... | 2.0625 | 96 |
from .backbone import Backbone
from .box_generator import SCRLBoxGenerator
from .heads import SingleLayerLinearHead, TwoLayerLinearHead
| [
6738,
764,
1891,
15992,
1330,
5157,
15992,
198,
6738,
764,
3524,
62,
8612,
1352,
1330,
6374,
7836,
14253,
8645,
1352,
198,
6738,
764,
16600,
1330,
14206,
49925,
14993,
451,
13847,
11,
4930,
49925,
14993,
451,
13847,
198
] | 3.675676 | 37 |
#!/usr/bin/env python
u"""
test_font_files.py
"""
import os
import pytest
import warnings
import matplotlib.font_manager
import matplotlib.pyplot as plt
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
84,
37811,
198,
9288,
62,
10331,
62,
16624,
13,
9078,
198,
37811,
198,
11748,
28686,
198,
11748,
12972,
9288,
198,
11748,
14601,
198,
11748,
2603,
29487,
8019,
13,
10331,
62,
37153,
198,... | 2.942308 | 52 |