code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
import cv2
import numpy as np
cap = cv2.VideoCapture(0)
while True:
ret, frame = cap.read()
cv2.imshow('frame', frame)
cap.release()
cv2.destroyAllWindows() | [
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.imshow"
] | [((40, 59), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (56, 59), False, 'import cv2\n'), ((154, 177), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (175, 177), False, 'import cv2\n'), ((109, 135), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (119, 135), False, 'import cv2\n')] |
# import key libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from wordcloud import WordCloud, STOPWORDS
import nltk
import re
from nltk.stem import PorterStemmer, WordNetLemmatizer
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize, sent_tokenize
import gensim
from gensim.utils import simple_preprocess
from gensim.parsing.preprocessing import STOPWORDS
import plotly.express as px
# Tensorflow
import tensorflow as tf
from tensorflow.keras.preprocessing.text import one_hot,Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Embedding, Input, LSTM, Conv1D, MaxPool1D, Bidirectional, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.utils import to_categorical
stock_df = pd.read_csv("stock_sentiment.csv")
# check for null values
stock_df.isnull().sum()
import string
string.punctuation
Test = '$I love AI & Machine learning!!'
Test_punc_removed = [char for char in Test if char not in string.punctuation]
Test_punc_removed_join = ''.join(Test_punc_removed)
Test_punc_removed_join
Test = 'Good morning beautiful people :)... #I am having fun learning Finance with Python!!'
Test_punc_removed = [char for char in Test if char not in string.punctuation]
Test_punc_removed
# Join the characters again to form the string.
Test_punc_removed_join = ''.join(Test_punc_removed)
Test_punc_removed_join
# Let's define a function to remove punctuations
def remove_punc(message):
Test_punc_removed = [char for char in message if char not in string.punctuation]
Test_punc_removed_join = ''.join(Test_punc_removed)
return Test_punc_removed_join
# Let's remove punctuations from our dataset
stock_df['Text Without Punctuation'] = stock_df['Text'].apply(remove_punc)
stock_df['Text'][2]
stock_df['Text Without Punctuation'][2]
# download stopwords
nltk.download("stopwords")
stopwords.words('english')
# Obtain additional stopwords from nltk
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
stop_words.extend(['from', 'subject', 're', 'edu', 'use','will','aap','co','day','user','stock','today','week','year'])
# stop_words.extend(['from', 'subject', 're', 'edu', 'use','will','aap','co','day','user','stock','today','week','year', 'https'])
# Remove stopwords and remove short words (less than 2 characters)
def preprocess(text):
result = []
for token in gensim.utils.simple_preprocess(text):
if len(token) >= 3 and token not in stop_words:
result.append(token)
return result
# apply pre-processing to the text column
stock_df['Text Without Punc & Stopwords'] = stock_df['Text Without Punctuation'].apply(preprocess)
stock_df['Text'][0]
stock_df['Text Without Punc & Stopwords'][0]
# join the words into a string
#stock_df['Processed Text 2'] = stock_df['Processed Text 2'].apply(lambda x: " ".join(x))
# join the words into a string
stock_df['Text Without Punc & Stopwords Joined'] = stock_df['Text Without Punc & Stopwords'].apply(lambda x: " ".join(x))
# plot the word cloud for text with positive sentiment
plt.figure(figsize = (20, 20))
wc = WordCloud(max_words = 1000 , width = 1600 , height = 800).generate(" ".join(stock_df[stock_df['Sentiment'] == 1]['Text Without Punc & Stopwords Joined']))
plt.imshow(wc, interpolation = 'bilinear');
nltk.download('punkt')
# word_tokenize is used to break up a string into words
print(stock_df['Text Without Punc & Stopwords Joined'][0])
print(nltk.word_tokenize(stock_df['Text Without Punc & Stopwords Joined'][0]))
# Obtain the maximum length of data in the document
# This will be later used when word embeddings are generated
maxlen = -1
for doc in stock_df['Text Without Punc & Stopwords Joined']:
tokens = nltk.word_tokenize(doc)
if(maxlen < len(tokens)):
maxlen = len(tokens)
print("The maximum number of words in any document is:", maxlen)
tweets_length = [ len(nltk.word_tokenize(x)) for x in stock_df['Text Without Punc & Stopwords Joined'] ]
# Plot the distribution for the number of words in a text
fig = px.histogram(x = tweets_length, nbins = 50)
# Obtain the total words present in the dataset
list_of_words = []
for i in stock_df['Text Without Punc & Stopwords']:
for j in i:
list_of_words.append(j)
# Obtain the total number of unique words
total_words = len(list(set(list_of_words)))
total_words
# split the data into test and train
X = stock_df['Text Without Punc & Stopwords']
y = stock_df['Sentiment']
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.1)
# Create a tokenizer to tokenize the words and create sequences of tokenized words
tokenizer = Tokenizer(num_words = total_words)
tokenizer.fit_on_texts(X_train)
# Training data
train_sequences = tokenizer.texts_to_sequences(X_train)
# Testing data
test_sequences = tokenizer.texts_to_sequences(X_test)
print("The encoding for document\n", X_train[1:2],"\n is: ", train_sequences[1])
# Add padding to training and testing
padded_train = pad_sequences(train_sequences, maxlen = 29, padding = 'post', truncating = 'post')
padded_test = pad_sequences(test_sequences, maxlen = 29, truncating = 'post')
for i, doc in enumerate(padded_train[:3]):
print("The padded encoding for document:", i+1," is:", doc)
# Convert the data to categorical 2D representation
y_train_cat = to_categorical(y_train, 2)
y_test_cat = to_categorical(y_test, 2)
# Add padding to training and testing
padded_train = pad_sequences(train_sequences, maxlen = 15, padding = 'post', truncating = 'post')
padded_test = pad_sequences(test_sequences, maxlen = 15, truncating = 'post')
# Sequential Model
model = Sequential()
# embedding layer
model.add(Embedding(total_words, output_dim = 512))
# Bi-Directional RNN and LSTM
model.add(LSTM(256))
# Dense layers
model.add(Dense(128, activation = 'relu'))
model.add(Dropout(0.3))
model.add(Dense(2,activation = 'softmax'))
model.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['acc'])
model.summary()
# train the model
model.fit(padded_train, y_train_cat, batch_size = 32, validation_split = 0.2, epochs = 2)
# make prediction
pred = model.predict(padded_test)
# make prediction
prediction = []
for i in pred:
prediction.append(np.argmax(i))
# list containing original values
original = []
for i in y_test_cat:
original.append(np.argmax(i))
# acuracy score on text data
from sklearn.metrics import accuracy_score
accuracy = accuracy_score(original, prediction)
accuracy
# Plot the confusion matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(original, prediction)
sns.heatmap(cm, annot = True)
| [
"pandas.read_csv",
"nltk.download",
"tensorflow.keras.preprocessing.sequence.pad_sequences",
"tensorflow.keras.layers.Dense",
"gensim.utils.simple_preprocess",
"matplotlib.pyplot.imshow",
"nltk.corpus.stopwords.words",
"tensorflow.keras.models.Sequential",
"sklearn.metrics.confusion_matrix",
"tens... | [((925, 959), 'pandas.read_csv', 'pd.read_csv', (['"""stock_sentiment.csv"""'], {}), "('stock_sentiment.csv')\n", (936, 959), True, 'import pandas as pd\n'), ((2040, 2066), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {}), "('stopwords')\n", (2053, 2066), False, 'import nltk\n'), ((2068, 2094), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (2083, 2094), False, 'from nltk.corpus import stopwords\n'), ((2187, 2213), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (2202, 2213), False, 'from nltk.corpus import stopwords\n'), ((3306, 3334), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 20)'}), '(figsize=(20, 20))\n', (3316, 3334), True, 'import matplotlib.pyplot as plt\n'), ((3500, 3540), 'matplotlib.pyplot.imshow', 'plt.imshow', (['wc'], {'interpolation': '"""bilinear"""'}), "(wc, interpolation='bilinear')\n", (3510, 3540), True, 'import matplotlib.pyplot as plt\n'), ((3547, 3569), 'nltk.download', 'nltk.download', (['"""punkt"""'], {}), "('punkt')\n", (3560, 3569), False, 'import nltk\n'), ((4296, 4335), 'plotly.express.histogram', 'px.histogram', ([], {'x': 'tweets_length', 'nbins': '(50)'}), '(x=tweets_length, nbins=50)\n', (4308, 4335), True, 'import plotly.express as px\n'), ((4818, 4855), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.1)'}), '(X, y, test_size=0.1)\n', (4834, 4855), False, 'from sklearn.model_selection import train_test_split\n'), ((4955, 4987), 'tensorflow.keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {'num_words': 'total_words'}), '(num_words=total_words)\n', (4964, 4987), False, 'from tensorflow.keras.preprocessing.text import one_hot, Tokenizer\n'), ((5309, 5385), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['train_sequences'], {'maxlen': '(29)', 'padding': '"""post"""', 'truncating': '"""post"""'}), "(train_sequences, maxlen=29, padding='post', truncating='post')\n", (5322, 5385), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((5407, 5466), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['test_sequences'], {'maxlen': '(29)', 'truncating': '"""post"""'}), "(test_sequences, maxlen=29, truncating='post')\n", (5420, 5466), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((5649, 5675), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (['y_train', '(2)'], {}), '(y_train, 2)\n', (5663, 5675), False, 'from tensorflow.keras.utils import to_categorical\n'), ((5690, 5715), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (['y_test', '(2)'], {}), '(y_test, 2)\n', (5704, 5715), False, 'from tensorflow.keras.utils import to_categorical\n'), ((5771, 5847), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['train_sequences'], {'maxlen': '(15)', 'padding': '"""post"""', 'truncating': '"""post"""'}), "(train_sequences, maxlen=15, padding='post', truncating='post')\n", (5784, 5847), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((5869, 5928), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['test_sequences'], {'maxlen': '(15)', 'truncating': '"""post"""'}), "(test_sequences, maxlen=15, truncating='post')\n", (5882, 5928), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((5962, 5974), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (5972, 5974), False, 'from tensorflow.keras.models import Sequential\n'), ((6785, 6821), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['original', 'prediction'], {}), '(original, prediction)\n', (6799, 6821), False, 'from sklearn.metrics import accuracy_score\n'), ((6913, 6951), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['original', 'prediction'], {}), '(original, prediction)\n', (6929, 6951), False, 'from sklearn.metrics import confusion_matrix\n'), ((6953, 6980), 'seaborn.heatmap', 'sns.heatmap', (['cm'], {'annot': '(True)'}), '(cm, annot=True)\n', (6964, 6980), True, 'import seaborn as sns\n'), ((2595, 2631), 'gensim.utils.simple_preprocess', 'gensim.utils.simple_preprocess', (['text'], {}), '(text)\n', (2625, 2631), False, 'import gensim\n'), ((3694, 3765), 'nltk.word_tokenize', 'nltk.word_tokenize', (["stock_df['Text Without Punc & Stopwords Joined'][0]"], {}), "(stock_df['Text Without Punc & Stopwords Joined'][0])\n", (3712, 3765), False, 'import nltk\n'), ((3973, 3996), 'nltk.word_tokenize', 'nltk.word_tokenize', (['doc'], {}), '(doc)\n', (3991, 3996), False, 'import nltk\n'), ((6007, 6045), 'tensorflow.keras.layers.Embedding', 'Embedding', (['total_words'], {'output_dim': '(512)'}), '(total_words, output_dim=512)\n', (6016, 6045), False, 'from tensorflow.keras.layers import Dense, Flatten, Embedding, Input, LSTM, Conv1D, MaxPool1D, Bidirectional, Dropout\n'), ((6093, 6102), 'tensorflow.keras.layers.LSTM', 'LSTM', (['(256)'], {}), '(256)\n', (6097, 6102), False, 'from tensorflow.keras.layers import Dense, Flatten, Embedding, Input, LSTM, Conv1D, MaxPool1D, Bidirectional, Dropout\n'), ((6133, 6162), 'tensorflow.keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (6138, 6162), False, 'from tensorflow.keras.layers import Dense, Flatten, Embedding, Input, LSTM, Conv1D, MaxPool1D, Bidirectional, Dropout\n'), ((6177, 6189), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (6184, 6189), False, 'from tensorflow.keras.layers import Dense, Flatten, Embedding, Input, LSTM, Conv1D, MaxPool1D, Bidirectional, Dropout\n'), ((6202, 6232), 'tensorflow.keras.layers.Dense', 'Dense', (['(2)'], {'activation': '"""softmax"""'}), "(2, activation='softmax')\n", (6207, 6232), False, 'from tensorflow.keras.layers import Dense, Flatten, Embedding, Input, LSTM, Conv1D, MaxPool1D, Bidirectional, Dropout\n'), ((3344, 3393), 'wordcloud.WordCloud', 'WordCloud', ([], {'max_words': '(1000)', 'width': '(1600)', 'height': '(800)'}), '(max_words=1000, width=1600, height=800)\n', (3353, 3393), False, 'from wordcloud import WordCloud, STOPWORDS\n'), ((4147, 4168), 'nltk.word_tokenize', 'nltk.word_tokenize', (['x'], {}), '(x)\n', (4165, 4168), False, 'import nltk\n'), ((6578, 6590), 'numpy.argmax', 'np.argmax', (['i'], {}), '(i)\n', (6587, 6590), True, 'import numpy as np\n'), ((6683, 6695), 'numpy.argmax', 'np.argmax', (['i'], {}), '(i)\n', (6692, 6695), True, 'import numpy as np\n')] |
import sys
import math
positions = [int(x) for x in sys.stdin.readline().strip().split(",")]
mean = sum(positions) / len(positions)
mUpper = math.ceil(mean)
mLower = math.floor(mean)
cost = {mUpper: 0, mLower: 0}
for pos in positions:
for m in [mUpper, mLower]:
diff = abs(pos - m)
cost[m] += (diff * (diff + 1)) / 2
print(min(cost.values()))
| [
"sys.stdin.readline",
"math.ceil",
"math.floor"
] | [((143, 158), 'math.ceil', 'math.ceil', (['mean'], {}), '(mean)\n', (152, 158), False, 'import math\n'), ((168, 184), 'math.floor', 'math.floor', (['mean'], {}), '(mean)\n', (178, 184), False, 'import math\n'), ((53, 73), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (71, 73), False, 'import sys\n')] |
from django import template
register = template.Library()
@register.filter
def can_view(obj, user):
return obj.can_view(user)
@register.filter
def can_change(obj, user):
return obj.can_change(user)
@register.filter
def can_execute(obj, user):
return obj.can_execute(user)
@register.filter
def can_delete(obj, user):
return obj.can_delete(user)
| [
"django.template.Library"
] | [((40, 58), 'django.template.Library', 'template.Library', ([], {}), '()\n', (56, 58), False, 'from django import template\n')] |
from aiohttp import web
from aiohttp.web_response import ContentCoding
from functools import wraps
COMPRESS_FASTEST = 1
BASE_STRING_SIZE = 49
MTU_TCP_PACKET_SIZE = 1500
COMPRESS_THRESHOLD = MTU_TCP_PACKET_SIZE + BASE_STRING_SIZE
def json_response(func):
""" @json_response decorator adds header and dumps response object """
@wraps(func)
async def wrapper(self, request, *args, **kwargs):
res = await func(self, request, *args, **kwargs)
response = web.json_response(data=res)
if response.content_length > COMPRESS_THRESHOLD:
response.enable_compression(force=ContentCoding.gzip)
return response
return wrapper | [
"functools.wraps",
"aiohttp.web.json_response"
] | [((328, 339), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (333, 339), False, 'from functools import wraps\n'), ((456, 483), 'aiohttp.web.json_response', 'web.json_response', ([], {'data': 'res'}), '(data=res)\n', (473, 483), False, 'from aiohttp import web\n')] |
# modified from utils/models/segmentation/erfnet.py
# load pretrained weights during initialization of encoder
import torch
import torch.nn as nn
import torch.nn.functional as F
from .common_models import non_bottleneck_1d
from .builder import MODELS
class DownsamplerBlock(nn.Module):
def __init__(self, ninput, noutput):
super().__init__()
self.conv = nn.Conv2d(ninput, noutput-ninput, (3, 3), stride=2, padding=1, bias=True)
self.pool = nn.MaxPool2d(2, stride=2)
self.bn = nn.BatchNorm2d(noutput, eps=1e-3)
def forward(self, input):
output = torch.cat([self.conv(input), self.pool(input)], 1)
output = self.bn(output)
return F.relu(output)
@MODELS.register()
class ERFNetEncoder(nn.Module):
def __init__(self, num_classes, dropout_1=0.03, dropout_2=0.3, pretrained_weights=None):
super().__init__()
self.initial_block = DownsamplerBlock(3, 16)
self.layers = nn.ModuleList()
self.layers.append(DownsamplerBlock(16, 64))
for x in range(0, 5): # 5 times
self.layers.append(non_bottleneck_1d(64, dropout_1, 1))
self.layers.append(DownsamplerBlock(64, 128))
for x in range(0, 2): # 2 times
self.layers.append(non_bottleneck_1d(128, dropout_2, 2))
self.layers.append(non_bottleneck_1d(128, dropout_2, 4))
self.layers.append(non_bottleneck_1d(128, dropout_2, 8))
self.layers.append(non_bottleneck_1d(128, dropout_2, 16))
# need to initialize the weights
if pretrained_weights is not None:
self._load_encoder_weights(pretrained_weights) # Load ImageNet pre-trained weights
else:
self._init_weights() # initialize random weights
def _init_weights(self):
pass
def _load_encoder_weights(self, pretrained_weights):
# load weights from given file path
try:
saved_weights = torch.load(pretrained_weights)['state_dict']
except FileNotFoundError:
raise FileNotFoundError('pretrained_weights is not there! '
'Please set pretrained_weights=None if you are only testing.')
original_weights = self.state_dict()
for key in saved_weights.keys():
my_key = key.replace('module.features.', '')
if my_key in original_weights.keys():
original_weights[my_key] = saved_weights[key]
self.load_state_dict(original_weights)
def forward(self, input):
output = self.initial_block(input)
for layer in self.layers:
output = layer(output)
return output
| [
"torch.nn.BatchNorm2d",
"torch.nn.ModuleList",
"torch.load",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.functional.relu"
] | [((378, 453), 'torch.nn.Conv2d', 'nn.Conv2d', (['ninput', '(noutput - ninput)', '(3, 3)'], {'stride': '(2)', 'padding': '(1)', 'bias': '(True)'}), '(ninput, noutput - ninput, (3, 3), stride=2, padding=1, bias=True)\n', (387, 453), True, 'import torch.nn as nn\n'), ((472, 497), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {'stride': '(2)'}), '(2, stride=2)\n', (484, 497), True, 'import torch.nn as nn\n'), ((516, 550), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['noutput'], {'eps': '(0.001)'}), '(noutput, eps=0.001)\n', (530, 550), True, 'import torch.nn as nn\n'), ((697, 711), 'torch.nn.functional.relu', 'F.relu', (['output'], {}), '(output)\n', (703, 711), True, 'import torch.nn.functional as F\n'), ((959, 974), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (972, 974), True, 'import torch.nn as nn\n'), ((1950, 1980), 'torch.load', 'torch.load', (['pretrained_weights'], {}), '(pretrained_weights)\n', (1960, 1980), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
# @Author: xiaodong
# @Date : 2021/5/27
from elasticsearch import Elasticsearch
from .question import ElasticSearchQuestion
from ...setting import ELASTICSEARCH_HOST
ElasticSearchQuestion.es = Elasticsearch(ELASTICSEARCH_HOST)
esq = ElasticSearchQuestion("mm_question")
| [
"elasticsearch.Elasticsearch"
] | [((222, 255), 'elasticsearch.Elasticsearch', 'Elasticsearch', (['ELASTICSEARCH_HOST'], {}), '(ELASTICSEARCH_HOST)\n', (235, 255), False, 'from elasticsearch import Elasticsearch\n')] |
"""Jednostavni SQL parser, samo za nizove CREATE i SELECT naredbi.
Ovaj fragment SQLa je zapravo regularan -- nigdje nema ugnježđivanja!
Semantički analizator u obliku name resolvera:
provjerava jesu li svi selektirani stupci prisutni, te broji pristupe.
Na dnu je lista ideja za dalji razvoj.
"""
from pj import *
from backend import PristupLog
import pprint
class SQL(enum.Enum):
class IME(Token): pass
class BROJ(Token): pass
SELECT, FROM, CREATE, TABLE = 'select', 'from', 'create', 'table'
OTVORENA, ZATVORENA, ZVJEZDICA, ZAREZ, TOČKAZAREZ = '()*,;'
def sql_lex(kôd):
lex = Tokenizer(kôd)
for znak in iter(lex.čitaj, ''):
if znak.isspace(): lex.zanemari()
elif znak.isdigit():
lex.zvijezda(str.isdigit)
yield lex.token(SQL.BROJ)
elif znak == '-':
lex.pročitaj('-')
lex.pročitaj_do('\n')
lex.zanemari()
elif znak.isalpha():
lex.zvijezda(str.isalnum)
yield lex.literal(SQL.IME, case=False)
else: yield lex.literal(SQL)
### Beskontekstna gramatika:
# start -> naredba | naredba start
# naredba -> ( select | create ) TOČKAZAREZ
# select -> SELECT ( ZVJEZDICA | stupci ) FROM IME
# stupci -> IME ZAREZ stupci | IME
# create -> CREATE TABLE IME OTVORENA spec_stupci ZATVORENA
# spec_stupci -> spec_stupac ZAREZ spec_stupci | spec_stupac
# spec_stupac -> IME IME (OTVORENA BROJ ZATVORENA)?
### Apstraktna sintaksna stabla:
# Skripta: naredbe - niz SQL naredbi, svaka završava znakom ';'
# Create: tablica, specifikacije - CREATE TABLE naredba
# Select: tablica, stupci - SELECT naredba; stupci == nenavedeno za SELECT *
# Stupac: ime, tip, veličina - specifikacija stupca u tablici (za Create)
class SQLParser(Parser):
def select(self):
if self >> SQL.ZVJEZDICA: stupci = nenavedeno
elif self >> SQL.IME:
stupci = [self.zadnji]
while self >> SQL.ZAREZ: stupci.append(self.pročitaj(SQL.IME))
else: raise self.greška()
self.pročitaj(SQL.FROM)
return Select(self.pročitaj(SQL.IME), stupci)
def spec_stupac(self):
ime, tip = self.pročitaj(SQL.IME), self.pročitaj(SQL.IME)
if self >> SQL.OTVORENA:
veličina = self.pročitaj(SQL.BROJ)
self.pročitaj(SQL.ZATVORENA)
else: veličina = nenavedeno
return Stupac(ime, tip, veličina)
def create(self):
self.pročitaj(SQL.TABLE)
tablica = self.pročitaj(SQL.IME)
self.pročitaj(SQL.OTVORENA)
stupci = [self.spec_stupac()]
while self >> SQL.ZAREZ: stupci.append(self.spec_stupac())
self.pročitaj(SQL.ZATVORENA)
return Create(tablica, stupci)
def naredba(self):
if self >> SQL.SELECT: rezultat = self.select()
elif self >> SQL.CREATE: rezultat = self.create()
else: raise self.greška()
self.pročitaj(SQL.TOČKAZAREZ)
return rezultat
def start(self):
naredbe = [self.naredba()]
while not self >> E.KRAJ: naredbe.append(self.naredba())
return Skripta(naredbe)
class Skripta(AST('naredbe')):
"""Niz SQL naredbi, svaka završava znakom ';'."""
def razriješi(self):
imena = {}
for naredba in self.naredbe: naredba.razriješi(imena)
return imena
class Create(AST('tablica specifikacije')):
"""CREATE TABLE naredba."""
def razriješi(self, imena):
tb = imena[self.tablica.sadržaj] = {}
for stupac in self.specifikacije:
tb[stupac.ime.sadržaj] = PristupLog(stupac.tip)
class Select(AST('tablica stupci')):
"""SELECT naredba."""
def razriješi(self, imena):
tn = self.tablica.sadržaj
if tn not in imena: raise self.tablica.nedeklaracija('nema tablice')
tb = imena[tn]
if self.stupci is nenavedeno:
for sl in tb.values(): sl.pristupi()
else:
for st in self.stupci:
sn = st.sadržaj
if sn not in tb:
raise st.nedeklaracija('stupca nema u {}'.format(tn))
tb[sn].pristupi()
class Stupac(AST('ime tip veličina')): """Specifikacija stupca u tablici."""
if __name__ == '__main__':
skripta = SQLParser.parsiraj(sql_lex('''\
CREATE TABLE Persons
(
PersonID int,
Name varchar(255), -- neki stupci imaju zadanu veličinu
Birthday date, -- a neki nemaju...
Married bool,
City varchar(9) -- zadnji nema zarez!
); -- Sada krenimo nešto selektirati
SELECT Name, City FROM Persons;
SELECT * FROM Persons;
CREATE TABLE Trivial (ID void(0)); -- još jedna tablica
SELECT*FROM Trivial; -- između simbola i riječi ne mora ići razmak
SELECT Name, Married FROM Persons;
SELECT Name from Persons;
'''))
prikaz(skripta, 4)
# Skripta(naredbe=[
# Create(tablica=IME'Persons', specifikacije=[
# Stupac(ime=IME'PersonID', tip=IME'int', veličina=nenavedeno),
# Stupac(ime=IME'Name', tip=IME'varchar', veličina=BROJ'255'),
# Stupac(ime=IME'Birthday', tip=IME'date', veličina=nenavedeno),
# Stupac(ime=IME'Married', tip=IME'bool', veličina=nenavedeno),
# Stupac(ime=IME'City', tip=IME'varchar', veličina=BROJ'9')
# ]),
# Select(tablica=IME'Persons', stupci=[IME'Name', IME'City']),
# Select(tablica=IME'Persons', stupci=nenavedeno),
# Create(tablica=IME'Trivial', specifikacije=
# [Stupac(ime=IME'ID', tip=IME'void', veličina=BROJ'0')]),
# Select(tablica=IME'Trivial', stupci=nenavedeno),
# Select(tablica=IME'Persons', stupci=[IME'Name', IME'Married']),
# Select(tablica=IME'Persons', stupci=[IME'Name'])
# ])
#raise SystemExit
pprint.pprint(skripta.razriješi())
# ideje za dalji razvoj:
# PristupLog.pristup umjesto samog broja može biti lista brojeva linija \
# skripte u kojima počinju SELECT naredbe koje pristupaju pojedinom stupcu
# za_indeks(skripta): lista natprosječno dohvaćivanih tablica/stupaca
# optimizacija: brisanje iz CREATE stupaca kojima nismo uopće pristupili
# implementirati INSERT INTO, da možemo doista nešto i raditi s podacima
# povratni tip za SELECT (npr. (varchar(255), bool) za predzadnji)
# interaktivni način rada (online - naredbu po naredbu analizirati)
| [
"backend.PristupLog"
] | [((3570, 3592), 'backend.PristupLog', 'PristupLog', (['stupac.tip'], {}), '(stupac.tip)\n', (3580, 3592), False, 'from backend import PristupLog\n')] |
# MIT License
# Copyright (c) 2018 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pytest
from pyTD.cache import MemCache
from pyTD.auth.tokens import EmptyToken
@pytest.fixture(scope='function', autouse=True)
def full_consumer_key():
return "<EMAIL>"
class TestMemCache(object):
def test_default_values(self):
c = MemCache()
assert isinstance(c.refresh_token, EmptyToken)
assert isinstance(c.access_token, EmptyToken)
def test_set_token(self, valid_refresh_token):
c = MemCache()
c.refresh_token = valid_refresh_token
assert c.refresh_token.token == "<PASSWORD>"
assert c.refresh_token.expires_in == 1000000
def test_clear(self, valid_refresh_token, valid_access_token):
c = MemCache()
c.refresh_token = valid_refresh_token
c.access_token == valid_access_token
c.clear()
assert isinstance(c.refresh_token, EmptyToken)
assert isinstance(c.access_token, EmptyToken)
| [
"pytest.fixture",
"pyTD.cache.MemCache"
] | [((1188, 1234), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""', 'autouse': '(True)'}), "(scope='function', autouse=True)\n", (1202, 1234), False, 'import pytest\n'), ((1359, 1369), 'pyTD.cache.MemCache', 'MemCache', ([], {}), '()\n', (1367, 1369), False, 'from pyTD.cache import MemCache\n'), ((1544, 1554), 'pyTD.cache.MemCache', 'MemCache', ([], {}), '()\n', (1552, 1554), False, 'from pyTD.cache import MemCache\n'), ((1788, 1798), 'pyTD.cache.MemCache', 'MemCache', ([], {}), '()\n', (1796, 1798), False, 'from pyTD.cache import MemCache\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11a1 on 2017-05-11 08:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='allBook',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('book_id', models.BigIntegerField()),
('ISBN', models.CharField(max_length=20)),
('name', models.CharField(max_length=20)),
('price', models.IntegerField()),
],
),
migrations.CreateModel(
name='favor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_id', models.BigIntegerField()),
('book_id', models.BigIntegerField()),
],
),
migrations.CreateModel(
name='message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time', models.TimeField()),
('date', models.DateField(auto_now_add=True)),
('book_id', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='student_users',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_id', models.BigIntegerField(unique=True)),
('user_name', models.CharField(max_length=20)),
('name', models.CharField(max_length=20)),
('phone_number', models.CharField(max_length=15)),
('mail', models.EmailField(max_length=254)),
],
),
migrations.CreateModel(
name='subscribeBooks',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_id', models.BigIntegerField()),
('book_id', models.BigIntegerField()),
],
),
migrations.CreateModel(
name='Test',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20)),
],
),
]
| [
"django.db.models.EmailField",
"django.db.models.DateField",
"django.db.models.TimeField",
"django.db.models.IntegerField",
"django.db.models.AutoField",
"django.db.models.BigIntegerField",
"django.db.models.CharField"
] | [((368, 461), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (384, 461), False, 'from django.db import migrations, models\n'), ((488, 512), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {}), '()\n', (510, 512), False, 'from django.db import migrations, models\n'), ((540, 571), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (556, 571), False, 'from django.db import migrations, models\n'), ((599, 630), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (615, 630), False, 'from django.db import migrations, models\n'), ((659, 680), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (678, 680), False, 'from django.db import migrations, models\n'), ((811, 904), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (827, 904), False, 'from django.db import migrations, models\n'), ((931, 955), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {}), '()\n', (953, 955), False, 'from django.db import migrations, models\n'), ((986, 1010), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {}), '()\n', (1008, 1010), False, 'from django.db import migrations, models\n'), ((1143, 1236), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1159, 1236), False, 'from django.db import migrations, models\n'), ((1260, 1278), 'django.db.models.TimeField', 'models.TimeField', ([], {}), '()\n', (1276, 1278), False, 'from django.db import migrations, models\n'), ((1306, 1341), 'django.db.models.DateField', 'models.DateField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1322, 1341), False, 'from django.db import migrations, models\n'), ((1372, 1403), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (1388, 1403), False, 'from django.db import migrations, models\n'), ((1542, 1635), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1558, 1635), False, 'from django.db import migrations, models\n'), ((1662, 1697), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'unique': '(True)'}), '(unique=True)\n', (1684, 1697), False, 'from django.db import migrations, models\n'), ((1730, 1761), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (1746, 1761), False, 'from django.db import migrations, models\n'), ((1789, 1820), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (1805, 1820), False, 'from django.db import migrations, models\n'), ((1856, 1887), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(15)'}), '(max_length=15)\n', (1872, 1887), False, 'from django.db import migrations, models\n'), ((1915, 1948), 'django.db.models.EmailField', 'models.EmailField', ([], {'max_length': '(254)'}), '(max_length=254)\n', (1932, 1948), False, 'from django.db import migrations, models\n'), ((2088, 2181), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2104, 2181), False, 'from django.db import migrations, models\n'), ((2208, 2232), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {}), '()\n', (2230, 2232), False, 'from django.db import migrations, models\n'), ((2263, 2287), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {}), '()\n', (2285, 2287), False, 'from django.db import migrations, models\n'), ((2417, 2510), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2433, 2510), False, 'from django.db import migrations, models\n'), ((2534, 2565), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (2550, 2565), False, 'from django.db import migrations, models\n')] |
import os
import sys
sys.path.append('../')
import fire
import pickle
import json
def run_command(command):
if os.system(command) != 0:
raise RuntimeError()
def work_with_one_model(cleared_corpus_path, ling_data, output_dir):
if not os.path.exists(output_dir):
os.mkdir(output_dir)
print('Extracting features.============================================')
run_command(f'python ./run_extract_features.py --cleared-corpus={cleared_corpus_path}' +
f' --ling-data={ling_data} --known-preds=true --output-dir={output_dir}')
print('Done.===========================================================')
print('Vectorize features.=============================================')
feature_path = os.path.join(output_dir, 'features.pckl')
run_command(f'python ./run_vectorize_features.py --feature_path={feature_path} --output_dir={output_dir}')
print('Done.===========================================================')
print('Generating embeddings.==========================================')
run_command(f'python ./run_generate_embeddings.py --feature_path={feature_path} --output_dir={output_dir}')
print('Done.===========================================================')
print('Training model.=================================================')
run_command(f'python ./run_train_model.py --input_dir={output_dir} --output_dir={output_dir}')
print('Done.===========================================================')
def extract_known_predicates(features_path, workdir):
with open(features_path, 'rb') as f:
dataframe = pickle.load(f)
known_preds = [e.split('_')[0] for e in dataframe.pred_lemma.tolist()]
with open(os.path.join(workdir, 'known_preds.json'), 'w') as f:
json.dump(known_preds, f)
def extract_known_predicated(cleared_corpus_path, workdir):
def make_pred_dict(data_chunk, pred_dict):
for sentence in data_chunk[1]:
for word in sentence:
pred_number = word.get('fillpred')
if pred_number:
if not pred_dict.get(sentence[pred_number]['lemma']):
pred_dict[sentence[pred_number]['lemma']] = {word.get('rolepred1'): 1}
else:
if not pred_dict.get(sentence[pred_number]['lemma']).get(word.get('rolepred1')):
pred_dict[sentence[pred_number]['lemma']][word.get('rolepred1')] = 1
else:
pred_dict[sentence[pred_number]['lemma']][word.get('rolepred1')] += 1
def filter_roles(pred_dictionary, threshold=5):
filtered_dict = {}
for predicate in pred_dictionary.keys():
new_pred = {}
for (key, value) in pred_dictionary[predicate].items():
if value > threshold:
new_pred[key] = value
filtered_dict[predicate] = new_pred
for predicate in filtered_dict.keys():
if not filtered_dict[predicate]:
top = sorted(pred_dict[predicate], key=pred_dict[predicate].get, reverse=True)[0]
filtered_dict[predicate][top] = pred_dict[predicate][top]
return filtered_dict
with open(cleared_corpus_path, 'r') as f:
data = json.load(f)
pred_dict = {}
for instance in data:
make_pred_dict(instance, pred_dict)
pred_dict = filter_roles(pred_dict)
known_preds = {key: list(value.keys()) for key, value in pred_dictionary.items()}
with open(os.path.join(workdir, 'known_preds.json'), 'w') as f:
json.dump(known_preds, f)
def main(data_dir, workdir):
cleared_corpus_path = os.path.join(data_dir, 'cleared_corpus.json')
ling_data = os.path.join(data_dir, 'ling_data.pckl')
print('Generating the model for known predicates**********************************')
output_dir = os.path.join(workdir, 'known_preds')
work_with_one_model(cleared_corpus_path, ling_data, output_dir)
extract_known_predicates(os.path.join(output_dir, 'features.pckl'), workdir)
print('Generating the model for unknown predicates********************************')
output_dir = os.path.join(workdir, 'unknown_preds')
work_with_one_model(cleared_corpus_path, ling_data, output_dir)
if __name__ == "__main__":
fire.Fire(main)
| [
"os.path.exists",
"fire.Fire",
"os.path.join",
"pickle.load",
"os.mkdir",
"json.load",
"os.system",
"sys.path.append",
"json.dump"
] | [((21, 43), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (36, 43), False, 'import sys\n'), ((763, 804), 'os.path.join', 'os.path.join', (['output_dir', '"""features.pckl"""'], {}), "(output_dir, 'features.pckl')\n", (775, 804), False, 'import os\n'), ((3814, 3859), 'os.path.join', 'os.path.join', (['data_dir', '"""cleared_corpus.json"""'], {}), "(data_dir, 'cleared_corpus.json')\n", (3826, 3859), False, 'import os\n'), ((3876, 3916), 'os.path.join', 'os.path.join', (['data_dir', '"""ling_data.pckl"""'], {}), "(data_dir, 'ling_data.pckl')\n", (3888, 3916), False, 'import os\n'), ((4028, 4064), 'os.path.join', 'os.path.join', (['workdir', '"""known_preds"""'], {}), "(workdir, 'known_preds')\n", (4040, 4064), False, 'import os\n'), ((4326, 4364), 'os.path.join', 'os.path.join', (['workdir', '"""unknown_preds"""'], {}), "(workdir, 'unknown_preds')\n", (4338, 4364), False, 'import os\n'), ((4466, 4481), 'fire.Fire', 'fire.Fire', (['main'], {}), '(main)\n', (4475, 4481), False, 'import fire\n'), ((118, 136), 'os.system', 'os.system', (['command'], {}), '(command)\n', (127, 136), False, 'import os\n'), ((262, 288), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (276, 288), False, 'import os\n'), ((298, 318), 'os.mkdir', 'os.mkdir', (['output_dir'], {}), '(output_dir)\n', (306, 318), False, 'import os\n'), ((1644, 1658), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1655, 1658), False, 'import pickle\n'), ((1819, 1844), 'json.dump', 'json.dump', (['known_preds', 'f'], {}), '(known_preds, f)\n', (1828, 1844), False, 'import json\n'), ((3382, 3394), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3391, 3394), False, 'import json\n'), ((3728, 3753), 'json.dump', 'json.dump', (['known_preds', 'f'], {}), '(known_preds, f)\n', (3737, 3753), False, 'import json\n'), ((4167, 4208), 'os.path.join', 'os.path.join', (['output_dir', '"""features.pckl"""'], {}), "(output_dir, 'features.pckl')\n", (4179, 4208), False, 'import os\n'), ((1757, 1798), 'os.path.join', 'os.path.join', (['workdir', '"""known_preds.json"""'], {}), "(workdir, 'known_preds.json')\n", (1769, 1798), False, 'import os\n'), ((3666, 3707), 'os.path.join', 'os.path.join', (['workdir', '"""known_preds.json"""'], {}), "(workdir, 'known_preds.json')\n", (3678, 3707), False, 'import os\n')] |
from ...isa.inst import *
import numpy as np
class Vwmacc_vv(Inst):
name = 'vwmacc.vv'
# vwmacc.vv vd, vs1, vs2, vm
def golden(self):
if self['vl']==0:
return self['ori']
result = self['ori'].copy()
maskflag = 1 if 'mask' in self else 0
vstart = self['vstart'] if 'vstart' in self else 0
for ii in range(vstart, self['vl']):
if (maskflag == 0) or (maskflag == 1 and np.unpackbits(self['mask'], bitorder='little')[ii] ):
result[ii] = self['vs2'][ii].astype(object) * self['vs1'][ii]+ self['ori'][ii].astype(object)
return result
class Vwmaccu_vv(Vwmacc_vv):
name = 'vwmaccu.vv'
class Vwmaccsu_vv(Vwmacc_vv):
name = 'vwmaccsu.vv'
| [
"numpy.unpackbits"
] | [((455, 501), 'numpy.unpackbits', 'np.unpackbits', (["self['mask']"], {'bitorder': '"""little"""'}), "(self['mask'], bitorder='little')\n", (468, 501), True, 'import numpy as np\n')] |
# Generated by Django 2.0.9 on 2018-12-08 12:25
from django.db import migrations, models
import django_reactive.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='TestModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('simple', django_reactive.fields.ReactJSONSchemaField(default=dict, help_text='Simple')),
('nested', django_reactive.fields.ReactJSONSchemaField(default=dict, help_text='Nested')),
('arrays', django_reactive.fields.ReactJSONSchemaField(default=dict, help_text='Arrays')),
('numbers', django_reactive.fields.ReactJSONSchemaField(default=dict, help_text='Numbers')),
('widgets', django_reactive.fields.ReactJSONSchemaField(default=dict, help_text='Widgets')),
('ordering', django_reactive.fields.ReactJSONSchemaField(default=dict, help_text='Ordering')),
('references', django_reactive.fields.ReactJSONSchemaField(default=dict)),
('errors', django_reactive.fields.ReactJSONSchemaField(default=dict, help_text='Errors')),
('large', django_reactive.fields.ReactJSONSchemaField(default=dict, help_text='Large')),
('date_and_time', django_reactive.fields.ReactJSONSchemaField(
default=dict, help_text='Date and time')),
('validation', django_reactive.fields.ReactJSONSchemaField(default=dict, help_text='Validation')),
('file', django_reactive.fields.ReactJSONSchemaField(default=dict, help_text='Files')),
('alternatives', django_reactive.fields.ReactJSONSchemaField(default=dict, help_text='Alternatives')),
('property_dependencies',
django_reactive.fields.ReactJSONSchemaField(default=dict, help_text='Property dependencies')),
('schema_dependencies',
django_reactive.fields.ReactJSONSchemaField(default=dict, help_text='Schema dependencies')),
],
),
]
| [
"django.db.models.AutoField"
] | [((334, 427), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (350, 427), False, 'from django.db import migrations, models\n')] |
from jinfo.tables import (
DNA_VOCAB,
RNA_VOCAB,
AA_VOCAB,
CODON_TABLE,
RC_TABLE,
NT_MW_TABLE,
AA_MW_TABLE,
)
class SeqVocabError(Exception):
pass
class SeqLengthError(Exception):
pass
class UnknownBaseError(Exception):
pass
class BaseSeq:
"""
Parent class for DNA/RNA/AA sequence objects
"""
def __init__(self, sequence: str = "", label: str = "", vocab: set = None) -> None:
self.vocab = vocab
self.label = label
self.update_seq(sequence)
self.len = len(self.seq)
return
def __str__(self):
return f"{self.label}\t{self.seq}"
def check_seq_valid(self) -> None:
"""
Ensure that the sequence string is consistant with the vocab
"""
if self.vocab is not None:
if not self.vocab.issuperset(set(self.new_seq)):
raise SeqVocabError("Seq contains bases not in vocab")
return
def update_seq(self, sequence: str = "") -> None:
"""
Replace the sequence string with a new string
"""
self.new_seq = sequence.upper()
self.check_seq_valid()
self.seq = self.new_seq
self.len = len(sequence)
return
def update_label(self, label: str = "") -> None:
"""
Replace the sequence string with a new string
"""
self.label = label
return
def align(self, seq2, maxiters: int = 16):
"""
Perform alignment of two sequences, optionally control the number of iterations
***Requires MUSCLE package***
Returns Alignment object
"""
from jinfo.utils.multialign import multialign
return multialign([self, seq2], maxiters=maxiters)
def identity(self, seq2) -> float:
"""
Calculate the percentage identity between two sequences
Returns: float
"""
from jinfo.utils.percentage_identity import percentage_identity
return percentage_identity(self, seq2)
def save_fasta(self, file_name: str) -> None:
"""
Save sequence to fasta file
"""
import textwrap
seq_formatted = textwrap.fill(self.seq, width=80)
if self.label == "":
out_label = "jinfo_sequence"
else:
out_label = self.label
with open(file_name, "w") as text_file:
text_file.write(f">{out_label}\n{seq_formatted}")
return
class DNASeq(BaseSeq):
"""
Class to hold sequences of DNA
"""
def __init__(self, sequence: str = "", label: str = "") -> None:
"""
Call the superclass constructor with new default vocab argument
"""
super(DNASeq, self).__init__(sequence=sequence, label=label, vocab=DNA_VOCAB)
return
def transcribe(self) -> str:
"""
Returns: RNA transcript of the DNA sequence
"""
return self.seq.replace("T", "U")
def translate(self) -> str:
"""
Returns: translated protein sequence of the DNA sequence
"""
transcript = self.transcribe()
if len(transcript) % 3 != 0:
raise SeqLengthError("Seq cannot be split into codons, not a multiple of 3")
codon_list = [transcript[i : i + 3] for i in range(0, len(transcript), 3)]
return "".join([CODON_TABLE[codon] for codon in codon_list])
def reverse_complement(self) -> str:
"""
Returns: reverse complement of the DNA sequence
"""
return "".join([RC_TABLE[base] for base in self.seq][::-1])
def find_CDS(self):
return
def MW(self) -> float:
"""
Calculate MW of linear double stranded DNA
Returns: Molecular weight float
"""
if "X" in self.seq:
raise UnknownBaseError("X base in sequence")
fw_mw = sum([NT_MW_TABLE[base] for base in self.seq]) + 17.01
rv_mw = sum([NT_MW_TABLE[base] for base in self.reverse_complement()]) + 17.01
return fw_mw + rv_mw
def GC(self, dp: int = 2) -> float:
"""
Calculate the GC% of the DNA sequence with optional arg to control precision
Returns: GC percentage float
"""
return round(100 * (self.seq.count("C") + self.seq.count("G")) / self.len, dp)
def tm(self, dp: int = 2) -> float:
"""
Calculate DNA sequence tm with optional arg to control precision
Returns: melting temperature float
"""
import primer3
return round(primer3.calcTm(self.seq), dp)
def one_hot(self, max_len: int = None):
"""
"""
from jinfo import one_hot_dna
if max_len:
return one_hot_dna(self, max_len)
else:
return one_hot_dna(self, self.len)
class RNASeq(BaseSeq):
"""
Class to hold RNA sequences
"""
def __init__(self, sequence: str = "", label: str = "") -> None:
"""
Call the superclass constructor with new default vocab argument
"""
super(RNASeq, self).__init__(sequence=sequence, label=label, vocab=RNA_VOCAB)
return
def reverse_transcribe(self) -> str:
"""
Returns: DNA template of the RNA sequence
"""
return self.seq.replace("U", "T")
def translate(self) -> str:
"""
Returns: the translated protein sequence of the DNA sequence
"""
if len(self.seq) % 3 != 0:
raise SeqLengthError("Seq cannot be split into codons, not a multiple of 3")
codon_list = [self.seq[i : i + 3] for i in range(0, len(self.seq), 3)]
return "".join([CODON_TABLE[codon] for codon in codon_list])
def MW(self) -> float:
"""
Calculate MW of single stranded RNA
Returns: Molecular weight float
"""
if "X" in self.seq:
raise UnknownBaseError("X base in sequence")
return sum([NT_MW_TABLE[base] for base in self.seq]) + 17.01
class AASeq(BaseSeq):
"""
Class to hold amino acid sequences
"""
def __init__(self, sequence: str = "", label: str = ""):
"""
Call the superclass constructor with new default vocab argument
"""
super(AASeq, self).__init__(sequence=sequence, label=label, vocab=AA_VOCAB)
return
def MW(self) -> float:
"""
Calculate protein MW
Returns: Molecular weight float
"""
if "X" in self.seq:
raise UnknownBaseError("X residue in sequence")
return sum([AA_MW_TABLE[base] for base in self.seq])
if __name__ == "__main__":
pass
| [
"jinfo.one_hot_dna",
"jinfo.utils.percentage_identity.percentage_identity",
"textwrap.fill",
"jinfo.utils.multialign.multialign",
"primer3.calcTm"
] | [((1724, 1767), 'jinfo.utils.multialign.multialign', 'multialign', (['[self, seq2]'], {'maxiters': 'maxiters'}), '([self, seq2], maxiters=maxiters)\n', (1734, 1767), False, 'from jinfo.utils.multialign import multialign\n'), ((2008, 2039), 'jinfo.utils.percentage_identity.percentage_identity', 'percentage_identity', (['self', 'seq2'], {}), '(self, seq2)\n', (2027, 2039), False, 'from jinfo.utils.percentage_identity import percentage_identity\n'), ((2201, 2234), 'textwrap.fill', 'textwrap.fill', (['self.seq'], {'width': '(80)'}), '(self.seq, width=80)\n', (2214, 2234), False, 'import textwrap\n'), ((4568, 4592), 'primer3.calcTm', 'primer3.calcTm', (['self.seq'], {}), '(self.seq)\n', (4582, 4592), False, 'import primer3\n'), ((4754, 4780), 'jinfo.one_hot_dna', 'one_hot_dna', (['self', 'max_len'], {}), '(self, max_len)\n', (4765, 4780), False, 'from jinfo import one_hot_dna\n'), ((4814, 4841), 'jinfo.one_hot_dna', 'one_hot_dna', (['self', 'self.len'], {}), '(self, self.len)\n', (4825, 4841), False, 'from jinfo import one_hot_dna\n')] |
# coding: utf-8
import pygame
from engine.flappy_engine import FlappyEngine
from entities.bird import Bird
class ManualFlappyEngine(FlappyEngine):
def __init__(self):
self.birds = [Bird(name="Manual")]
def get_birds(self):
return self.birds
def on_update(self, next_pipe_x, next_pipe_y):
for bird in self.birds:
bird.refresh()
def draw(self, screen):
for bird in self.birds:
if not bird.dead:
screen.blit(bird.image, bird.rect)
def on_event(self, event):
if event.type == pygame.KEYDOWN and (event.key == pygame.K_SPACE or event.key == pygame.K_UP):
for bird in self.birds:
bird.jump()
def check_pipes_collision(self, pipes):
for bird in self.birds:
if pipes.is_collision(bird.rect):
bird.dead = True
def check_floor_collision(self, floor):
for bird in self.birds:
if floor.rect.y < bird.rect.centery:
bird.dead = True
def on_finish(self, game, score_panel):
from scenes.home_scene import HomeScene
game.change_scene(HomeScene(game))
| [
"scenes.home_scene.HomeScene",
"entities.bird.Bird"
] | [((197, 216), 'entities.bird.Bird', 'Bird', ([], {'name': '"""Manual"""'}), "(name='Manual')\n", (201, 216), False, 'from entities.bird import Bird\n'), ((1156, 1171), 'scenes.home_scene.HomeScene', 'HomeScene', (['game'], {}), '(game)\n', (1165, 1171), False, 'from scenes.home_scene import HomeScene\n')] |
from .todo_item_repository_interface import TodoItemRepositoryInterface
from ..models import TodoItem
from asyncpg.connection import Connection
from holobot.sdk.database import DatabaseManagerInterface
from holobot.sdk.database.queries import Query
from holobot.sdk.database.queries.enums import Equality
from holobot.sdk.ioc.decorators import injectable
from typing import Optional, Tuple
@injectable(TodoItemRepositoryInterface)
class TodoItemRepository(TodoItemRepositoryInterface):
def __init__(self, database_manager: DatabaseManagerInterface) -> None:
super().__init__()
self.__database_manager: DatabaseManagerInterface = database_manager
async def count(self, user_id: str) -> int:
async with self.__database_manager.acquire_connection() as connection:
connection: Connection
async with connection.transaction():
result: Optional[int] = await connection.fetchval("SELECT COUNT(*) FROM todo_lists WHERE user_id = $1", user_id)
return result or 0
async def get(self, todo_id: int) -> Optional[TodoItem]:
async with self.__database_manager.acquire_connection() as connection:
connection: Connection
async with connection.transaction():
record = await Query.select().columns(
"id", "user_id", "created_at", "message"
).from_table("todo_lists").where().field(
"id", Equality.EQUAL, todo_id
).compile().fetchrow(connection)
return TodoItemRepository.__parse_todo_item(record) if record is not None else None
async def get_many(self, user_id: str, start_offset: int, page_size: int) -> Tuple[TodoItem, ...]:
async with self.__database_manager.acquire_connection() as connection:
connection: Connection
async with connection.transaction():
records = await Query.select().columns(
"id", "user_id", "created_at", "message"
).from_table("todo_lists").where().field(
"user_id", Equality.EQUAL, user_id
).limit().start_index(start_offset).max_count(page_size).compile().fetch(connection)
return tuple([TodoItemRepository.__parse_todo_item(record) for record in records])
async def store(self, todo_item: TodoItem) -> None:
async with self.__database_manager.acquire_connection() as connection:
connection: Connection
async with connection.transaction():
await Query.insert().in_table("todo_lists").field(
"user_id", todo_item.user_id
).field(
"message", todo_item.message
).compile().execute(connection)
async def delete_by_user(self, user_id: str, todo_id: int) -> int:
async with self.__database_manager.acquire_connection() as connection:
connection: Connection
async with connection.transaction():
record = await connection.fetchrow((
"WITH deleted AS"
" (DELETE FROM todo_lists WHERE user_id = $1 AND id = $2 RETURNING *)"
" SELECT COUNT(*) AS deleted_count FROM deleted"
), user_id, todo_id)
return record["deleted_count"] if record is not None else 0
async def delete_all(self, user_id: str) -> int:
async with self.__database_manager.acquire_connection() as connection:
connection: Connection
async with connection.transaction():
record = await connection.fetchrow((
"WITH deleted AS"
" (DELETE FROM todo_lists WHERE user_id = $1 RETURNING *)"
" SELECT COUNT(*) AS deleted_count FROM deleted"
), user_id)
return record["deleted_count"] if record is not None else 0
@staticmethod
def __parse_todo_item(record) -> TodoItem:
todo_item = TodoItem()
todo_item.id = record["id"]
todo_item.user_id = record["user_id"]
todo_item.created_at = record["created_at"]
todo_item.message = record["message"]
return todo_item
| [
"holobot.sdk.ioc.decorators.injectable",
"holobot.sdk.database.queries.Query.select",
"holobot.sdk.database.queries.Query.insert"
] | [((392, 431), 'holobot.sdk.ioc.decorators.injectable', 'injectable', (['TodoItemRepositoryInterface'], {}), '(TodoItemRepositoryInterface)\n', (402, 431), False, 'from holobot.sdk.ioc.decorators import injectable\n'), ((2596, 2610), 'holobot.sdk.database.queries.Query.insert', 'Query.insert', ([], {}), '()\n', (2608, 2610), False, 'from holobot.sdk.database.queries import Query\n'), ((1307, 1321), 'holobot.sdk.database.queries.Query.select', 'Query.select', ([], {}), '()\n', (1319, 1321), False, 'from holobot.sdk.database.queries import Query\n'), ((1952, 1966), 'holobot.sdk.database.queries.Query.select', 'Query.select', ([], {}), '()\n', (1964, 1966), False, 'from holobot.sdk.database.queries import Query\n')] |
import collections
import re
import numpy
import pytest
import random
import time
import nidaqmx
from nidaqmx.constants import (
AcquisitionType, BusType, RegenerationMode)
from nidaqmx.error_codes import DAQmxErrors
from nidaqmx.utils import flatten_channel_string
from nidaqmx.tests.fixtures import x_series_device
from nidaqmx.tests.helpers import generate_random_seed
class TestWriteExceptions(object):
"""
Contains a collection of pytest tests that validate the Write error behavior
in the NI-DAQmx Python API.
These tests use only a single X Series device by utilizing the internal
loopback routes on the device.
"""
def test_overwrite(self, x_series_device):
# USB streaming is very tricky.
if not (x_series_device.bus_type == BusType.PCIE or x_series_device.bus_type == BusType.PXIE):
pytest.skip("Requires a plugin device.")
number_of_samples = 100
sample_rate = 1000
fifo_size = 8191
host_buffer_size = 1000
with nidaqmx.Task() as write_task:
samp_clk_terminal = '/{0}/Ctr0InternalOutput'.format(
x_series_device.name)
write_task.ao_channels.add_ao_voltage_chan(
x_series_device.ao_physical_chans[0].name, max_val=10, min_val=-10)
write_task.timing.cfg_samp_clk_timing(
sample_rate, source=samp_clk_terminal, sample_mode=AcquisitionType.CONTINUOUS,
samps_per_chan=number_of_samples)
# Don't allow regeneration - this enables explicit hardware flow control.
write_task.out_stream.regen_mode = RegenerationMode.DONT_ALLOW_REGENERATION
# This is the only entrypoint that correctly sets number_of_samples_written in error
# conditions prior to DAQmx 21.8.
writer = nidaqmx.stream_writers.AnalogUnscaledWriter(write_task.out_stream, auto_start=False)
# Fill up the host buffer first.
initial_write_data = numpy.zeros((1, host_buffer_size), dtype=numpy.int16)
writer.write_int16(initial_write_data)
# Start the write task. All data from the host buffer should be in the FIFO.
write_task.start()
# Now write more data than can fit in the FIFO + host buffer.
large_write_data = numpy.zeros((1, fifo_size*2), dtype=numpy.int16)
with pytest.raises(nidaqmx.DaqWriteError) as timeout_exception:
writer.write_int16(large_write_data, timeout=2.0)
assert timeout_exception.value.error_code == DAQmxErrors.SAMPLES_CAN_NOT_YET_BE_WRITTEN
# Some of the data should have been written successfully. This test doesn't
# need to get into the nitty gritty device details on how much.
assert timeout_exception.value.samps_per_chan_written > 0
def test_overwrite_during_prime(self, x_series_device):
# USB streaming is very tricky.
if not (x_series_device.bus_type == BusType.PCIE or x_series_device.bus_type == BusType.PXIE):
pytest.skip("Requires a plugin device.")
number_of_samples = 100
sample_rate = 1000
fifo_size = 8191
host_buffer_size = 1000
total_buffer_size = fifo_size + host_buffer_size
with nidaqmx.Task() as write_task:
samp_clk_terminal = '/{0}/Ctr0InternalOutput'.format(
x_series_device.name)
write_task.ao_channels.add_ao_voltage_chan(
x_series_device.ao_physical_chans[0].name, max_val=10, min_val=-10)
write_task.timing.cfg_samp_clk_timing(
sample_rate, source=samp_clk_terminal, sample_mode=AcquisitionType.CONTINUOUS,
samps_per_chan=number_of_samples)
# Don't allow regeneration - this enables explicit hardware flow control.
write_task.out_stream.regen_mode = RegenerationMode.DONT_ALLOW_REGENERATION
# Make the host buffer small.
write_task.out_stream.output_buf_size = number_of_samples
# This is the only entrypoint that correctly sets number_of_samples_written in error
# conditions prior to DAQmx 21.8.
writer = nidaqmx.stream_writers.AnalogUnscaledWriter(write_task.out_stream, auto_start=False)
# This is more data than can be primed, so this should fail.
initial_write_data = numpy.zeros((1, total_buffer_size*2), dtype=numpy.int16)
with pytest.raises(nidaqmx.DaqWriteError) as timeout_exception:
writer.write_int16(initial_write_data)
assert timeout_exception.value.error_code == DAQmxErrors.NO_MORE_SPACE
# The driver detects that the write will fail immediately, so no data was written.
assert timeout_exception.value.samps_per_chan_written == 0
| [
"nidaqmx.Task",
"nidaqmx.stream_writers.AnalogUnscaledWriter",
"numpy.zeros",
"pytest.raises",
"pytest.skip"
] | [((859, 899), 'pytest.skip', 'pytest.skip', (['"""Requires a plugin device."""'], {}), "('Requires a plugin device.')\n", (870, 899), False, 'import pytest\n'), ((1031, 1045), 'nidaqmx.Task', 'nidaqmx.Task', ([], {}), '()\n', (1043, 1045), False, 'import nidaqmx\n'), ((1842, 1930), 'nidaqmx.stream_writers.AnalogUnscaledWriter', 'nidaqmx.stream_writers.AnalogUnscaledWriter', (['write_task.out_stream'], {'auto_start': '(False)'}), '(write_task.out_stream,\n auto_start=False)\n', (1885, 1930), False, 'import nidaqmx\n'), ((2006, 2059), 'numpy.zeros', 'numpy.zeros', (['(1, host_buffer_size)'], {'dtype': 'numpy.int16'}), '((1, host_buffer_size), dtype=numpy.int16)\n', (2017, 2059), False, 'import numpy\n'), ((2338, 2388), 'numpy.zeros', 'numpy.zeros', (['(1, fifo_size * 2)'], {'dtype': 'numpy.int16'}), '((1, fifo_size * 2), dtype=numpy.int16)\n', (2349, 2388), False, 'import numpy\n'), ((3080, 3120), 'pytest.skip', 'pytest.skip', (['"""Requires a plugin device."""'], {}), "('Requires a plugin device.')\n", (3091, 3120), False, 'import pytest\n'), ((3309, 3323), 'nidaqmx.Task', 'nidaqmx.Task', ([], {}), '()\n', (3321, 3323), False, 'import nidaqmx\n'), ((4232, 4320), 'nidaqmx.stream_writers.AnalogUnscaledWriter', 'nidaqmx.stream_writers.AnalogUnscaledWriter', (['write_task.out_stream'], {'auto_start': '(False)'}), '(write_task.out_stream,\n auto_start=False)\n', (4275, 4320), False, 'import nidaqmx\n'), ((4424, 4482), 'numpy.zeros', 'numpy.zeros', (['(1, total_buffer_size * 2)'], {'dtype': 'numpy.int16'}), '((1, total_buffer_size * 2), dtype=numpy.int16)\n', (4435, 4482), False, 'import numpy\n'), ((2404, 2440), 'pytest.raises', 'pytest.raises', (['nidaqmx.DaqWriteError'], {}), '(nidaqmx.DaqWriteError)\n', (2417, 2440), False, 'import pytest\n'), ((4498, 4534), 'pytest.raises', 'pytest.raises', (['nidaqmx.DaqWriteError'], {}), '(nidaqmx.DaqWriteError)\n', (4511, 4534), False, 'import pytest\n')] |
import argparse
import json
import os
import re
import shutil
import subprocess
from pathlib import Path
"""
See https://wphelp365.com/blog/ultimate-guide-downloading-converting-aax-mp3/
on how to use.
Step 3 + 4 will get activation bytes.
Example:
python convert.py -i "The Tower of the Swallow.aax" -a xxxxxx
where -a is the activation code
"""
def get_chapters(input):
ffprobe = shutil.which('ffprobe')
if not ffprobe:
raise FileNotFoundError('ffprobe not found!')
cmd = [ffprobe, '-show_chapters', '-loglevel', 'error', '-print_format',
'json', input]
output = subprocess.check_output(cmd, universal_newlines=True)
chapters = json.loads(output)
return chapters
def parse_chapters(chapters, input, activation_bytes, album):
ffmpeg = shutil.which('ffmpeg')
if not ffmpeg:
raise FileNotFoundError('ffmpeg not found!')
for i, chapter in enumerate(chapters['chapters']):
title = chapter['tags']['title']
cmd = [ffmpeg, '-y',
'-activation_bytes', activation_bytes,
'-i', input,
'-ss', chapter['start_time'],
'-to', chapter['end_time'],
'-metadata', 'title=%s' % title]
if album is not None:
cmd.extend(['-metadata', 'album=%s' % album])
out_arg = Path(input).stem
if codec == 'copy':
ext = 'm4a'
else:
ext = 'mp3'
output = f'{out_arg}_{i+1}.{ext}'
cmd.extend(['-c:a', namespace.codec, '-vn', output])
print(cmd)
subprocess.check_output(cmd, universal_newlines=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', help='Input file name', required=True)
parser.add_argument('-a', '--activation-bytes', help='Activation bytes',
required=True)
parser.add_argument('-c', '--codec', help='Select a codec (copy = lossless rip)',
default='mp3', choices = ['mp3','copy'])
parser.add_argument('--album',
help='ID3v2 tag for Album, if not specified, '
'uses from aax')
namespace = parser.parse_args()
print(namespace)
# Collate args
input = namespace.input
activation_bytes = namespace.activation_bytes
album = namespace.album
codec = namespace.codec
chapters = get_chapters(input)
print(chapters)
parse_chapters(chapters, input, activation_bytes, album)
| [
"subprocess.check_output",
"json.loads",
"argparse.ArgumentParser",
"pathlib.Path",
"shutil.which"
] | [((392, 415), 'shutil.which', 'shutil.which', (['"""ffprobe"""'], {}), "('ffprobe')\n", (404, 415), False, 'import shutil\n'), ((606, 659), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {'universal_newlines': '(True)'}), '(cmd, universal_newlines=True)\n', (629, 659), False, 'import subprocess\n'), ((675, 693), 'json.loads', 'json.loads', (['output'], {}), '(output)\n', (685, 693), False, 'import json\n'), ((791, 813), 'shutil.which', 'shutil.which', (['"""ffmpeg"""'], {}), "('ffmpeg')\n", (803, 813), False, 'import shutil\n'), ((1672, 1697), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1695, 1697), False, 'import argparse\n'), ((1576, 1629), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {'universal_newlines': '(True)'}), '(cmd, universal_newlines=True)\n', (1599, 1629), False, 'import subprocess\n'), ((1338, 1349), 'pathlib.Path', 'Path', (['input'], {}), '(input)\n', (1342, 1349), False, 'from pathlib import Path\n')] |
import logging
import discord
from redbot.core import Config, bank, commands
from redbot.core.utils.chat_formatting import escape, humanize_list, humanize_number, inline
log = logging.getLogger("red.flare.gamenotify")
class Gamenotify(commands.Cog):
"""Sub to game pings"""
__version__ = "0.0.1"
def format_help_for_context(self, ctx):
pre_processed = super().format_help_for_context(ctx)
return f"{pre_processed}\nCog Version: {self.__version__}"
def __init__(self, bot):
self.bot = bot
self.config = Config.get_conf(self, 95932766180343808, force_registration=True)
self.config.register_guild(games={})
@commands.cooldown(1, 300, commands.BucketType.user)
@commands.command()
@commands.guild_only()
async def notify(self, ctx, *, game: str):
"""Ping a game."""
game = game.lower()
games = await self.config.guild(ctx.guild).games()
if game not in games:
await ctx.send(
f"That game doesn't exist, did you mean one of the following? {humanize_list(list(map(inline, games.keys())))}"
)
return
users = []
for user in games[game]:
obj = ctx.guild.get_member(user)
if obj is None:
continue
users.append(obj.mention)
if not users:
await ctx.send("Nobody is signed up for pings for that game.")
return
msg = f"{escape(game, mass_mentions=True).title()}: {','.join(users)}"
await ctx.send(msg)
@commands.command()
@commands.guild_only()
async def addping(self, ctx, *, game: str):
"""Add/remove a ping for a game."""
game = game.lower()
async with self.config.guild(ctx.guild).games() as games:
if game in games:
if ctx.author.id in games[game]:
games[game].remove(ctx.author.id)
await ctx.send("You have been removed from pings for this game.")
else:
games[game].append(ctx.author.id)
await ctx.send(
f"You have been added to the ping list for {escape(game, mass_mentions=True)}."
)
else:
games[game] = []
games[game].append(ctx.author.id)
await ctx.send(
"That game has now been created and you have added to the ping list"
)
@commands.command()
@commands.guild_only()
async def listgames(self, ctx):
"""List games for notifying."""
games = await self.config.guild(ctx.guild).games()
if not games:
await ctx.send("No games are registered in this guild silly.")
return
new_games = [game for game in games if games[game]]
if not new_games:
await ctx.send("No games are registered in this guild silly.")
return
await ctx.send(f"Current registered games: {humanize_list(list(map(inline, new_games)))}")
@commands.command()
@commands.guild_only()
async def listpings(self, ctx, *, game: str):
"""List pings for a game."""
games = await self.config.guild(ctx.guild).games()
if game.lower() not in games:
await ctx.send("That game isn't registered for pings.")
return
users = []
for user in games[game.lower()]:
obj = ctx.guild.get_member(user)
if obj is not None:
users.append(str(obj))
if not users:
await ctx.send(f"No valid users registered for {game}.")
await ctx.send(
f"Current registered users for {game}: {humanize_list(list(map(inline, users)))}"
)
@commands.command()
@commands.guild_only()
@commands.mod()
async def delgame(self, ctx, *, game: str):
"""Deletea game."""
game = game.lower()
async with self.config.guild(ctx.guild).games() as games:
if game in games:
del games[game]
await ctx.send("That game has now deleted.")
else:
await ctx.send("That game does not exist.")
| [
"logging.getLogger",
"redbot.core.commands.mod",
"redbot.core.commands.cooldown",
"redbot.core.Config.get_conf",
"redbot.core.commands.guild_only",
"redbot.core.commands.command",
"redbot.core.utils.chat_formatting.escape"
] | [((178, 219), 'logging.getLogger', 'logging.getLogger', (['"""red.flare.gamenotify"""'], {}), "('red.flare.gamenotify')\n", (195, 219), False, 'import logging\n'), ((674, 725), 'redbot.core.commands.cooldown', 'commands.cooldown', (['(1)', '(300)', 'commands.BucketType.user'], {}), '(1, 300, commands.BucketType.user)\n', (691, 725), False, 'from redbot.core import Config, bank, commands\n'), ((731, 749), 'redbot.core.commands.command', 'commands.command', ([], {}), '()\n', (747, 749), False, 'from redbot.core import Config, bank, commands\n'), ((755, 776), 'redbot.core.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (774, 776), False, 'from redbot.core import Config, bank, commands\n'), ((1574, 1592), 'redbot.core.commands.command', 'commands.command', ([], {}), '()\n', (1590, 1592), False, 'from redbot.core import Config, bank, commands\n'), ((1598, 1619), 'redbot.core.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (1617, 1619), False, 'from redbot.core import Config, bank, commands\n'), ((2509, 2527), 'redbot.core.commands.command', 'commands.command', ([], {}), '()\n', (2525, 2527), False, 'from redbot.core import Config, bank, commands\n'), ((2533, 2554), 'redbot.core.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (2552, 2554), False, 'from redbot.core import Config, bank, commands\n'), ((3091, 3109), 'redbot.core.commands.command', 'commands.command', ([], {}), '()\n', (3107, 3109), False, 'from redbot.core import Config, bank, commands\n'), ((3115, 3136), 'redbot.core.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (3134, 3136), False, 'from redbot.core import Config, bank, commands\n'), ((3809, 3827), 'redbot.core.commands.command', 'commands.command', ([], {}), '()\n', (3825, 3827), False, 'from redbot.core import Config, bank, commands\n'), ((3833, 3854), 'redbot.core.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (3852, 3854), False, 'from redbot.core import Config, bank, commands\n'), ((3860, 3874), 'redbot.core.commands.mod', 'commands.mod', ([], {}), '()\n', (3872, 3874), False, 'from redbot.core import Config, bank, commands\n'), ((557, 622), 'redbot.core.Config.get_conf', 'Config.get_conf', (['self', '(95932766180343808)'], {'force_registration': '(True)'}), '(self, 95932766180343808, force_registration=True)\n', (572, 622), False, 'from redbot.core import Config, bank, commands\n'), ((1478, 1510), 'redbot.core.utils.chat_formatting.escape', 'escape', (['game'], {'mass_mentions': '(True)'}), '(game, mass_mentions=True)\n', (1484, 1510), False, 'from redbot.core.utils.chat_formatting import escape, humanize_list, humanize_number, inline\n'), ((2205, 2237), 'redbot.core.utils.chat_formatting.escape', 'escape', (['game'], {'mass_mentions': '(True)'}), '(game, mass_mentions=True)\n', (2211, 2237), False, 'from redbot.core.utils.chat_formatting import escape, humanize_list, humanize_number, inline\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import edtf.fields
class Migration(migrations.Migration):
dependencies = [
('gk_collections_work_creator', '0029_auto_20170523_1149'),
]
operations = [
migrations.AddField(
model_name='creatorbase',
name='brief',
field=models.TextField(blank=True, help_text=b'A document brief describing the purpose of this content'),
),
migrations.AddField(
model_name='workbase',
name='brief',
field=models.TextField(blank=True, help_text=b'A document brief describing the purpose of this content'),
),
migrations.AlterField(
model_name='creatorbase',
name='admin_notes',
field=models.TextField(blank=True, help_text=b"Administrator's notes about this content"),
),
migrations.AlterField(
model_name='creatorbase',
name='birth_date_edtf',
field=edtf.fields.EDTFField(help_text=b"an <a href='http://www.loc.gov/standards/datetime/implementations.html'>EDTF</a>-formatted date, parsed from the display date, e.g. '1855/1860-06-04'", blank=True, upper_fuzzy_field=b'birth_date_sort_descending', null=True, lower_fuzzy_field=b'birth_date_sort_ascending', upper_strict_field=b'birth_date_latest', verbose_name=b'Date of creation (EDTF)', lower_strict_field=b'birth_date_earliest', natural_text_field=b'birth_date_display'),
),
migrations.AlterField(
model_name='creatorbase',
name='death_date_edtf',
field=edtf.fields.EDTFField(help_text=b"an <a href='http://www.loc.gov/standards/datetime/implementations.html'>EDTF</a>-formatted date, parsed from the display date, e.g. '1855/1860-06-04'", blank=True, upper_fuzzy_field=b'death_date_sort_descending', null=True, lower_fuzzy_field=b'death_date_sort_ascending', upper_strict_field=b'death_date_latest', verbose_name=b'Date of death (EDTF)', lower_strict_field=b'death_date_earliest', natural_text_field=b'death_date_display'),
),
migrations.AlterField(
model_name='workbase',
name='admin_notes',
field=models.TextField(blank=True, help_text=b"Administrator's notes about this content"),
),
migrations.AlterField(
model_name='workbase',
name='date_edtf',
field=edtf.fields.EDTFField(help_text=b"an <a href='http://www.loc.gov/standards/datetime/implementations.html'>EDTF</a>-formatted date, parsed from the display date, e.g. '1855/1860-06-04'", blank=True, upper_fuzzy_field=b'date_sort_descending', null=True, lower_fuzzy_field=b'date_sort_ascending', upper_strict_field=b'date_latest', verbose_name=b'Date of creation (EDTF)', lower_strict_field=b'date_earliest', natural_text_field=b'date_display'),
),
]
| [
"django.db.models.TextField"
] | [((393, 496), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'help_text': "b'A document brief describing the purpose of this content'"}), "(blank=True, help_text=\n b'A document brief describing the purpose of this content')\n", (409, 496), False, 'from django.db import migrations, models\n'), ((612, 715), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'help_text': "b'A document brief describing the purpose of this content'"}), "(blank=True, help_text=\n b'A document brief describing the purpose of this content')\n", (628, 715), False, 'from django.db import migrations, models\n'), ((842, 930), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'help_text': 'b"Administrator\'s notes about this content"'}), '(blank=True, help_text=\n b"Administrator\'s notes about this content")\n', (858, 930), False, 'from django.db import migrations, models\n'), ((2267, 2355), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'help_text': 'b"Administrator\'s notes about this content"'}), '(blank=True, help_text=\n b"Administrator\'s notes about this content")\n', (2283, 2355), False, 'from django.db import migrations, models\n')] |
import re
import six
import datetime
from urllib import urlencode
from django.conf import settings
from django.http import HttpResponse
from django.core.exceptions import ObjectDoesNotExist
from django.urls import reverse
from django.utils.encoding import force_text
import debug # pyflakes:ignore
import tastypie
import tastypie.resources
from tastypie.api import Api
from tastypie.bundle import Bundle
from tastypie.serializers import Serializer as BaseSerializer
from tastypie.exceptions import BadRequest, ApiFieldError
from tastypie.utils.mime import determine_format, build_content_type
from tastypie.utils import is_valid_jsonp_callback_value
from tastypie.fields import ApiField
import debug # pyflakes:ignore
_api_list = []
class ModelResource(tastypie.resources.ModelResource):
def generate_cache_key(self, *args, **kwargs):
"""
Creates a unique-enough cache key.
This is based off the current api_name/resource_name/args/kwargs.
"""
#smooshed = ["%s=%s" % (key, value) for key, value in kwargs.items()]
smooshed = urlencode(kwargs)
# Use a list plus a ``.join()`` because it's faster than concatenation.
return "%s:%s:%s:%s" % (self._meta.api_name, self._meta.resource_name, ':'.join(args), smooshed)
class Serializer(BaseSerializer):
def to_html(self, data, options=None):
"""
Reserved for future usage.
The desire is to provide HTML output of a resource, making an API
available to a browser. This is on the TODO list but not currently
implemented.
"""
from django.template.loader import render_to_string
options = options or {}
serialized = self.to_simple_html(data, options)
return render_to_string("api/base.html", {"data": serialized})
def to_simple_html(self, data, options):
"""
"""
from django.template.loader import render_to_string
#
if isinstance(data, (list, tuple)):
return render_to_string("api/listitem.html", {"data": [self.to_simple_html(item, options) for item in data]})
if isinstance(data, dict):
return render_to_string("api/dictitem.html", {"data": dict((key, self.to_simple_html(val, options)) for (key, val) in data.items())})
elif isinstance(data, Bundle):
return render_to_string("api/dictitem.html", {"data":dict((key, self.to_simple_html(val, options)) for (key, val) in data.data.items())})
elif hasattr(data, 'dehydrated_type'):
if getattr(data, 'dehydrated_type', None) == 'related' and data.is_m2m == False:
return render_to_string("api/relitem.html", {"fk": data.fk_resource, "val": self.to_simple_html(data.value, options)})
elif getattr(data, 'dehydrated_type', None) == 'related' and data.is_m2m == True:
render_to_string("api/listitem.html", {"data": [self.to_simple_html(bundle, options) for bundle in data.m2m_bundles]})
else:
return self.to_simple_html(data.value, options)
elif isinstance(data, datetime.datetime):
return self.format_datetime(data)
elif isinstance(data, datetime.date):
return self.format_date(data)
elif isinstance(data, datetime.time):
return self.format_time(data)
elif isinstance(data, bool):
return data
elif isinstance(data, (six.integer_types, float)):
return data
elif data is None:
return None
elif isinstance(data, basestring) and data.startswith("/api/v1/"): # XXX Will not work for Python 3
return render_to_string("api/relitem.html", {"fk": data, "val": data.split('/')[-2]})
else:
return force_text(data)
for _app in settings.INSTALLED_APPS:
_module_dict = globals()
if '.' in _app:
_root, _name = _app.split('.', 1)
if _root == 'ietf':
if not '.' in _name:
_api = Api(api_name=_name)
_module_dict[_name] = _api
_api_list.append((_name, _api))
def top_level(request):
available_resources = {}
apitop = reverse('ietf.api.top_level')
for name in sorted([ name for name, api in _api_list if len(api._registry) > 0 ]):
available_resources[name] = {
'list_endpoint': '%s/%s/' % (apitop, name),
}
serializer = Serializer()
desired_format = determine_format(request, serializer)
options = {}
if 'text/javascript' in desired_format:
callback = request.GET.get('callback', 'callback')
if not is_valid_jsonp_callback_value(callback):
raise BadRequest('JSONP callback name is invalid.')
options['callback'] = callback
serialized = serializer.serialize(available_resources, desired_format, options)
return HttpResponse(content=serialized, content_type=build_content_type(desired_format))
def autodiscover():
"""
Auto-discover INSTALLED_APPS resources.py modules and fail silently when
not present. This forces an import on them to register any admin bits they
may want.
"""
from importlib import import_module
from django.conf import settings
from django.utils.module_loading import module_has_submodule
for app in settings.INSTALLED_APPS:
mod = import_module(app)
# Attempt to import the app's admin module.
try:
import_module('%s.resources' % (app, ))
except:
# Decide whether to bubble up this error. If the app just
# doesn't have an admin module, we can ignore the error
# attempting to import it, otherwise we want it to bubble up.
if module_has_submodule(mod, "resources"):
raise
TIMEDELTA_REGEX = re.compile('^(?P<days>\d+d)?\s?(?P<hours>\d+h)?\s?(?P<minutes>\d+m)?\s?(?P<seconds>\d+s?)$')
class TimedeltaField(ApiField):
dehydrated_type = 'timedelta'
help_text = "A timedelta field, with duration expressed in seconds. Ex: 132"
def convert(self, value):
if value is None:
return None
if isinstance(value, six.string_types):
match = TIMEDELTA_REGEX.search(value)
if match:
data = match.groupdict()
return datetime.timedelta(int(data['days']), int(data['hours']), int(data['minutes']), int(data['seconds']))
else:
raise ApiFieldError("Timedelta provided to '%s' field doesn't appear to be a valid timedelta string: '%s'" % (self.instance_name, value))
return value
def hydrate(self, bundle):
value = super(TimedeltaField, self).hydrate(bundle)
if value and not hasattr(value, 'seconds'):
if isinstance(value, six.string_types):
try:
match = TIMEDELTA_REGEX.search(value)
if match:
data = match.groupdict()
value = datetime.timedelta(int(data['days']), int(data['hours']), int(data['minutes']), int(data['seconds']))
else:
raise ValueError()
except (ValueError, TypeError):
raise ApiFieldError("Timedelta provided to '%s' field doesn't appear to be a valid datetime string: '%s'" % (self.instance_name, value))
else:
raise ApiFieldError("Datetime provided to '%s' field must be a string: %s" % (self.instance_name, value))
return value
class ToOneField(tastypie.fields.ToOneField):
"Subclass of tastypie.fields.ToOneField which adds caching in the dehydrate method."
def dehydrate(self, bundle, for_list=True):
foreign_obj = None
if callable(self.attribute):
previous_obj = bundle.obj
foreign_obj = self.attribute(bundle)
elif isinstance(self.attribute, six.string_types):
foreign_obj = bundle.obj
for attr in self._attrs:
previous_obj = foreign_obj
try:
foreign_obj = getattr(foreign_obj, attr, None)
except ObjectDoesNotExist:
foreign_obj = None
if not foreign_obj:
if not self.null:
if callable(self.attribute):
raise ApiFieldError("The related resource for resource %s could not be found." % (previous_obj))
else:
raise ApiFieldError("The model '%r' has an empty attribute '%s' and doesn't allow a null value." % (previous_obj, attr))
return None
fk_resource = self.get_related_resource(foreign_obj)
# Up to this point we've copied the code from tastypie 0.13.1. Now
# we add caching.
cache_key = fk_resource.generate_cache_key('related', pk=foreign_obj.pk, for_list=for_list, )
dehydrated = fk_resource._meta.cache.get(cache_key)
if dehydrated is None:
fk_bundle = Bundle(obj=foreign_obj, request=bundle.request)
dehydrated = self.dehydrate_related(fk_bundle, fk_resource, for_list=for_list)
fk_resource._meta.cache.set(cache_key, dehydrated)
return dehydrated
| [
"django.utils.module_loading.module_has_submodule",
"importlib.import_module",
"tastypie.utils.is_valid_jsonp_callback_value",
"re.compile",
"tastypie.bundle.Bundle",
"tastypie.api.Api",
"tastypie.exceptions.ApiFieldError",
"django.utils.encoding.force_text",
"urllib.urlencode",
"django.urls.rever... | [((5887, 5996), 're.compile', 're.compile', (['"""^(?P<days>\\\\d+d)?\\\\s?(?P<hours>\\\\d+h)?\\\\s?(?P<minutes>\\\\d+m)?\\\\s?(?P<seconds>\\\\d+s?)$"""'], {}), "(\n '^(?P<days>\\\\d+d)?\\\\s?(?P<hours>\\\\d+h)?\\\\s?(?P<minutes>\\\\d+m)?\\\\s?(?P<seconds>\\\\d+s?)$'\n )\n", (5897, 5996), False, 'import re\n'), ((4249, 4278), 'django.urls.reverse', 'reverse', (['"""ietf.api.top_level"""'], {}), "('ietf.api.top_level')\n", (4256, 4278), False, 'from django.urls import reverse\n'), ((4523, 4560), 'tastypie.utils.mime.determine_format', 'determine_format', (['request', 'serializer'], {}), '(request, serializer)\n', (4539, 4560), False, 'from tastypie.utils.mime import determine_format, build_content_type\n'), ((1138, 1155), 'urllib.urlencode', 'urlencode', (['kwargs'], {}), '(kwargs)\n', (1147, 1155), False, 'from urllib import urlencode\n'), ((1815, 1870), 'django.template.loader.render_to_string', 'render_to_string', (['"""api/base.html"""', "{'data': serialized}"], {}), "('api/base.html', {'data': serialized})\n", (1831, 1870), False, 'from django.template.loader import render_to_string\n'), ((5427, 5445), 'importlib.import_module', 'import_module', (['app'], {}), '(app)\n', (5440, 5445), False, 'from importlib import import_module\n'), ((4699, 4738), 'tastypie.utils.is_valid_jsonp_callback_value', 'is_valid_jsonp_callback_value', (['callback'], {}), '(callback)\n', (4728, 4738), False, 'from tastypie.utils import is_valid_jsonp_callback_value\n'), ((4758, 4803), 'tastypie.exceptions.BadRequest', 'BadRequest', (['"""JSONP callback name is invalid."""'], {}), "('JSONP callback name is invalid.')\n", (4768, 4803), False, 'from tastypie.exceptions import BadRequest, ApiFieldError\n'), ((4986, 5020), 'tastypie.utils.mime.build_content_type', 'build_content_type', (['desired_format'], {}), '(desired_format)\n', (5004, 5020), False, 'from tastypie.utils.mime import determine_format, build_content_type\n'), ((5523, 5561), 'importlib.import_module', 'import_module', (["('%s.resources' % (app,))"], {}), "('%s.resources' % (app,))\n", (5536, 5561), False, 'from importlib import import_module\n'), ((9100, 9147), 'tastypie.bundle.Bundle', 'Bundle', ([], {'obj': 'foreign_obj', 'request': 'bundle.request'}), '(obj=foreign_obj, request=bundle.request)\n', (9106, 9147), False, 'from tastypie.bundle import Bundle\n'), ((4070, 4089), 'tastypie.api.Api', 'Api', ([], {'api_name': '_name'}), '(api_name=_name)\n', (4073, 4089), False, 'from tastypie.api import Api\n'), ((5806, 5844), 'django.utils.module_loading.module_has_submodule', 'module_has_submodule', (['mod', '"""resources"""'], {}), "(mod, 'resources')\n", (5826, 5844), False, 'from django.utils.module_loading import module_has_submodule\n'), ((6537, 6678), 'tastypie.exceptions.ApiFieldError', 'ApiFieldError', (['("Timedelta provided to \'%s\' field doesn\'t appear to be a valid timedelta string: \'%s\'"\n % (self.instance_name, value))'], {}), '(\n "Timedelta provided to \'%s\' field doesn\'t appear to be a valid timedelta string: \'%s\'"\n % (self.instance_name, value))\n', (6550, 6678), False, 'from tastypie.exceptions import BadRequest, ApiFieldError\n'), ((7496, 7600), 'tastypie.exceptions.ApiFieldError', 'ApiFieldError', (['("Datetime provided to \'%s\' field must be a string: %s" % (self.\n instance_name, value))'], {}), '("Datetime provided to \'%s\' field must be a string: %s" % (\n self.instance_name, value))\n', (7509, 7600), False, 'from tastypie.exceptions import BadRequest, ApiFieldError\n'), ((8440, 8532), 'tastypie.exceptions.ApiFieldError', 'ApiFieldError', (["('The related resource for resource %s could not be found.' % previous_obj)"], {}), "('The related resource for resource %s could not be found.' %\n previous_obj)\n", (8453, 8532), False, 'from tastypie.exceptions import BadRequest, ApiFieldError\n'), ((8579, 8703), 'tastypie.exceptions.ApiFieldError', 'ApiFieldError', (['("The model \'%r\' has an empty attribute \'%s\' and doesn\'t allow a null value." %\n (previous_obj, attr))'], {}), '(\n "The model \'%r\' has an empty attribute \'%s\' and doesn\'t allow a null value."\n % (previous_obj, attr))\n', (8592, 8703), False, 'from tastypie.exceptions import BadRequest, ApiFieldError\n'), ((7324, 7464), 'tastypie.exceptions.ApiFieldError', 'ApiFieldError', (['("Timedelta provided to \'%s\' field doesn\'t appear to be a valid datetime string: \'%s\'"\n % (self.instance_name, value))'], {}), '(\n "Timedelta provided to \'%s\' field doesn\'t appear to be a valid datetime string: \'%s\'"\n % (self.instance_name, value))\n', (7337, 7464), False, 'from tastypie.exceptions import BadRequest, ApiFieldError\n'), ((3840, 3856), 'django.utils.encoding.force_text', 'force_text', (['data'], {}), '(data)\n', (3850, 3856), False, 'from django.utils.encoding import force_text\n')] |
from kubernetes import client
from kubernetes.watch import Watch
from loguru import logger
from .consts import CONTAINER_NAME, DEPLOYMENT_PREFIX, NAMESPACE
def create_deployment(v1, image, num_replicas):
container = client.V1Container(name=CONTAINER_NAME, image=image)
container_spec = client.V1PodSpec(containers=[container])
meta = client.V1ObjectMeta(labels=dict(app="kbench"))
template_spec = client.V1PodTemplateSpec(spec=container_spec,
metadata=meta)
selector = client.V1LabelSelector(match_labels=dict(app="kbench"))
deployment_spec = client.V1DeploymentSpec(template=template_spec,
replicas=num_replicas,
selector=selector)
meta = client.V1ObjectMeta(generate_name=DEPLOYMENT_PREFIX)
deployment_spec = client.V1Deployment(spec=deployment_spec, metadata=meta)
deployment = v1.create_namespaced_deployment(body=deployment_spec,
namespace=NAMESPACE)
return deployment.metadata.name
def delete_deployment(v1, name):
v1.delete_namespaced_deployment(name=name, namespace=NAMESPACE)
def wait_for_deployment_rescale(v1, name, target_replicas):
watch = Watch()
for event in watch.stream(v1.list_namespaced_deployment,
namespace=NAMESPACE):
deployment = event["object"]
if deployment.metadata.name != name:
continue
ready_replicas = deployment.status.ready_replicas
if ready_replicas is None:
ready_replicas = 0
logger.trace("Deployment {} has {} replicas", name, ready_replicas)
if ready_replicas == target_replicas:
return
def rescale_deployment(v1, name, num_replicas):
logger.info("Rescaling deployment {} to {} replicas", name, num_replicas)
scale = client.V1Scale(spec=client.V1ScaleSpec(replicas=num_replicas))
v1.patch_namespaced_deployment_scale(name=name, namespace=NAMESPACE,
body=scale)
| [
"kubernetes.client.V1ObjectMeta",
"kubernetes.watch.Watch",
"loguru.logger.info",
"kubernetes.client.V1PodSpec",
"kubernetes.client.V1ScaleSpec",
"loguru.logger.trace",
"kubernetes.client.V1Deployment",
"kubernetes.client.V1PodTemplateSpec",
"kubernetes.client.V1Container",
"kubernetes.client.V1De... | [((223, 275), 'kubernetes.client.V1Container', 'client.V1Container', ([], {'name': 'CONTAINER_NAME', 'image': 'image'}), '(name=CONTAINER_NAME, image=image)\n', (241, 275), False, 'from kubernetes import client\n'), ((297, 337), 'kubernetes.client.V1PodSpec', 'client.V1PodSpec', ([], {'containers': '[container]'}), '(containers=[container])\n', (313, 337), False, 'from kubernetes import client\n'), ((416, 476), 'kubernetes.client.V1PodTemplateSpec', 'client.V1PodTemplateSpec', ([], {'spec': 'container_spec', 'metadata': 'meta'}), '(spec=container_spec, metadata=meta)\n', (440, 476), False, 'from kubernetes import client\n'), ((615, 708), 'kubernetes.client.V1DeploymentSpec', 'client.V1DeploymentSpec', ([], {'template': 'template_spec', 'replicas': 'num_replicas', 'selector': 'selector'}), '(template=template_spec, replicas=num_replicas,\n selector=selector)\n', (638, 708), False, 'from kubernetes import client\n'), ((808, 860), 'kubernetes.client.V1ObjectMeta', 'client.V1ObjectMeta', ([], {'generate_name': 'DEPLOYMENT_PREFIX'}), '(generate_name=DEPLOYMENT_PREFIX)\n', (827, 860), False, 'from kubernetes import client\n'), ((883, 939), 'kubernetes.client.V1Deployment', 'client.V1Deployment', ([], {'spec': 'deployment_spec', 'metadata': 'meta'}), '(spec=deployment_spec, metadata=meta)\n', (902, 939), False, 'from kubernetes import client\n'), ((1296, 1303), 'kubernetes.watch.Watch', 'Watch', ([], {}), '()\n', (1301, 1303), False, 'from kubernetes.watch import Watch\n'), ((1844, 1917), 'loguru.logger.info', 'logger.info', (['"""Rescaling deployment {} to {} replicas"""', 'name', 'num_replicas'], {}), "('Rescaling deployment {} to {} replicas', name, num_replicas)\n", (1855, 1917), False, 'from loguru import logger\n'), ((1656, 1723), 'loguru.logger.trace', 'logger.trace', (['"""Deployment {} has {} replicas"""', 'name', 'ready_replicas'], {}), "('Deployment {} has {} replicas', name, ready_replicas)\n", (1668, 1723), False, 'from loguru import logger\n'), ((1951, 1992), 'kubernetes.client.V1ScaleSpec', 'client.V1ScaleSpec', ([], {'replicas': 'num_replicas'}), '(replicas=num_replicas)\n', (1969, 1992), False, 'from kubernetes import client\n')] |
import magma as m
from magma import DefineCircuit, EndCircuit, In, Out, Bit, Clock, wire
from magma.backend.verilog import compile
from mantle.xilinx.spartan6 import FDCE
def test_fdce():
main = DefineCircuit('main', 'I', In(Bit), "O", Out(Bit), "CLK", In(Clock))
dff = FDCE()
wire(m.enable(1), dff.CE)
wire(0, dff.CLR)
wire(main.I, dff.D)
wire(dff.Q, main.O)
EndCircuit()
print(compile(main)) # compile will wire up the CLK
print(repr(main))
| [
"magma.EndCircuit",
"magma.backend.verilog.compile",
"magma.In",
"mantle.xilinx.spartan6.FDCE",
"magma.enable",
"magma.Out",
"magma.wire"
] | [((280, 286), 'mantle.xilinx.spartan6.FDCE', 'FDCE', ([], {}), '()\n', (284, 286), False, 'from mantle.xilinx.spartan6 import FDCE\n'), ((321, 337), 'magma.wire', 'wire', (['(0)', 'dff.CLR'], {}), '(0, dff.CLR)\n', (325, 337), False, 'from magma import DefineCircuit, EndCircuit, In, Out, Bit, Clock, wire\n'), ((342, 361), 'magma.wire', 'wire', (['main.I', 'dff.D'], {}), '(main.I, dff.D)\n', (346, 361), False, 'from magma import DefineCircuit, EndCircuit, In, Out, Bit, Clock, wire\n'), ((366, 385), 'magma.wire', 'wire', (['dff.Q', 'main.O'], {}), '(dff.Q, main.O)\n', (370, 385), False, 'from magma import DefineCircuit, EndCircuit, In, Out, Bit, Clock, wire\n'), ((390, 402), 'magma.EndCircuit', 'EndCircuit', ([], {}), '()\n', (400, 402), False, 'from magma import DefineCircuit, EndCircuit, In, Out, Bit, Clock, wire\n'), ((228, 235), 'magma.In', 'In', (['Bit'], {}), '(Bit)\n', (230, 235), False, 'from magma import DefineCircuit, EndCircuit, In, Out, Bit, Clock, wire\n'), ((242, 250), 'magma.Out', 'Out', (['Bit'], {}), '(Bit)\n', (245, 250), False, 'from magma import DefineCircuit, EndCircuit, In, Out, Bit, Clock, wire\n'), ((259, 268), 'magma.In', 'In', (['Clock'], {}), '(Clock)\n', (261, 268), False, 'from magma import DefineCircuit, EndCircuit, In, Out, Bit, Clock, wire\n'), ((296, 307), 'magma.enable', 'm.enable', (['(1)'], {}), '(1)\n', (304, 307), True, 'import magma as m\n'), ((414, 427), 'magma.backend.verilog.compile', 'compile', (['main'], {}), '(main)\n', (421, 427), False, 'from magma.backend.verilog import compile\n')] |
from bandit.containers.container import Container
import torch as torch
from collections import OrderedDict
from bandit.functional import estimator_sample,estimator_ledoit_wolf
class Portfolio(Container):
"""
portfolio class
"""
def __init__(self, container_id=-1, module={"file": "eg", "name": "EG"}, **kwargs):
if module["name"] in ["OPAMC","ROPATC"]:
self.risk = True
self.returns = []
else:
self.risk = False
self.returns = None
self.k = kwargs["k"]
import_str = "from bandit.modules.{} import {}".format(module["file"], module["name"])
exec(import_str)
module = eval(module["name"] + "(**kwargs)")
super(Portfolio, self).__init__(container_id=container_id, module=module)
def decide(self, bandit_data):
portfolio = OrderedDict()
weight = self._module.decide()["w"]
i = 0
for stock in bandit_data.arm_reward.keys():
portfolio[stock] = weight[i]
i += 1
return portfolio
def update(self, result, bandit_data, **kwargs):
stock_return = torch.tensor(list(bandit_data.arm_reward.values()))
last_weight = torch.tensor(list(result.values()))
last_weight_hat = last_weight * stock_return / torch.dot(last_weight, stock_return)
arm_context = bandit_data.arm_context
if self.risk:
self.returns.append(stock_return.numpy().tolist())
covariance = estimator_ledoit_wolf(self.returns)
self._module(mean=stock_return, last_weight=last_weight, last_weight_hat=last_weight_hat, covariance=covariance)
else:
self._module(arm_reward=stock_return, last_weight=last_weight, last_weight_hat=last_weight_hat, arm_context = arm_context)
class PortfolioCardinality(Container):
"""
portfolio class
"""
def __init__(self, container_id=-1, module={"file": "eg", "name": "EG"}, **kwargs):
if module["name"] in ["OPAMC","ROPATC"]:
self.risk = True
self.returns = []
else:
self.risk = False
self.returns = None
self.k = kwargs["k"]
import_str = "from bandit.modules.{} import {}".format(module["file"], module["name"])
exec(import_str)
module = eval(module["name"] + "(**kwargs)")
super(PortfolioCardinality, self).__init__(container_id=container_id, module=module)
def decide(self, bandit_data):
portfolio = OrderedDict()
weight = self._module.decide()["w"]
idx = torch.topk(weight, self.k).indices
weight_c = torch.zeros(len(weight))
weight_c[idx] = weight[idx]
weight_c = weight_c / torch.sum(weight_c)
i = 0
for stock in bandit_data.arm_reward.keys():
portfolio[stock] = weight_c[i]
i += 1
return portfolio
def update(self, result, bandit_data, **kwargs):
stock_return = torch.tensor(list(bandit_data.arm_reward.values()))
last_weight = torch.tensor(list(result.values()))
last_weight_hat = last_weight * stock_return / torch.dot(last_weight, stock_return)
arm_context = bandit_data.arm_context
if self.risk:
self.returns.append(stock_return.numpy().tolist())
covariance = estimator_ledoit_wolf(self.returns)
self._module(mean=stock_return, last_weight=last_weight, last_weight_hat=last_weight_hat, covariance=covariance)
else:
self._module(arm_reward=stock_return, last_weight=last_weight, last_weight_hat=last_weight_hat, arm_context = arm_context)
| [
"collections.OrderedDict",
"torch.topk",
"torch.sum",
"bandit.functional.estimator_ledoit_wolf",
"torch.dot"
] | [((856, 869), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (867, 869), False, 'from collections import OrderedDict\n'), ((2511, 2524), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2522, 2524), False, 'from collections import OrderedDict\n'), ((1307, 1343), 'torch.dot', 'torch.dot', (['last_weight', 'stock_return'], {}), '(last_weight, stock_return)\n', (1316, 1343), True, 'import torch as torch\n'), ((1500, 1535), 'bandit.functional.estimator_ledoit_wolf', 'estimator_ledoit_wolf', (['self.returns'], {}), '(self.returns)\n', (1521, 1535), False, 'from bandit.functional import estimator_sample, estimator_ledoit_wolf\n'), ((2583, 2609), 'torch.topk', 'torch.topk', (['weight', 'self.k'], {}), '(weight, self.k)\n', (2593, 2609), True, 'import torch as torch\n'), ((2728, 2747), 'torch.sum', 'torch.sum', (['weight_c'], {}), '(weight_c)\n', (2737, 2747), True, 'import torch as torch\n'), ((3143, 3179), 'torch.dot', 'torch.dot', (['last_weight', 'stock_return'], {}), '(last_weight, stock_return)\n', (3152, 3179), True, 'import torch as torch\n'), ((3336, 3371), 'bandit.functional.estimator_ledoit_wolf', 'estimator_ledoit_wolf', (['self.returns'], {}), '(self.returns)\n', (3357, 3371), False, 'from bandit.functional import estimator_sample, estimator_ledoit_wolf\n')] |
'''
Created on Mar 1, 2022
@author: mballance
'''
import importlib
import sys
from tblink_rpc_core.endpoint import Endpoint
from tblink_rpc_gw.rt.cocotb.mgr import Mgr
def run_cocotb(ep : Endpoint):
# cocotb has a native module for interfacing with the
# simulator. We need to provide our own cocotb 'simulator'
# module. We do this by inserting our own module prior
# to importing cocotb.
sys.modules['cocotb.simulator'] = importlib.import_module("tblink_rpc.rt.cocotb.simulator")
# Set the endpoint for when the user calls
# Note: it's required to import the module here so as
# to avoid messing up replacement of the simulator module
from tblink_rpc import cocotb_compat
cocotb_compat._set_ep(ep)
mgr = Mgr.inst()
mgr.ep = ep
# TODO: init
ep.init(None)
while not ep.is_init():
ep.process_one_message()
import cocotb
cocotb._initialise_testbench([])
pass
def main():
'''
- Initiate connection to hardware platform
- Pass endpoint to 'core' main
'''
pass
if __name__ == "__main__":
main()
| [
"cocotb._initialise_testbench",
"tblink_rpc_gw.rt.cocotb.mgr.Mgr.inst",
"importlib.import_module",
"tblink_rpc.cocotb_compat._set_ep"
] | [((449, 506), 'importlib.import_module', 'importlib.import_module', (['"""tblink_rpc.rt.cocotb.simulator"""'], {}), "('tblink_rpc.rt.cocotb.simulator')\n", (472, 506), False, 'import importlib\n'), ((742, 767), 'tblink_rpc.cocotb_compat._set_ep', 'cocotb_compat._set_ep', (['ep'], {}), '(ep)\n', (763, 767), False, 'from tblink_rpc import cocotb_compat\n'), ((783, 793), 'tblink_rpc_gw.rt.cocotb.mgr.Mgr.inst', 'Mgr.inst', ([], {}), '()\n', (791, 793), False, 'from tblink_rpc_gw.rt.cocotb.mgr import Mgr\n'), ((947, 979), 'cocotb._initialise_testbench', 'cocotb._initialise_testbench', (['[]'], {}), '([])\n', (975, 979), False, 'import cocotb\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*- ########################################################
# ____ _ __ #
# ___ __ __/ / /__ ___ ______ ______(_) /___ __ #
# / _ \/ // / / (_-</ -_) __/ // / __/ / __/ // / #
# /_//_/\_,_/_/_/___/\__/\__/\_,_/_/ /_/\__/\_, / #
# /___/ team #
# #
# nullscan #
# A modular framework designed to chain and automate security tests #
# #
# FILE #
# ssh.py #
# #
# AUTHOR #
# <EMAIL> #
# #
################################################################################
# sys imports
import concurrent.futures as cf
# own imports
from modules.libs.base import Base, tool, timeout
class SSH(Base):
""" SSH module (tcp/22) """
def __init__(self, target, opts):
""" init """
Base.__init__(self, target, opts)
return
@tool
def verify_ssh(self):
"""
DESCR: Verify SSH daemon by reading banner and supported alogrithms. (ext)
TOOLS: ncat
"""
opts = f"-w 3 -i 1 {self.target['host']} {self.target['port']}"
pre_cmd = "echo -e 'SSH-2.0-OpenSSH\\r\\n' |"
self._run_tool('ncat', opts, nullscan_tool='verify_ssh', precmd=pre_cmd)
return
@tool
def ssh_user_enum(self):
"""
DESCR: SSH user enumeration using the timing attack. (ext)
TOOLS: ssh-user-enum
"""
threads = len(self.opts['ulists'])
with cf.ThreadPoolExecutor(threads) as exe:
for u in self.opts['ulists']:
if self._check_file(u, block=False):
opts = f"-u {u} -i {self.target['host']} -p {self.target['port']}"
exe.submit(self._run_tool, 'ssh-user-enum', opts, 'ssh_user_enum')
return
@tool
def hydra_ssh(self):
"""
DESCR: Bruteforce SSH logins. (ext)
TOOLS: hydra
"""
opts = '-e nsr -f'
self._hydra('ssh', opts)
return
@tool
def nmap_ssh(self):
"""
DESCR: Scan SSH service with corresponding NSE scripts. (ext)
TOOLS: nmap
"""
opts = '-n -sS -Pn --open --nsock-engine epoll'
opts += ' --script ssh2-enum-algos,ssh-auth-methods,ssh-hostkey,sshv1'
opts += f" -p {self.target['port']} {self.target['host']}"
self._run_tool('nmap', opts, nullscan_tool='nmap_ssh')
return
# EOF
| [
"modules.libs.base.Base.__init__",
"concurrent.futures.ThreadPoolExecutor"
] | [((1606, 1639), 'modules.libs.base.Base.__init__', 'Base.__init__', (['self', 'target', 'opts'], {}), '(self, target, opts)\n', (1619, 1639), False, 'from modules.libs.base import Base, tool, timeout\n'), ((2197, 2227), 'concurrent.futures.ThreadPoolExecutor', 'cf.ThreadPoolExecutor', (['threads'], {}), '(threads)\n', (2218, 2227), True, 'import concurrent.futures as cf\n')] |
"""
Dynamic Connection Creation from a Variable
This file contains one ongoing DAG that executes every 15 minutes.
This DAG makes use of one custom operator:
- CreateConnectionsFromVariable
https://github.com/airflow-plugins/variable_connection_plugin/blob/master/operator/variable_connection_operator.py#L36
If using encrypted tokens in the Variable (recommended), it is necessary
to create a separate "Fernet Key Connection" with the relevant Fernet Key
kept in the password field. This Conn ID can then be specified in the
operator below.
"""
from datetime import datetime
from airflow import DAG
from airflow.operators import CreateConnectionsFromVariable
FERNET_KEY_CONN_ID = None
CONFIG_VARIABLE_KEY = ''
args = {
'owner': 'airflow',
'start_date': datetime(2018, 2, 22, 0, 0),
'provide_context': True,
'email': [],
'email_on_failure': True
}
dag = DAG(
'__VARIABLE_CONNECTION_CREATION__',
schedule_interval="*/15 * * * *",
default_args=args,
catchup=False
)
create_airflow_connections = CreateConnectionsFromVariable(
task_id='create_airflow_connections',
fernet_key_conn_id=FERNET_KEY_CONN_ID,
config_variable_key=CONFIG_VARIABLE_KEY,
dag=dag)
create_airflow_connections
| [
"datetime.datetime",
"airflow.operators.CreateConnectionsFromVariable",
"airflow.DAG"
] | [((890, 1001), 'airflow.DAG', 'DAG', (['"""__VARIABLE_CONNECTION_CREATION__"""'], {'schedule_interval': '"""*/15 * * * *"""', 'default_args': 'args', 'catchup': '(False)'}), "('__VARIABLE_CONNECTION_CREATION__', schedule_interval='*/15 * * * *',\n default_args=args, catchup=False)\n", (893, 1001), False, 'from airflow import DAG\n'), ((1046, 1211), 'airflow.operators.CreateConnectionsFromVariable', 'CreateConnectionsFromVariable', ([], {'task_id': '"""create_airflow_connections"""', 'fernet_key_conn_id': 'FERNET_KEY_CONN_ID', 'config_variable_key': 'CONFIG_VARIABLE_KEY', 'dag': 'dag'}), "(task_id='create_airflow_connections',\n fernet_key_conn_id=FERNET_KEY_CONN_ID, config_variable_key=\n CONFIG_VARIABLE_KEY, dag=dag)\n", (1075, 1211), False, 'from airflow.operators import CreateConnectionsFromVariable\n'), ((777, 804), 'datetime.datetime', 'datetime', (['(2018)', '(2)', '(22)', '(0)', '(0)'], {}), '(2018, 2, 22, 0, 0)\n', (785, 804), False, 'from datetime import datetime\n')] |
from __future__ import annotations
import json
from collections.abc import Callable, Sequence
from typing import Any
import pytest
import websockets
from asgiref.typing import ASGI3Application, HTTPScope, WebSocketScope
from asphalt.core import Component, Context, inject, require_resource, resource
from fastapi import FastAPI
from httpx import AsyncClient
from starlette.requests import Request
from starlette.responses import JSONResponse, PlainTextResponse, Response
from starlette.websockets import WebSocket
from asphalt.web.fastapi import AsphaltDepends, FastAPIComponent
from .test_asgi3 import TextReplacerMiddleware
@pytest.mark.parametrize("method", ["static", "dynamic"])
@pytest.mark.asyncio
async def test_http(unused_tcp_port: int, method: str):
async def root(
request: Request,
my_resource: str = AsphaltDepends(),
another_resource: str = AsphaltDepends("another"),
) -> Response:
require_resource(HTTPScope)
require_resource(Request)
return JSONResponse(
{
"message": request.query_params["param"],
"my resource": my_resource,
"another resource": another_resource,
}
)
application = FastAPI()
if method == "static":
application.add_api_route("/", root)
components = {}
else:
class RouteComponent(Component):
@inject
async def start(self, ctx: Context, app: FastAPI = resource()) -> None:
app.add_api_route("/", root)
components = {"myroutes": {"type": RouteComponent}}
async with Context() as ctx, AsyncClient() as http:
ctx.add_resource("foo")
ctx.add_resource("bar", name="another")
await FastAPIComponent(
components=components, app=application, port=unused_tcp_port
).start(ctx)
# Ensure that the application got added as a resource
asgi_app = ctx.require_resource(ASGI3Application)
fastapi_app = ctx.require_resource(FastAPI)
assert fastapi_app is asgi_app
response = await http.get(
f"http://12172.16.17.32:{unused_tcp_port}", params={"param": "Hello World"}
)
response.raise_for_status()
assert response.json() == {
"message": "Hello World",
"my resource": "foo",
"another resource": "bar",
}
@pytest.mark.parametrize("method", ["static", "dynamic"])
@pytest.mark.asyncio
async def test_ws(unused_tcp_port: int, method: str):
async def ws_root(
websocket: WebSocket,
my_resource: str = AsphaltDepends(),
another_resource: str = AsphaltDepends("another"),
):
require_resource(WebSocketScope)
await websocket.accept()
message = await websocket.receive_text()
await websocket.send_json(
{
"message": f"Hello {message}",
"my resource": my_resource,
"another resource": another_resource,
}
)
application = FastAPI()
if method == "static":
application.add_api_websocket_route("/ws", ws_root)
components = {}
else:
class RouteComponent(Component):
@inject
async def start(self, ctx: Context, app: FastAPI = resource()) -> None:
app.add_api_websocket_route("/ws", ws_root)
components = {"myroutes": {"type": RouteComponent}}
async with Context() as ctx:
ctx.add_resource("foo")
ctx.add_resource("bar", name="another")
await FastAPIComponent(
components=components, app=application, port=unused_tcp_port
).start(ctx)
# Ensure that the application got added as a resource
asgi_app = ctx.require_resource(ASGI3Application)
fastapi_app = ctx.require_resource(FastAPI)
assert fastapi_app is asgi_app
async with websockets.connect(f"ws://localhost:{unused_tcp_port}/ws") as ws:
await ws.send("World")
response = json.loads(await ws.recv())
assert response == {
"message": "Hello World",
"my resource": "foo",
"another resource": "bar",
}
@pytest.mark.asyncio
async def test_missing_type_annotation():
async def bad_root(request: Request, bad_resource=AsphaltDepends()) -> Response:
return Response("never seen")
application = FastAPI()
application.add_api_route("/", bad_root)
async with Context() as ctx:
component = FastAPIComponent(app=application)
with pytest.raises(
TypeError,
match="Dependency 'bad_resource' in endpoint / is missing a type annotation",
):
await component.start(ctx)
@pytest.mark.parametrize("method", ["direct", "dict"])
@pytest.mark.asyncio
async def test_middleware(unused_tcp_port: int, method: str):
middlewares: Sequence[Callable[..., ASGI3Application] | dict[str, Any]]
if method == "direct":
middlewares = [lambda app: TextReplacerMiddleware(app, "World", "Middleware")]
else:
middlewares = [
{
"type": f"{__name__}:TextReplacerMiddleware",
"text": "World",
"replacement": "Middleware",
}
]
async def root(request: Request) -> Response:
return PlainTextResponse("Hello World")
application = FastAPI()
application.add_api_route("/", root)
async with Context() as ctx, AsyncClient() as http:
await FastAPIComponent(
port=unused_tcp_port, app=application, middlewares=middlewares
).start(ctx)
# Ensure that the application responds correctly to an HTTP request
response = await http.get(
f"http://127.0.0.1:{unused_tcp_port}", params={"param": "Hello World"}
)
response.raise_for_status()
assert response.text == "Hello Middleware"
def test_bad_middleware_type():
with pytest.raises(
TypeError,
match="middleware must be either a callable or a dict, not 'foo'",
):
FastAPIComponent(middlewares=["foo"])
def test_bad_middleware_dict():
with pytest.raises(TypeError, match=r"Middleware \(1\) is not callable"):
FastAPIComponent(middlewares=[{"type": 1}])
| [
"asphalt.core.require_resource",
"fastapi.FastAPI",
"asphalt.web.fastapi.AsphaltDepends",
"starlette.responses.Response",
"asphalt.web.fastapi.FastAPIComponent",
"starlette.responses.JSONResponse",
"pytest.mark.parametrize",
"websockets.connect",
"pytest.raises",
"asphalt.core.Context",
"httpx.A... | [((633, 689), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""method"""', "['static', 'dynamic']"], {}), "('method', ['static', 'dynamic'])\n", (656, 689), False, 'import pytest\n'), ((2421, 2477), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""method"""', "['static', 'dynamic']"], {}), "('method', ['static', 'dynamic'])\n", (2444, 2477), False, 'import pytest\n'), ((4813, 4866), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""method"""', "['direct', 'dict']"], {}), "('method', ['direct', 'dict'])\n", (4836, 4866), False, 'import pytest\n'), ((1248, 1257), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (1255, 1257), False, 'from fastapi import FastAPI\n'), ((3077, 3086), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (3084, 3086), False, 'from fastapi import FastAPI\n'), ((4476, 4485), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (4483, 4485), False, 'from fastapi import FastAPI\n'), ((5470, 5479), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (5477, 5479), False, 'from fastapi import FastAPI\n'), ((840, 856), 'asphalt.web.fastapi.AsphaltDepends', 'AsphaltDepends', ([], {}), '()\n', (854, 856), False, 'from asphalt.web.fastapi import AsphaltDepends, FastAPIComponent\n'), ((890, 915), 'asphalt.web.fastapi.AsphaltDepends', 'AsphaltDepends', (['"""another"""'], {}), "('another')\n", (904, 915), False, 'from asphalt.web.fastapi import AsphaltDepends, FastAPIComponent\n'), ((944, 971), 'asphalt.core.require_resource', 'require_resource', (['HTTPScope'], {}), '(HTTPScope)\n', (960, 971), False, 'from asphalt.core import Component, Context, inject, require_resource, resource\n'), ((980, 1005), 'asphalt.core.require_resource', 'require_resource', (['Request'], {}), '(Request)\n', (996, 1005), False, 'from asphalt.core import Component, Context, inject, require_resource, resource\n'), ((1021, 1147), 'starlette.responses.JSONResponse', 'JSONResponse', (["{'message': request.query_params['param'], 'my resource': my_resource,\n 'another resource': another_resource}"], {}), "({'message': request.query_params['param'], 'my resource':\n my_resource, 'another resource': another_resource})\n", (1033, 1147), False, 'from starlette.responses import JSONResponse, PlainTextResponse, Response\n'), ((1632, 1641), 'asphalt.core.Context', 'Context', ([], {}), '()\n', (1639, 1641), False, 'from asphalt.core import Component, Context, inject, require_resource, resource\n'), ((1650, 1663), 'httpx.AsyncClient', 'AsyncClient', ([], {}), '()\n', (1661, 1663), False, 'from httpx import AsyncClient\n'), ((2633, 2649), 'asphalt.web.fastapi.AsphaltDepends', 'AsphaltDepends', ([], {}), '()\n', (2647, 2649), False, 'from asphalt.web.fastapi import AsphaltDepends, FastAPIComponent\n'), ((2683, 2708), 'asphalt.web.fastapi.AsphaltDepends', 'AsphaltDepends', (['"""another"""'], {}), "('another')\n", (2697, 2708), False, 'from asphalt.web.fastapi import AsphaltDepends, FastAPIComponent\n'), ((2725, 2757), 'asphalt.core.require_resource', 'require_resource', (['WebSocketScope'], {}), '(WebSocketScope)\n', (2741, 2757), False, 'from asphalt.core import Component, Context, inject, require_resource, resource\n'), ((3491, 3500), 'asphalt.core.Context', 'Context', ([], {}), '()\n', (3498, 3500), False, 'from asphalt.core import Component, Context, inject, require_resource, resource\n'), ((4388, 4404), 'asphalt.web.fastapi.AsphaltDepends', 'AsphaltDepends', ([], {}), '()\n', (4402, 4404), False, 'from asphalt.web.fastapi import AsphaltDepends, FastAPIComponent\n'), ((4434, 4456), 'starlette.responses.Response', 'Response', (['"""never seen"""'], {}), "('never seen')\n", (4442, 4456), False, 'from starlette.responses import JSONResponse, PlainTextResponse, Response\n'), ((4547, 4556), 'asphalt.core.Context', 'Context', ([], {}), '()\n', (4554, 4556), False, 'from asphalt.core import Component, Context, inject, require_resource, resource\n'), ((4585, 4618), 'asphalt.web.fastapi.FastAPIComponent', 'FastAPIComponent', ([], {'app': 'application'}), '(app=application)\n', (4601, 4618), False, 'from asphalt.web.fastapi import AsphaltDepends, FastAPIComponent\n'), ((5418, 5450), 'starlette.responses.PlainTextResponse', 'PlainTextResponse', (['"""Hello World"""'], {}), "('Hello World')\n", (5435, 5450), False, 'from starlette.responses import JSONResponse, PlainTextResponse, Response\n'), ((5536, 5545), 'asphalt.core.Context', 'Context', ([], {}), '()\n', (5543, 5545), False, 'from asphalt.core import Component, Context, inject, require_resource, resource\n'), ((5554, 5567), 'httpx.AsyncClient', 'AsyncClient', ([], {}), '()\n', (5565, 5567), False, 'from httpx import AsyncClient\n'), ((6040, 6136), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""middleware must be either a callable or a dict, not \'foo\'"""'}), '(TypeError, match=\n "middleware must be either a callable or a dict, not \'foo\'")\n', (6053, 6136), False, 'import pytest\n'), ((6164, 6201), 'asphalt.web.fastapi.FastAPIComponent', 'FastAPIComponent', ([], {'middlewares': "['foo']"}), "(middlewares=['foo'])\n", (6180, 6201), False, 'from asphalt.web.fastapi import AsphaltDepends, FastAPIComponent\n'), ((6245, 6313), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""Middleware \\\\(1\\\\) is not callable"""'}), "(TypeError, match='Middleware \\\\(1\\\\) is not callable')\n", (6258, 6313), False, 'import pytest\n'), ((6322, 6365), 'asphalt.web.fastapi.FastAPIComponent', 'FastAPIComponent', ([], {'middlewares': "[{'type': 1}]"}), "(middlewares=[{'type': 1}])\n", (6338, 6365), False, 'from asphalt.web.fastapi import AsphaltDepends, FastAPIComponent\n'), ((3947, 4005), 'websockets.connect', 'websockets.connect', (['f"""ws://localhost:{unused_tcp_port}/ws"""'], {}), "(f'ws://localhost:{unused_tcp_port}/ws')\n", (3965, 4005), False, 'import websockets\n'), ((4632, 4739), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""Dependency \'bad_resource\' in endpoint / is missing a type annotation"""'}), '(TypeError, match=\n "Dependency \'bad_resource\' in endpoint / is missing a type annotation")\n', (4645, 4739), False, 'import pytest\n'), ((1489, 1499), 'asphalt.core.resource', 'resource', ([], {}), '()\n', (1497, 1499), False, 'from asphalt.core import Component, Context, inject, require_resource, resource\n'), ((3333, 3343), 'asphalt.core.resource', 'resource', ([], {}), '()\n', (3341, 3343), False, 'from asphalt.core import Component, Context, inject, require_resource, resource\n'), ((1767, 1845), 'asphalt.web.fastapi.FastAPIComponent', 'FastAPIComponent', ([], {'components': 'components', 'app': 'application', 'port': 'unused_tcp_port'}), '(components=components, app=application, port=unused_tcp_port)\n', (1783, 1845), False, 'from asphalt.web.fastapi import AsphaltDepends, FastAPIComponent\n'), ((3603, 3681), 'asphalt.web.fastapi.FastAPIComponent', 'FastAPIComponent', ([], {'components': 'components', 'app': 'application', 'port': 'unused_tcp_port'}), '(components=components, app=application, port=unused_tcp_port)\n', (3619, 3681), False, 'from asphalt.web.fastapi import AsphaltDepends, FastAPIComponent\n'), ((5591, 5676), 'asphalt.web.fastapi.FastAPIComponent', 'FastAPIComponent', ([], {'port': 'unused_tcp_port', 'app': 'application', 'middlewares': 'middlewares'}), '(port=unused_tcp_port, app=application, middlewares=middlewares\n )\n', (5607, 5676), False, 'from asphalt.web.fastapi import AsphaltDepends, FastAPIComponent\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 7 17:56:07 2020
Author: <NAME>"""
import regex as re
import pandas as pd
import time;
from random import randint
import os
import os.path
import errno
from datetime import datetime
from tika import parser
import zipfile
import csv
class PdfParser:
"""A simple python pdf-scanner that receives 3 desired keywords in a textfile
and generates a folder where it places the those pdf's that contain the keywords.
This is useful for pre-selecting CV's from all candidates for specific skills.
On linux make sure you have done:
sudo apt install build-essential libpoppler-cpp-dev pkg-config python3-dev"""
def __init__(self, keywords='desired_skills.txt', cvs='cvs.zip', skills='desired_skills.txt'):
self.zipfolder = cvs
self.keywords = keywords
self.skills_file = csv.reader(
open(skills, "rb"), delimiter=',')
with open(keywords) as f:
self.keywords_words = f.read().splitlines()
print(self.keywords_words)
def make_dir_matches(self):
date_of_search = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
directory1 = str(date_of_search) + "great_fit"
try:
os.makedirs(directory1)
except OSError as e:
if e.errno != errno.EEXIST:
raise
full_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), directory1)
if not os.path.exists(full_path):
os.mkdir(full_path)
file_name_final = r'\great.csv'
file2write = open(full_path + file_name_final, 'w')
file2write.write("here goes the data")
file2write.close()
def scan_cvs(self):
directory2 = "tmp_cvs"
print(self.skills_file)
# with open(self.skills_file) as f:
# skills = f.readlines()
# tmp_cvs_path = os.path.join(os.path.dirname(
# os.path.realpath(__file__)), directory2)
with zipfile.ZipFile(self.zipfolder, 'r') as zip_ref:
zip_ref.extractall(directory2)
raw = parser.from_file(r'tmp_cvs/cvs/resume.pdf')
print(raw['content'])
if __name__ == '__main__':
run_routine = PdfParser()
run_routine.make_dir_matches()
run_routine.scan_cvs()
| [
"os.path.exists",
"zipfile.ZipFile",
"os.makedirs",
"os.path.realpath",
"tika.parser.from_file",
"datetime.datetime.now",
"os.mkdir"
] | [((2115, 2157), 'tika.parser.from_file', 'parser.from_file', (['"""tmp_cvs/cvs/resume.pdf"""'], {}), "('tmp_cvs/cvs/resume.pdf')\n", (2131, 2157), False, 'from tika import parser\n'), ((1247, 1270), 'os.makedirs', 'os.makedirs', (['directory1'], {}), '(directory1)\n', (1258, 1270), False, 'import os\n'), ((1473, 1498), 'os.path.exists', 'os.path.exists', (['full_path'], {}), '(full_path)\n', (1487, 1498), False, 'import os\n'), ((1511, 1530), 'os.mkdir', 'os.mkdir', (['full_path'], {}), '(full_path)\n', (1519, 1530), False, 'import os\n'), ((2008, 2044), 'zipfile.ZipFile', 'zipfile.ZipFile', (['self.zipfolder', '"""r"""'], {}), "(self.zipfolder, 'r')\n", (2023, 2044), False, 'import zipfile\n'), ((1122, 1136), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1134, 1136), False, 'from datetime import datetime\n'), ((1416, 1442), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1432, 1442), False, 'import os\n')] |
import math
def pythagorean(a, b):
return math.sqrt(a ** 2 + b ** 2)
| [
"math.sqrt"
] | [((48, 74), 'math.sqrt', 'math.sqrt', (['(a ** 2 + b ** 2)'], {}), '(a ** 2 + b ** 2)\n', (57, 74), False, 'import math\n')] |
"""add root_cause table
Revision ID: 7ddd008bcaaa
Revises: <PASSWORD>
Create Date: 2021-11-06 19:20:07.167512
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7ddd008bcaaa'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('employees',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=64), nullable=True),
sa.Column('type', sa.String(length=64), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('managers',
sa.Column('id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['id'], ['employees.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('drivers',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('manager_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['id'], ['employees.id'], ),
sa.ForeignKeyConstraint(['manager_id'], ['managers.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('drivers')
op.drop_table('managers')
op.drop_table('employees')
# ### end Alembic commands ###
| [
"sqlalchemy.ForeignKeyConstraint",
"alembic.op.drop_table",
"sqlalchemy.PrimaryKeyConstraint",
"sqlalchemy.Integer",
"sqlalchemy.String"
] | [((1232, 1256), 'alembic.op.drop_table', 'op.drop_table', (['"""drivers"""'], {}), "('drivers')\n", (1245, 1256), False, 'from alembic import op\n'), ((1261, 1286), 'alembic.op.drop_table', 'op.drop_table', (['"""managers"""'], {}), "('managers')\n", (1274, 1286), False, 'from alembic import op\n'), ((1291, 1317), 'alembic.op.drop_table', 'op.drop_table', (['"""employees"""'], {}), "('employees')\n", (1304, 1317), False, 'from alembic import op\n'), ((591, 620), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (614, 620), True, 'import sqlalchemy as sa\n'), ((714, 763), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['id']", "['employees.id']"], {}), "(['id'], ['employees.id'])\n", (737, 763), True, 'import sqlalchemy as sa\n'), ((771, 800), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (794, 800), True, 'import sqlalchemy as sa\n'), ((951, 1000), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['id']", "['employees.id']"], {}), "(['id'], ['employees.id'])\n", (974, 1000), True, 'import sqlalchemy as sa\n'), ((1008, 1064), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['manager_id']", "['managers.id']"], {}), "(['manager_id'], ['managers.id'])\n", (1031, 1064), True, 'import sqlalchemy as sa\n'), ((1072, 1101), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (1095, 1101), True, 'import sqlalchemy as sa\n'), ((436, 448), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (446, 448), True, 'import sqlalchemy as sa\n'), ((489, 509), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (498, 509), True, 'import sqlalchemy as sa\n'), ((549, 569), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (558, 569), True, 'import sqlalchemy as sa\n'), ((679, 691), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (689, 691), True, 'import sqlalchemy as sa\n'), ((858, 870), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (868, 870), True, 'import sqlalchemy as sa\n'), ((917, 929), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (927, 929), True, 'import sqlalchemy as sa\n')] |
__source__ = 'https://leetcode.com/problems/mirror-reflection/'
# Time: O(logP)
# Space: O(1)
#
# Description: Leetcode # 858. Mirror Reflection
#
# There is a special square room with mirrors on each of the four walls.
# Except for the southwest corner, there are receptors on each of the remaining corners, numbered 0, 1, and 2.
#
# The square room has walls of length p,
# and a laser ray from the southwest corner first meets the east wall at a distance q from the 0th receptor.
#
# Return the number of the receptor that the ray meets first.
# (It is guaranteed that the ray will meet a receptor eventually.)
#
# Example 1:
#
# Input: p = 2, q = 1
# Output: 2
# Explanation: The ray meets receptor 2 the first time it gets reflected back to the left wall.
#
# Note:
# 1 <= p <= 1000
# 0 <= q <= p
#
import unittest
# 24ms 50%
class Solution(object):
def mirrorReflection(self, p, q):
"""
:type p: int
:type q: int
:rtype: int
"""
from fractions import gcd
g = gcd(p, q)
p = (p / g) % 2
q = (q / g) % 2
return 1 if p and q else 0 if p else 2
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/mirror-reflection/solution/
#
Approach 1: Simulation
Complexity Analysis
Time Complexity: O(p). We can prove (using Approach #2) that the number of bounces is bounded by this.
Space Complexity: O(1)
# 6ms 11.06%
class Solution {
double EPS = 1e-6;
public int mirrorReflection(int p, int q) {
double x = 0, y = 0;
double rx = p, ry = q;
// While it hasn't reached a receptor,...
while ( !(close(x, p) && (close(y, 0) || close(y, p)) || close(x, 0) && close(y, p))) {
// Want smallest t so that some x + rx, y + ry is 0 or p
// x + rxt = 0, then t = -x/rx etc.
double t = 1e9;
if ((-x / rx) > EPS) t = Math.min(t, -x / rx);
if ((-y / ry) > EPS) t = Math.min(t, -y / ry);
if (((p-x) / rx) > EPS) t = Math.min(t, (p-x) / rx);
if (((p-y) / ry) > EPS) t = Math.min(t, (p-y) / ry);
x += rx * t;
y += ry * t;
if (close(x, p) || close(x, 0)) rx *= -1;
if (close(y, p) || close(y, 0)) ry *= -1;
}
if (close(x, p) && close(y, p)) return 1;
return close(x, p) ? 0 : 2;
}
private boolean close(double x, double y) {
return Math.abs(x - y) < EPS;
}
}
Approach 2: Mathematical
Complexity Analysis
Time Complexity: O(logP), the complexity of the gcd operation.
Space Complexity: O(1)
The mathematical answer is k = p / gcd(p, q).
# 2ms 100%
class Solution {
public int mirrorReflection(int p, int q) {
int g = gcd(p, q);
p /= g; p %= 2;
q /= g; q %= 2;
if (p == 1 && q == 1) return 1;
return p == 1 ? 0 : 2;
}
private int gcd(int a, int b) {
return a == 0 ? b : gcd(b % a, a);
}
}
'''
| [
"unittest.main",
"fractions.gcd"
] | [((1268, 1283), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1281, 1283), False, 'import unittest\n'), ((1034, 1043), 'fractions.gcd', 'gcd', (['p', 'q'], {}), '(p, q)\n', (1037, 1043), False, 'from fractions import gcd\n')] |
#!/usr/bin/env python3
"""
#------------------------------------------------------------------------------
#
# SCRIPT: forecast_task_05.py
#
# PURPOSE: Computes the bias correction for the NMME dataset. Based on
# FORECAST_TASK_03.sh.
#
# REVISION HISTORY:
# 24 Oct 2021: <NAME>, first version
#
#------------------------------------------------------------------------------
"""
#
# Standard modules
#
import configparser
import os
import subprocess
import sys
#
# Local methods
#
def _usage():
"""Print command line usage."""
txt = "[INFO] Usage: {(sys.argv[0])} fcst_syr fcst_eyr clim_syr clim_eyr "\
"month_abbr month_num lat1 lat2 lon1 lon2 nmme_model lead_months "\
"config_file"
print(txt)
print("[INFO] where")
print("[INFO] fcst_syr: Start year of forecast")
print("[INFO] fcst_eyr: End year of forecast")
print("[INFO] clim_syr: Start year of the climatological period")
print("[INFO] clim_eyr: End year of the climatological period")
print("[INFO] month_abbr: Abbreviation of the initialization month")
print("[INFO] month_num: Integer number of the initialization month")
print("[INFO] lat1: Minimum latitudinal extent")
print("[INFO] lat2: Maximum latitudinal extent")
print("[INFO] lon1: Minimum longitudinal extent")
print("[INFO] lon2: Maximum longitudinal extent")
print("[INFO] nmme_model: NMME model name")
print("[INFO] lead_months: Number of lead months")
print("[INFO] config_file: Config file that sets up environment")
def _read_cmd_args():
"""Read command line arguments."""
if len(sys.argv) != 14:
print("[ERR] Invalid number of command line arguments!")
_usage()
sys.exit(1)
# fcst_syr
try:
fcst_syr = int(sys.argv[1])
except ValueError:
print(f"[ERR] Invalid argument for fcst_syr! Received {(sys.argv[1])}")
_usage()
sys.exit(1)
if fcst_syr < 0:
print(f"[ERR] Invalid argument for fcst_syr! Received {(sys.argv[1])}")
_usage()
sys.exit(1)
# fcst_eyr
try:
fcst_eyr = int(sys.argv[2])
except ValueError:
print(f"[ERR] Invalid argument for fcst_eyr! Received {(sys.argv[2])}")
_usage()
sys.exit(1)
if fcst_eyr < 0:
print(f"[ERR] Invalid argument for fcst_eyr! Received {(sys.argv[2])}")
_usage()
sys.exit(1)
# clim_syr
try:
clim_syr = int(sys.argv[3])
except ValueError:
print(f"[ERR] Invalid argument for clim_syr! Received {(sys.argv[3])}")
_usage()
sys.exit(1)
if clim_syr < 0:
print(f"[ERR] Invalid argument for clim_syr! Received {(sys.argv[3])}")
_usage()
sys.exit(1)
# clim_eyr
try:
clim_eyr = int(sys.argv[4])
except ValueError:
print(f"[ERR] Invalid argument for clim_eyr! Received {(sys.argv[4])}")
_usage()
sys.exit(1)
if clim_eyr < 0:
print(f"[ERR] Invalid argument for clim_eyr! Received {(sys.argv[4])}")
_usage()
sys.exit(1)
# month_abbr
month_abbr = str(sys.argv[5])
# month_num
try:
month_num = int(sys.argv[6])
except ValueError:
print(f"[ERR] Invalid argument for month_num! Received {(sys.argv[6])}")
_usage()
sys.exit(1)
if month_num < 1:
print(f"[ERR] Invalid argument for month_num! Received {(sys.argv[6])}")
_usage()
sys.exit(1)
if month_num > 12:
print(f"[ERR] Invalid argument for month_num! Received {(sys.argv[6])}")
_usage()
sys.exit(1)
# lat1
try:
lat1 = int(sys.argv[7])
except ValueError:
print(f"[ERR] Invalid argument for lat1! Received {(sys.argv[7])}")
_usage()
sys.exit(1)
# lat2
try:
lat2 = int(sys.argv[8])
except ValueError:
print(f"[ERR] Invalid argument for lat2! Received {(sys.argv[8])}")
_usage()
sys.exit(1)
# lon1
try:
lon1 = int(sys.argv[9])
except ValueError:
print(f"[ERR] Invalid argument for lon1! Received {(sys.argv[9])}")
_usage()
sys.exit(1)
# lon2
try:
lon2 = int(sys.argv[10])
except ValueError:
print(f"[ERR] Invalid argument for lon2! Received {(sys.argv[10])}")
_usage()
sys.exit(1)
# nmme_model
nmme_model = str(sys.argv[11])
# lead_months
try:
lead_months = int(sys.argv[12])
except ValueError:
print(f"[ERR] Invalid argument for lead_months! Received {(sys.argv[12])}")
_usage()
sys.exit(1)
if lead_months < 0:
print(f"[ERR] Invalid argument for lead_months! Received {(sys.argv[12])}")
_usage()
sys.exit(1)
# config_file
config_file = sys.argv[13]
if not os.path.exists(config_file):
print(f"[ERR] {config_file} does not exist!")
sys.exit(1)
return fcst_syr, fcst_eyr, clim_syr, clim_eyr, month_abbr, month_num,\
lat1, lat2, lon1, lon2, nmme_model, lead_months, config_file
def read_config(config_file):
"""Read from bcsd_preproc config file."""
config = configparser.ConfigParser()
config.read(config_file)
return config
def _gather_ensemble_info(nmme_model):
"""Gathers ensemble information based on NMME model."""
# Number of ensembles in the forecast (ENS_NUMF)
# Number of ensembles in the climatology (ENS_NUMC)
# Ensemble start index (ENS_START)
# Ensemble end index (ENS_END)
if nmme_model == "CFSv2":
ens_numf=24
ens_numc=12
ens_start=1
ens_end=24
elif nmme_model == "GEOSv2":
ens_numf=10
ens_numc=4
ens_start=25
ens_end=34
elif nmme_model == "CCM4":
ens_numf=10
ens_numc=10
ens_start=35
ens_end=44
elif nmme_model == "GNEMO":
ens_numf=10
ens_numc=10
ens_start=45
ens_end=54
elif nmme_model == "CCSM4":
ens_numf=10
ens_numc=10
ens_start=55
ens_end=64
elif nmme_model == "GFDL":
ens_numf=30
ens_numc=15
ens_start=65
ens_end=94
else:
print(f"[ERR] Invalid argument for nmme_model! Received {(nmme_model)}")
sys.exit(1)
return ens_numf, ens_numc, ens_start, ens_end
def _driver():
"""Main driver."""
fcst_syr, fcst_eyr, clim_syr, clim_eyr, month_abbr, month_num, lat1, lat2,\
lon1, lon2, nmme_model, lead_months, config_file = _read_cmd_args()
# Setup local directories
config = read_config(config_file)
# Path of the main project directory
projdir = config["bcsd_preproc"]["projdir"]
# Path of the directory where all the BC codes are kept
srcdir = config["bcsd_preproc"]["srcdir"]
# Log file output directory
logdir = config["bcsd_preproc"]["logdir"]
# Path of the directory where supplementary files are kept
supplementary_dir = config["bcsd_preproc"]["supplementary_dir"]
# Path for where observational files are located:
forcedir=f"{projdir}/data/forecast"
obs_clim_indir=f"{forcedir}/USAF-LIS7.3rc8_25km/raw/Climatology"
# Mask file
mask_file=f"{supplementary_dir}/Mask_nafpa.nc"
# Calculate bias correction for different variables separately:
obs_var="Rainf_f_tavg"
fcst_var="PRECTOT"
unit="kg/m^2/s"
var_type="PRCP"
# Path for where nmme forecast files are located:
fcst_clim_indir=f"{forcedir}/NMME/raw/Climatology/{month_abbr}01"
fcst_indir=f"{forcedir}/NMME/raw/Monthly/{month_abbr}01"
# Path for where output BC forecast file are located:
outdir=f"{forcedir}/NMME/bcsd/Monthly/{month_abbr}01"
if not os.path.exists(outdir):
os.makedirs(outdir)
print(f"[INFO] Processing forecast bias correction of NMME-{nmme_model} precip")
ens_numf, ens_numc, ens_start, ens_end = _gather_ensemble_info(nmme_model)
for year in range(fcst_syr, (fcst_eyr + 1)):
cmd = "sbatch"
cmd += f" {srcdir}/run_NMME_BCSD_calctest.scr"
cmd += f" {srcdir}"
cmd += f" {obs_var}"
cmd += f" {fcst_var}"
cmd += f" {month_num}"
cmd += f" {var_type}"
cmd += f" {unit}"
cmd += f" {lat1}"
cmd += f" {lat2}"
cmd += f" {lon1}"
cmd += f" {lon2}"
cmd += f" {ens_numc}"
cmd += f" {ens_numf}"
cmd += f" {nmme_model}"
cmd += f" {lead_months}"
cmd += f" {year}"
cmd += f" {year}"
cmd += f" {clim_syr}"
cmd += f" {clim_eyr}"
cmd += f" {mask_file}"
cmd += f" {fcst_clim_indir}"
cmd += f" {obs_clim_indir}"
cmd += f" {fcst_indir}"
cmd += f" {outdir}"
cmd += f" {logdir}"
cmd += f" {ens_start}"
cmd += f" {ens_end}"
returncode = subprocess.call(cmd, shell=True)
if returncode != 0:
print("[ERR] Problem calling sbatch!")
sys.exit(1)
print(f"[INFO] Completed processing NMME bias correction for: {(month_abbr)}")
#
# Main Method
#
if __name__ == "__main__":
_driver()
| [
"os.path.exists",
"configparser.ConfigParser",
"os.makedirs",
"subprocess.call",
"sys.exit"
] | [((5174, 5201), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (5199, 5201), False, 'import configparser\n'), ((1706, 1717), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1714, 1717), False, 'import sys\n'), ((2045, 2056), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2053, 2056), False, 'import sys\n'), ((2384, 2395), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2392, 2395), False, 'import sys\n'), ((2723, 2734), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2731, 2734), False, 'import sys\n'), ((3062, 3073), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3070, 3073), False, 'import sys\n'), ((3458, 3469), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3466, 3469), False, 'import sys\n'), ((3599, 3610), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3607, 3610), False, 'import sys\n'), ((4767, 4778), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4775, 4778), False, 'import sys\n'), ((4840, 4867), 'os.path.exists', 'os.path.exists', (['config_file'], {}), '(config_file)\n', (4854, 4867), False, 'import os\n'), ((4931, 4942), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4939, 4942), False, 'import sys\n'), ((7725, 7747), 'os.path.exists', 'os.path.exists', (['outdir'], {}), '(outdir)\n', (7739, 7747), False, 'import os\n'), ((7757, 7776), 'os.makedirs', 'os.makedirs', (['outdir'], {}), '(outdir)\n', (7768, 7776), False, 'import os\n'), ((8859, 8891), 'subprocess.call', 'subprocess.call', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (8874, 8891), False, 'import subprocess\n'), ((1907, 1918), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1915, 1918), False, 'import sys\n'), ((2246, 2257), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2254, 2257), False, 'import sys\n'), ((2585, 2596), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2593, 2596), False, 'import sys\n'), ((2924, 2935), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2932, 2935), False, 'import sys\n'), ((3318, 3329), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3326, 3329), False, 'import sys\n'), ((3788, 3799), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3796, 3799), False, 'import sys\n'), ((3977, 3988), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3985, 3988), False, 'import sys\n'), ((4166, 4177), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4174, 4177), False, 'import sys\n'), ((4357, 4368), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4365, 4368), False, 'import sys\n'), ((4622, 4633), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4630, 4633), False, 'import sys\n'), ((8983, 8994), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (8991, 8994), False, 'import sys\n'), ((6299, 6310), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6307, 6310), False, 'import sys\n')] |
import io
import os
import time
import urllib.request
import zipfile
import numpy as np
from scipy.io.wavfile import read as wav_read
from tqdm import tqdm
class dclde:
"""
The high-frequency dataset consists of marked encounters with echolocation
clicks of species commonly found along the US Atlantic Coast, and in the
Gulf of Mexico:
Mesoplodon europaeus - Gervais' beaked whale
Ziphius cavirostris - Cuvier's beaked whale
Mesoplodon bidens - Sowerby's beaked whale
Lagenorhynchus acutus - Atlantic white-sided dolphin
Grampus griseus - Risso's dolphin
Globicephala macrorhynchus - Short-finned pilot whale
Stenella sp. - Stenellid dolphins
Delphinid type A
Delphinid type B
Unidentified delphinid - delphinid other than those described above
The goal for these datasets is to identify acoustic encounters by species
during times when animals were echolocating. Analysts examined data for
echolocation clicks and approximated the start and end times of acoustic
encounters. Any period that was separated from another one by five minutes
or more was marked as a separate encounter. Whistle activity was not
considered. Consequently, while the use of whistle information during
echolocation activity is appropriate, reporting a species based on whistles
in the absence of echolocation activity will be considered a false positive
for this classification task.
"""
def download(path):
"""ToDo"""
# Load the dataset (download if necessary) and set
# the class attributes.
print("Loading DCLDE")
t = time.time()
if not os.path.isdir(path + "DCLDE"):
print("\tCreating Directory")
os.mkdir(path + "DCLDE")
if not os.path.exists(path + "DCLDE/DCLDE_LF_Dev.zip"):
url = "http://sabiod.univ-tln.fr/workspace/DCLDE2018/DCLDE_LF_Dev.zip"
with DownloadProgressBar(
unit="B", unit_scale=True, miniters=1, desc="Wav files"
) as t:
urllib.request.urlretrieve(url, path + "DCLDE/DCLDE_LF_Dev.zip")
def load(window_size=441000, path=None):
"""ToDo"""
if path is None:
path = os.environ["DATASET_path"]
dclde.download(path)
# Loading the files
f = zipfile.ZipFile(path + "DCLDE/DCLDE_LF_Dev.zip")
wavs = list()
# labels = list()
for zipf in tqdm(f.filelist, ascii=True):
if ".wav" in zipf.filename and ".d100." in zipf.filename:
wavfile = f.read(zipf)
byt = io.BytesIO(wavfile)
wav = wav_read(byt)[1].astype("float32")
for s in range(len(wav) // window_size):
wavs.append(wav[s * window_size : (s + 1) * window_size])
# labels.append(zipf.filename.split('/')[2])
# return wavs,labels
wavs = np.expand_dims(np.asarray(wavs), 1)
dataset.add_variable({"signals": {"train_set": wavs}})
print(
"Dataset freefield1010 loaded in", "{0:.2f}".format(time.time() - t), "s."
)
return dataset
| [
"os.path.exists",
"zipfile.ZipFile",
"tqdm.tqdm",
"numpy.asarray",
"io.BytesIO",
"os.path.isdir",
"scipy.io.wavfile.read",
"os.mkdir",
"time.time"
] | [((1646, 1657), 'time.time', 'time.time', ([], {}), '()\n', (1655, 1657), False, 'import time\n'), ((2348, 2396), 'zipfile.ZipFile', 'zipfile.ZipFile', (["(path + 'DCLDE/DCLDE_LF_Dev.zip')"], {}), "(path + 'DCLDE/DCLDE_LF_Dev.zip')\n", (2363, 2396), False, 'import zipfile\n'), ((2469, 2497), 'tqdm.tqdm', 'tqdm', (['f.filelist'], {'ascii': '(True)'}), '(f.filelist, ascii=True)\n', (2473, 2497), False, 'from tqdm import tqdm\n'), ((1674, 1703), 'os.path.isdir', 'os.path.isdir', (["(path + 'DCLDE')"], {}), "(path + 'DCLDE')\n", (1687, 1703), False, 'import os\n'), ((1759, 1783), 'os.mkdir', 'os.mkdir', (["(path + 'DCLDE')"], {}), "(path + 'DCLDE')\n", (1767, 1783), False, 'import os\n'), ((1799, 1846), 'os.path.exists', 'os.path.exists', (["(path + 'DCLDE/DCLDE_LF_Dev.zip')"], {}), "(path + 'DCLDE/DCLDE_LF_Dev.zip')\n", (1813, 1846), False, 'import os\n'), ((2968, 2984), 'numpy.asarray', 'np.asarray', (['wavs'], {}), '(wavs)\n', (2978, 2984), True, 'import numpy as np\n'), ((2630, 2649), 'io.BytesIO', 'io.BytesIO', (['wavfile'], {}), '(wavfile)\n', (2640, 2649), False, 'import io\n'), ((3132, 3143), 'time.time', 'time.time', ([], {}), '()\n', (3141, 3143), False, 'import time\n'), ((2672, 2685), 'scipy.io.wavfile.read', 'wav_read', (['byt'], {}), '(byt)\n', (2680, 2685), True, 'from scipy.io.wavfile import read as wav_read\n')] |
import ecdsa
import hashlib
sk = ecdsa.SigningKey.generate(curve=ecdsa.NIST256p)
vk = sk.get_verifying_key()
a = b"Hello World!"
sig = sk.sign(a,hashfunc=hashlib.sha256)
result = vk.verify(sig,a,hashfunc=hashlib.sha256)
strsk = sk.to_string()
strvk = vk.to_string()
sk2 = ecdsa.SigningKey.from_string(strsk,curve=ecdsa.NIST256p)
vk2 = sk2.get_verifying_key()
| [
"ecdsa.SigningKey.generate",
"ecdsa.SigningKey.from_string"
] | [((34, 81), 'ecdsa.SigningKey.generate', 'ecdsa.SigningKey.generate', ([], {'curve': 'ecdsa.NIST256p'}), '(curve=ecdsa.NIST256p)\n', (59, 81), False, 'import ecdsa\n'), ((277, 334), 'ecdsa.SigningKey.from_string', 'ecdsa.SigningKey.from_string', (['strsk'], {'curve': 'ecdsa.NIST256p'}), '(strsk, curve=ecdsa.NIST256p)\n', (305, 334), False, 'import ecdsa\n')] |
import subprocess
import cifrado
import enviocorreos
import puertos
import metadata
import webscraping
import argparse
import os, time
import logging
logging.basicConfig(filename='app.log', level=logging.INFO)
if __name__ == "__main__":
description= ("Este script realiza una gran diversa cantidad de tareas " +
"las cuales son las siguientes: realizar cifrados, obtener metadata, " +
"escaneo de puertos, envio de correos y webscraping")
parser = argparse.ArgumentParser(description="PIA", epilog=description, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("-t", metavar='TAREA', dest="tarea", choices=['cifrar','correos', 'dns', 'puertos', 'metadata', 'web', 'hash'] , help='Se elige la tarea a realizar', required=True)
parser.add_argument("-m", metavar='MODO', dest="modo",
choices=['cifmensaje', 'desmensaje', 'cifgithub', 'destxt', 'ciftxt', 'busqueda', 'emails', 'pdf', 'img'] , help='Si desea utilizar la tarea de cifrado o de web scraping, es necesario especificiar el modo')
parser.add_argument("-msj", metavar='MENSAJE', dest="mensaje", type=str, help='Se debe poner un mensaje el cual se quiera cifrar o descifrar.')
parser.add_argument("-key", metavar='LLAVE', dest="llave", type=int, help='Se utiliza para saber a base de cual llave se cifra o descifra el mensaje')
parser.add_argument("-user", metavar='USUARIO', dest="usuario", type=str, help='Es un argumento necesario para la funcion de cifrar los resultados obtenidos de la API de Github')
parser.add_argument("-ru", metavar='RUTA', dest="ruta", type=str, help='Ruta necesaria para el txt que se va a descifrar o donde se encuentran los arctivos pata la funcion de metadata')
parser.add_argument("-rem", metavar='REMITENTE', dest="remitente", type=str, help='Correo del que se enviará el mensaje.')
parser.add_argument("-des", metavar='DESTINATARIO', dest="destinatario", type=str, help='Correo que recibirá el mensaje.')
parser.add_argument("-url", metavar= 'URL', dest="dominio", type=str, help='Url a investigar.')
parser.add_argument("-cont", metavar='CONTENIDO', dest="contenido", type=str, help='Se debe poner un mensaje el cual se quiera enviar.', default="Hola mundo mundial")
parser.add_argument("-asu", metavar='ASUNTO', dest="asunto", type=str, help='Se utiliza para poner el titulo que tendrá el correo.', default="Hola!")
parser.add_argument("-ip", metavar='IP', dest="ip", type=str, help='Se debe introducir la ip a consultar, solo el ID de red.', default="172.217.15.")
parser.add_argument("-ports", metavar='PUERTOS', dest="puertos", help='Introduce los puertos a revisar separados por una coma [80,800]', default= "80,800")
parser.add_argument("-a", metavar='ARCHIVO', dest="archivo", choices=['imagen', 'imagenes', 'pdf', 'pdfs', 'word', 'words', 'mp3', 'mp3s'] , help='Si desea utilizar la tarea de sacar la metadata, es necesario especificiar el tipo de archivo')
parser.add_argument("-mp", metavar= 'METAPATH', dest="metapath", type=str, help='Ruta donde se guardarán los metadatas encontrados.')
parser.add_argument("-bus", metavar='BUSQUEDA', dest="busqueda", type=str, help='Busqueda para realizar en google')
params = parser.parse_args()
try:
logging.info("Se escogió la tarea: ")
logging.info(params.tarea)
tarea = (params.tarea)
except Exception as e:
logging.error("Ocurrió un error: " + str(e))
print(e)
exit
try:
if tarea == 'cifrar':
modo = (params.modo)
logging.info("El modo es: " + modo)
llave = (params.llave)
if (modo == 'cifmensaje') or (modo == 'desmensaje'):
mensaje = (params.mensaje)
logging.info("El mensaje es: " + str(mensaje))
if modo == 'cifmensaje':
print(cifrado.cifrar_mensaje(mensaje, llave))
else:
print(cifrado.descifrar_mensaje(mensaje, llave))
elif modo == 'cifgithub':
usuario = (params.usuario)
logging.info("El usuario es: " + usuario)
cifrado.cifrar_github(usuario, llave)
elif modo == 'destxt':
ruta = (params.ruta)
logging.info("Usaremos la ruta: " + ruta)
cifrado.descifrar_txt(ruta, llave)
elif modo == 'ciftxt':
ruta = params.ruta
logging.info("Usaremos la ruta: " + ruta)
cifrado.cifrar_txt(ruta, llave)
else:
logging.error("Opcion no válida para cifrado")
print('Opción no válida para cifrado')
elif tarea == 'correos':
remitente = (params.remitente)
logging.info("El remitente es: " + remitente)
destinatario = (params.destinatario)
logging.info("El destinatario es: " + destinatario)
mensaje = (params.contenido)
logging.info("El mensaje es: " + mensaje)
asunto = (params.asunto)
logging.info("El asunto es: " + asunto)
orga = (params.dominio)
logging.info("La organizacion es: " + orga)
datos_encontrados = enviocorreos.Busqueda(orga)
if datos_encontrados is None:
logging.info("No se encontró nada")
print("No se encontró nada")
exit()
else:
enviocorreos.GuardarInformacion(datos_encontrados, orga, remitente, destinatario, asunto, mensaje)
elif tarea == 'dns':
logging.info("Se inicia la tarea de dns")
print()
script_p = subprocess.Popen([r'C:\WINDOWS\system32\WindowsPowerShell\v1.0\powershell.exe','-ExecutionPolicy', 'Unrestricted', './dns.ps1'], cwd=os.getcwd())
script_p.wait()
elif tarea == 'metadata':
logging.info("Tipo de archivo:")
archivo = (params.archivo)
logging.info(archivo)
if (archivo == 'imagen') or (archivo == 'imagenes'):
ruta = (params.ruta)
logging.info("En la ruta: " + ruta)
metapath = (params.metapath)
logging.info("El metadata se guardará en la ruta: " + metapath)
if archivo == 'imagen':
logging.info("Ingresamos a la función printOneMetaImg")
metadata.printOneMetaImg(ruta, metapath)
else:
logging.info("Ingresamos a la función printAllMetaImg")
metadata.printAllMetaImg(ruta, metapath)
elif (archivo == 'pdf') or (archivo == 'pdfs'):
ruta = (params.ruta)
logging.info("Usaremos la ruta: " + ruta)
metapath = (params.metapath)
logging.info("Guardaremos la metadata en: " + metapath)
if archivo == 'pdf':
logging.info("Ingresamos a la función printOneMetaPDF")
metadata.printOneMetaPDf(ruta, metapath)
else:
logging.info("Ingresamos a la función printAllMetaPDF")
metadata.printAllMetaPDf(ruta, metapath)
elif (archivo == 'word') or (archivo == 'words'):
ruta = (params.ruta)
logging.info("Usaremos la ruta: " + ruta)
metapath = (params.metapath)
logging.info("Guardaremos la metadata en: " + metapath)
if archivo == 'word':
logging.info("Ingresamos a la función printOneMetaDocx")
metadata.printOneMetaDocx(ruta, metapath)
else:
logging.info("Ingresamos a la función printAllMetaDocx")
metadata.printAllMetaDocx(ruta, metapath)
else:
ruta = (params.ruta)
logging.info("Usaremos la ruta: " + ruta)
metapath = (params.metapath)
logging.info("Guardaremos la metadata en: " + metapath)
if archivo == 'mp3':
logging.info("Ingresamos a la función printOneMetaMp3")
metadata.printOneMetaMp3(ruta, metapath)
else:
logging.info("Ingresamos a la función printAllMetaMp3")
metadata.printAllMetaMp3(ruta, metapath)
elif tarea == 'puertos':
logging.info("Se introdujo la ip: ")
ip = params.ip
logging.info(ip)
print("Se revisará la ip: " + ip)
logging.info("Se escanearan los puertos: ")
puertoss = params.puertos
logging.info(puertoss)
portlist = params.puertos.split(',')
for i in range (len(portlist)):
print("Con los puertos: " + portlist[i])
portlist[i] = int(portlist[i])
puertos.checoPuertos(ip, portlist, puertoss)
elif tarea == 'web':
logging.info("Con el modo: ")
modo = params.modo
logging.info(modo)
if modo == 'emails' or modo == 'pdf' or modo == 'img':
url = params.dominio
logging.info("El dominio es: ")
logging.info(url)
if modo == 'emails':
logging.info("Si el \"modo\" es: emails")
webscraping.find_mails(url)
elif modo == 'pdf':
logging.info("Si el \"modo\" es: pdf")
webscraping.descargar_pdfs(url)
else:
logging.info("Si el \"modo\" es: img")
webscraping.download_images(url)
elif modo == 'busqueda':
logging.info("Se buscará:")
busqueda = params.busqueda
logging.info(busqueda)
webscraping.busqueda_google(busqueda)
else:
logging.info("Ninguna opción es valida para hacer Web Scrapping" )
print('Opcion no válida para web scraping')
else:
print()
script_p = subprocess.Popen([r'C:\WINDOWS\system32\WindowsPowerShell\v1.0\powershell.exe',
'-ExecutionPolicy', 'Unrestricted', './rutas.ps1'], cwd=os.getcwd())
script_p.wait()
except Exception as e:
logging.error("Ha ocurrido un error: " + str(e))
print(e)
exit
| [
"webscraping.find_mails",
"cifrado.cifrar_txt",
"metadata.printAllMetaPDf",
"metadata.printAllMetaImg",
"cifrado.descifrar_txt",
"metadata.printOneMetaDocx",
"logging.info",
"logging.error",
"metadata.printOneMetaImg",
"argparse.ArgumentParser",
"cifrado.descifrar_mensaje",
"webscraping.descar... | [((152, 211), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""app.log"""', 'level': 'logging.INFO'}), "(filename='app.log', level=logging.INFO)\n", (171, 211), False, 'import logging\n'), ((459, 579), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PIA"""', 'epilog': 'description', 'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), "(description='PIA', epilog=description,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n", (482, 579), False, 'import argparse\n'), ((3216, 3253), 'logging.info', 'logging.info', (['"""Se escogió la tarea: """'], {}), "('Se escogió la tarea: ')\n", (3228, 3253), False, 'import logging\n'), ((3258, 3284), 'logging.info', 'logging.info', (['params.tarea'], {}), '(params.tarea)\n', (3270, 3284), False, 'import logging\n'), ((3476, 3511), 'logging.info', 'logging.info', (["('El modo es: ' + modo)"], {}), "('El modo es: ' + modo)\n", (3488, 3511), False, 'import logging\n'), ((4590, 4635), 'logging.info', 'logging.info', (["('El remitente es: ' + remitente)"], {}), "('El remitente es: ' + remitente)\n", (4602, 4635), False, 'import logging\n'), ((4689, 4740), 'logging.info', 'logging.info', (["('El destinatario es: ' + destinatario)"], {}), "('El destinatario es: ' + destinatario)\n", (4701, 4740), False, 'import logging\n'), ((4786, 4827), 'logging.info', 'logging.info', (["('El mensaje es: ' + mensaje)"], {}), "('El mensaje es: ' + mensaje)\n", (4798, 4827), False, 'import logging\n'), ((4869, 4908), 'logging.info', 'logging.info', (["('El asunto es: ' + asunto)"], {}), "('El asunto es: ' + asunto)\n", (4881, 4908), False, 'import logging\n'), ((4949, 4992), 'logging.info', 'logging.info', (["('La organizacion es: ' + orga)"], {}), "('La organizacion es: ' + orga)\n", (4961, 4992), False, 'import logging\n'), ((5022, 5049), 'enviocorreos.Busqueda', 'enviocorreos.Busqueda', (['orga'], {}), '(orga)\n', (5043, 5049), False, 'import enviocorreos\n'), ((3971, 4012), 'logging.info', 'logging.info', (["('El usuario es: ' + usuario)"], {}), "('El usuario es: ' + usuario)\n", (3983, 4012), False, 'import logging\n'), ((4025, 4062), 'cifrado.cifrar_github', 'cifrado.cifrar_github', (['usuario', 'llave'], {}), '(usuario, llave)\n', (4046, 4062), False, 'import cifrado\n'), ((5100, 5135), 'logging.info', 'logging.info', (['"""No se encontró nada"""'], {}), "('No se encontró nada')\n", (5112, 5135), False, 'import logging\n'), ((5222, 5324), 'enviocorreos.GuardarInformacion', 'enviocorreos.GuardarInformacion', (['datos_encontrados', 'orga', 'remitente', 'destinatario', 'asunto', 'mensaje'], {}), '(datos_encontrados, orga, remitente,\n destinatario, asunto, mensaje)\n', (5253, 5324), False, 'import enviocorreos\n'), ((5361, 5402), 'logging.info', 'logging.info', (['"""Se inicia la tarea de dns"""'], {}), "('Se inicia la tarea de dns')\n", (5373, 5402), False, 'import logging\n'), ((3762, 3800), 'cifrado.cifrar_mensaje', 'cifrado.cifrar_mensaje', (['mensaje', 'llave'], {}), '(mensaje, llave)\n', (3784, 3800), False, 'import cifrado\n'), ((3842, 3883), 'cifrado.descifrar_mensaje', 'cifrado.descifrar_mensaje', (['mensaje', 'llave'], {}), '(mensaje, llave)\n', (3867, 3883), False, 'import cifrado\n'), ((4139, 4180), 'logging.info', 'logging.info', (["('Usaremos la ruta: ' + ruta)"], {}), "('Usaremos la ruta: ' + ruta)\n", (4151, 4180), False, 'import logging\n'), ((4193, 4227), 'cifrado.descifrar_txt', 'cifrado.descifrar_txt', (['ruta', 'llave'], {}), '(ruta, llave)\n', (4214, 4227), False, 'import cifrado\n'), ((5665, 5697), 'logging.info', 'logging.info', (['"""Tipo de archivo:"""'], {}), "('Tipo de archivo:')\n", (5677, 5697), False, 'import logging\n'), ((5741, 5762), 'logging.info', 'logging.info', (['archivo'], {}), '(archivo)\n', (5753, 5762), False, 'import logging\n'), ((4302, 4343), 'logging.info', 'logging.info', (["('Usaremos la ruta: ' + ruta)"], {}), "('Usaremos la ruta: ' + ruta)\n", (4314, 4343), False, 'import logging\n'), ((4356, 4387), 'cifrado.cifrar_txt', 'cifrado.cifrar_txt', (['ruta', 'llave'], {}), '(ruta, llave)\n', (4374, 4387), False, 'import cifrado\n'), ((4414, 4460), 'logging.error', 'logging.error', (['"""Opcion no válida para cifrado"""'], {}), "('Opcion no válida para cifrado')\n", (4427, 4460), False, 'import logging\n'), ((5579, 5590), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5588, 5590), False, 'import os, time\n'), ((5869, 5904), 'logging.info', 'logging.info', (["('En la ruta: ' + ruta)"], {}), "('En la ruta: ' + ruta)\n", (5881, 5904), False, 'import logging\n'), ((5958, 6021), 'logging.info', 'logging.info', (["('El metadata se guardará en la ruta: ' + metapath)"], {}), "('El metadata se guardará en la ruta: ' + metapath)\n", (5970, 6021), False, 'import logging\n'), ((8020, 8056), 'logging.info', 'logging.info', (['"""Se introdujo la ip: """'], {}), "('Se introdujo la ip: ')\n", (8032, 8056), False, 'import logging\n'), ((8084, 8100), 'logging.info', 'logging.info', (['ip'], {}), '(ip)\n', (8096, 8100), False, 'import logging\n'), ((8148, 8191), 'logging.info', 'logging.info', (['"""Se escanearan los puertos: """'], {}), "('Se escanearan los puertos: ')\n", (8160, 8191), False, 'import logging\n'), ((8230, 8252), 'logging.info', 'logging.info', (['puertoss'], {}), '(puertoss)\n', (8242, 8252), False, 'import logging\n'), ((8431, 8475), 'puertos.checoPuertos', 'puertos.checoPuertos', (['ip', 'portlist', 'puertoss'], {}), '(ip, portlist, puertoss)\n', (8451, 8475), False, 'import puertos\n'), ((6074, 6129), 'logging.info', 'logging.info', (['"""Ingresamos a la función printOneMetaImg"""'], {}), "('Ingresamos a la función printOneMetaImg')\n", (6086, 6129), False, 'import logging\n'), ((6146, 6186), 'metadata.printOneMetaImg', 'metadata.printOneMetaImg', (['ruta', 'metapath'], {}), '(ruta, metapath)\n', (6170, 6186), False, 'import metadata\n'), ((6221, 6276), 'logging.info', 'logging.info', (['"""Ingresamos a la función printAllMetaImg"""'], {}), "('Ingresamos a la función printAllMetaImg')\n", (6233, 6276), False, 'import logging\n'), ((6293, 6333), 'metadata.printAllMetaImg', 'metadata.printAllMetaImg', (['ruta', 'metapath'], {}), '(ruta, metapath)\n', (6317, 6333), False, 'import metadata\n'), ((6435, 6476), 'logging.info', 'logging.info', (["('Usaremos la ruta: ' + ruta)"], {}), "('Usaremos la ruta: ' + ruta)\n", (6447, 6476), False, 'import logging\n'), ((6530, 6585), 'logging.info', 'logging.info', (["('Guardaremos la metadata en: ' + metapath)"], {}), "('Guardaremos la metadata en: ' + metapath)\n", (6542, 6585), False, 'import logging\n'), ((8512, 8541), 'logging.info', 'logging.info', (['"""Con el modo: """'], {}), "('Con el modo: ')\n", (8524, 8541), False, 'import logging\n'), ((8577, 8595), 'logging.info', 'logging.info', (['modo'], {}), '(modo)\n', (8589, 8595), False, 'import logging\n'), ((6635, 6690), 'logging.info', 'logging.info', (['"""Ingresamos a la función printOneMetaPDF"""'], {}), "('Ingresamos a la función printOneMetaPDF')\n", (6647, 6690), False, 'import logging\n'), ((6707, 6747), 'metadata.printOneMetaPDf', 'metadata.printOneMetaPDf', (['ruta', 'metapath'], {}), '(ruta, metapath)\n', (6731, 6747), False, 'import metadata\n'), ((6782, 6837), 'logging.info', 'logging.info', (['"""Ingresamos a la función printAllMetaPDF"""'], {}), "('Ingresamos a la función printAllMetaPDF')\n", (6794, 6837), False, 'import logging\n'), ((6854, 6894), 'metadata.printAllMetaPDf', 'metadata.printAllMetaPDf', (['ruta', 'metapath'], {}), '(ruta, metapath)\n', (6878, 6894), False, 'import metadata\n'), ((6998, 7039), 'logging.info', 'logging.info', (["('Usaremos la ruta: ' + ruta)"], {}), "('Usaremos la ruta: ' + ruta)\n", (7010, 7039), False, 'import logging\n'), ((7093, 7148), 'logging.info', 'logging.info', (["('Guardaremos la metadata en: ' + metapath)"], {}), "('Guardaremos la metadata en: ' + metapath)\n", (7105, 7148), False, 'import logging\n'), ((7522, 7563), 'logging.info', 'logging.info', (["('Usaremos la ruta: ' + ruta)"], {}), "('Usaremos la ruta: ' + ruta)\n", (7534, 7563), False, 'import logging\n'), ((7617, 7672), 'logging.info', 'logging.info', (["('Guardaremos la metadata en: ' + metapath)"], {}), "('Guardaremos la metadata en: ' + metapath)\n", (7629, 7672), False, 'import logging\n'), ((8704, 8735), 'logging.info', 'logging.info', (['"""El dominio es: """'], {}), "('El dominio es: ')\n", (8716, 8735), False, 'import logging\n'), ((8748, 8765), 'logging.info', 'logging.info', (['url'], {}), '(url)\n', (8760, 8765), False, 'import logging\n'), ((7199, 7255), 'logging.info', 'logging.info', (['"""Ingresamos a la función printOneMetaDocx"""'], {}), "('Ingresamos a la función printOneMetaDocx')\n", (7211, 7255), False, 'import logging\n'), ((7272, 7313), 'metadata.printOneMetaDocx', 'metadata.printOneMetaDocx', (['ruta', 'metapath'], {}), '(ruta, metapath)\n', (7297, 7313), False, 'import metadata\n'), ((7348, 7404), 'logging.info', 'logging.info', (['"""Ingresamos a la función printAllMetaDocx"""'], {}), "('Ingresamos a la función printAllMetaDocx')\n", (7360, 7404), False, 'import logging\n'), ((7421, 7462), 'metadata.printAllMetaDocx', 'metadata.printAllMetaDocx', (['ruta', 'metapath'], {}), '(ruta, metapath)\n', (7446, 7462), False, 'import metadata\n'), ((7722, 7777), 'logging.info', 'logging.info', (['"""Ingresamos a la función printOneMetaMp3"""'], {}), "('Ingresamos a la función printOneMetaMp3')\n", (7734, 7777), False, 'import logging\n'), ((7794, 7834), 'metadata.printOneMetaMp3', 'metadata.printOneMetaMp3', (['ruta', 'metapath'], {}), '(ruta, metapath)\n', (7818, 7834), False, 'import metadata\n'), ((7869, 7924), 'logging.info', 'logging.info', (['"""Ingresamos a la función printAllMetaMp3"""'], {}), "('Ingresamos a la función printAllMetaMp3')\n", (7881, 7924), False, 'import logging\n'), ((7941, 7981), 'metadata.printAllMetaMp3', 'metadata.printAllMetaMp3', (['ruta', 'metapath'], {}), '(ruta, metapath)\n', (7965, 7981), False, 'import metadata\n'), ((8815, 8854), 'logging.info', 'logging.info', (['"""Si el "modo" es: emails"""'], {}), '(\'Si el "modo" es: emails\')\n', (8827, 8854), False, 'import logging\n'), ((8873, 8900), 'webscraping.find_mails', 'webscraping.find_mails', (['url'], {}), '(url)\n', (8895, 8900), False, 'import webscraping\n'), ((9203, 9230), 'logging.info', 'logging.info', (['"""Se buscará:"""'], {}), "('Se buscará:')\n", (9215, 9230), False, 'import logging\n'), ((9282, 9304), 'logging.info', 'logging.info', (['busqueda'], {}), '(busqueda)\n', (9294, 9304), False, 'import logging\n'), ((9317, 9354), 'webscraping.busqueda_google', 'webscraping.busqueda_google', (['busqueda'], {}), '(busqueda)\n', (9344, 9354), False, 'import webscraping\n'), ((9381, 9446), 'logging.info', 'logging.info', (['"""Ninguna opción es valida para hacer Web Scrapping"""'], {}), "('Ninguna opción es valida para hacer Web Scrapping')\n", (9393, 9446), False, 'import logging\n'), ((9708, 9719), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (9717, 9719), False, 'import os, time\n'), ((8949, 8985), 'logging.info', 'logging.info', (['"""Si el "modo" es: pdf"""'], {}), '(\'Si el "modo" es: pdf\')\n', (8961, 8985), False, 'import logging\n'), ((9004, 9035), 'webscraping.descargar_pdfs', 'webscraping.descargar_pdfs', (['url'], {}), '(url)\n', (9030, 9035), False, 'import webscraping\n'), ((9070, 9106), 'logging.info', 'logging.info', (['"""Si el "modo" es: img"""'], {}), '(\'Si el "modo" es: img\')\n', (9082, 9106), False, 'import logging\n'), ((9125, 9157), 'webscraping.download_images', 'webscraping.download_images', (['url'], {}), '(url)\n', (9152, 9157), False, 'import webscraping\n')] |
#import requests
import json
import os
import mariadb
import logging
from dotenv import load_dotenv
class status(object):
def __init__(self):
load_dotenv()
self.logger = logging.getLogger('prometeo.status.status_webapp')
self.logger.debug('creating an instance of status')
def get_allstatus(self):
print("get_allstatus - entro en la funcion")
try:
conn = mariadb.connect(
user = os.getenv("MARIADB_USERNAME"),
password = os.getenv("<PASSWORD>"),
host = os.getenv("MARIADB_HOST"),
database = "prometeo",
port = int(os.getenv("MARIADB_PORT")))
cursor = conn.cursor()
print("get_allstatus - llamada a sql")
cursor.callproc('sp_select_all_status')
data = cursor.fetchall()
if len(data) > 0:
print("get_allstatus - Hay informacion")
for i in data:
print(i)
return(data)
else:
print("get_allstatus - NO HAY INFORMACION")
return None
except Exception as e:
print("get_allstatus - Estoy en la excepcion")
return None
finally:
cursor.close()
conn.close()
| [
"logging.getLogger",
"os.getenv",
"dotenv.load_dotenv"
] | [((157, 170), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (168, 170), False, 'from dotenv import load_dotenv\n'), ((193, 243), 'logging.getLogger', 'logging.getLogger', (['"""prometeo.status.status_webapp"""'], {}), "('prometeo.status.status_webapp')\n", (210, 243), False, 'import logging\n'), ((460, 489), 'os.getenv', 'os.getenv', (['"""MARIADB_USERNAME"""'], {}), "('MARIADB_USERNAME')\n", (469, 489), False, 'import os\n'), ((518, 541), 'os.getenv', 'os.getenv', (['"""<PASSWORD>"""'], {}), "('<PASSWORD>')\n", (527, 541), False, 'import os\n'), ((566, 591), 'os.getenv', 'os.getenv', (['"""MARIADB_HOST"""'], {}), "('MARIADB_HOST')\n", (575, 591), False, 'import os\n'), ((659, 684), 'os.getenv', 'os.getenv', (['"""MARIADB_PORT"""'], {}), "('MARIADB_PORT')\n", (668, 684), False, 'import os\n')] |
from apscheduler.schedulers.blocking import BlockingScheduler
from Todoist import TodoIstAPI
from Notion import NotionAPI
from Gcal import GCalAPI
import json
import os
from Main import run_sync
def create_notion_api():
notion_config = json.loads(os.environ['notion_config'])
notion = NotionAPI(os.environ['tz'], notion_config)
return notion
def create_todoist_api():
todoist = TodoIstAPI(os.environ['TODOIST_TOKEN'])
return todoist
def create_gcal_api():
gcal_config = json.loads(os.environ['gcal_config'])
gcal = GCalAPI(os.environ['tz'], gcal_config)
return gcal
if __name__ == '__main__':
notion = create_notion_api()
if os.environ['SYNC_TODOIST'] == "True" and os.environ['SYNC_GCAL'] == "True":
todoist = create_todoist_api()
gcal = create_gcal_api()
args = [notion, todoist, gcal]
elif os.environ['SYNC_TODOIST'] == "True":
todoist = create_todoist_api()
args = [notion, todoist, None]
elif os.environ['SYNC_GCAL'] == "True":
gcal = create_gcal_api()
args = [notion, None, gcal]
# Create an instance of scheduler and add function.
scheduler = BlockingScheduler()
scheduler.add_job(run_sync, 'interval', seconds=90, args=args)
scheduler.start()
| [
"Gcal.GCalAPI",
"json.loads",
"Todoist.TodoIstAPI",
"apscheduler.schedulers.blocking.BlockingScheduler",
"Notion.NotionAPI"
] | [((242, 281), 'json.loads', 'json.loads', (["os.environ['notion_config']"], {}), "(os.environ['notion_config'])\n", (252, 281), False, 'import json\n'), ((295, 337), 'Notion.NotionAPI', 'NotionAPI', (["os.environ['tz']", 'notion_config'], {}), "(os.environ['tz'], notion_config)\n", (304, 337), False, 'from Notion import NotionAPI\n'), ((397, 436), 'Todoist.TodoIstAPI', 'TodoIstAPI', (["os.environ['TODOIST_TOKEN']"], {}), "(os.environ['TODOIST_TOKEN'])\n", (407, 436), False, 'from Todoist import TodoIstAPI\n'), ((498, 535), 'json.loads', 'json.loads', (["os.environ['gcal_config']"], {}), "(os.environ['gcal_config'])\n", (508, 535), False, 'import json\n'), ((547, 585), 'Gcal.GCalAPI', 'GCalAPI', (["os.environ['tz']", 'gcal_config'], {}), "(os.environ['tz'], gcal_config)\n", (554, 585), False, 'from Gcal import GCalAPI\n'), ((1170, 1189), 'apscheduler.schedulers.blocking.BlockingScheduler', 'BlockingScheduler', ([], {}), '()\n', (1187, 1189), False, 'from apscheduler.schedulers.blocking import BlockingScheduler\n')] |
# Copyright (C) 2010 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyjamas import Factory
from pyjamas import DOM
from pyjamas.dnd.DNDHelper import dndHelper
from pyjamas.ui.Widget import Widget
from pyjamas.ui.DropHandler import DropHandler
import pyjd
class DropWidget(object):
"""
Mix-in class for a drop-target widget
"""
pass
class Html5DropWidget(Widget, DropHandler):
def __init__(self, **kw):
if (not hasattr(self, 'attached')) or kw:
Widget.__init__(self, **kw)
DropHandler.__init__(self)
self.addDropListener(self)
class EmulatedDropWidget(Html5DropWidget):
def __init__(self, **kw):
Html5DropWidget.__init__(self, **kw)
dndHelper.registerTarget(self)
def init(is_native=None):
global DropWidget
if is_native is None:
html5_dnd = hasattr(DOM.createElement('span'), 'draggable')
else:
html5_dnd = is_native
if html5_dnd:
DropWidget = Html5DropWidget
else:
DropWidget = EmulatedDropWidget
if pyjd.is_desktop:
init(pyjd.native_dnd)
else:
init(None)
Factory.registerClass('pyjamas.ui.DropWidget', 'DropWidget', DropWidget)
| [
"pyjamas.ui.Widget.Widget.__init__",
"pyjamas.ui.DropHandler.DropHandler.__init__",
"pyjamas.DOM.createElement",
"pyjamas.Factory.registerClass",
"pyjamas.dnd.DNDHelper.dndHelper.registerTarget"
] | [((1615, 1687), 'pyjamas.Factory.registerClass', 'Factory.registerClass', (['"""pyjamas.ui.DropWidget"""', '"""DropWidget"""', 'DropWidget'], {}), "('pyjamas.ui.DropWidget', 'DropWidget', DropWidget)\n", (1636, 1687), False, 'from pyjamas import Factory\n'), ((1036, 1062), 'pyjamas.ui.DropHandler.DropHandler.__init__', 'DropHandler.__init__', (['self'], {}), '(self)\n', (1056, 1062), False, 'from pyjamas.ui.DropHandler import DropHandler\n'), ((1226, 1256), 'pyjamas.dnd.DNDHelper.dndHelper.registerTarget', 'dndHelper.registerTarget', (['self'], {}), '(self)\n', (1250, 1256), False, 'from pyjamas.dnd.DNDHelper import dndHelper\n'), ((1000, 1027), 'pyjamas.ui.Widget.Widget.__init__', 'Widget.__init__', (['self'], {}), '(self, **kw)\n', (1015, 1027), False, 'from pyjamas.ui.Widget import Widget\n'), ((1361, 1386), 'pyjamas.DOM.createElement', 'DOM.createElement', (['"""span"""'], {}), "('span')\n", (1378, 1386), False, 'from pyjamas import DOM\n')] |
import sys
from kivy.app import App
from kivy.support import install_gobject_iteration
from kivy.lang import Builder
from kivy.core.window import Window
from kivy.config import Config
from gi.repository import LightDM
kv = '''
FloatLayout:
username_spinner: username_spinner
session_spinner: session_spinner
info_label: info_label
AnchorLayout:
BoxLayout:
size_hint: None, None
size: 800, 280
info_label: info_label
orientation: 'vertical'
GridLayout:
cols: 2
spacing: 5
Label:
text: "Session"
haling: 'middle'
valing: 'left'
text_size: self.size
font_size: 40
size_hint_x: 0.4
Spinner:
id: session_spinner
font_size: 40
text: self.values[0] if self.values else ""
Label:
text: "Username"
haling: 'middle'
valing: 'left'
text_size: self.size
font_size: 40
size_hint_x: 0.4
Spinner:
id: username_spinner
font_size: 40
text: self.values[0] if self.values else ""
Label:
text: "Password"
haling: 'middle'
valing: 'left'
text_size: self.size
font_size: 40
size_hint_x: 0.4
TextInput:
id: password_input
text: ""
password: <PASSWORD>
font_size: 40
multiline: False
background_normal: 'images/textinput.png'
background_active: 'images/textinput-active.png'
on_text_validate:
login_button.trigger_action()
Label:
id: info_label
size_hint_y: None
height: 30
color: 1,0,0,1
Button:
id: login_button
text: "Login"
size_hint_y: 0.3
on_press: app.login(username_spinner.text, password_input.text, session_spinner.text)
Image:
source: 'images/kivy_logo.png'
size: 183,120
pos: (self.parent.width-self.width)/2, 50
size_hint: None, None
'''
class GreeterApp(App):
def __init__(self, **kwargs):
super(GreeterApp, self).__init__(**kwargs)
self.password = ""
self.session = ""
# Connect to lightDM
install_gobject_iteration()
self.greeter = LightDM.Greeter()
self.greeter.connect("authentication-complete", self.authentication_complete_cb)
self.greeter.connect("show-prompt", self.show_prompt_cb)
self.greeter.connect_sync()
# Get all available sessions
available_sessions = []
for sess in LightDM.get_sessions():
available_sessions.append(LightDM.Session.get_key(sess))
# Get all available users
available_users = []
inst = LightDM.UserList.get_instance()
for user in LightDM.UserList.get_users(inst):
user_name = LightDM.User.get_name(user)
available_users.append(user_name)
self.root_widget = Builder.load_string(kv)
self.root_widget.username_spinner.values = available_users
self.root_widget.session_spinner.values = available_sessions
def build(self):
return self.root_widget
def login(self, username, password, session):
self.password = password
self.session = session
print >> sys.stderr, "Initial entry of username, send it to LightDM"
self.greeter.authenticate(username)
def show_prompt_cb(self, greeter, text, promptType):
print >> sys.stderr, "prompt type: " + str(promptType) + str(text)
if greeter.get_in_authentication():
greeter.respond(self.password)
def authentication_complete_cb(self, greeter):
if greeter.get_is_authenticated():
if not greeter.start_session_sync(self.session):
self.root_widget.info_label.text = "Error while starting session %s" % self.session
else:
print >> sys.stderr, "AUTH COMPLETED"
self.root_widget.info_label.text = ":-)"
self.stop()
else:
print >> sys.stderr, "Login failed"
self.root_widget.info_label.text = "Wrong credentials :-("
if __name__ == '__main__':
# set keyboard to onscreen
Config.set('kivy', 'keyboard_mode', 'systemandmulti')
Config.write()
Window.clearcolor = (0.4274509804, 0.4274509804, 0.4274509804, 1)
GreeterApp().run()
| [
"kivy.config.Config.set",
"kivy.config.Config.write",
"gi.repository.LightDM.Session.get_key",
"kivy.support.install_gobject_iteration",
"gi.repository.LightDM.UserList.get_users",
"kivy.lang.Builder.load_string",
"gi.repository.LightDM.Greeter",
"gi.repository.LightDM.UserList.get_instance",
"gi.re... | [((4841, 4894), 'kivy.config.Config.set', 'Config.set', (['"""kivy"""', '"""keyboard_mode"""', '"""systemandmulti"""'], {}), "('kivy', 'keyboard_mode', 'systemandmulti')\n", (4851, 4894), False, 'from kivy.config import Config\n'), ((4899, 4913), 'kivy.config.Config.write', 'Config.write', ([], {}), '()\n', (4911, 4913), False, 'from kivy.config import Config\n'), ((2804, 2831), 'kivy.support.install_gobject_iteration', 'install_gobject_iteration', ([], {}), '()\n', (2829, 2831), False, 'from kivy.support import install_gobject_iteration\n'), ((2855, 2872), 'gi.repository.LightDM.Greeter', 'LightDM.Greeter', ([], {}), '()\n', (2870, 2872), False, 'from gi.repository import LightDM\n'), ((3161, 3183), 'gi.repository.LightDM.get_sessions', 'LightDM.get_sessions', ([], {}), '()\n', (3181, 3183), False, 'from gi.repository import LightDM\n'), ((3341, 3372), 'gi.repository.LightDM.UserList.get_instance', 'LightDM.UserList.get_instance', ([], {}), '()\n', (3370, 3372), False, 'from gi.repository import LightDM\n'), ((3393, 3425), 'gi.repository.LightDM.UserList.get_users', 'LightDM.UserList.get_users', (['inst'], {}), '(inst)\n', (3419, 3425), False, 'from gi.repository import LightDM\n'), ((3553, 3576), 'kivy.lang.Builder.load_string', 'Builder.load_string', (['kv'], {}), '(kv)\n', (3572, 3576), False, 'from kivy.lang import Builder\n'), ((3451, 3478), 'gi.repository.LightDM.User.get_name', 'LightDM.User.get_name', (['user'], {}), '(user)\n', (3472, 3478), False, 'from gi.repository import LightDM\n'), ((3223, 3252), 'gi.repository.LightDM.Session.get_key', 'LightDM.Session.get_key', (['sess'], {}), '(sess)\n', (3246, 3252), False, 'from gi.repository import LightDM\n')] |
import pytest
import os
@pytest.mark.examples
@pytest.mark.cli_snippets
def test_cli_versioning_snippets(cli_validator):
cli_validator(r'''
.. EXAMPLE-BLOCK-1-START
.. code-block:: bash
$ datafs create my_archive \
> --my_metadata_field 'useful metadata'
created versioned archive <DataArchive local://my_archive>
.. EXAMPLE-BLOCK-1-END
Snippet 2
.. EXAMPLE-BLOCK-2-START
.. code-block:: bash
$ datafs update my_archive --string \
> 'barba crescit caput nescit' # doctest: +NORMALIZE_WHITESPACE
uploaded data to <DataArchive local://my_archive>. new version 0.0.1
created.
.. EXAMPLE-BLOCK-2-END
Snippet 3
.. EXAMPLE-BLOCK-3-START
.. code-block:: bash
$ datafs update my_archive --bumpversion patch --string \
> 'Aliquando et insanire iucundum est' # doctest: +NORMALIZE_WHITESPACE
uploaded data to <DataArchive local://my_archive>. version bumped 0.0.1 -->
0.0.2.
$ datafs update my_archive --bumpversion minor --string \
> 'animum debes mutare non caelum' # doctest: +NORMALIZE_WHITESPACE
uploaded data to <DataArchive local://my_archive>. version bumped 0.0.2 -->
0.1.
.. EXAMPLE-BLOCK-3-END
Snippet 4
.. EXAMPLE-BLOCK-4-START
.. code-block:: bash
$ datafs versions my_archive
['0.0.1', '0.0.2', '0.1']
.. EXAMPLE-BLOCK-4-END
Snippet 5
.. EXAMPLE-BLOCK-5-START
.. code-block:: bash
$ datafs download my_archive my_archive_versioned.txt --version 0.0.2
downloaded v0.0.2 to my_archive_versioned.txt
.. EXAMPLE-BLOCK-5-END
cleanup:
.. code-block:: bash
$ datafs delete my_archive
deleted archive <DataArchive local://my_archive>
''')
os.remove('my_archive_versioned.txt')
| [
"os.remove"
] | [((1679, 1716), 'os.remove', 'os.remove', (['"""my_archive_versioned.txt"""'], {}), "('my_archive_versioned.txt')\n", (1688, 1716), False, 'import os\n')] |
import unittest
import pandas as pd
import random as rd
from silk_ml.features import split_classes
class TestFeatures(unittest.TestCase):
def test_split(self):
x = {
'label1': [rd.random() + 5 for _ in range(100)],
'label2': [rd.random() * 3 - 1 for _ in range(100)],
'label3': [round(rd.random()) for _ in range(100)],
}
X = pd.DataFrame(x)
Y = pd.Series([round(rd.random()) for _ in range(100)])
pos1, neg1 = split_classes(X, Y, 'label1')
pos2, neg2 = split_classes(X, Y, 'label2')
# Check that the sum of the values are equivalent to the amount
self.assertEqual(len(pos1) + len(neg1), 100)
self.assertEqual(len(pos2) + len(neg2), 100)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"random.random",
"silk_ml.features.split_classes",
"pandas.DataFrame"
] | [((788, 803), 'unittest.main', 'unittest.main', ([], {}), '()\n', (801, 803), False, 'import unittest\n'), ((394, 409), 'pandas.DataFrame', 'pd.DataFrame', (['x'], {}), '(x)\n', (406, 409), True, 'import pandas as pd\n'), ((495, 524), 'silk_ml.features.split_classes', 'split_classes', (['X', 'Y', '"""label1"""'], {}), "(X, Y, 'label1')\n", (508, 524), False, 'from silk_ml.features import split_classes\n'), ((546, 575), 'silk_ml.features.split_classes', 'split_classes', (['X', 'Y', '"""label2"""'], {}), "(X, Y, 'label2')\n", (559, 575), False, 'from silk_ml.features import split_classes\n'), ((205, 216), 'random.random', 'rd.random', ([], {}), '()\n', (214, 216), True, 'import random as rd\n'), ((337, 348), 'random.random', 'rd.random', ([], {}), '()\n', (346, 348), True, 'import random as rd\n'), ((439, 450), 'random.random', 'rd.random', ([], {}), '()\n', (448, 450), True, 'import random as rd\n'), ((266, 277), 'random.random', 'rd.random', ([], {}), '()\n', (275, 277), True, 'import random as rd\n')] |
#This file will generate functions in polynomials
import numpy as np
import random
import matplotlib.pyplot as plt
class generateFunctions():
#the initial function taking 4 inputs
def __init__(self, x_vector, high_degree_vector, rangeLow, rangeHigh):
#the input processing
self.x_vector = x_vector
self.high_degree_vector = high_degree_vector
self.rangeLow = rangeLow
self.rangeHigh = rangeHigh
self.functionString = ""
def generate(self):
#allowed values for the highest degree and others can be zeros
allowed_values = list(range(self.rangeLow,self.rangeHigh))
allowed_values.remove(0)
for i in range(len(self.x_vector)):
highestVar = self.high_degree_vector[i]
ppar = np.random.randint(low=self.rangeLow,high=self.rangeHigh,size=(highestVar+1))
#make sure the highest is not zero coefficient
if ppar[0] == 0:
ppar[0] = random.choice(allowed_values)
for j in range(len(ppar)):
add = ""
if ppar[j] != 0:
add = str(ppar[j])
if (highestVar-j) != 0:
add = add +"*"+self.x_vector[i]
if(highestVar-j)!=1:
add = add +"^"+str(highestVar-j)
if ppar[j] > 0:
add = "+" + add
self.functionString = self.functionString + add
return self.functionString
#p = generateFunctions()
#function = p.generate()
#print(function)
| [
"numpy.random.randint",
"random.choice"
] | [((710, 788), 'numpy.random.randint', 'np.random.randint', ([], {'low': 'self.rangeLow', 'high': 'self.rangeHigh', 'size': '(highestVar + 1)'}), '(low=self.rangeLow, high=self.rangeHigh, size=highestVar + 1)\n', (727, 788), True, 'import numpy as np\n'), ((872, 901), 'random.choice', 'random.choice', (['allowed_values'], {}), '(allowed_values)\n', (885, 901), False, 'import random\n')] |
from corehq.apps.commtrack.helpers import make_supply_point
from corehq.apps.commtrack.tests.util import CommTrackTest, make_loc
from corehq.apps.commtrack.const import DAYS_IN_MONTH
from corehq.apps.locations.models import Location
from corehq.apps.locations.bulk import import_location
from mock import patch
from corehq.apps.consumption.shortcuts import get_default_consumption
from corehq.apps.commtrack.models import Product
class LocationImportTest(CommTrackTest):
def setUp(self):
# set up a couple locations that make tests a little more DRY
self.test_state = make_loc('sillyparentstate', type='state')
self.test_village = make_loc('sillyparentvillage', type='village')
return super(LocationImportTest, self).setUp()
def names_of_locs(self):
return [loc.name for loc in Location.by_domain(self.domain.name)]
def test_import_new_top_level_location(self):
data = {
'name': 'importedloc'
}
import_location(self.domain.name, 'state', data)
self.assertTrue(data['name'] in self.names_of_locs())
def test_import_with_existing_parent_by_site_code(self):
data = {
'name': 'importedloc',
'parent_site_code': self.test_state.site_code
}
result = import_location(self.domain.name, 'district', data)
if result['id'] is None:
self.fail('import failed with error: %s' % result['message'])
self.assertTrue(data['name'] in self.names_of_locs())
new_loc = Location.get(result['id'])
self.assertEqual(new_loc.parent_id, self.test_state._id)
def test_id_of_invalid_parent_type(self):
# state can't have outlet as child
data = {
'name': 'oops',
'parent_site_code': self.test_state.site_code
}
original_count = len(list(Location.by_domain(self.domain.name)))
result = import_location(self.domain.name, 'village', data)
self.assertEqual(result['id'], None)
self.assertEqual(len(list(Location.by_domain(self.domain.name))), original_count)
self.assertTrue('Invalid parent type' in result['message'])
def test_invalid_parent_site_code(self):
data = {
'name': 'oops',
'parent_site_code': 'banana'
}
result = import_location(self.domain.name, 'district', data)
self.assertTrue(
'Parent with site code banana does not exist' in result['message'],
result['message']
)
def test_invalid_parent_domain(self):
parent = make_loc('someparent', domain='notright', type='village')
data = {
'name': '<NAME>',
'outlet_type': 'SHG',
'site_code': 'wat',
'parent_site_code': parent.site_code,
}
original_count = len(list(Location.by_domain(self.domain.name)))
result = import_location(self.domain.name, 'outlet', data)
self.assertEqual(result['id'], None)
self.assertEqual(len(list(Location.by_domain(self.domain.name))), original_count)
self.assertTrue('does not exist in this project' in result['message'])
def test_change_parent(self):
parent = make_loc('originalparent', type='village')
existing = make_loc('existingloc', type='outlet', parent=parent)
new_parent = make_loc('new parent', type='village')
self.assertNotEqual(parent._id, new_parent._id)
data = {
'site_code': existing.site_code,
'name': existing.name,
'outlet_type': 'SHG',
'parent_site_code': new_parent.site_code,
}
result = import_location(self.domain.name, 'outlet', data)
new_loc = Location.get(result['id'])
self.assertEqual(existing._id, new_loc._id)
self.assertEqual(new_loc.parent_id, new_parent._id)
def test_change_to_invalid_parent(self):
parent = make_loc('original parent', type='village')
existing = make_loc('existingloc1', type='outlet', parent=parent)
new_parent = make_loc('new parent', type='state')
data = {
'site_code': existing.site_code,
'name': existing.name,
'outlet_type': 'SHG',
'parent_site_code': new_parent.site_code,
}
result = import_location(self.domain.name, 'outlet', data)
self.assertEqual(None, result['id'])
self.assertTrue('Invalid parent type' in result['message'])
new_loc = Location.get(existing._id)
self.assertEqual(existing._id, new_loc._id)
self.assertEqual(new_loc.parent_id, parent._id)
def test_updating_existing_location_properties(self):
existing = make_loc('existingloc2', type='state', domain=self.domain.name)
existing.save()
data = {
'site_code': existing.site_code,
'name': 'new_name',
}
self.assertNotEqual(existing.name, data['name'])
result = import_location(self.domain.name, 'state', data)
loc_id = result.get('id', None)
self.assertIsNotNone(loc_id, result['message'])
new_loc = Location.get(loc_id)
self.assertEqual(existing._id, loc_id)
self.assertEqual(new_loc.name, data['name'])
def test_given_id_matches_type(self):
existing = make_loc('existingloc', type='state')
data = {
'site_code': existing.site_code,
'name': 'new_name',
}
result = import_location(self.domain.name, 'outlet', data)
self.assertEqual(result['id'], None)
self.assertTrue('Existing location type error' in result['message'])
def test_shouldnt_save_if_no_changes(self):
existing = make_loc('existingloc', type='outlet', parent=self.test_village)
existing.site_code = 'wat'
existing.outlet_type = 'SHG'
existing.save()
data = {
'site_code': existing.site_code,
'name': existing.name,
'outlet_type': 'SHG',
}
with patch('corehq.apps.locations.forms.LocationForm.save') as save:
result = import_location(self.domain.name, 'outlet', data)
self.assertEqual(save.call_count, 0)
self.assertEqual(result['id'], existing._id)
def test_should_still_save_if_name_changes(self):
# name isn't a dynamic property so should test these still
# get updated alone
existing = make_loc('existingloc', type='outlet', parent=self.test_village)
existing.site_code = 'wat'
existing.outlet_type = 'SHG'
existing.save()
data = {
'site_code': existing.site_code,
'name': 'newname',
'outlet_type': 'SHG',
}
with patch('corehq.apps.locations.forms.LocationForm.save') as save:
result = import_location(self.domain.name, 'outlet', data)
self.assertEqual(save.call_count, 1)
# id isn't accurate because of the mock, but want to make
# sure we didn't actually return with None
self.assertTrue(result['id'] is not None)
def test_should_import_consumption(self):
existing = make_loc('existingloc', type='state')
sp = make_supply_point(self.loc.domain, existing)
data = {
'site_code': existing.site_code,
'name': 'existingloc',
'default_pp': 77
}
import_location(self.domain.name, 'state', data)
self.assertEqual(
float(get_default_consumption(
self.domain.name,
Product.get_by_code(self.domain.name, 'pp')._id,
'state',
sp._id,
)),
77 / DAYS_IN_MONTH
)
def test_import_coordinates(self):
data = {
'name': 'importedloc',
'latitude': 55,
'longitude': -55,
}
loc_id = import_location(self.domain.name, 'state', data)['id']
loc = Location.get(loc_id)
self.assertEqual(data['latitude'], loc.latitude)
self.assertEqual(data['longitude'], loc.longitude)
| [
"mock.patch",
"corehq.apps.locations.models.Location.get",
"corehq.apps.locations.bulk.import_location",
"corehq.apps.commtrack.models.Product.get_by_code",
"corehq.apps.locations.models.Location.by_domain",
"corehq.apps.commtrack.tests.util.make_loc",
"corehq.apps.commtrack.helpers.make_supply_point"
] | [((590, 632), 'corehq.apps.commtrack.tests.util.make_loc', 'make_loc', (['"""sillyparentstate"""'], {'type': '"""state"""'}), "('sillyparentstate', type='state')\n", (598, 632), False, 'from corehq.apps.commtrack.tests.util import CommTrackTest, make_loc\n'), ((661, 707), 'corehq.apps.commtrack.tests.util.make_loc', 'make_loc', (['"""sillyparentvillage"""'], {'type': '"""village"""'}), "('sillyparentvillage', type='village')\n", (669, 707), False, 'from corehq.apps.commtrack.tests.util import CommTrackTest, make_loc\n'), ((989, 1037), 'corehq.apps.locations.bulk.import_location', 'import_location', (['self.domain.name', '"""state"""', 'data'], {}), "(self.domain.name, 'state', data)\n", (1004, 1037), False, 'from corehq.apps.locations.bulk import import_location\n'), ((1301, 1352), 'corehq.apps.locations.bulk.import_location', 'import_location', (['self.domain.name', '"""district"""', 'data'], {}), "(self.domain.name, 'district', data)\n", (1316, 1352), False, 'from corehq.apps.locations.bulk import import_location\n'), ((1542, 1568), 'corehq.apps.locations.models.Location.get', 'Location.get', (["result['id']"], {}), "(result['id'])\n", (1554, 1568), False, 'from corehq.apps.locations.models import Location\n'), ((1929, 1979), 'corehq.apps.locations.bulk.import_location', 'import_location', (['self.domain.name', '"""village"""', 'data'], {}), "(self.domain.name, 'village', data)\n", (1944, 1979), False, 'from corehq.apps.locations.bulk import import_location\n'), ((2344, 2395), 'corehq.apps.locations.bulk.import_location', 'import_location', (['self.domain.name', '"""district"""', 'data'], {}), "(self.domain.name, 'district', data)\n", (2359, 2395), False, 'from corehq.apps.locations.bulk import import_location\n'), ((2602, 2659), 'corehq.apps.commtrack.tests.util.make_loc', 'make_loc', (['"""someparent"""'], {'domain': '"""notright"""', 'type': '"""village"""'}), "('someparent', domain='notright', type='village')\n", (2610, 2659), False, 'from corehq.apps.commtrack.tests.util import CommTrackTest, make_loc\n'), ((2925, 2974), 'corehq.apps.locations.bulk.import_location', 'import_location', (['self.domain.name', '"""outlet"""', 'data'], {}), "(self.domain.name, 'outlet', data)\n", (2940, 2974), False, 'from corehq.apps.locations.bulk import import_location\n'), ((3241, 3283), 'corehq.apps.commtrack.tests.util.make_loc', 'make_loc', (['"""originalparent"""'], {'type': '"""village"""'}), "('originalparent', type='village')\n", (3249, 3283), False, 'from corehq.apps.commtrack.tests.util import CommTrackTest, make_loc\n'), ((3303, 3356), 'corehq.apps.commtrack.tests.util.make_loc', 'make_loc', (['"""existingloc"""'], {'type': '"""outlet"""', 'parent': 'parent'}), "('existingloc', type='outlet', parent=parent)\n", (3311, 3356), False, 'from corehq.apps.commtrack.tests.util import CommTrackTest, make_loc\n'), ((3379, 3417), 'corehq.apps.commtrack.tests.util.make_loc', 'make_loc', (['"""new parent"""'], {'type': '"""village"""'}), "('new parent', type='village')\n", (3387, 3417), False, 'from corehq.apps.commtrack.tests.util import CommTrackTest, make_loc\n'), ((3687, 3736), 'corehq.apps.locations.bulk.import_location', 'import_location', (['self.domain.name', '"""outlet"""', 'data'], {}), "(self.domain.name, 'outlet', data)\n", (3702, 3736), False, 'from corehq.apps.locations.bulk import import_location\n'), ((3755, 3781), 'corehq.apps.locations.models.Location.get', 'Location.get', (["result['id']"], {}), "(result['id'])\n", (3767, 3781), False, 'from corehq.apps.locations.models import Location\n'), ((3957, 4000), 'corehq.apps.commtrack.tests.util.make_loc', 'make_loc', (['"""original parent"""'], {'type': '"""village"""'}), "('original parent', type='village')\n", (3965, 4000), False, 'from corehq.apps.commtrack.tests.util import CommTrackTest, make_loc\n'), ((4020, 4074), 'corehq.apps.commtrack.tests.util.make_loc', 'make_loc', (['"""existingloc1"""'], {'type': '"""outlet"""', 'parent': 'parent'}), "('existingloc1', type='outlet', parent=parent)\n", (4028, 4074), False, 'from corehq.apps.commtrack.tests.util import CommTrackTest, make_loc\n'), ((4097, 4133), 'corehq.apps.commtrack.tests.util.make_loc', 'make_loc', (['"""new parent"""'], {'type': '"""state"""'}), "('new parent', type='state')\n", (4105, 4133), False, 'from corehq.apps.commtrack.tests.util import CommTrackTest, make_loc\n'), ((4347, 4396), 'corehq.apps.locations.bulk.import_location', 'import_location', (['self.domain.name', '"""outlet"""', 'data'], {}), "(self.domain.name, 'outlet', data)\n", (4362, 4396), False, 'from corehq.apps.locations.bulk import import_location\n'), ((4528, 4554), 'corehq.apps.locations.models.Location.get', 'Location.get', (['existing._id'], {}), '(existing._id)\n', (4540, 4554), False, 'from corehq.apps.locations.models import Location\n'), ((4741, 4804), 'corehq.apps.commtrack.tests.util.make_loc', 'make_loc', (['"""existingloc2"""'], {'type': '"""state"""', 'domain': 'self.domain.name'}), "('existingloc2', type='state', domain=self.domain.name)\n", (4749, 4804), False, 'from corehq.apps.commtrack.tests.util import CommTrackTest, make_loc\n'), ((5010, 5058), 'corehq.apps.locations.bulk.import_location', 'import_location', (['self.domain.name', '"""state"""', 'data'], {}), "(self.domain.name, 'state', data)\n", (5025, 5058), False, 'from corehq.apps.locations.bulk import import_location\n'), ((5173, 5193), 'corehq.apps.locations.models.Location.get', 'Location.get', (['loc_id'], {}), '(loc_id)\n', (5185, 5193), False, 'from corehq.apps.locations.models import Location\n'), ((5357, 5394), 'corehq.apps.commtrack.tests.util.make_loc', 'make_loc', (['"""existingloc"""'], {'type': '"""state"""'}), "('existingloc', type='state')\n", (5365, 5394), False, 'from corehq.apps.commtrack.tests.util import CommTrackTest, make_loc\n'), ((5518, 5567), 'corehq.apps.locations.bulk.import_location', 'import_location', (['self.domain.name', '"""outlet"""', 'data'], {}), "(self.domain.name, 'outlet', data)\n", (5533, 5567), False, 'from corehq.apps.locations.bulk import import_location\n'), ((5759, 5823), 'corehq.apps.commtrack.tests.util.make_loc', 'make_loc', (['"""existingloc"""'], {'type': '"""outlet"""', 'parent': 'self.test_village'}), "('existingloc', type='outlet', parent=self.test_village)\n", (5767, 5823), False, 'from corehq.apps.commtrack.tests.util import CommTrackTest, make_loc\n'), ((6486, 6550), 'corehq.apps.commtrack.tests.util.make_loc', 'make_loc', (['"""existingloc"""'], {'type': '"""outlet"""', 'parent': 'self.test_village'}), "('existingloc', type='outlet', parent=self.test_village)\n", (6494, 6550), False, 'from corehq.apps.commtrack.tests.util import CommTrackTest, make_loc\n'), ((7228, 7265), 'corehq.apps.commtrack.tests.util.make_loc', 'make_loc', (['"""existingloc"""'], {'type': '"""state"""'}), "('existingloc', type='state')\n", (7236, 7265), False, 'from corehq.apps.commtrack.tests.util import CommTrackTest, make_loc\n'), ((7279, 7323), 'corehq.apps.commtrack.helpers.make_supply_point', 'make_supply_point', (['self.loc.domain', 'existing'], {}), '(self.loc.domain, existing)\n', (7296, 7323), False, 'from corehq.apps.commtrack.helpers import make_supply_point\n'), ((7470, 7518), 'corehq.apps.locations.bulk.import_location', 'import_location', (['self.domain.name', '"""state"""', 'data'], {}), "(self.domain.name, 'state', data)\n", (7485, 7518), False, 'from corehq.apps.locations.bulk import import_location\n'), ((8042, 8062), 'corehq.apps.locations.models.Location.get', 'Location.get', (['loc_id'], {}), '(loc_id)\n', (8054, 8062), False, 'from corehq.apps.locations.models import Location\n'), ((6076, 6130), 'mock.patch', 'patch', (['"""corehq.apps.locations.forms.LocationForm.save"""'], {}), "('corehq.apps.locations.forms.LocationForm.save')\n", (6081, 6130), False, 'from mock import patch\n'), ((6161, 6210), 'corehq.apps.locations.bulk.import_location', 'import_location', (['self.domain.name', '"""outlet"""', 'data'], {}), "(self.domain.name, 'outlet', data)\n", (6176, 6210), False, 'from corehq.apps.locations.bulk import import_location\n'), ((6799, 6853), 'mock.patch', 'patch', (['"""corehq.apps.locations.forms.LocationForm.save"""'], {}), "('corehq.apps.locations.forms.LocationForm.save')\n", (6804, 6853), False, 'from mock import patch\n'), ((6884, 6933), 'corehq.apps.locations.bulk.import_location', 'import_location', (['self.domain.name', '"""outlet"""', 'data'], {}), "(self.domain.name, 'outlet', data)\n", (6899, 6933), False, 'from corehq.apps.locations.bulk import import_location\n'), ((7972, 8020), 'corehq.apps.locations.bulk.import_location', 'import_location', (['self.domain.name', '"""state"""', 'data'], {}), "(self.domain.name, 'state', data)\n", (7987, 8020), False, 'from corehq.apps.locations.bulk import import_location\n'), ((830, 866), 'corehq.apps.locations.models.Location.by_domain', 'Location.by_domain', (['self.domain.name'], {}), '(self.domain.name)\n', (848, 866), False, 'from corehq.apps.locations.models import Location\n'), ((1872, 1908), 'corehq.apps.locations.models.Location.by_domain', 'Location.by_domain', (['self.domain.name'], {}), '(self.domain.name)\n', (1890, 1908), False, 'from corehq.apps.locations.models import Location\n'), ((2869, 2905), 'corehq.apps.locations.models.Location.by_domain', 'Location.by_domain', (['self.domain.name'], {}), '(self.domain.name)\n', (2887, 2905), False, 'from corehq.apps.locations.models import Location\n'), ((2060, 2096), 'corehq.apps.locations.models.Location.by_domain', 'Location.by_domain', (['self.domain.name'], {}), '(self.domain.name)\n', (2078, 2096), False, 'from corehq.apps.locations.models import Location\n'), ((3054, 3090), 'corehq.apps.locations.models.Location.by_domain', 'Location.by_domain', (['self.domain.name'], {}), '(self.domain.name)\n', (3072, 3090), False, 'from corehq.apps.locations.models import Location\n'), ((7639, 7682), 'corehq.apps.commtrack.models.Product.get_by_code', 'Product.get_by_code', (['self.domain.name', '"""pp"""'], {}), "(self.domain.name, 'pp')\n", (7658, 7682), False, 'from corehq.apps.commtrack.models import Product\n')] |
import asyncio
import pytest
from supriya.synthdefs import SynthDefFactory
from tloen.domain import Application, AudioEffect
@pytest.fixture
def synthdef_factory():
return (
SynthDefFactory()
.with_channel_count(2)
.with_input()
.with_signal_block(lambda builder, source, state: (source * -2) + 0.25)
.with_gate(0.01, 0.01)
.with_output(replacing=True)
)
@pytest.mark.asyncio
async def test_1(synthdef_factory):
"""
Remove one device
"""
application = Application()
context = await application.add_context()
track = await context.add_track()
device = await track.add_device(AudioEffect, synthdef=synthdef_factory)
await track.remove_devices(device)
assert list(track.devices) == []
assert device.application is None
assert device.graph_order == ()
assert device.parent is None
assert device.provider is None
@pytest.mark.asyncio
async def test_2(synthdef_factory):
"""
Remove two devices
"""
application = Application()
context = await application.add_context()
track = await context.add_track()
device_one = await track.add_device(AudioEffect, synthdef=synthdef_factory)
device_two = await track.add_device(AudioEffect, synthdef=synthdef_factory)
await track.remove_devices(device_one, device_two)
assert list(track.devices) == []
assert device_one.application is None
assert device_one.graph_order == ()
assert device_one.parent is None
assert device_one.provider is None
assert device_two.application is None
assert device_two.graph_order == ()
assert device_two.parent is None
assert device_two.provider is None
@pytest.mark.asyncio
async def test_3(synthdef_factory):
"""
Remove first device, leaving second untouched
"""
application = Application()
context = await application.add_context()
track = await context.add_track()
device_one = await track.add_device(AudioEffect, synthdef=synthdef_factory)
device_two = await track.add_device(AudioEffect, synthdef=synthdef_factory)
await track.remove_devices(device_one)
assert list(track.devices) == [device_two]
assert device_one.application is None
assert device_one.graph_order == ()
assert device_one.parent is None
assert device_one.provider is None
assert device_two.application is context.application
assert device_two.graph_order == (3, 0, 0, 0, 5, 0)
assert device_two.parent is track.devices
assert device_two.provider is None
@pytest.mark.asyncio
async def test_4(synthdef_factory):
"""
Boot, remove first device, leaving second untouched
"""
application = Application()
context = await application.add_context()
track = await context.add_track()
device_one = await track.add_device(AudioEffect, synthdef=synthdef_factory)
device_two = await track.add_device(AudioEffect, synthdef=synthdef_factory)
await application.boot()
with context.provider.server.osc_protocol.capture() as transcript:
await track.remove_devices(device_one)
await asyncio.sleep(0.1)
assert list(track.devices) == [device_two]
assert device_one.application is None
assert device_one.graph_order == ()
assert device_one.parent is None
assert device_one.provider is None
assert device_two.application is context.application
assert device_two.graph_order == (3, 0, 0, 0, 5, 0)
assert device_two.parent is track.devices
assert device_two.provider is context.provider
assert len(transcript.sent_messages) == 1
_, message = transcript.sent_messages[0]
assert message.to_list() == [None, [["/n_set", 1014, "gate", 0]]]
assert track.peak_levels == dict(
input=(0.0, 0.0), postfader=(0.25, 0.25), prefader=(0.25, 0.25)
)
assert context.master_track.peak_levels == dict(
input=(0.25, 0.25), postfader=(0.25, 0.25), prefader=(0.25, 0.25)
)
| [
"supriya.synthdefs.SynthDefFactory",
"tloen.domain.Application",
"asyncio.sleep"
] | [((530, 543), 'tloen.domain.Application', 'Application', ([], {}), '()\n', (541, 543), False, 'from tloen.domain import Application, AudioEffect\n'), ((1038, 1051), 'tloen.domain.Application', 'Application', ([], {}), '()\n', (1049, 1051), False, 'from tloen.domain import Application, AudioEffect\n'), ((1847, 1860), 'tloen.domain.Application', 'Application', ([], {}), '()\n', (1858, 1860), False, 'from tloen.domain import Application, AudioEffect\n'), ((2700, 2713), 'tloen.domain.Application', 'Application', ([], {}), '()\n', (2711, 2713), False, 'from tloen.domain import Application, AudioEffect\n'), ((3115, 3133), 'asyncio.sleep', 'asyncio.sleep', (['(0.1)'], {}), '(0.1)\n', (3128, 3133), False, 'import asyncio\n'), ((190, 207), 'supriya.synthdefs.SynthDefFactory', 'SynthDefFactory', ([], {}), '()\n', (205, 207), False, 'from supriya.synthdefs import SynthDefFactory\n')] |
#!/usr/bin/env python3
"""Create CESAR joblist.
According to predicted orthologous chains create CESAR jobs.
Merge them into joblists.
"""
import argparse
import os
import sys
import math
from collections import defaultdict
from datetime import datetime as dt
from re import finditer, IGNORECASE
import ctypes
from twobitreader import TwoBitFile
from modules.common import parts
from modules.common import split_in_n_lists
from modules.common import chain_extract_id
from modules.common import eprint
from modules.common import make_cds_track
from modules.common import die
__author__ = "<NAME>, 2020."
__version__ = "1.0"
__email__ = "<EMAIL>"
__credits__ = ["<NAME>", "<NAME>", "<NAME>"]
# 0 gene; 1 chains; 2 bed_file; 3 bdb_chain_file; 4 tDB; 5 qDB; 6 memlim gig;
LOCATION = os.path.dirname(__file__)
WRAPPER_ABSPATH = os.path.abspath(os.path.join(LOCATION, "CESAR_wrapper.py"))
WRAPPER_TEMPLATE = (
WRAPPER_ABSPATH
+ " {0} {1} {2} {3} {4} {5} --cesar_binary {6}"
+ " --uhq_flank {7} --memlim {8}"
)
CESAR_RUNNER = os.path.abspath(
os.path.join(LOCATION, "cesar_runner.py")
) # script that will run jobs
LONG_LOCI_FIELDS = {
"GGLOB",
"TRANS",
} # chain classes that could lead to very long query loci
BIGMEM_LIM = 1000 # mem limit for bigmem partition
REL_LENGTH_THR = 50
ABS_LENGTH_TRH = 1000000
EXTRA_MEM = 100000 # extra memory "just in case"
BIGMEM_JOBSNUM = 100 # TODO: make a parameter?
REF_LEN_THRESHOLD = 0.05 # if query length < 5% CDS then skip it
ASM_GAP_SIZE = 10
ASM_GAP_PATTERN = r"N{" + str(ASM_GAP_SIZE) + ",}"
M = "M"
L = "L"
ORTHOLOG = "ORTH"
PARALOG = "PARA"
TRANS = "TRANS"
PROJECTION = "PROJECTION"
TRANSCRIPT = "TRANSCRIPT"
# connect shared lib; define input and output data types
chain_coords_conv_lib_path = os.path.join(
LOCATION, "modules", "chain_coords_converter_slib.so"
)
ch_lib = ctypes.CDLL(chain_coords_conv_lib_path)
ch_lib.chain_coords_converter.argtypes = [
ctypes.c_char_p,
ctypes.c_int,
ctypes.c_int,
ctypes.POINTER(ctypes.c_char_p),
]
ch_lib.chain_coords_converter.restype = ctypes.POINTER(ctypes.c_char_p)
def parse_args():
"""Read args, check."""
app = argparse.ArgumentParser()
app.add_argument("orthologs_file", help="Output of the chain classifier.")
app.add_argument("bed_file", type=str, help="BED FILE")
app.add_argument("bdb_bed_file", type=str, help="BDB BED FILE")
app.add_argument("bdb_chain_file", type=str, help="BDB CHAIN FILE")
app.add_argument("tDB", type=str, help="target 2 bit")
app.add_argument("qDB", type=str, help="query 2 bit")
app.add_argument(
"--cesar_binary",
type=str,
default="cesar",
help="CESAR2.0 binary address, cesar as default.",
)
app.add_argument(
"--jobs_num",
type=int,
default=300,
help="Total number of cluster jobs, 300 is recommended."
" Resulting number may slightly vary in case of buckets "
"param usage due to round issues.",
)
app.add_argument(
"--buckets",
default="0",
help=""
"If you need to split the cluster jobs in different classes"
" according the memory consumption use this parameter. To do "
" that write comma-separated list of memory levels. For "
"example, --buckets 10,30 means that there are two classes of "
"jobs - consuming 10 and 30 gb. All jobs consuming more than 30gb "
"are ignored. Job names will be 'cesar_job_[job_number]_[memory_class]' "
"like cesar_job_9999_30 - meaning all tasks in this file require "
"no more that 30Gb. --buckets 0 means no separation.",
)
app.add_argument(
"--fields",
default=None,
help="Use those chains that are placed in these fields "
" in orthologs file. Comma-separated list. For example "
"PERF,GLOK - for perfect and good local chains.",
)
app.add_argument(
"--mask_stops",
"--ms",
action="store_true",
dest="mask_stops",
help="Mask stop codons in target sequences. CESAR cannot process them."
"Using this parameter please make sure you know what you are doing.",
)
app.add_argument(
"--chains_limit",
type=int,
default=15,
help="Skip genes with amount of orthologs more than the limit.",
)
app.add_argument(
"--skipped_genes",
default=None,
help="If a gene was skipped due to memory of number "
" of chain limit, save it into a file.",
)
app.add_argument(
"--mem_limit",
type=float,
default=50,
help="Skip genes requiring more than X GB to call CESAR",
)
app.add_argument("--jobs_dir", default="cesar_jobs", help="Save jobs in.")
app.add_argument(
"--combined", default="cesar_combined", help="Combined cluster jobs."
)
app.add_argument("--bigmem", default="cesar_bigmem", help="CESAR bigmem joblist")
app.add_argument("--results", default="cesar_results", help="Save results to.")
app.add_argument(
"--check_loss", default=None, help="Call internal gene loss pipeline"
)
app.add_argument("--u12", default=None, help="Add U12 introns data")
app.add_argument(
"--rejected_log", default=None, help="Save rejection data in this dir"
)
app.add_argument(
"--paralogs_log",
default=os.path.join(os.path.dirname(__file__), "paralogs.log"),
help="Write a list of genes for which only paralogous chains were detected.",
)
app.add_argument("--uhq_flank", default=50, type=int, help="UHQ flank size")
app.add_argument(
"--o2o_only",
"--o2o",
action="store_true",
dest="o2o_only",
help="Process only the genes that have a single orthologous chain",
)
app.add_argument(
"--no_fpi",
action="store_true",
dest="no_fpi",
help="Consider some frame-preserving mutations as inactivating. "
"See documentation for details.",
)
app.add_argument(
"--fragments_data", help="Gene: fragments file for fragmented genomes."
)
app.add_argument(
"--opt_cesar",
action="store_true",
dest="opt_cesar",
help="Using lastz-optimized version of CESAR",
)
app.add_argument(
"--precomp_memory_data",
default=None,
help="Memory consumption was already precomputed",
)
app.add_argument(
"--predefined_glp_class_path",
default=None,
help="Save preliminary projection classification for: "
"(i) Projections with too short query region (L or M) and "
"(ii) Projections with very long query region (M)",
)
# print help if there are no args
if len(sys.argv) < 2:
app.print_help()
sys.exit(0)
args = app.parse_args()
return args
def read_u12_data(u12_data_file):
"""Read U12 introns."""
u12_data = defaultdict(list)
if not u12_data_file:
# not provided
return u12_data
f = open(u12_data_file, "r")
f.__next__()
for line in f:
line_data = line[:-1].split("\t")
trans = line_data[0]
exon_num = int(line_data[1])
site = line_data[2]
val = (exon_num, site)
u12_data[trans].append(val)
f.close()
return u12_data
def define_buckets(lim, buckets):
"""Return memory limit in Gig if required. Get classes."""
if buckets == "0":
# split was not required
return lim, {0: []}
# buckets assigned
buckets_values = sorted([int(x) for x in buckets.split(",") if x != ""])
buckets = {x: [] for x in buckets_values}
lim = buckets_values[-1]
return lim, buckets
def read_orthologs(orthologs_file, fields_raw, only_o2o=False):
"""Read orthologs file."""
# convert fields param string to list
# fields = [x.upper() for x in fields_raw.split(",") if x != ""]
genes_chains = {}
chain_gene_field = {}
skipped = [] # genes skipped at this stage
_no_chains_intersecting = []
f = open(orthologs_file, "r") # open the file
f.__next__() # skip header
# first column: transcript identifier
# then: chain class fields (like column 2 - orthologous chains, 3 - paralogous)
for line in f:
# parse line
line_info = line[:-1].split("\t")
# "0" is a placeholder meaning "no chains there"
gene = line_info[0]
selected, chains = [], {}
chains[ORTHOLOG] = [x for x in line_info[1].split(",") if x != "0"]
chains[PARALOG] = [x for x in line_info[2].split(",") if x != "0"]
chains[TRANS] = [x for x in line_info[3].split(",") if x != "0"]
# Processed pseudogenes column ignored -> they are processed separately
all_chains = chains[ORTHOLOG] + chains[PARALOG] + chains[TRANS]
if len(all_chains) == 0:
# no way in running CESAR on this gene
# because there are no chains we could use
skipped.append((gene, "0", "No chains intersecting the gene"))
_no_chains_intersecting.append(gene)
continue
# user can ask to process only the genes that have a single orthologous chain
# here we check that this is the case
not_one2one = len(chains[ORTHOLOG]) == 0 or len(chains[ORTHOLOG]) > 1
if only_o2o and not_one2one: # we requested only a single orthologous chain
skipped.append((gene, "0", "Only one2one requested, this gene didn't pass"))
continue
# use orthologous chains by default,
# if no orthologous chains -> use spanning chains (TRANS)
# no spanning chains -> use paralogous
if len(chains[ORTHOLOG]) > 0:
selected_field = ORTHOLOG
elif len(chains[TRANS]) > 0:
selected_field = TRANS
else:
selected_field = PARALOG
selected = chains[selected_field].copy()
# mark used field
for chain in selected:
key = (chain, gene)
chain_gene_field[key] = selected_field
# write to the dict, gene to chains we will use
genes_chains[gene] = selected
f.close()
die(
"Error! No gene:chains pairs selected! Probably --fields parameter is wrong!"
) if len(genes_chains) == 0 else None
return genes_chains, chain_gene_field, skipped, _no_chains_intersecting
def read_bed(bed):
"""Read bed 12 file.
For each transcript extract genetic coordinates and exon sizes.
"""
bed_data = {}
f = open(bed, "r")
for line in f:
cds_track = make_cds_track(line).split("\t")
bed_info = line[:-1].split("\t")
chrom = bed_info[0]
chrom_start = int(bed_info[1])
chrom_end = int(bed_info[2])
name = bed_info[3]
block_sizes = [int(x) for x in cds_track[10].split(",") if x != ""]
bed_data[name] = (chrom, chrom_start, chrom_end, block_sizes)
f.close()
return bed_data
def define_short_q_proj_stat(q_chrom, q_start, q_end, q_2bit):
"""If the query sequence is shorter than 50% CDS,
we need to check whether projection is really deleted (Lost)
or is just missing due to assembly gaps.
"""
query_genome_sequence = TwoBitFile(q_2bit)
query_chrom = query_genome_sequence[q_chrom]
query_seq = query_chrom[q_start:q_end].upper()
# N are two-sided?
# simply copy pasted solution from CESAR wrapper.py
# can be further optimised
gap_ranges = 0
for match in finditer(ASM_GAP_PATTERN, query_seq, IGNORECASE):
span_start, span_end = match.span()
# gap_ranges.append((seq_start + span_start, seq_start + span_end))
gap_ranges += 1
if gap_ranges == 0:
# no assembly gaps: really deleted -> Lost
return L
else:
# there are assembly gaps -> Missing
return M
def precompute_regions(
batch, bed_data, bdb_chain_file, chain_gene_field, limit, q_2bit
):
"""Precompute region for each chain: bed pair."""
eprint("Precompute regions for each gene:chain pair...")
chain_to_genes, skipped = defaultdict(list), []
predef_glp = {} # predefined GLP classification
# revert the dict, from gene2chain to chain2genes
for gene, chains_not_sorted in batch.items():
if len(chains_not_sorted) == 0:
skipped.append((gene, "no orthologous chains"))
continue
chains = sorted(chains_not_sorted, key=lambda x: int(x))
chains = chains[:limit]
if len(chains_not_sorted) > limit:
# skip genes that have > limit orthologous chains
chains_skipped = chains[limit:]
skipped.append(
(
gene,
",".join(chains_skipped),
f"number of chains ({limit} chains) limit exceeded",
)
)
# add each projection individually
# further append to GLP classification
for c_ in chains_skipped:
proj_id = f"{gene}.{c_}"
predef_glp[proj_id] = f"{PROJECTION}\t{M}"
for chain_id in chains:
chain_to_genes[chain_id].append(gene)
# read regions themselves
gene_chain_grange = defaultdict(dict)
chains_num, iter_num = len(chain_to_genes.keys()), 0
for chain_id, genes in chain_to_genes.items():
# extract chain itself
chain_body = chain_extract_id(bdb_chain_file, chain_id).encode()
all_gene_ranges = []
genes_cds_length = []
for gene in genes:
# get genomic coordinates for each gene
gene_data = bed_data.get(gene)
grange = f"{gene_data[0]}:{gene_data[1]}-{gene_data[2]}"
cds_length = sum(gene_data[3])
genes_cds_length.append(cds_length)
all_gene_ranges.append(grange)
# we need to get corresponding regions in the query
# for now we have chain blocks coordinates and gene
# regions in the reference genome
# use chain_coords_converter shared library to
# convert target -> query coordinates via chain
# first need to convert to C-types
c_chain = ctypes.c_char_p(chain_body)
c_shift = ctypes.c_int(2)
granges_bytes = [s.encode("utf-8") for s in all_gene_ranges]
granges_num = len(all_gene_ranges)
c_granges_num = ctypes.c_int(granges_num)
granges_arr = (ctypes.c_char_p * (granges_num + 1))()
granges_arr[:-1] = granges_bytes
granges_arr[granges_num] = None
# then call the function
raw_ch_conv_out = ch_lib.chain_coords_converter(
c_chain, c_shift, c_granges_num, granges_arr
)
chain_coords_conv_out = [] # keep lines here
# convert C output to python-readable type
for i in range(granges_num + 1):
chain_coords_conv_out.append(raw_ch_conv_out[i].decode("utf-8"))
for line in chain_coords_conv_out[1:]:
# then parse the output
# line contains information about transcript range in the query
# and the corresponding locus in the reference
line_info = line.rstrip().split()
# line info is: region num, region in reference, region in query
# one line per one gene, in the same order
num = int(line_info[0])
# regions format is chrom:start-end
q_chrom = line_info[1].split(":")[0]
q_grange = line_info[1].split(":")[1].split("-")
q_start, q_end = int(q_grange[0]), int(q_grange[1])
que_len = q_end - q_start
t_grange = line_info[2].split(":")[1].split("-")
t_start, t_end = int(t_grange[0]), int(t_grange[1])
tar_len = t_end - t_start
len_delta = abs(tar_len - que_len)
delta_gene_times = len_delta / tar_len
gene = genes[num] # shared lib returns data per gene in the same order
proj_id = f"{gene}.{chain_id}"
cds_length = genes_cds_length[num]
min_query_length = cds_length * REF_LEN_THRESHOLD
field = chain_gene_field.get((chain_id, gene))
# check that corresponding region in the query is not too long
# for instance query locus is 50 times longer than the gene
# or it's longer than 1M base and also this is a TRANS chain
high_rel_len = delta_gene_times > REL_LENGTH_THR
high_abs_len = len_delta > ABS_LENGTH_TRH
long_loci_field = field in LONG_LOCI_FIELDS
if (high_rel_len or high_abs_len) and long_loci_field:
skipped.append((gene, chain_id, "too long query locus"))
# print(f"TOO LONG: {proj_id}")
predef_glp[proj_id] = f"{PROJECTION}\t{M}"
continue
# in contrast, if query locus is too short (<5% CDS length)
# then CESAR might not build HMM properly, we skip this
# hard to imagine in what case such an input will give us any meaningful result
if que_len < min_query_length:
# in this case we need to check whether the gene is truly deleted
# in the corresponding locus or is missing
# to separate these cases, TOGA checks whether the region contains
# assembly gaps
# skipped.append((gene, chain_id, "too short query locus"))
proj_stat = define_short_q_proj_stat(q_chrom, q_start, q_end, q_2bit)
predef_glp[proj_id] = f"{PROJECTION}\t{proj_stat}"
# print(f"Too short query: {proj_id}: {que_len}: {proj_stat}")
continue
# for each chain-gene pair save query region length
# need this for required memory estimation
gene_chain_grange[gene][chain_id] = que_len
del raw_ch_conv_out # not sure if necessary but...
iter_num += 1 # verbosity
eprint(f"Chain {iter_num} / {chains_num}", end="\r")
return gene_chain_grange, skipped, predef_glp
def fill_buckets(buckets, all_jobs):
"""Split jobs in buckets according their memory consumption."""
if 0 in buckets.keys(): # do not split it
buckets[0] = list(all_jobs.keys())
return buckets
# buckets were set
memlims = sorted(buckets.keys())
prev_lim = 0
for memlim in memlims:
# buckets and memory limits are pretty much the same
# if buckets are 5 and 10 then:
# memlim[5] -> jobs that require <= 5Gb
# memlim[10] -> jobs that require > 5Gb AND <= 10Gb
buckets[memlim] = [
job for job, job_mem in all_jobs.items() if prev_lim < job_mem <= memlim
]
prev_lim = memlim
# remove empty
filter_buckets = {k: v for k, v in buckets.items() if len(v) > 0}
return filter_buckets
def save_jobs(filled_buckets, bucket_jobs_num, jobs_dir):
"""Save cesar calls in the dir assigned."""
os.mkdir(jobs_dir) if not os.path.isdir(jobs_dir) else None
file_num, to_combine = 0, []
for bucket_id, jobs in filled_buckets.items():
num_of_files = bucket_jobs_num[bucket_id]
# just in case
num_of_files = len(jobs) if num_of_files >= len(jobs) else num_of_files
size_of_file = len(jobs) // num_of_files
# size_of_file = size_of_file + 1 if len(jobs) % num_of_files != 0 else size_of_file
jobs_split = parts(jobs, n=size_of_file)
for part in jobs_split:
file_num += 1
file_name = f"cesar_job_{file_num}_{bucket_id}"
file_path = os.path.abspath(os.path.join(jobs_dir, file_name))
f = open(file_path, "w")
f.write("\n".join(part) + "\n")
f.close()
to_combine.append(file_path)
return to_combine
def save_bigmem_jobs(bigmem_joblist, jobs_dir):
"""Save bigmem jobs."""
# TODO: try to merge with save_jobs() func
# one bigmem job per joblist, but not more than 100
# if > 100: something is wrong
joblist_size = len(bigmem_joblist)
num_of_parts = joblist_size if joblist_size <= BIGMEM_LIM else BIGMEM_JOBSNUM
if num_of_parts == 0:
return None # no bigmem jobs
bigmem_parts = split_in_n_lists(bigmem_joblist, num_of_parts)
bigmem_files_num = len(bigmem_parts) # in case if num of jobs < BIGMEM_JOBSNUM
bigmem_paths = []
if bigmem_files_num == 0:
return None # no bigmem jobs at all
for num, bigmem_part in enumerate(bigmem_parts):
file_name = f"cesar_job_{num}_bigmem"
file_path = os.path.abspath(os.path.join(jobs_dir, file_name))
f = open(file_path, "w")
f.write("\n".join(bigmem_part) + "\n")
f.close()
bigmem_paths.append(file_path)
return bigmem_paths
def save_combined_joblist(
to_combine, combined_file, results_dir, inact_mut_dat, rejected_log, name=""
):
"""Save joblist of joblists (combined joblist)."""
f = open(combined_file, "w")
for num, comb in enumerate(to_combine, 1):
basename = os.path.basename(comb).split(".")[0]
results_path = os.path.abspath(os.path.join(results_dir, basename + ".txt"))
combined_command = f"{CESAR_RUNNER} {comb} {results_path}"
if inact_mut_dat:
loss_data_path = os.path.join(inact_mut_dat, f"{basename}.inact_mut.txt")
combined_command += f" --check_loss {loss_data_path}"
if rejected_log:
log_path = os.path.join(rejected_log, f"{basename}.txt")
combined_command += f" --rejected_log {log_path}"
f.write(combined_command + "\n")
f.close()
def read_fragments_data(in_file):
"""Read gene: fragments file."""
ret = {}
f = open(in_file, "r")
for line in f:
line_data = line.rstrip().split("\t")
gene = line_data[0]
chain_str = line_data[1]
# chains = [int(x) for x in line_data.split(",") if x != ""]
# actually there are strings:
chains = [x for x in chain_str.split(",") if x != ""]
ret[gene] = chains
f.close()
return ret
def read_precomp_mem(precomp_file):
"""Read precomputed memory if exists."""
ret = {}
if precomp_file is None:
return ret
f = open(precomp_file, "r")
for line in f:
if line == "\n":
continue
line_data = line.rstrip().split("\t")
gene = line_data[0]
mem_raw = float(line_data[1])
mem = math.ceil(mem_raw) + 1.25
ret[gene] = mem
f.close()
return ret
def main():
"""Entry point."""
t0 = dt.now()
args = parse_args()
os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE" # otherwise it could crash
# as default we create CESAR jobs for chains with "orth" or "trans" class
# but user could select another set of chain classes
fields = "ORTH,TRANS" if args.fields is None else args.fields
# read U12 introns: to create a list of U12-containing genes
# need it to make subsequent commands
u12_data = read_u12_data(args.u12)
# if memory is precomputed: use it
precomp_mem = read_precomp_mem(args.precomp_memory_data)
# get lists of orthologous chains per each gene
# skipped_1 - no chains found -> log them
predefined_glp_class = {} # for projections which are M and L without CESAR
# m_ -> to be added to Missing bucket
batch, chain_gene_field, skipped_1, m_ = read_orthologs(
args.orthologs_file, fields, only_o2o=args.o2o_only
)
for gene in m_: # classify transcripts with no intersecting chains as missing
predefined_glp_class[gene] = f"{TRANSCRIPT}\t{M}"
# split cesar jobs in different buckets (if user requested so)
# like put all jobs that require < 5Gig in the bucket 1
# jobs requiring 5 to 15 Gb to bucket 2 and so on
# CESAR might be very memory-consuming -> so we care about this
mem_limit, buckets = define_buckets(args.mem_limit, args.buckets)
# load reference bed file data; coordinates and exon sizes
bed_data = read_bed(args.bed_file)
# check if cesar binary exists
die(
f"Error! Cannot find cesar executable at {args.cesar_binary}!"
) if not os.path.isfile(args.cesar_binary) else None
# if this is a fragmmented genome: we need to change CESAR commands for
# split genes
if args.fragments_data:
gene_fragments_dict = read_fragments_data(args.fragments_data)
else: # better to create empty dict and call dict.get()
gene_fragments_dict = dict()
# pre-compute chain : gene : region data
# collect the second list of skipped genes
# skipped_2 -> too long corresponding regions in query
regions, skipped_2, predef_glp = precompute_regions(
batch,
bed_data,
args.bdb_chain_file,
chain_gene_field,
args.chains_limit,
args.qDB,
)
predefined_glp_class.update(predef_glp)
# start making the jobs
all_jobs = {}
skipped_3 = []
bigmem_jobs = []
for gene in batch.keys():
u12_this_gene = u12_data.get(gene)
block_sizes = bed_data[gene][3]
gene_chains_data = regions.get(gene)
# check that there is something for this gene
if not gene_chains_data:
continue
elif len(gene_chains_data.keys()) == 0:
continue
gene_fragments = gene_fragments_dict.get(gene, False)
if gene_fragments:
# this is a fragmented gene, need to change the procedure a bit
gene_chains_data = {
k: v for k, v in gene_chains_data.items() if k in gene_fragments
}
chains = gene_chains_data.keys()
if len(chains) == 0:
continue
chains_arg = ",".join(chains) # chain ids -> one of the cmd args
# if memory is precomputed then use it
precomp_gig = precomp_mem.get(gene, None)
if precomp_gig is None:
# proceed to memory estimation
# the same procedure as inside CESAR2.0 code
num_states, r_length = 0, 0
# required memory depends on numerous params
# first, we need reference transcript-related parameters
# query-related parameters will be later
for block_size in block_sizes:
# num_states += 6 + 6 * reference->num_codons + 1 + 2 + 2 + 22 + 6;
# /* 22 and 6 for acc and donor states */
num_codons = block_size // 3
num_states += 6 + 6 * num_codons + 1 + 2 + 2 + 22 + 6
# r_length += 11 + 6 * fasta.references[i]->length
# + donors[i]->length + acceptors[i]->length;
r_length += block_size
# now compute query sequence-related parameters
query_lens = [v for v in gene_chains_data.values()]
if (
gene_fragments
): # in case of fragmented genome: we stitch queries together
# so query length = sum of all queries
q_length_max = sum(query_lens)
else: # not fragmented genome: processins queries separately
# thus we need only the max length
q_length_max = max(query_lens)
# and now compute the amount of required memory
memory = (
(num_states * 4 * 8)
+ (num_states * q_length_max * 4)
+ (num_states * 304)
+ (2 * q_length_max + r_length) * 8
+ (q_length_max + r_length) * 2 * 1
+ EXTRA_MEM
)
gig = math.ceil(memory / 1000000000) + 0.25
else:
# memory was precomputed
gig = precomp_gig
# gig = compute_amount_of_memory(block_sizes, q_length_max, args.opt_cesar)
# # 0 gene; 1 chains; 2 bed_file; 3 bdb chain_file; 4 tDB; 5 qDB; 6 output; 7 cesar_bin
job = WRAPPER_TEMPLATE.format(
gene,
chains_arg,
os.path.abspath(args.bdb_bed_file),
os.path.abspath(args.bdb_chain_file),
os.path.abspath(args.tDB),
os.path.abspath(args.qDB),
os.path.abspath(args.cesar_binary),
args.uhq_flank,
gig,
)
# add some flags if required
job = job + " --mask_stops" if args.mask_stops else job
job = job + " --check_loss" if args.check_loss else job
job = job + " --no_fpi" if args.no_fpi else job
job = job + " --fragments" if gene_fragments else job
job = job + " --opt_cesar" if args.opt_cesar else job
# add U12 introns data if this gene has them:
job = job + f" --u12 {os.path.abspath(args.u12)}" if u12_this_gene else job
# define whether it's an ordinary or a bigmem job
# depending on the memory requirements
if gig <= mem_limit: # ordinary job
all_jobs[job] = gig
elif gig <= BIGMEM_LIM:
skipped_3.append((gene, ",".join(chains), f"requires {gig}) -> bigmem job"))
predef_glp[gene] = f"{TRANSCRIPT}\tM"
bigmem_jobs.append(job)
else:
skipped_3.append(
(
gene,
",".join(chains),
f"big mem limit ({BIGMEM_LIM} gig) exceeded (needs {gig})",
)
)
predef_glp[gene] = f"{TRANSCRIPT}\tM"
eprint(f"\nThere are {len(all_jobs.keys())} jobs in total.")
eprint("Splitting the jobs.")
# split jobs in buckets | compute proportions
filled_buckets = fill_buckets(buckets, all_jobs)
prop_sum = sum([k * len(v) for k, v in filled_buckets.items()])
# estimate proportion of a bucket in the runtime
buckets_prop = (
{k: (k * len(v)) / prop_sum for k, v in filled_buckets.items()}
if 0 not in filled_buckets.keys()
else {0: 1.0}
)
eprint("Bucket proportions are:")
eprint("\n".join([f"{k} -> {v}" for k, v in buckets_prop.items()]))
eprint(f"Also there are {len(bigmem_jobs)} bigmem jobs")
# get number of jobs for each bucket
bucket_jobs_num = {k: math.ceil(args.jobs_num * v) for k, v in buckets_prop.items()}
# save jobs, get comb lines
to_combine = save_jobs(filled_buckets, bucket_jobs_num, args.jobs_dir)
# save combined jobs, combined is a file containing paths to separate jobs
os.mkdir(args.results) if not os.path.isdir(args.results) else None
os.mkdir(args.check_loss) if args.check_loss and not os.path.isdir(
args.check_loss
) else None
# save joblist of joblists
save_combined_joblist(
to_combine, args.combined, args.results, args.check_loss, args.rejected_log
)
# save bigmem jobs, a bit different logic
bigmem_paths = save_bigmem_jobs(bigmem_jobs, args.jobs_dir)
if bigmem_paths:
save_combined_joblist(
bigmem_paths,
args.bigmem,
args.results,
args.check_loss,
args.rejected_log,
name="bigmem",
)
# save skipped genes if required
if args.skipped_genes:
skipped = skipped_1 + skipped_2 + skipped_3
f = open(args.skipped_genes, "w")
# usually we have gene + reason why skipped
# we split them with tab
f.write("\n".join(["\t".join(x) for x in skipped]) + "\n")
f.close()
if args.predefined_glp_class_path:
# if we know GLP class for some of the projections: save it
f = open(args.predefined_glp_class_path, "w")
for k, v in predefined_glp_class.items():
f.write(f"{k}\t{v}\n")
f.close()
# save IDs of paralogous projections
f = open(args.paralogs_log, "w")
for k, v in chain_gene_field.items():
if v != "PARA":
continue
gene_ = f"{k[1]}.{k[0]}\n"
f.write(gene_)
f.close()
eprint(f"Estimated: {dt.now() - t0}")
sys.exit(0)
if __name__ == "__main__":
main()
| [
"modules.common.split_in_n_lists",
"sys.exit",
"ctypes.CDLL",
"argparse.ArgumentParser",
"modules.common.parts",
"os.path.isdir",
"re.finditer",
"ctypes.c_int",
"os.mkdir",
"modules.common.make_cds_track",
"os.path.isfile",
"os.path.dirname",
"ctypes.c_char_p",
"modules.common.die",
"cty... | [((782, 807), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (797, 807), False, 'import os\n'), ((1773, 1840), 'os.path.join', 'os.path.join', (['LOCATION', '"""modules"""', '"""chain_coords_converter_slib.so"""'], {}), "(LOCATION, 'modules', 'chain_coords_converter_slib.so')\n", (1785, 1840), False, 'import os\n'), ((1857, 1896), 'ctypes.CDLL', 'ctypes.CDLL', (['chain_coords_conv_lib_path'], {}), '(chain_coords_conv_lib_path)\n', (1868, 1896), False, 'import ctypes\n'), ((2076, 2107), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_char_p'], {}), '(ctypes.c_char_p)\n', (2090, 2107), False, 'import ctypes\n'), ((842, 884), 'os.path.join', 'os.path.join', (['LOCATION', '"""CESAR_wrapper.py"""'], {}), "(LOCATION, 'CESAR_wrapper.py')\n", (854, 884), False, 'import os\n'), ((1055, 1096), 'os.path.join', 'os.path.join', (['LOCATION', '"""cesar_runner.py"""'], {}), "(LOCATION, 'cesar_runner.py')\n", (1067, 1096), False, 'import os\n'), ((2001, 2032), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_char_p'], {}), '(ctypes.c_char_p)\n', (2015, 2032), False, 'import ctypes\n'), ((2166, 2191), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2189, 2191), False, 'import argparse\n'), ((6991, 7008), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (7002, 7008), False, 'from collections import defaultdict\n'), ((11310, 11328), 'twobitreader.TwoBitFile', 'TwoBitFile', (['q_2bit'], {}), '(q_2bit)\n', (11320, 11328), False, 'from twobitreader import TwoBitFile\n'), ((11575, 11623), 're.finditer', 'finditer', (['ASM_GAP_PATTERN', 'query_seq', 'IGNORECASE'], {}), '(ASM_GAP_PATTERN, query_seq, IGNORECASE)\n', (11583, 11623), False, 'from re import finditer, IGNORECASE\n'), ((12089, 12145), 'modules.common.eprint', 'eprint', (['"""Precompute regions for each gene:chain pair..."""'], {}), "('Precompute regions for each gene:chain pair...')\n", (12095, 12145), False, 'from modules.common import eprint\n'), ((13320, 13337), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (13331, 13337), False, 'from collections import defaultdict\n'), ((20353, 20399), 'modules.common.split_in_n_lists', 'split_in_n_lists', (['bigmem_joblist', 'num_of_parts'], {}), '(bigmem_joblist, num_of_parts)\n', (20369, 20399), False, 'from modules.common import split_in_n_lists\n'), ((22713, 22721), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (22719, 22721), True, 'from datetime import datetime as dt\n'), ((29627, 29656), 'modules.common.eprint', 'eprint', (['"""Splitting the jobs."""'], {}), "('Splitting the jobs.')\n", (29633, 29656), False, 'from modules.common import eprint\n'), ((30048, 30081), 'modules.common.eprint', 'eprint', (['"""Bucket proportions are:"""'], {}), "('Bucket proportions are:')\n", (30054, 30081), False, 'from modules.common import eprint\n'), ((32080, 32091), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (32088, 32091), False, 'import sys\n'), ((6856, 6867), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (6864, 6867), False, 'import sys\n'), ((10247, 10334), 'modules.common.die', 'die', (['"""Error! No gene:chains pairs selected! Probably --fields parameter is wrong!"""'], {}), "('Error! No gene:chains pairs selected! Probably --fields parameter is wrong!'\n )\n", (10250, 10334), False, 'from modules.common import die\n'), ((12176, 12193), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (12187, 12193), False, 'from collections import defaultdict\n'), ((14270, 14297), 'ctypes.c_char_p', 'ctypes.c_char_p', (['chain_body'], {}), '(chain_body)\n', (14285, 14297), False, 'import ctypes\n'), ((14316, 14331), 'ctypes.c_int', 'ctypes.c_int', (['(2)'], {}), '(2)\n', (14328, 14331), False, 'import ctypes\n'), ((14468, 14493), 'ctypes.c_int', 'ctypes.c_int', (['granges_num'], {}), '(granges_num)\n', (14480, 14493), False, 'import ctypes\n'), ((18073, 18125), 'modules.common.eprint', 'eprint', (['f"""Chain {iter_num} / {chains_num}"""'], {'end': "'\\r'"}), "(f'Chain {iter_num} / {chains_num}', end='\\r')\n", (18079, 18125), False, 'from modules.common import eprint\n'), ((19085, 19103), 'os.mkdir', 'os.mkdir', (['jobs_dir'], {}), '(jobs_dir)\n', (19093, 19103), False, 'import os\n'), ((19545, 19572), 'modules.common.parts', 'parts', (['jobs'], {'n': 'size_of_file'}), '(jobs, n=size_of_file)\n', (19550, 19572), False, 'from modules.common import parts\n'), ((24224, 24291), 'modules.common.die', 'die', (['f"""Error! Cannot find cesar executable at {args.cesar_binary}!"""'], {}), "(f'Error! Cannot find cesar executable at {args.cesar_binary}!')\n", (24227, 24291), False, 'from modules.common import die\n'), ((30282, 30310), 'math.ceil', 'math.ceil', (['(args.jobs_num * v)'], {}), '(args.jobs_num * v)\n', (30291, 30310), False, 'import math\n'), ((30535, 30557), 'os.mkdir', 'os.mkdir', (['args.results'], {}), '(args.results)\n', (30543, 30557), False, 'import os\n'), ((30607, 30632), 'os.mkdir', 'os.mkdir', (['args.check_loss'], {}), '(args.check_loss)\n', (30615, 30632), False, 'import os\n'), ((19111, 19134), 'os.path.isdir', 'os.path.isdir', (['jobs_dir'], {}), '(jobs_dir)\n', (19124, 19134), False, 'import os\n'), ((20716, 20749), 'os.path.join', 'os.path.join', (['jobs_dir', 'file_name'], {}), '(jobs_dir, file_name)\n', (20728, 20749), False, 'import os\n'), ((21255, 21299), 'os.path.join', 'os.path.join', (['results_dir', "(basename + '.txt')"], {}), "(results_dir, basename + '.txt')\n", (21267, 21299), False, 'import os\n'), ((21423, 21479), 'os.path.join', 'os.path.join', (['inact_mut_dat', 'f"""{basename}.inact_mut.txt"""'], {}), "(inact_mut_dat, f'{basename}.inact_mut.txt')\n", (21435, 21479), False, 'import os\n'), ((21594, 21639), 'os.path.join', 'os.path.join', (['rejected_log', 'f"""{basename}.txt"""'], {}), "(rejected_log, f'{basename}.txt')\n", (21606, 21639), False, 'import os\n'), ((22588, 22606), 'math.ceil', 'math.ceil', (['mem_raw'], {}), '(mem_raw)\n', (22597, 22606), False, 'import math\n'), ((24313, 24346), 'os.path.isfile', 'os.path.isfile', (['args.cesar_binary'], {}), '(args.cesar_binary)\n', (24327, 24346), False, 'import os\n'), ((28128, 28162), 'os.path.abspath', 'os.path.abspath', (['args.bdb_bed_file'], {}), '(args.bdb_bed_file)\n', (28143, 28162), False, 'import os\n'), ((28176, 28212), 'os.path.abspath', 'os.path.abspath', (['args.bdb_chain_file'], {}), '(args.bdb_chain_file)\n', (28191, 28212), False, 'import os\n'), ((28226, 28251), 'os.path.abspath', 'os.path.abspath', (['args.tDB'], {}), '(args.tDB)\n', (28241, 28251), False, 'import os\n'), ((28265, 28290), 'os.path.abspath', 'os.path.abspath', (['args.qDB'], {}), '(args.qDB)\n', (28280, 28290), False, 'import os\n'), ((28304, 28338), 'os.path.abspath', 'os.path.abspath', (['args.cesar_binary'], {}), '(args.cesar_binary)\n', (28319, 28338), False, 'import os\n'), ((30565, 30592), 'os.path.isdir', 'os.path.isdir', (['args.results'], {}), '(args.results)\n', (30578, 30592), False, 'import os\n'), ((5437, 5462), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (5452, 5462), False, 'import os\n'), ((10659, 10679), 'modules.common.make_cds_track', 'make_cds_track', (['line'], {}), '(line)\n', (10673, 10679), False, 'from modules.common import make_cds_track\n'), ((13499, 13541), 'modules.common.chain_extract_id', 'chain_extract_id', (['bdb_chain_file', 'chain_id'], {}), '(bdb_chain_file, chain_id)\n', (13515, 13541), False, 'from modules.common import chain_extract_id\n'), ((19731, 19764), 'os.path.join', 'os.path.join', (['jobs_dir', 'file_name'], {}), '(jobs_dir, file_name)\n', (19743, 19764), False, 'import os\n'), ((27735, 27765), 'math.ceil', 'math.ceil', (['(memory / 1000000000)'], {}), '(memory / 1000000000)\n', (27744, 27765), False, 'import math\n'), ((30660, 30690), 'os.path.isdir', 'os.path.isdir', (['args.check_loss'], {}), '(args.check_loss)\n', (30673, 30690), False, 'import os\n'), ((21179, 21201), 'os.path.basename', 'os.path.basename', (['comb'], {}), '(comb)\n', (21195, 21201), False, 'import os\n'), ((32059, 32067), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (32065, 32067), True, 'from datetime import datetime as dt\n'), ((28825, 28850), 'os.path.abspath', 'os.path.abspath', (['args.u12'], {}), '(args.u12)\n', (28840, 28850), False, 'import os\n')] |
from django.db import models
from django.utils import timezone
from user.models import User
class GuestBook(models.Model):
username = models.ForeignKey(User, models.DO_NOTHING, verbose_name='username')
title = models.CharField(verbose_name='Title', max_length=64, blank=False)
content = models.TextField(verbose_name='Content')
image = models.ImageField(blank=True, null=True)
create_dt = models.DateTimeField(verbose_name='Create date', auto_now_add=True)
modify_dt = models.DateTimeField(verbose_name='Modify date', auto_now=True)
def __str__(self):
return '[%d] %.40s' % (self.id, self.title)
class Meta:
verbose_name = 'guestbook'
verbose_name_plural = 'guestbook'
db_table = 'guestbook'
class Comment(models.Model):
guestbook_post = models.ForeignKey(GuestBook, models.DO_NOTHING)
username = models.ForeignKey(User, models.DO_NOTHING, verbose_name='username')
level = models.IntegerField(blank=True, null=True)
content = models.TextField(verbose_name='Contnet')
reference_comment_id = models.IntegerField(blank=True, null=True)
create_dt = models.DateTimeField(verbose_name='Create date', auto_now_add=True)
modify_dt = models.DateTimeField(verbose_name='Modify date', auto_now=True)
def __str__(self):
return '[%d] %.40s - [%d] %.40s' % (self.guestbook_post.id, self.guestbook_post.title, self.id, self.content)
class Meta:
verbose_name = 'comment'
verbose_name_plural = 'comment'
db_table = 'comment'
class Like(models.Model):
guestbook_post = models.ForeignKey(GuestBook, models.DO_NOTHING)
username = models.ForeignKey(User, models.DO_NOTHING, verbose_name='username')
def __str__(self):
return '[%d] %.40 - %s' % (self.guestbook_post.id, self.guestbook_post.title, self.username.name)
class Meta:
verbose_name = 'like'
verbose_name_plural = 'like'
db_table = 'like' | [
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.IntegerField",
"django.db.models.DateTimeField",
"django.db.models.ImageField",
"django.db.models.CharField"
] | [((137, 204), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User', 'models.DO_NOTHING'], {'verbose_name': '"""username"""'}), "(User, models.DO_NOTHING, verbose_name='username')\n", (154, 204), False, 'from django.db import models\n'), ((214, 280), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""Title"""', 'max_length': '(64)', 'blank': '(False)'}), "(verbose_name='Title', max_length=64, blank=False)\n", (230, 280), False, 'from django.db import models\n'), ((292, 332), 'django.db.models.TextField', 'models.TextField', ([], {'verbose_name': '"""Content"""'}), "(verbose_name='Content')\n", (308, 332), False, 'from django.db import models\n'), ((342, 382), 'django.db.models.ImageField', 'models.ImageField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (359, 382), False, 'from django.db import models\n'), ((397, 464), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'verbose_name': '"""Create date"""', 'auto_now_add': '(True)'}), "(verbose_name='Create date', auto_now_add=True)\n", (417, 464), False, 'from django.db import models\n'), ((478, 541), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'verbose_name': '"""Modify date"""', 'auto_now': '(True)'}), "(verbose_name='Modify date', auto_now=True)\n", (498, 541), False, 'from django.db import models\n'), ((762, 809), 'django.db.models.ForeignKey', 'models.ForeignKey', (['GuestBook', 'models.DO_NOTHING'], {}), '(GuestBook, models.DO_NOTHING)\n', (779, 809), False, 'from django.db import models\n'), ((822, 889), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User', 'models.DO_NOTHING'], {'verbose_name': '"""username"""'}), "(User, models.DO_NOTHING, verbose_name='username')\n", (839, 889), False, 'from django.db import models\n'), ((899, 941), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (918, 941), False, 'from django.db import models\n'), ((953, 993), 'django.db.models.TextField', 'models.TextField', ([], {'verbose_name': '"""Contnet"""'}), "(verbose_name='Contnet')\n", (969, 993), False, 'from django.db import models\n'), ((1018, 1060), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1037, 1060), False, 'from django.db import models\n'), ((1075, 1142), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'verbose_name': '"""Create date"""', 'auto_now_add': '(True)'}), "(verbose_name='Create date', auto_now_add=True)\n", (1095, 1142), False, 'from django.db import models\n'), ((1156, 1219), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'verbose_name': '"""Modify date"""', 'auto_now': '(True)'}), "(verbose_name='Modify date', auto_now=True)\n", (1176, 1219), False, 'from django.db import models\n'), ((1496, 1543), 'django.db.models.ForeignKey', 'models.ForeignKey', (['GuestBook', 'models.DO_NOTHING'], {}), '(GuestBook, models.DO_NOTHING)\n', (1513, 1543), False, 'from django.db import models\n'), ((1556, 1623), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User', 'models.DO_NOTHING'], {'verbose_name': '"""username"""'}), "(User, models.DO_NOTHING, verbose_name='username')\n", (1573, 1623), False, 'from django.db import models\n')] |
import gzip
import zlib
import io
from ssxtd import parsers
import time
my_file = io.StringIO('''<doc farm = "456">
<i species = "lapin" sex = "male" >John</i>
<i species = "chien"><sub subspec = "Kooikerhondje">Tristan</sub></i>
<i species = "cheval">
<count>1.1</count>
</i>
<i>
<month>11</month>
<year>2011</year>
<day>1</day>
</i>
</doc>
''')
def try_conversion(value):
"""called when encountering a string in the xml
Arguments:
value {str} -- value to be converted, if possible
Returns:
[str, float, int] -- converted value
"""
try:
return int(value)
except (ValueError, TypeError):
pass
try:
return float(value)
except (ValueError, TypeError):
pass
return value
# {'#alldata': [{'month': {'#alldata': ['09']}}, {'year': {'#alldata': ['1993']}}, {'day': {'#alldata': ['09']}}]}
def merge_date_tags(path, k):
"""called when encountering only tags in an element ( no text, nor mixed tag and text)
Arguments:
path {list} -- path of the element containing the tags
k {string} -- name of the element containing the tags
Returns:
whatever type you want -- the value of the element
note : if you want
"""
l=k['#alldata']
#2015/01/01 12:10:30
# if "PubMedPubDate" in path[-1]:
if "date" in path[-1].lower():
month=None
year=None
day=None
hour=None
minute=None
r=""
# it should always be a dict with one key, and a subdict as value, containing an "#alldata" key
# {'month': {'#alldata': ['09']}}
for i in l:
# month
k = next(iter(i))
# ['09']
ad = i[k]['#alldata']
if k == "Year" and len(ad) == 1 and isinstance (ad[0], str):
year=ad[0]
elif k == "Month" and len(ad) == 1 and isinstance (ad[0], str):
month=ad[0]
elif k == "Day" and len(ad) == 1 and isinstance (ad[0], str):
day=ad[0]
elif k == "Hour" and len(ad) == 1 and isinstance (ad[0], str):
hour=ad[0]
if len(hour) == 1:
hour = "0"+hour
elif k == "Minute" and len(ad) == 1 and isinstance (ad[0], str):
minute=ad[0]
if len(minute) == 1:
minute = "0"+minute
if year is not None:
r=r+year
if month is not None:
r=r+"/"+month
if day is not None:
r=r+"/"+day
if hour is not None:
r=r+ " "+hour
if minute is not None:
r=r+":"+minute
#retrun only if at least "year" is present
return r
return k
def month_to_num(m):
try:
int(m)
except (ValueError, TypeError):
return{
'Jan' : "01",
'Feb' : "02",
'Mar' : "03",
'Apr' : "04",
'May' : "05",
'Jun' : "06",
'Jul' : "07",
'Aug' : "08",
'Sep' : "09",
'Oct' : "10",
'Nov' : "11",
'Dec' : "12"
}[m]
return m
print("lxml_parse :")
for i in parsers.lxml_parse(my_file, depth=2, compression=None, value_processor=try_conversion, object_processor=merge_date_tags):
print(i) | [
"io.StringIO",
"ssxtd.parsers.lxml_parse"
] | [((84, 412), 'io.StringIO', 'io.StringIO', (['"""<doc farm = "456"> \n <i species = "lapin" sex = "male" >John</i>\n <i species = "chien"><sub subspec = "Kooikerhondje">Tristan</sub></i>\n <i species = "cheval">\n <count>1.1</count>\n </i>\n\t<i>\n <month>11</month>\n <year>2011</year>\n <day>1</day>\n </i>\n</doc>\n"""'], {}), '(\n """<doc farm = "456"> \n <i species = "lapin" sex = "male" >John</i>\n <i species = "chien"><sub subspec = "Kooikerhondje">Tristan</sub></i>\n <i species = "cheval">\n <count>1.1</count>\n </i>\n\t<i>\n <month>11</month>\n <year>2011</year>\n <day>1</day>\n </i>\n</doc>\n"""\n )\n', (95, 412), False, 'import io\n'), ((3576, 3701), 'ssxtd.parsers.lxml_parse', 'parsers.lxml_parse', (['my_file'], {'depth': '(2)', 'compression': 'None', 'value_processor': 'try_conversion', 'object_processor': 'merge_date_tags'}), '(my_file, depth=2, compression=None, value_processor=\n try_conversion, object_processor=merge_date_tags)\n', (3594, 3701), False, 'from ssxtd import parsers\n')] |
import discord
from discord.ext import commands
from typing import Any, Iterator, List, NoReturn, Optional, Sequence
class Paginator:
def __init__(self):
self.pages: List[discord.Embed] = []
def insert_page_at(self, index: int, page: discord.Embed):
"""Inserts a new page at a particular position in the paginator
Prameters
---------
page : discord.Embed
The embed to insert into the paginator
"""
self.pages.insert(index, page)
def clear(self):
"""Clears the paginator of all pages"""
self.pages = []
def prepend_page(self, page: discord.Embed):
"""Adds a new page to the beginning of the paginator's pages
Prameters
---------
page : discord.Embed
The embed to add to the beginning of the paginator
"""
self.pages.insert(0, page)
def add_page(self, page: discord.Embed):
"""Adds a new page to the end of the paginator's pages
Parameters
----------
page : discord.Embed
The embed to add at the end of the current pages
"""
self.pages.append(page)
def __iter__(self) -> Iterator[discord.Embed]:
"""Returns an interator to iterate through the paginator's pages
Returns
-------
An iterator of `discord.Embed`s
"""
return iter(self.pages)
def __next__(self) -> Optional[discord.Embed]:
"""Returns the next page in the paginator
Raises
------
`StopIteration` when there are no more pages to paginate through
Returns
-------
The next `discord.Embed` in the pagination sequence
"""
return next(self.pages)
def __len__(self) -> int:
"""Returns the number of pages for the paginator
Returns
-------
An integer representing the number of pages the paginator has
"""
return len(self.pages)
@property
def is_paginated(self) -> bool:
"""Returns whether the paginator is "paginated", meaning containing more than one page
Returns
-------
`True` if the paginator contains more than one page, else `False`
"""
return len(self) != 0
async def paginate(self, ctx: commands.Context) -> NoReturn:
"""Starts the paginator in the given context
NOTE: In order to paginate, your bot needs to have the
following permissions in the given context:
- Send Messages
- Embed Links
- Add Reactions
- Manage Messages (for resetting pagination menu button reactions)
If any of the above permissions are missing, this coroutine is exited silently
Parameters
----------
ctx : discord.commands.Context
The invocation context
"""
permissions: discord.Permissions = ctx.me.permissions_in(ctx.channel)
if permissions.add_reactions and permissions.embed_links and permissions.send_messages and permissions.manage_messages:
# set emojis
far_left = "⏮"
left = '⏪'
right = '⏩'
far_right = "⏭"
# reaction check to be used later
def predicate(m: discord.Message, set_begin: bool, push_left: bool, push_right: bool, set_end: bool):
def check(reaction: discord.Reaction, user: discord.User):
if reaction.message.id != m.id or user.id == ctx.bot.user.id or user.id != ctx.author.id:
return False
if set_begin and reaction.emoji == far_left:
return True
if push_left and reaction.emoji == left:
return True
if push_right and reaction.emoji == right:
return True
if set_end and reaction.emoji == far_right:
return True
return False
return check
index = 0
message = None
action = ctx.send
while True:
res = await action(embed=self.pages[index])
if res is not None:
message = res
await message.clear_reactions()
# determine which emojis should be added depending on how many pages are left in each direction
set_begin = index > 1
push_left = index != 0
push_right = index != len(self.pages) - 1
set_end = index < len(self.pages) - 2
# add the appropriate emojis
if set_begin:
await message.add_reaction(far_left)
if push_left:
await message.add_reaction(left)
if push_right:
await message.add_reaction(right)
if set_end:
await message.add_reaction(far_right)
# wait for reaction and set page index
react, usr = await ctx.bot.wait_for(
"reaction_add", check=predicate(message, set_begin, push_left, push_right, set_end)
)
# set next page index
if react.emoji == far_left:
index = 0
elif react.emoji == left:
index -= 1
elif react.emoji == right:
index += 1
elif react.emoji == far_right:
index = len(self.pages) - 1
else:
# invalid reaction, remove it
await react.remove(usr)
action = message.edit
@classmethod
def from_embeds(cls, *pages: Sequence[discord.Embed]):
"""Creates a paginator from a given list of embeds
This allows for more lower level control than `Paginator.from_sequence`
Parameters
----------
pages : Sequence[discord.Embed]
The list of pages to add to the list
"""
c = cls()
c.pages = pages
return c
@staticmethod
def chunks(l, n):
"""
Converts a sequence to a list of sub-lists of a maximum size
The code for this function comes from https://stackoverflow.com/a/9671301
Parameters
----------
l : list
The list to split into chunks
n : int
The maximum number of items for each sublist
Returns
-------
A list of lists, with each sublist being of maximum size `n`
"""
n = max(1, n)
return [l[i:i+n] for i in range(0, len(l), n)]
@classmethod
def from_sequence(
cls,
sequence: Sequence[Any],
max_lines: int = 10,
base_embed: discord.Embed = None,
line_sep: str = "\n"
):
"""Creates a new paginator from a list of items
Parameters
----------
sequence : Sequence(Any)
The sequence of items to paginate through. Each item is stringified and given its own line in the paginator
max_lines : int
The maximum number of lines to have on each page.
This defaults to `10`.
base_embed : discord.Embed
The base embed to use for the sequence.
The title of the embed will automatically have each page number formatted for each page, if able
The description of the embed will have each item in the sequence formatted, if able
The footer of the embed will have the current page number and the total number of pages formatted, if able
If a base embed is not supplied, an embed for each page will be created with the following properties:
- Title is "Page {page number}"
- Description is each item from `sequence` on each line separated by `line_sep`
- Color is `discord.Color.dark_theme()`
- Footer text is "Page {page_number}/{total_pages}"
line_sep : str
The line separator to use for each line in the paginator
This defaults to "\\n"
"""
if not base_embed:
base_embed = discord.Embed(
title="Page {}",
description="{}",
color=discord.Color.dark_theme()
).set_footer(text="Page {}/{}")
c = cls()
pages = Paginator.chunks(sequence, max_lines)
for i, page in enumerate(pages):
embed = base_embed.copy()
# update title and description for new page
embed.title = base_embed.title.format(i+1)
embed.description = line_sep.join([base_embed.description.format(item) for item in page])
# update footer
if base_embed.footer.text is not discord.Embed.Empty:
embed.set_footer(text=embed.footer.text.format(i+1, len(pages)))
c.pages.append(embed)
return c
| [
"discord.Color.dark_theme"
] | [((8413, 8439), 'discord.Color.dark_theme', 'discord.Color.dark_theme', ([], {}), '()\n', (8437, 8439), False, 'import discord\n')] |
import tensorflow as tf
#initialize data
X = [2, 5, 7]
Y = [3, 4, 6]
W = tf.Variable(tf.random_normal([1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')
#hypothesis H = WX+b
H = W * X + b
#cost/loss function
cost = tf.reduce_mean(tf.square(H - Y))
#minimize
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
train = optimizer.minimize(cost)
#launch the graph in a session
sess = tf.Session()
#initialize global variables in the graph
sess.run(tf.global_variables_initializer())
#output
for epoch in range(1001):
sess.run(train)
if (epoch % 100 == 0):
print (("step %4dth : cost = %f, W = %f, b = %f")
% (epoch, sess.run(cost), float(sess.run(W)), float(sess.run(b))))
| [
"tensorflow.random_normal",
"tensorflow.Session",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.global_variables_initializer",
"tensorflow.square"
] | [((313, 366), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': '(0.01)'}), '(learning_rate=0.01)\n', (346, 366), True, 'import tensorflow as tf\n'), ((443, 455), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (453, 455), True, 'import tensorflow as tf\n'), ((93, 114), 'tensorflow.random_normal', 'tf.random_normal', (['[1]'], {}), '([1])\n', (109, 114), True, 'import tensorflow as tf\n'), ((148, 169), 'tensorflow.random_normal', 'tf.random_normal', (['[1]'], {}), '([1])\n', (164, 169), True, 'import tensorflow as tf\n'), ((269, 285), 'tensorflow.square', 'tf.square', (['(H - Y)'], {}), '(H - Y)\n', (278, 285), True, 'import tensorflow as tf\n'), ((509, 542), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (540, 542), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 15 17:22:01 2020
@author: Kamil
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import morse_decoder
import iir_filter
class RealtimeWindow:
def __init__(self, channel: str):
# create a plot window
self.fig, (self.ax, self.ax1)= plt.subplots(2)
plt.title(f"Channel: {channel}")
self.ax.set_title('Lumiannce')
self.ax1.set_title('Filtered Signal')
self.plotbuffer = np.zeros(800)
self.plotbuffer1 = np.zeros(800)
# Create empty lines
line, = self.ax.plot(self.plotbuffer)
line2, = self.ax1.plot(self.plotbuffer1)
self.line = [line, line2]
# Set axis limits
self.ax.set_ylim(-1, 1)
self.ax1.set_ylim(-1, 1)
# Initalize Ringbuffers
self.ringbuffer = []
self.ringbuffer1 = []
# add any initialisation code here (filters etc)
# start the animation
self.decodedSequence = ''
# Design High Pass filter
samplingFrequency = 30
# Define cut off frequency
cutOfFrequencyHighPass = 0.1 #Hz
# Number order that IIR filter array will be equivalent to
order = 2
# Genereate second order sections
sos = np.array(iir_filter.GenerateHighPassCoeff( cutOfFrequencyHighPass, samplingFrequency, order ))
# Initalize morse code decoder object
self.decoder = morse_decoder.MorseCodeDecoder()
# Create IIR Array object
self.iirFilter = iir_filter.IIRFilter(sos)
# Initialize filter output variable
self.filterOutput = 0
# start the animation
self.ani = animation.FuncAnimation(self.fig, self.update, interval=100)
# updates the plot
def update(self, data):
# add new data to the buffer
self.plotbuffer = np.append(self.plotbuffer, self.ringbuffer)
self.plotbuffer1 = np.append(self.plotbuffer1, self.ringbuffer1)
# only keep the 500 newest ones and discard the old ones
self.plotbuffer = self.plotbuffer[-800:]
self.plotbuffer1 = self.plotbuffer1[-800:]
self.ringbuffer = []
self.ringbuffer1 = []
# set the new 500 points of channel 9
self.line[0].set_ydata(self.plotbuffer)
self.line[1].set_ydata(self.plotbuffer1)
self.ax.set_ylim((min(self.plotbuffer)-1), max(self.plotbuffer)+1)
self.ax1.set_ylim((min(self.plotbuffer1)-1), max(self.plotbuffer1)+1)
# Update the decoded sequence
self.ax.set_title('Lumiannce - Detected Sequence: '+ self.decoder.morseSequence)
self.ax1.set_title('Filtered Signal - Decoded Sequence: '+ self.decoder.decodedLetters)
return self.line
# appends data to the ringbuffer
def addData(self, signal):
self.ringbuffer.append(signal)
self.filterOutput = self.iirFilter.Filter(signal)
self.ringbuffer1.append(self.filterOutput)
self.decoder.Detect(self.filterOutput)
| [
"matplotlib.animation.FuncAnimation",
"iir_filter.GenerateHighPassCoeff",
"numpy.append",
"numpy.zeros",
"iir_filter.IIRFilter",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots",
"morse_decoder.MorseCodeDecoder"
] | [((349, 364), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {}), '(2)\n', (361, 364), True, 'import matplotlib.pyplot as plt\n'), ((373, 405), 'matplotlib.pyplot.title', 'plt.title', (['f"""Channel: {channel}"""'], {}), "(f'Channel: {channel}')\n", (382, 405), True, 'import matplotlib.pyplot as plt\n'), ((517, 530), 'numpy.zeros', 'np.zeros', (['(800)'], {}), '(800)\n', (525, 530), True, 'import numpy as np\n'), ((558, 571), 'numpy.zeros', 'np.zeros', (['(800)'], {}), '(800)\n', (566, 571), True, 'import numpy as np\n'), ((1498, 1530), 'morse_decoder.MorseCodeDecoder', 'morse_decoder.MorseCodeDecoder', ([], {}), '()\n', (1528, 1530), False, 'import morse_decoder\n'), ((1595, 1620), 'iir_filter.IIRFilter', 'iir_filter.IIRFilter', (['sos'], {}), '(sos)\n', (1615, 1620), False, 'import iir_filter\n'), ((1753, 1813), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['self.fig', 'self.update'], {'interval': '(100)'}), '(self.fig, self.update, interval=100)\n', (1776, 1813), True, 'import matplotlib.animation as animation\n'), ((1930, 1973), 'numpy.append', 'np.append', (['self.plotbuffer', 'self.ringbuffer'], {}), '(self.plotbuffer, self.ringbuffer)\n', (1939, 1973), True, 'import numpy as np\n'), ((2001, 2046), 'numpy.append', 'np.append', (['self.plotbuffer1', 'self.ringbuffer1'], {}), '(self.plotbuffer1, self.ringbuffer1)\n', (2010, 2046), True, 'import numpy as np\n'), ((1342, 1428), 'iir_filter.GenerateHighPassCoeff', 'iir_filter.GenerateHighPassCoeff', (['cutOfFrequencyHighPass', 'samplingFrequency', 'order'], {}), '(cutOfFrequencyHighPass, samplingFrequency,\n order)\n', (1374, 1428), False, 'import iir_filter\n')] |
from library.utilities.misc import parse_link, get_resource_type
from library.utilities.logger import get_logger
from google.auth.transport.requests import AuthorizedSession
from google.auth import default
from requests import codes, exceptions
import time
import re
from json import JSONDecodeError
class GcloudRestLibBase:
"""
Expected to house all common functionality for Rest Client
"""
operation_polling_time_sleep_secs = 5
def __init__(self, project_id, **kwargs):
# Add authentication check here
# Add common object instantiation
# TODO: fetch the default project from the APPLICATION CREDENTIALS JSON
self.project_id = project_id
self.credentials, self.default_project_id = default(scopes=['https://www.googleapis.com/auth/cloud-platform'])
self.session = AuthorizedSession(self.credentials)
self.logger = get_logger(__name__)
def wait_for_operation(self, operation, max_timeout_mins=15):
"""
:param operation: the operation object
:param max_timeout_mins:
:return: Bool(status), Dict(Last Operation Recieved)
"""
# TODO: Implement max_timeout_mins
operation_status = operation['status']
self.logger.debug('Beginning to poll for operation')
operation_self_link = operation['selfLink']
start_time = time.time()
while operation_status != 'DONE' and time.time() - start_time < max_timeout_mins * 60:
self.logger.debug(f'Sleeping for {self.operation_polling_time_sleep_secs} secs before polling')
time.sleep(self.operation_polling_time_sleep_secs)
self.logger.debug("Making post call for operation status on wait endpoint ..")
operation_response = self.session.post(operation_self_link + "/wait")
self.logger.error(f'Recieved operation response: {operation_response.text}')
if not operation_response.status_code == codes.ok:
if operation_response.status_code == codes.not_found:
self.logger.debug('Apprehending 404 not found as ')
return True
self.logger.error(f'Error while polling for operation')
return False
operation = operation_response.json()
operation_status = operation['status']
self.logger.debug(operation)
error = operation.get('error')
if error:
self.logger.exception('Error while polling for operation: {}'.format(error))
return False
self.logger.debug(f"Final operation status: {operation}")
return operation_status == 'DONE'
def delete_self_link(self, self_link, delete_dependencies=True):
max_retries = 5
count = 0
self.logger.debug(f'Received request to delete: {self_link}')
while count < max_retries:
count += 1
self.logger.debug(f"Attempt #{count}")
if count > 1:
self.logger.debug(f"Sleeping for {self.operation_polling_time_sleep_secs} secs before re-attempting")
time.sleep(self.operation_polling_time_sleep_secs)
del_response = self.session.delete(self_link)
self.logger.info(del_response.status_code)
# Apprehending 404 not_found as resource already deleted
if del_response.status_code == codes.not_found:
self.logger.info("Apprehending 404 as resource already deleted")
return True
# If response == 400, trying to check if it a resourceInUseByAnotherResource and resolve it
if del_response.status_code == codes.bad_request:
self.logger.debug("Bad Request on delete request. Trying to debug it .. ")
self.logger.debug(f"Response text : {del_response.text}")
try:
self.logger.debug("Decoding Error JSON")
response_json = del_response.json()
for error in response_json['error'].get('errors', []):
if error.get('reason', "") == "resourceInUseByAnotherResource" \
and ("used" in error.get("message", "") or "depend" in error.get("message", "")):
error_message = error['message']
possible_dependency_search = re.search(pattern=r"'[0-9a-zA-Z_/-]+'$", string=error_message)
self.logger.debug('Using regex to figure out dependency')
if possible_dependency_search:
dependent_resource = possible_dependency_search.group()
old = self_link.split('/')
new = dependent_resource.strip("'").split('/')
for i in range(old.index(new[0])):
new.insert(i, old[i])
dependent_resource_self_link = '/'.join(new)
self.logger.info(f"Dependent resource self_link : {dependent_resource_self_link}")
self.logger.info("Attempting to delete it .. ")
self.delete_self_link(dependent_resource_self_link)
else:
self.logger.debug('Dependency could not be identified using regex')
else:
self.logger.debug('The error message is not known .. cant debug that')
except ValueError:
self.logger.exception('ValueError while reading JSON from response body')
pass
except KeyError:
pass
# Checking if an operation object was returned
try:
response_json = del_response.json()
if "operation" in response_json.get("kind", ""):
return self.wait_for_operation(operation=response_json)
except ValueError:
pass
# Anything in 400 and 500 series
try:
del_response.raise_for_status()
except exceptions.HTTPError as ex:
self.logger.exception(del_response.text)
raise ex
return True
| [
"google.auth.default",
"time.sleep",
"library.utilities.logger.get_logger",
"google.auth.transport.requests.AuthorizedSession",
"time.time",
"re.search"
] | [((747, 813), 'google.auth.default', 'default', ([], {'scopes': "['https://www.googleapis.com/auth/cloud-platform']"}), "(scopes=['https://www.googleapis.com/auth/cloud-platform'])\n", (754, 813), False, 'from google.auth import default\n'), ((837, 872), 'google.auth.transport.requests.AuthorizedSession', 'AuthorizedSession', (['self.credentials'], {}), '(self.credentials)\n', (854, 872), False, 'from google.auth.transport.requests import AuthorizedSession\n'), ((895, 915), 'library.utilities.logger.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (905, 915), False, 'from library.utilities.logger import get_logger\n'), ((1373, 1384), 'time.time', 'time.time', ([], {}), '()\n', (1382, 1384), False, 'import time\n'), ((1600, 1650), 'time.sleep', 'time.sleep', (['self.operation_polling_time_sleep_secs'], {}), '(self.operation_polling_time_sleep_secs)\n', (1610, 1650), False, 'import time\n'), ((3128, 3178), 'time.sleep', 'time.sleep', (['self.operation_polling_time_sleep_secs'], {}), '(self.operation_polling_time_sleep_secs)\n', (3138, 3178), False, 'import time\n'), ((1430, 1441), 'time.time', 'time.time', ([], {}), '()\n', (1439, 1441), False, 'import time\n'), ((4398, 4459), 're.search', 're.search', ([], {'pattern': '"""\'[0-9a-zA-Z_/-]+\'$"""', 'string': 'error_message'}), '(pattern="\'[0-9a-zA-Z_/-]+\'$", string=error_message)\n', (4407, 4459), False, 'import re\n')] |
#!/usr/bin/env python3
# This is a script for using circuitpython's repo to make pyi files for each board type.
# These need to be bundled with the extension, which means that adding new boards is still
# a new release of the extension.
# import mypy
import json
import pathlib
import re
def main():
repo_root = pathlib.Path(__file__).resolve().parent.parent
# First thing we want to do is store in memory, the contents of
# ./stubs/board/__init__.py so we can append it (partially) to
# every other board.
# See [Issue #26](https://github.com/joedevivo/vscode-circuitpython/issues/26)
# for more on this.
board_stub = repo_root / "stubs" / "board" / "__init__.pyi"
generic_stubs = parse_generic_stub(board_stub)
circuitpy_repo_root = repo_root / "circuitpython"
boards = process_boards(repo_root, circuitpy_repo_root, generic_stubs)
json_file = repo_root / "boards" / "metadata.json"
with open(json_file, "w") as metadata:
json.dump(boards, metadata)
def parse_generic_stub(board_stub):
generic_stubs = {}
def_re = re.compile(r"def ([^\(]*)\(.*")
with open(board_stub) as stub:
stubs = stub.readlines()
# Find the first line number and name of each definition
f = []
names = []
for i, s in enumerate(stubs):
match = def_re.match(s)
if match is not None:
f.append(i)
names.append(match[1])
f.append(len(stubs))
# Iterate the line ranges
for name, start, end in zip(names, f, f[1:]):
generic_stubs[name] = "".join(stubs[start:end])
return generic_stubs
def normalize_vid_pid(vid_or_pid: str):
"""Make a hex string all uppercase except for the 0x."""
return vid_or_pid.upper().replace("0X", "0x")
_PIN_DEF_RE = re.compile(
r"\s*{\s*MP_ROM_QSTR\(MP_QSTR_(?P<name>[^\)]*)\)\s*,\s*MP_ROM_PTR\((?P<value>[^\)]*)\).*"
)
def parse_pins(generic_stubs, pins: pathlib.Path, board_stubs):
imports = set()
stub_lines = []
with open(pins) as p:
for line in p:
pin = _PIN_DEF_RE.match(line)
if pin is None:
continue
pin_name = pin.group("name")
if pin_name in generic_stubs:
board_stubs[pin_name] = generic_stubs[pin_name]
if "busio" in generic_stubs[pin_name]:
imports.add("busio")
continue
pin_type = None
# sometimes we can guess better based on the value
pin_value = pin.group("value")
if pin_value.startswith("&displays"):
imports.add("displayio")
pin_type = "displayio.Display"
elif pin_value.startswith("&pin_"):
imports.add("microcontroller")
pin_type = "microcontroller.Pin"
if pin_type is None:
imports.add("typing")
pin_type = "typing.Any"
stub_lines.append("{0}: {1} = ...\n".format(pin_name, pin_type))
imports_string = "".join("import %s\n" % x for x in sorted(imports))
stubs_string = "".join(stub_lines)
return imports_string, stubs_string
# now, while we build the actual board stubs, replace any line that starts with ` $name:` with value
def process_boards(repo_root, circuitpy_repo_root, generic_stubs):
boards = []
board_configs = circuitpy_repo_root.glob("ports/*/boards/*/mpconfigboard.mk")
for config in board_configs:
b = config.parent
site_path = b.stem
print(config)
pins = b / "pins.c"
if not config.is_file() or not pins.is_file():
continue
usb_vid = ""
usb_pid = ""
usb_product = ""
usb_manufacturer = ""
with open(config) as conf:
for line in conf:
if line.startswith("USB_VID"):
usb_vid = line.split("=")[1].split("#")[0].strip('" \n')
elif line.startswith("USB_PID"):
usb_pid = line.split("=")[1].split("#")[0].strip('" \n')
elif line.startswith("USB_PRODUCT"):
usb_product = line.split("=")[1].split("#")[0].strip('" \n')
elif line.startswith("USB_MANUFACTURER"):
usb_manufacturer = line.split("=")[1].split("#")[0].strip('" \n')
# CircuitPython 7 BLE-only boards
elif line.startswith("CIRCUITPY_CREATOR_ID"):
usb_vid = line.split("=")[1].split("#")[0].strip('" \n')
elif line.startswith("CIRCUITPY_CREATION_ID"):
usb_pid = line.split("=")[1].split("#")[0].strip('" \n')
if usb_manufacturer == "Nadda-Reel Company LLC":
continue
usb_vid = normalize_vid_pid(usb_vid)
usb_pid = normalize_vid_pid(usb_pid)
# CircuitPython 7 BLE-only boards have no usb manuf/product
description = site_path
if usb_manufacturer and usb_product:
description = "{0} {1}".format(usb_manufacturer, usb_product)
board = {
"vid": usb_vid,
"pid": usb_pid,
"product": usb_product,
"manufacturer": usb_manufacturer,
"site_path": site_path,
"description": description,
}
boards.append(board)
print(
"{0}:{1} {2}, {3}".format(usb_vid, usb_pid, usb_manufacturer, usb_product)
)
board_pyi_path = repo_root / "boards" / usb_vid / usb_pid
board_pyi_path.mkdir(parents=True, exist_ok=True)
board_pyi_file = board_pyi_path / "board.pyi"
# We're going to put the common stuff from the generic board stub at the
# end of the file, so we'll collect them after the loop
board_stubs = {}
with open(board_pyi_file, "w") as outfile:
imports_string, stubs_string = parse_pins(generic_stubs, pins, board_stubs)
outfile.write("from __future__ import annotations\n")
outfile.write(imports_string)
# start of module doc comment
outfile.write('"""\n')
outfile.write("board {0}\n".format(board["description"]))
outfile.write(
"https://circuitpython.org/boards/{0}\n".format(board["site_path"])
)
outfile.write('"""\n')
# start of actual stubs
outfile.write(stubs_string)
for p in board_stubs:
outfile.write("{0}\n".format(board_stubs[p]))
return boards
if __name__ == "__main__":
main()
| [
"pathlib.Path",
"json.dump",
"re.compile"
] | [((1834, 1954), 're.compile', 're.compile', (['"""\\\\s*{\\\\s*MP_ROM_QSTR\\\\(MP_QSTR_(?P<name>[^\\\\)]*)\\\\)\\\\s*,\\\\s*MP_ROM_PTR\\\\((?P<value>[^\\\\)]*)\\\\).*"""'], {}), "(\n '\\\\s*{\\\\s*MP_ROM_QSTR\\\\(MP_QSTR_(?P<name>[^\\\\)]*)\\\\)\\\\s*,\\\\s*MP_ROM_PTR\\\\((?P<value>[^\\\\)]*)\\\\).*'\n )\n", (1844, 1954), False, 'import re\n'), ((1087, 1119), 're.compile', 're.compile', (['"""def ([^\\\\(]*)\\\\(.*"""'], {}), "('def ([^\\\\(]*)\\\\(.*')\n", (1097, 1119), False, 'import re\n'), ((985, 1012), 'json.dump', 'json.dump', (['boards', 'metadata'], {}), '(boards, metadata)\n', (994, 1012), False, 'import json\n'), ((319, 341), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (331, 341), False, 'import pathlib\n')] |
"""
It takes to peleffy's molecule representations, aligns them and calculates
the RMSD using RDKit.
"""
from multiprocessing import Pool
from copy import deepcopy
from rdkit.Chem import AllChem
from rdkit import Chem
class Aligner(object):
"""
It aligns two molecule representations as much as possible and
calculates the resulting RMSD difference.
"""
def __init__(self, prb_mol, ref_mol, n_proc=1, max_iter=1000,
threshold=1.0):
"""
It initializes an Aligner object.
Parameters
----------
prb_mol : an rdkit.Chem.rdchem.Mol object
The RDKit's Molecule object to use as probe
ref_mol : an rdkit.Chem.rdchem.Mol object
The RDKit's Molecule object to use as reference
n_proc : int
The number of processors to employ for such alignment
max_iter : int
The maximum number of conformations to generate during the
alignment process
threshold : float
The minimum threshold that is used as exit condition
"""
# First checkings
if not isinstance(prb_mol, Chem.rdchem.Mol):
raise TypeError('Wrong type for probe molecule')
if not isinstance(ref_mol, Chem.rdchem.Mol):
raise TypeError('Wrong type for reference molecule')
self._prb_mol = prb_mol
self._ref_mol = ref_mol
self._n_proc = n_proc
self._max_iter = max_iter
self._threshold = threshold
self._results = None
def _get_lowest_RMSD(self, seed):
prb_mol = deepcopy(self._prb_mol)
_ = AllChem.EmbedMultipleConfs(prb_mol, 15, randomSeed=seed)
best_RMSD = None
best_conf = None
for i in range(0, prb_mol.GetNumConformers()):
rmsd = Chem.rdMolAlign.AlignMol(prbMol=prb_mol,
refMol=self._ref_mol,
prbCid=i)
if best_RMSD is None or rmsd < best_RMSD:
best_RMSD = rmsd
best_conf = i
return {'best_RMSD': best_RMSD, 'best_conformation': best_conf,
'molecule': prb_mol}
def align(self):
match_found = False
self._results = list()
# TODO save all results
for iteration_id in range(0, self._max_iter, self._n_proc):
with Pool(self._n_proc) as pool:
results = pool.map(self._get_lowest_RMSD,
[iteration_id + i + 1 for i
in range(0, self._n_proc)])
for result in results:
self._results.append(result)
if result['best_RMSD'] < self._threshold:
match_found = True
if match_found:
break
def get_results(self):
if self._results is not None:
return self._results
else:
return None
def get_best_results(self):
if self._results is not None:
return sorted(self._results, key=lambda d: d['best_RMSD'])[0]
else:
return None
def to_pdb(self, prb_pdb_name, ref_pdb_name):
best_results = self.get_best_results()
mol = best_results['molecule']
cid = best_results['best_conformation']
Chem.rdmolfiles.MolToPDBFile(mol, prb_pdb_name, confId=cid)
Chem.rdmolfiles.MolToPDBFile(self._ref_mol, ref_pdb_name)
| [
"rdkit.Chem.AllChem.EmbedMultipleConfs",
"multiprocessing.Pool",
"rdkit.Chem.rdmolfiles.MolToPDBFile",
"copy.deepcopy",
"rdkit.Chem.rdMolAlign.AlignMol"
] | [((1607, 1630), 'copy.deepcopy', 'deepcopy', (['self._prb_mol'], {}), '(self._prb_mol)\n', (1615, 1630), False, 'from copy import deepcopy\n'), ((1643, 1699), 'rdkit.Chem.AllChem.EmbedMultipleConfs', 'AllChem.EmbedMultipleConfs', (['prb_mol', '(15)'], {'randomSeed': 'seed'}), '(prb_mol, 15, randomSeed=seed)\n', (1669, 1699), False, 'from rdkit.Chem import AllChem\n'), ((3394, 3453), 'rdkit.Chem.rdmolfiles.MolToPDBFile', 'Chem.rdmolfiles.MolToPDBFile', (['mol', 'prb_pdb_name'], {'confId': 'cid'}), '(mol, prb_pdb_name, confId=cid)\n', (3422, 3453), False, 'from rdkit import Chem\n'), ((3462, 3519), 'rdkit.Chem.rdmolfiles.MolToPDBFile', 'Chem.rdmolfiles.MolToPDBFile', (['self._ref_mol', 'ref_pdb_name'], {}), '(self._ref_mol, ref_pdb_name)\n', (3490, 3519), False, 'from rdkit import Chem\n'), ((1825, 1897), 'rdkit.Chem.rdMolAlign.AlignMol', 'Chem.rdMolAlign.AlignMol', ([], {'prbMol': 'prb_mol', 'refMol': 'self._ref_mol', 'prbCid': 'i'}), '(prbMol=prb_mol, refMol=self._ref_mol, prbCid=i)\n', (1849, 1897), False, 'from rdkit import Chem\n'), ((2414, 2432), 'multiprocessing.Pool', 'Pool', (['self._n_proc'], {}), '(self._n_proc)\n', (2418, 2432), False, 'from multiprocessing import Pool\n')] |
from data_structures.node import Node
class LinkedList:
def __init__(self):
self.head = None
self.list_size = 0
def is_empty(self):
return self.list_size == 0
def is_full(self):
return False
def __len__(self):
return self.list_size
def size(self):
return self.list_size
def _get_node(self, index):
assert index >= 0 and index < self.list_size, "index out of bounds"
node = self.head
for _ in range(index):
node = node.link
return node
def insert(self, index, item):
if index < 0:
index = 0
elif index > self.list_size:
index = self.list_size
if index == 0:
node = Node(item, self.head)
self.head = node
else:
node = self._get_node(index - 1)
node.link = Node(item, node.link)
self.list_size += 1
def delete(self, index):
assert not self.is_empty(), "list is empty"
assert index >= 0 and index < self.list_size, "index out of bounds"
if index == 0:
self.head = self.head.link
else:
node = self._get_node(index - 1)
node.link = node.link.link
self.list_size -= 1
def __str__(self):
ret = ""
current = self.head
while current is not None:
ret = ret + str(current.item) + ","
current = current.link
return ret
def copy(self):
new_list = LinkedList()
self.copy_aux(self.head, new_list)
return new_list
# complexity of O(N2) cause insert inserts at the end of the list, needs to iterate
def copy_aux(self, current, new_list):
if current is not None:
new_list.insert(len(new_list), current.item)
self.copy_aux(current.link, new_list)
# complexity of O(N) cause insert inserts at the front of the list, no need to iterate
def copy_aux2(self, current, new_list):
if current is not None:
self.copy_aux(current.link, new_list)
new_list.insert(0, current.item)
def periodic_delete(self, n):
# change the pointer of self.head
for _ in range(n):
if self.head is not None:
self.head = self.head.link
current = self.head
while current is not None:
previous = current
current = current.link
# keep the next n-1 unchanged
for _ in range(n - 1):
if current is not None:
previous = current
current = current.link
# delete the next n
for _ in range(n):
if current is not None:
previous.link = current.link
current = current.link
def periodic_delete2(self, n):
self.head = self.get_node(self.head, n)
current = self.head
while current is not None:
# keep the next n-1 unchanged
previous = self.get_node(current, n - 1)
# every time when get_node is called, we need to check if the returned result is None before proceeding next step
if previous is not None:
# delete the next n by changing the pointer of previous to the next n node
current = self.get_node(previous.link, n)
previous.link = current
else:
# if the previous is None, means the next n-1 (at most) is unchanged, so current = None to stop loop
current = None
def get_node(self, current, n):
while current is not None and n > 0:
current = current.link
n -= 1
return current
def double_list(self):
current = self.head
while current is not None:
node = Node(current.item, current.link)
current.link = node
current = current.link.link
def add_sorted(self, item):
if self.head is None:
self.head = Node(item)
self.count += 1
return
current = self.head
previous = None
while current is not None:
if current.item < item:
previous = current
current = current.link
else:
break
if previous is not None:
node = Node(item, previous.link)
previous.link = node
self.count += 1
else:
self.head = Node(item, self.head)
self.count += 1
if __name__ == '__main__':
a_list = LinkedList()
# a_list.insert(0, 13)
# a_list.insert(0, 12)
# a_list.insert(0, 11)
# a_list.insert(0, 10)
# a_list.insert(0, 9)
# a_list.insert(0, 8)
# a_list.insert(0, 7)
# a_list.insert(0, 6)
# a_list.insert(0, 5)
a_list.insert(0, 4)
a_list.insert(0, 3)
a_list.insert(0, 2)
a_list.insert(0, 1)
print(a_list)
# a_list.periodic_delete(4)
# print(a_list)
a_list.double_list()
print(a_list)
| [
"data_structures.node.Node"
] | [((618, 639), 'data_structures.node.Node', 'Node', (['item', 'self.head'], {}), '(item, self.head)\n', (622, 639), False, 'from data_structures.node import Node\n'), ((719, 740), 'data_structures.node.Node', 'Node', (['item', 'node.link'], {}), '(item, node.link)\n', (723, 740), False, 'from data_structures.node import Node\n'), ((3153, 3185), 'data_structures.node.Node', 'Node', (['current.item', 'current.link'], {}), '(current.item, current.link)\n', (3157, 3185), False, 'from data_structures.node import Node\n'), ((3310, 3320), 'data_structures.node.Node', 'Node', (['item'], {}), '(item)\n', (3314, 3320), False, 'from data_structures.node import Node\n'), ((3553, 3578), 'data_structures.node.Node', 'Node', (['item', 'previous.link'], {}), '(item, previous.link)\n', (3557, 3578), False, 'from data_structures.node import Node\n'), ((3645, 3666), 'data_structures.node.Node', 'Node', (['item', 'self.head'], {}), '(item, self.head)\n', (3649, 3666), False, 'from data_structures.node import Node\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import numpy as np
MAXLINE = 10000
MAXFRAME = 10000
def read_xyz(xyz,natoms):
#
fopen = open(xyz,'r')
frames = []
for i in range(MAXLINE):
line = fopen.readline()
if line.strip():
assert int(line.strip().split()[0]) == natoms
line = fopen.readline() # comment
forces, poses = [], []
for j in range(natoms):
line = fopen.readline() #
data = line.strip().split()
poses.append(data[1:4])
forces.append(data[4:7])
poses = np.array(poses,dtype=float)
forces = np.array(forces,dtype=float)
frames.append([poses,forces])
if not line:
break
else:
raise ValueError('Too many lines in %s' %xyz)
fopen.close()
return frames
def read_outcar(outcar='OUTCAR',natoms=100,verbose=True,wdat=False,**kwargs):
# how many steps to read
nframes = MAXFRAME
if kwargs:
if 'nframes' in kwargs.keys():
if kwargs['nframes'] > 0:
nframes = int(kwargs['nframes'])
# read OUTCAR
energy = []
free_energy = []
frames = []
fopen = open(outcar, 'r')
count, flag = 0, True
while flag:
line = fopen.readline()
if line.startswith(' POSITION'):
fopen.readline() # segment line ---...---
poses, forces = [], []
for n in range(natoms):
data = fopen.readline().strip().split()
poses.append(data[:3]) # x y z
forces.append(data[3:]) # fx fy fz
poses = np.array(poses, dtype=float)
forces = np.array(forces, dtype=float)
frames.append((poses, forces))
count += 1
if line.strip().startswith('FREE ENERGIE OF THE ION-ELECTRON SYSTEM'):
fopen.readline() # segment line ---...---
# free energy F
data = fopen.readline().strip().split()
free_energy.append(float(data[-2]))
fopen.readline() # blank line
# energy E0
data = fopen.readline().strip().split()
energy.append(float(data[-1]))
if count == nframes or (not line):
flag = False
fopen.close()
return frames, energy, free_energy
def read_poscar(poscar='POSCAR',format='vasp5',verbose=True):
"""read POSCAR"""
with open(poscar, 'r') as reader:
lines = reader.readlines()
lines = [line.strip().split() for line in lines]
fname = ' '.join(lines[0]) # file description
scaling = float(lines[1][0])
lattice = np.array(lines[2:5], dtype=float)
symbols = lines[5]
numbers = [int(i) for i in lines[6]]
natoms = np.sum(numbers)
dyntype = ' '.join(lines[7]) # dynamic type
coorsys = lines[8] # coordinate system
poses, fixes = [], []
for coord in lines[9:9+natoms]:
poses.append(coord[:3])
fixes.append(coord[3:])
poses = np.array(poses, dtype=float)
# TODO: read velocity
if verbose:
print('Successfully read POSCAR, taking it as the reference...')
return fname, scaling, lattice, symbols, numbers, poses, fixes
def write_xyz(symbols,lattice,positions,forces,energy):
"""
positions in cartesian (AA) and forces in eV/AA
energy is always force-consistent energy for training purpose
"""
natoms = len(symbols)
content = "{:<d}\n".format(natoms)
content += ("Lattice=\""+"{:<.2f} "*9+"\""+\
" Properties=species:S:1:pos:R:3:forces:R:3 pbc=\"T T T\""+\
" energy={:<12.6f}"+"\n") \
.format(*list(lattice),energy)
for i in range(natoms):
content += ('{:<4s} '+'{:>12.6f} '*6+'\n')\
.format(symbols[i],*list(positions[i]),*list(forces[i]))
return content
def find_outcars():
fname, scaling, lattice, formula, numbers, refposes, fixes = \
read_poscar('POSCAR')
symbols = []
for s, n in zip(formula, numbers):
symbols.extend([s]*n)
print(symbols)
#
outcars = []
cur_dir = './POSCARs/'
for cur_fname in os.listdir(cur_dir):
if cur_fname.startswith('OUTCAR_'):
outcars.append(cur_fname)
outcars.sort(key = lambda fname: int(fname.split('_')[-1]))
print(outcars)
outcars = outcars[1::3]
print(outcars)
content = ''
for outcar in outcars:
print(os.path.abspath(outcar))
outcar = os.path.join(cur_dir,outcar)
frames, energy, free_energy = read_outcar(outcar=outcar,natoms=np.sum(numbers))
positions, forces = frames[0][0], frames[0][1]
en = free_energy[0]
content += write_xyz(symbols,lattice.ravel(),positions,forces,en)
with open('data.xyz', 'w') as writer:
writer.write(content)
return
if __name__ == '__main__':
#frames = read_xyz('bonds.xyz',2)
#print(frames[0])
find_outcars()
| [
"os.listdir",
"os.path.join",
"numpy.array",
"numpy.sum",
"os.path.abspath"
] | [((2693, 2726), 'numpy.array', 'np.array', (['lines[2:5]'], {'dtype': 'float'}), '(lines[2:5], dtype=float)\n', (2701, 2726), True, 'import numpy as np\n'), ((2804, 2819), 'numpy.sum', 'np.sum', (['numbers'], {}), '(numbers)\n', (2810, 2819), True, 'import numpy as np\n'), ((3051, 3079), 'numpy.array', 'np.array', (['poses'], {'dtype': 'float'}), '(poses, dtype=float)\n', (3059, 3079), True, 'import numpy as np\n'), ((4201, 4220), 'os.listdir', 'os.listdir', (['cur_dir'], {}), '(cur_dir)\n', (4211, 4220), False, 'import os\n'), ((4537, 4566), 'os.path.join', 'os.path.join', (['cur_dir', 'outcar'], {}), '(cur_dir, outcar)\n', (4549, 4566), False, 'import os\n'), ((637, 665), 'numpy.array', 'np.array', (['poses'], {'dtype': 'float'}), '(poses, dtype=float)\n', (645, 665), True, 'import numpy as np\n'), ((686, 715), 'numpy.array', 'np.array', (['forces'], {'dtype': 'float'}), '(forces, dtype=float)\n', (694, 715), True, 'import numpy as np\n'), ((1688, 1716), 'numpy.array', 'np.array', (['poses'], {'dtype': 'float'}), '(poses, dtype=float)\n', (1696, 1716), True, 'import numpy as np\n'), ((1738, 1767), 'numpy.array', 'np.array', (['forces'], {'dtype': 'float'}), '(forces, dtype=float)\n', (1746, 1767), True, 'import numpy as np\n'), ((4495, 4518), 'os.path.abspath', 'os.path.abspath', (['outcar'], {}), '(outcar)\n', (4510, 4518), False, 'import os\n'), ((4637, 4652), 'numpy.sum', 'np.sum', (['numbers'], {}), '(numbers)\n', (4643, 4652), True, 'import numpy as np\n')] |
import os
def get_path(path):
return os.path.split(path)[0]
def get_filename(path):
return os.path.splitext(os.path.basename(path))[0]
def extract_turns(input):
"""
Génère automatiquement un fichier contenant le nombre de tours par locuteur à partir du fichier de tours
:param input:
:return:
"""
f = open(input,mode="r",encoding="utf-8")
content = f.read()
output_basename = get_filename(input)
outputFile1 = open(output_basename+"_turns.csv",mode="w",encoding="utf-8")
outputFile1.write("id_turn;start_time;end_time;speaker;episode;trans\n")
lines = content.split("\n")
turns_by_spk={}
id_turn=0
for line in lines:
if len(line)>1:
col = line.split("\t")
episode, speaker, start_time, end_time = col[0].split("#")
outputFile1.write(str(id_turn)+";"+start_time+";"+end_time+";"+speaker+";"+episode+";"+col[1]+"\n")
id_turn+=1
if speaker not in turns_by_spk.keys():
turns_by_spk[speaker] = {}
turns_by_spk[speaker]["count"]=1
turns_by_spk[speaker]["length"]=float(end_time)-float(start_time)
else:
turns_by_spk[speaker]["count"] += 1
turns_by_spk[speaker]["length"]+=float(end_time)-float(start_time)
outputFile2 = open(output_basename+"_turns_by_spk.csv",mode="w",encoding="utf-8")
outputFile2.write("speaker;nb_turn;total_length\n")
for entry in turns_by_spk.keys():
outputFile2.write(entry+";"+str(turns_by_spk[entry]["count"])+";"+str(turns_by_spk[entry]["length"])+"\n")
f.close()
outputFile1.close()
outputFile2.close()
def gender_mapping(filename,gender_file):
"""
Associe un genre à chaque locuteur dans le fichier de tours de parole ou dans le fichier de nombre de tours par locuteur
:param filename: nom du fichiers de tours
:param gender_file: nom du fichier de mapping locuteur-genre
:return:
"""
f = open(gender_file, mode="r", encoding="utf-8")
genders = {}
lines = f.read().split("\n")
for line in lines:
if len(line)>1:
speaker,gender=line.split(" ")
genders[speaker] = gender
f.close()
f = open(filename,mode="r",encoding="utf-8")
outf_basename=get_filename(filename)
outf = open(outf_basename+"_gendered.csv",mode="w",encoding="utf-8")
# si on a un fichier de tours de parole par locuteur
if "_turns_by_spk" in outf_basename:
outf.write("name;gender;nb_turn;total_length\n")
lines = f.read().split("\n")
for line in lines[1:]:
if len(line)>1:
speaker, turn_count, total_length = line.split(";")
if speaker in genders.keys():
gender=genders[speaker]
else:
gender="NA"
outf.write(speaker+";"+gender+";"+turn_count+";"+total_length+"\n")
# si on a le fichier de tours
else:
outf.write("id_turn;start_time;end_time;speaker;gender;episode;trans\n")
lines = f.read().split("\n")
for line in lines[1:]:
if len(line) > 1:
id_turn,start_time,end_time,speaker,episode,trans = line.split(";")
if speaker in genders.keys():
gender = genders[speaker]
else:
gender = "NA"
outf.write(id_turn+";"+start_time+";"+end_time+";"+speaker+";"+gender+";"+episode+";"+trans+"\n")
f.close()
outf.close()
def add_id_class(filename,classdict):
f = open(filename, mode="r", encoding="utf-8")
outf_basename = get_filename(filename)
outf = open(outf_basename + "_with_class.csv", mode="w", encoding="utf-8")
# si on a un fichier de tours de parole par locuteur
if "_turns_by_spk" in outf_basename:
outf.write("name;gender;spk_class;nb_turn;total_length\n")
lines = f.read().split("\n")
for line in lines[1:-1]:
speaker, gender, turn_count, total_length = line.split(";")
outf.write(speaker + ";" + gender + ";"+ classdict[(speaker,gender)] + ";" + turn_count + ";" + total_length +"\n")
# si on a le fichier de tours
else:
outf.write("id_turn;start_time;end_time;speaker;gender;spk_class;episode;trans\n")
lines = f.read().split("\n")
for line in lines[1:-1]:
id_turn, start_time, end_time, speaker, gender, episode, trans = line.split(";")
outf.write(
id_turn + ";" + start_time + ";" + end_time + ";" + speaker + ";" + gender + ";"+ classdict[(speaker,gender)] +";" + episode + ";" + trans + "\n")
f.close()
outf.close()
def main() :
extract_turns("text.csv")
gender_mapping("text_turns_by_spk.csv","spk-gender.txt")
gender_mapping("text_turns.csv","spk-gender.txt")
spk_class={}
f=open("text_turns_by_spk_gendered.csv",mode="r",encoding="utf-8")
content=f.read()
lines=content.split("\n")
for line in lines[1:-1]:
speaker, gender, nb_turn, total_length = line.split(";")
if int(nb_turn) < 75:
if float(total_length) < 600:
id_class = "1"
else:
id_class = "3"
else:
if float(total_length) < 600:
id_class = "2"
else:
id_class = "4"
spk_class[(speaker,gender)]=id_class
add_id_class("text_turns_by_spk_gendered.csv",spk_class)
add_id_class("text_turns_gendered.csv",spk_class)
if __name__ == '__main__':
main() | [
"os.path.basename",
"os.path.split"
] | [((42, 61), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (55, 61), False, 'import os\n'), ((119, 141), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (135, 141), False, 'import os\n')] |
# (C) <EMAIL> released under the MIT license (see LICENSE)
import os
import requests
import threading
import time
import sys
import win32con
import win32gui
import common
def threaded_function(thread_data, thread_static, env, addons, hWnd, log):
while not thread_data.stop:
time.sleep(0.01)
will_update_ca_bundle = False
will_update_addons = False
force_update_addons = False
activate_launch = False
with thread_static.lock:
if thread_static.need_update_ca_bundle:
will_update_ca_bundle = common.locate_ca_bundle(env['script_dir']) is None or thread_static.force_update_ca_bundle
thread_static.need_update_ca_bundle = False
thread_static.force_update_ca_bundle = False
if thread_static.need_update_addons:
will_update_addons = True
force_update_addons = thread_static.force_update_addons
thread_static.need_update_addons = False
thread_static.force_update_addons = False
activate_launch = thread_static.queue_launch
thread_static.queue_launch = False
if will_update_ca_bundle:
try:
common.get_node({'src': 'https://curl.haxx.se/ca/cacert.pem', 'dest': 'cacert.pem'}, False, False, env['script_dir'], log)
log.log_ts('CA Bundle updated', info = True)
except:
exc = sys.exc_info()
log.log_ts('{}: {}'.format(exc[0], exc[1]))
win32gui.SendMessage(hWnd, win32con.WM_COMMAND, common.commands.SHOW_LOG, 0)
continue
if will_update_addons:
update_context = {'launch': activate_launch, 'error': False}
common.update_addons(env, addons, log, force_update_addons, update_context)
if update_context['error']:
win32gui.SendMessage(hWnd, win32con.WM_COMMAND, common.commands.SHOW_LOG, 0)
elif update_context['launch']:
win32gui.SendMessage(hWnd, win32con.WM_COMMAND, common.commands.LAUNCH, 0)
class thread_data_type:
stop = False
class thread_static_type:
lock = threading.Lock()
need_update_ca_bundle = True
force_update_ca_bundle = False
need_update_addons = False
force_update_addons = False
queue_launch = False
class updater_thread:
thread = None
thread_data = None
thread_static = thread_static_type()
def start(self, env, hWnd, log):
if self.thread is not None: return
self.thread_data = thread_data_type()
self.thread = threading.Thread(name = 'UpdaterThread', target = threaded_function, args = (self.thread_data, self.thread_static, env, common.list_addons(env, log), hWnd, log))
self.thread.start()
def stop(self):
if self.thread is None: return
if self.thread_data is None: return
self.thread_data.stop = True
self.thread = None
def update_launch(self):
with self.thread_static.lock:
self.thread_static.need_update_addons = True
self.thread_static.queue_launch = True
def update(self):
with self.thread_static.lock:
self.thread_static.need_update_addons = True
def force_update(self):
with self.thread_static.lock:
self.thread_static.need_update_addons = True
self.thread_static.force_update_addons = True
def force_update_ca_bundle(self):
with self.thread_static.lock:
self.thread_static.need_update_ca_bundle = True
self.thread_static.force_update_ca_bundle = True
| [
"win32gui.SendMessage",
"common.locate_ca_bundle",
"threading.Lock",
"common.get_node",
"time.sleep",
"sys.exc_info",
"common.list_addons",
"common.update_addons"
] | [((2006, 2022), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (2020, 2022), False, 'import threading\n'), ((283, 299), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (293, 299), False, 'import time\n'), ((1596, 1671), 'common.update_addons', 'common.update_addons', (['env', 'addons', 'log', 'force_update_addons', 'update_context'], {}), '(env, addons, log, force_update_addons, update_context)\n', (1616, 1671), False, 'import common\n'), ((1123, 1249), 'common.get_node', 'common.get_node', (["{'src': 'https://curl.haxx.se/ca/cacert.pem', 'dest': 'cacert.pem'}", '(False)', '(False)', "env['script_dir']", 'log'], {}), "({'src': 'https://curl.haxx.se/ca/cacert.pem', 'dest':\n 'cacert.pem'}, False, False, env['script_dir'], log)\n", (1138, 1249), False, 'import common\n'), ((1714, 1790), 'win32gui.SendMessage', 'win32gui.SendMessage', (['hWnd', 'win32con.WM_COMMAND', 'common.commands.SHOW_LOG', '(0)'], {}), '(hWnd, win32con.WM_COMMAND, common.commands.SHOW_LOG, 0)\n', (1734, 1790), False, 'import win32gui\n'), ((1327, 1341), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (1339, 1341), False, 'import sys\n'), ((1402, 1478), 'win32gui.SendMessage', 'win32gui.SendMessage', (['hWnd', 'win32con.WM_COMMAND', 'common.commands.SHOW_LOG', '(0)'], {}), '(hWnd, win32con.WM_COMMAND, common.commands.SHOW_LOG, 0)\n', (1422, 1478), False, 'import win32gui\n'), ((1836, 1910), 'win32gui.SendMessage', 'win32gui.SendMessage', (['hWnd', 'win32con.WM_COMMAND', 'common.commands.LAUNCH', '(0)'], {}), '(hWnd, win32con.WM_COMMAND, common.commands.LAUNCH, 0)\n', (1856, 1910), False, 'import win32gui\n'), ((2555, 2583), 'common.list_addons', 'common.list_addons', (['env', 'log'], {}), '(env, log)\n', (2573, 2583), False, 'import common\n'), ((543, 585), 'common.locate_ca_bundle', 'common.locate_ca_bundle', (["env['script_dir']"], {}), "(env['script_dir'])\n", (566, 585), False, 'import common\n')] |
import pytest
@pytest.mark.describe("`var` tag")
class TestGetVarTag:
tag_name = "var"
@pytest.mark.it("Only with variable name")
def test_only_var_name(self, template_factory, context_factory, variable_factory):
variable_factory(5, "V_INT")
t_int = template_factory("V_INT", tag_name=self.tag_name).render(
context_factory()
)
variable_factory(5.5, "V_FLOAT")
t_float = template_factory("V_FLOAT", tag_name=self.tag_name).render(
context_factory()
)
variable_factory(False, "V_BOOL")
t_bool = template_factory("V_BOOL", tag_name=self.tag_name).render(
context_factory()
)
variable_factory("lorem", "V_STR")
t_str = template_factory("V_STR", tag_name=self.tag_name).render(
context_factory()
)
variable_factory(None, "V_NONE")
t_none = template_factory("V_NONE", tag_name=self.tag_name).render(
context_factory()
)
assert "<p>5</p>" in t_int
assert "<p>5.5</p>" in t_float
assert "<p>False</p>" in t_bool
assert "<p>lorem</p>" in t_str
assert "<p>None</p>" in t_none
@pytest.mark.it("Render if True")
def test_render_if_true(self, template_factory, context_factory, variable_factory):
variable_factory(True)
template = template_factory(
"FOO", tag_name=self.tag_name, rit="this is true"
).render(context_factory())
assert "<p>this is true</p>" in template
@pytest.mark.it("Render if False")
def test_render_if_false(self, template_factory, context_factory, variable_factory):
variable_factory(False)
template = template_factory(
"FOO", tag_name=self.tag_name, rif="this is false"
).render(context_factory())
assert "<p>this is false</p>" in template
@pytest.mark.it("Render if None")
def test_render_if_none(self, template_factory, context_factory, variable_factory):
variable_factory(None)
template = template_factory(
"FOO", tag_name=self.tag_name, rin="this is none"
).render(context_factory())
assert "<p>this is none</p>" in template
@pytest.mark.it("Render if not exists")
def test_render_if_not_exists(self, db, template_factory, context_factory):
template = template_factory(
"FOO", tag_name=self.tag_name, rine="this does not exist"
).render(context_factory())
assert "<p>this does not exist</p>" in template
| [
"pytest.mark.describe",
"pytest.mark.it"
] | [((17, 50), 'pytest.mark.describe', 'pytest.mark.describe', (['"""`var` tag"""'], {}), "('`var` tag')\n", (37, 50), False, 'import pytest\n'), ((99, 140), 'pytest.mark.it', 'pytest.mark.it', (['"""Only with variable name"""'], {}), "('Only with variable name')\n", (113, 140), False, 'import pytest\n'), ((1213, 1245), 'pytest.mark.it', 'pytest.mark.it', (['"""Render if True"""'], {}), "('Render if True')\n", (1227, 1245), False, 'import pytest\n'), ((1555, 1588), 'pytest.mark.it', 'pytest.mark.it', (['"""Render if False"""'], {}), "('Render if False')\n", (1569, 1588), False, 'import pytest\n'), ((1902, 1934), 'pytest.mark.it', 'pytest.mark.it', (['"""Render if None"""'], {}), "('Render if None')\n", (1916, 1934), False, 'import pytest\n'), ((2244, 2282), 'pytest.mark.it', 'pytest.mark.it', (['"""Render if not exists"""'], {}), "('Render if not exists')\n", (2258, 2282), False, 'import pytest\n')] |
from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register(OneshirtUser)
admin.site.register(Item)
admin.site.register(Trade)
admin.site.register(PasswordResetRequest)
| [
"django.contrib.admin.site.register"
] | [((86, 119), 'django.contrib.admin.site.register', 'admin.site.register', (['OneshirtUser'], {}), '(OneshirtUser)\n', (105, 119), False, 'from django.contrib import admin\n'), ((120, 145), 'django.contrib.admin.site.register', 'admin.site.register', (['Item'], {}), '(Item)\n', (139, 145), False, 'from django.contrib import admin\n'), ((146, 172), 'django.contrib.admin.site.register', 'admin.site.register', (['Trade'], {}), '(Trade)\n', (165, 172), False, 'from django.contrib import admin\n'), ((173, 214), 'django.contrib.admin.site.register', 'admin.site.register', (['PasswordResetRequest'], {}), '(PasswordResetRequest)\n', (192, 214), False, 'from django.contrib import admin\n')] |
from sabueso.tools.string_pdb_text import is_pdb_text
from sabueso.tools.string_pdb_id import is_pdb_id
from sabueso.tools.string_uniprot_id import is_uniprot_id
def guess_form(string):
output = None
if is_pdb_text(string):
output = 'string:pdb_text'
elif is_pdb_id(string):
output = 'string:pdb_id'
elif is_uniprot_id(string):
output = 'string:uniprot_id'
return output
| [
"sabueso.tools.string_pdb_id.is_pdb_id",
"sabueso.tools.string_pdb_text.is_pdb_text",
"sabueso.tools.string_uniprot_id.is_uniprot_id"
] | [((214, 233), 'sabueso.tools.string_pdb_text.is_pdb_text', 'is_pdb_text', (['string'], {}), '(string)\n', (225, 233), False, 'from sabueso.tools.string_pdb_text import is_pdb_text\n'), ((279, 296), 'sabueso.tools.string_pdb_id.is_pdb_id', 'is_pdb_id', (['string'], {}), '(string)\n', (288, 296), False, 'from sabueso.tools.string_pdb_id import is_pdb_id\n'), ((340, 361), 'sabueso.tools.string_uniprot_id.is_uniprot_id', 'is_uniprot_id', (['string'], {}), '(string)\n', (353, 361), False, 'from sabueso.tools.string_uniprot_id import is_uniprot_id\n')] |
"""
Parser class definition
"""
from exprail.node import NodeType
from exprail import router
from exprail.state import State
class Parser(object):
"""The base class for other parsers"""
def __init__(self, grammar, source):
"""
Initialize the parser.
:param grammar: a grammar object
:param source: a parser object which provides the source token stream
"""
self._grammar = grammar
self._state = grammar.get_initial_state()
self._source = source
self._source.parse()
self._ready = False
self._token = None
self._stacks = {'': []}
def parse(self):
"""
Parse the source stream while the parser has not become ready.
:return: None
"""
self._ready = False
while not self._ready:
token = self._source.get_token()
self._state = router.find_next_state(self._state, token)
self.process_token(token)
def get_token(self):
"""
Get the recently parsed token.
:return: a token object
"""
return self._token
def get_next_token(self):
"""
A convenience method for parsing and getting the last token at one step.
:return: a token object
"""
self.parse()
return self.get_token()
def get_finish_token(self):
"""
Returns with the finish token of the parser.
NOTE: It must be implemented when the parser provides tokens as outputs!
:return: a token object
"""
pass
def show_info(self, message, token):
"""
Show information about the current state of parsing.
:param message: the message of the node
:param token: the current token object
:return: None
"""
print('INFO: {}'.format(message))
def show_error(self, message, token):
"""
Show error in the parsing process. It stops the parsing process.
:param message:
:param token:
:return:
"""
print('ERROR: {}'.format(message))
def transform(self, transformation, token):
"""
Transform the token to an other token.
:param transformation: the transformation described as a string
:param token: the original token object
:return: the transformed token object
"""
return token
def operate(self, operation, token):
"""
Fulfil the required operation.
:param operation: the name of the operation as a string
:param token: the current token
:return: None
"""
pass
def push_stack(self, stack_name, token):
"""
Push the token value onto the stack.
:param stack_name: the name of the stack as a string
:param token: the current token
:return: None
"""
if stack_name not in self._stacks:
self._stacks[stack_name] = []
self._stacks[stack_name].append(token.value)
def clean_stack(self, stack_name, token):
"""
Clean the given stack
:param stack_name: the name of the stack as a string
:param token: the current token
:return: None
"""
self._stacks[stack_name] = []
def process_token(self, token):
"""
Process the token according to the current node.
:param token: a token object
:return: None
"""
node_type = self._state.node.type
node_value = self._state.node.value
if node_type is NodeType.EXPRESSION:
expression_name = self._state.node.value
node_id = self._state.grammar.expressions[expression_name].get_start_node_id()
self._state = State(self._state.grammar, expression_name, node_id, self._state)
elif node_type is NodeType.FINISH:
if self._state.return_state is None:
self._token = self.get_finish_token()
self._ready = True
elif node_type is NodeType.INFO:
self.show_info(node_value, token)
elif node_type is NodeType.ERROR:
self.show_error(node_value, token)
elif node_type is NodeType.TRANSFORMATION:
self._token = self.transform(node_value, token)
elif node_type is NodeType.OPERATION:
self.operate(node_value, token)
elif node_type is NodeType.STACK:
self.push_stack(node_value, token)
elif node_type is NodeType.CLEAN:
self.clean_stack(node_value, token)
elif node_type in [NodeType.TOKEN, NodeType.EXCEPT_TOKEN, NodeType.DEFAULT_TOKEN]:
self._source.parse()
| [
"exprail.state.State",
"exprail.router.find_next_state"
] | [((906, 948), 'exprail.router.find_next_state', 'router.find_next_state', (['self._state', 'token'], {}), '(self._state, token)\n', (928, 948), False, 'from exprail import router\n'), ((3791, 3856), 'exprail.state.State', 'State', (['self._state.grammar', 'expression_name', 'node_id', 'self._state'], {}), '(self._state.grammar, expression_name, node_id, self._state)\n', (3796, 3856), False, 'from exprail.state import State\n')] |
from Bio import SeqIO
import pandas as pd
def subset_unmasked_csv(blast_csv, unmasked_fasta, output):
# blast hits
hits = pd.read_csv(blast_csv)
# keep query id until first whitespace
queryid_with_hits = hits["query"].str.extract("(^[^\\s]+)", expand=False)
# convert to list
queryid_with_hits = list(queryid_with_hits)
# reduce to unique ids
queryid_with_hits = set(queryid_with_hits)
# unmasked sequences
raw_qresults = (qresult for qresult in SeqIO.parse(unmasked_fasta, "fasta"))
# filter unmasked using blast hits
filtered_records = (
qresult for qresult in raw_qresults if qresult.id in queryid_with_hits
)
# write to fasta
SeqIO.write(filtered_records, output, "fasta")
subset_unmasked_csv(
blast_csv=snakemake.input[0],
unmasked_fasta=snakemake.input[1],
output=snakemake.output[0],
)
| [
"Bio.SeqIO.write",
"Bio.SeqIO.parse",
"pandas.read_csv"
] | [((132, 154), 'pandas.read_csv', 'pd.read_csv', (['blast_csv'], {}), '(blast_csv)\n', (143, 154), True, 'import pandas as pd\n'), ((700, 746), 'Bio.SeqIO.write', 'SeqIO.write', (['filtered_records', 'output', '"""fasta"""'], {}), "(filtered_records, output, 'fasta')\n", (711, 746), False, 'from Bio import SeqIO\n'), ((488, 524), 'Bio.SeqIO.parse', 'SeqIO.parse', (['unmasked_fasta', '"""fasta"""'], {}), "(unmasked_fasta, 'fasta')\n", (499, 524), False, 'from Bio import SeqIO\n')] |
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from Platforms.Web.index import WebIndex
import json
from aiohttp.web import Response, Request
async def apiDiscordGuildUnknown(cls:"WebIndex", WebRequest:Request, **kwargs:dict) -> Response:
"""
Takes from kwargs:
msg:str
guild_id:str
guild_name:str
"""
res:dict = dict(status=400, error="discord_guild_unknown")
guild_id:str = kwargs.get("guild_id", "")
if guild_id:
res["guild_id"] = guild_id
guild_name:str = kwargs.get("guild_name", "")
if guild_name:
res["guild_name"] = guild_name
# build message
default_msg:str = "Could not find a phaaze known guild"
if guild_name:
default_msg += f" with name '{guild_name}'"
if guild_id:
default_msg += f" (Guild ID:{guild_id})"
msg:str = kwargs.get("msg", default_msg)
res["msg"] = msg
cls.Web.BASE.Logger.debug(f"(API/Discord) 400: {WebRequest.path} | {msg}", require="api:400")
return cls.response(
text=json.dumps( res ),
content_type="application/json",
status=400
)
async def apiDiscordMissingPermission(cls:"WebIndex", WebRequest:Request, **kwargs:dict) -> Response:
"""
Takes from kwargs:
msg:str
user_id:str
user_name:str
guild_id:str
guild_name:str
"""
res:dict = dict(status=400, error="discord_missing_permission")
user_id:str = kwargs.get("user_id", "")
if user_id:
res["user_id"] = user_id
user_name:str = kwargs.get("user_name", "")
if user_name:
res["user_name"] = user_name
guild_id:str = kwargs.get("guild_id", "")
if guild_id:
res["guild_id"] = guild_id
guild_name:str = kwargs.get("guild_name", "")
if guild_name:
res["guild_name"] = guild_name
# build message
default_msg:str = "Missing 'administrator' or 'manage_guild' permission"
if user_name:
default_msg += f" for user '{user_name}'"
if guild_name:
default_msg += f" on guild '{guild_name}'"
if user_id:
default_msg += f" (User ID:{user_id})"
if guild_id:
default_msg += f" (Guild ID:{guild_id})"
msg:str = kwargs.get("msg", default_msg)
res["msg"] = msg
cls.Web.BASE.Logger.debug(f"(API/Discord) 400 Missing Permission: {WebRequest.path} | {msg}", require="api:400")
return cls.response(
text=json.dumps( res ),
content_type="application/json",
status=400
)
async def apiDiscordMemberNotFound(cls:"WebIndex", WebRequest:Request, **kwargs:dict) -> Response:
"""
Takes from kwargs:
msg:str
user_id:str
user_name:str
guild_id:str
guild_name:str
"""
res:dict = dict(status=404, error="discord_member_not_found")
user_id:str = kwargs.get("user_id", "")
if user_id:
res["user_id"] = user_id
user_name:str = kwargs.get("user_name", "")
if user_name:
res["user_name"] = user_name
guild_id:str = kwargs.get("guild_id", "")
if guild_id:
res["guild_id"] = guild_id
guild_name:str = kwargs.get("guild_name", "")
if guild_name:
res["guild_name"] = guild_name
# build message
default_msg:str = "Could not find a valid member"
if guild_name:
default_msg += f" on guild '{guild_name}'"
if user_name:
default_msg += f" with name '{user_name}'"
if guild_id:
default_msg += f" (Guild ID: {guild_id})"
if user_id:
default_msg += f" (User ID: {user_id})"
msg:str = kwargs.get("msg", default_msg)
res["msg"] = msg
cls.Web.BASE.Logger.debug(f"(API/Discord) 400 Member not Found: {WebRequest.path} | {msg}", require="api:404")
return cls.response(
text=json.dumps( res ),
content_type="application/json",
status=404
)
async def apiDiscordRoleNotFound(cls:"WebIndex", WebRequest:Request, **kwargs:dict) -> Response:
"""
Takes from kwargs:
msg:str
role_id:str
role_name:str
guild_id:str
guild_name:str
"""
res:dict = dict(status=404, error="discord_role_not_found")
role_id:str = kwargs.get("role_id", "")
if role_id:
res["role_id"] = role_id
role_name:str = kwargs.get("role_name", "")
if role_name:
res["role_name"] = role_name
guild_id:str = kwargs.get("guild_id", "")
if guild_id:
res["guild_id"] = guild_id
guild_name:str = kwargs.get("guild_name", "")
if guild_name:
res["guild_name"] = guild_name
# build message
default_msg:str = "Could not find a valid role"
if guild_name:
default_msg += f" on guild '{guild_name}'"
if role_name:
default_msg += f" with name '{role_name}'"
if guild_id:
default_msg += f" (Guild ID:{guild_id})"
if role_id:
default_msg += f" (Role ID:{role_id})"
msg:str = kwargs.get("msg", default_msg)
res["msg"] = msg
cls.Web.BASE.Logger.debug(f"(API/Discord) 400 Role not Found: {WebRequest.path} | {msg}", require="api:404")
return cls.response(
text=json.dumps( res ),
content_type="application/json",
status=404
)
async def apiDiscordChannelNotFound(cls:"WebIndex", WebRequest:Request, **kwargs:dict) -> Response:
"""
Takes from kwargs:
msg:str
channel_id:str
channel_name:str
guild_id:str
guild_name:str
"""
res:dict = dict(status=404, error="discord_channel_not_found")
channel_id:str = kwargs.get("channel_id", "")
if channel_id:
res["channel_id"] = channel_id
channel_name:str = kwargs.get("channel_name", "")
if channel_name:
res["channel_name"] = channel_name
guild_id:str = kwargs.get("guild_id", "")
if guild_id:
res["guild_id"] = guild_id
guild_name:str = kwargs.get("guild_name", "")
if guild_name:
res["guild_name"] = guild_name
# build message
default_msg:str = "Could not find a valid channel"
if guild_name:
default_msg += f" on guild '{guild_name}'"
if channel_name:
default_msg += f" with name '{channel_name}'"
if guild_id:
default_msg += f" (Guild ID:{guild_id})"
if channel_id:
default_msg += f" (Channel ID:{channel_id})"
msg:str = kwargs.get("msg", default_msg)
res["msg"] = msg
cls.Web.BASE.Logger.debug(f"(API/Discord) 400 Channel not Found: {WebRequest.path} | {msg}", require="api:404")
return cls.response(
text=json.dumps( res ),
content_type="application/json",
status=404
)
| [
"json.dumps"
] | [((948, 963), 'json.dumps', 'json.dumps', (['res'], {}), '(res)\n', (958, 963), False, 'import json\n'), ((2182, 2197), 'json.dumps', 'json.dumps', (['res'], {}), '(res)\n', (2192, 2197), False, 'import json\n'), ((3389, 3404), 'json.dumps', 'json.dumps', (['res'], {}), '(res)\n', (3399, 3404), False, 'import json\n'), ((4586, 4601), 'json.dumps', 'json.dumps', (['res'], {}), '(res)\n', (4596, 4601), False, 'import json\n'), ((5846, 5861), 'json.dumps', 'json.dumps', (['res'], {}), '(res)\n', (5856, 5861), False, 'import json\n')] |
import sqlite3
import matplotlib as mpl
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import networkx as nx
from sqlitedict import SqliteDict
gene = "phpt1"
transcript = "ENST00000463215"
sqlite_path_organism = "homo_sapiens.v2.sqlite"
sqlite_path_reads = ["SRR2433794.sqlite"]
def get_gene_info(gene_id, sqlite_path_organism):
# get the transcript ID, exon junctions and transcript sequence from the specified sqlite file for a given gene ID
gene_id = gene_id.upper()
conn = sqlite3.connect(sqlite_path_organism)
c = conn.cursor()
c.execute(
"SELECT transcript, exon_junctions, sequence FROM transcripts WHERE gene = (:gene);",
{"gene": gene_id},
)
gene_info = c.fetchall()
return gene_info
def get_genes_principal(gene_id, sqlite_path_organism):
# get the transcript ID for the given genes principal isoform as per APPRIS
gene_id = gene_id.upper()
conn = sqlite3.connect(sqlite_path_organism)
c = conn.cursor()
c.execute(
"SELECT transcript FROM transcripts " "WHERE gene = (:gene) AND principal = 1;",
{"gene": gene_id},
)
principal_transcript = c.fetchall()
return principal_transcript[0][0]
def string_to_integer_list(lst):
# Return the list of integers from a list of strings. If the list is empty then return 0
if lst[0] == "":
lst[0] = "0"
new_lst = []
for i in range(len(lst)):
new_lst.append(int(lst[i]))
return new_lst
def get_3prime_exon(junction_list, sequence):
# Part of the process of producing the sequences of all exons in a transcript.
# this function slices the 3' exon sequence from the transcript sequence returning
# both the exon sequence and the remaining sequence of the transcript
cut_off = junction_list[-1]
exon = sequence[cut_off - 1 :]
seq_less_exon = sequence[: cut_off - 1]
return exon, seq_less_exon
def get_transcript_info(transcript_id, sqlite_path_organism):
# Returns the exon junctions and sequences for the given transcript
transcript_id = transcript_id.upper()
conn = sqlite3.connect(sqlite_path_organism)
c = conn.cursor()
c.execute(
"SELECT exon_junctions, sequence FROM transcripts WHERE transcript = (:trans);",
{"trans": transcript_id},
)
transcript_info = c.fetchall()
return transcript_info
def get_max_min_exon_genomic_positions(exon_info):
# Of all exons starts return the lowest position and highest position of all stops
starts = []
for i, value in enumerate(exon_info):
starts.append(exon_info[i][1])
stops = []
for i, value in enumerate(exon_info):
stops.append(exon_info[i][2])
return min(starts), max(stops)
def exons_of_transcript(transcript_id, sqlite_path_organism):
# For a given transcript return the exon sequences in a list in 5' to 3' direction
exon_lst = []
trans_info = get_transcript_info(transcript_id, sqlite_path_organism)
exon_junctions = trans_info[0][0].split(",")
sequence = trans_info[0][1]
exon_junct_int = string_to_integer_list(exon_junctions)
while len(exon_junct_int) != 0:
exon, sequence = get_3prime_exon(exon_junct_int, sequence)
exon_lst.append(exon)
exon_junct_int.pop(-1)
exon_lst.append(sequence)
return exon_lst[::-1]
def get_exon_coordinate_ranges(sequence, exons, junctions):
# Return list of transcript coordinate ranges (start position, stop position).
end = len(sequence) - 1
junctions.append(end)
ranges = []
for i in range(len(exons)):
ranges.append((sequence.find(exons[i]), junctions[i] - 1))
return ranges
def get_exon_info(gene, sqlite_path_organism, supported_transcripts, filter=True):
# Return the exon starts,stops and transcript for a given gene
gene = gene.upper()
conn = sqlite3.connect(sqlite_path_organism)
c = conn.cursor()
c.execute(
"SELECT transcript, exon_start, exon_stop FROM exons WHERE transcript IN "
"(SELECT transcript FROM transcripts WHERE gene = (:gene));",
{"gene": gene},
)
exon_info = c.fetchall()
if filter:
supported_exon_info = []
for exon in exon_info:
if exon[0] in supported_transcripts:
supported_exon_info.append(exon)
return supported_exon_info
return exon_info
def genomic_exon_coordinate_ranges(
gene, sqlite_path_organism, supported_transcripts, filter=True
):
# create a dictionary with transcript_ids as keys and exon coordinates in tuple (start, stop) as values
# subtract the minumum start codon position of any exon for the gene
exon_info = get_exon_info(
gene, sqlite_path_organism, supported_transcripts, filter=filter
)
minimum, _ = get_max_min_exon_genomic_positions(exon_info)
genomic_coordinates_per_transcript = {}
for exon in exon_info:
if exon[0] not in genomic_coordinates_per_transcript:
genomic_coordinates_per_transcript[exon[0]] = [
(exon[1] - minimum, exon[2] - minimum)
]
else:
genomic_coordinates_per_transcript[exon[0]].append(
(exon[1] - minimum, exon[2] - minimum)
)
return genomic_coordinates_per_transcript
def get_reads_per_transcript_location(transcript_id, sqlite_path_reads):
infile = SqliteDict(sqlite_path_reads)
if transcript_id not in infile:
print("No unambiguous reads support this gene")
return None
return infile[transcript_id]["unambig"]
def get_reads_per_genomic_location_asite(
gene, sqlite_path_reads, sqlite_path_organism, supported_transcripts, filter
):
# get the number of reads supporting each genomic position to be used in the display of support of the
# supertranscript model. This function takes the reads mapped for each transcript of a gene and uses a combination
# of genomic and transcriptomic ranges to translate each transcript position to a genomic one.
gene_info = get_gene_info(gene, sqlite_path_organism)
genomic_exon_coordinates = genomic_exon_coordinate_ranges(
gene, sqlite_path_organism, supported_transcripts, filter
)
genomic_read_dictionary = {}
for read_file in sqlite_path_reads:
infile = SqliteDict(read_file)
for transcript in gene_info:
if filter:
if transcript[0] not in supported_transcripts:
continue
if transcript[0] not in infile:
print("No unambiguous reads support this gene")
return None
transcript_read_dictionary = infile[transcript[0]]["unambig"]
genomic_ranges = genomic_exon_coordinates[transcript[0]]
exon_junctions = string_to_integer_list(transcript[1].split(","))
sequence = transcript[2]
exons = exons_of_transcript(transcript[0], sqlite_path_organism)
transcript_ranges = get_exon_coordinate_ranges(
sequence, exons, exon_junctions
)
for length in transcript_read_dictionary:
for location in transcript_read_dictionary[length]:
position = (
location + infile["offsets"]["fiveprime"]["offsets"][length]
)
range_counter = 0
for exon in transcript_ranges:
if position in range(exon[0], exon[1]):
difference_between_read_position_and_exon_asite = (
position - exon[0]
)
genomic_asite = (
genomic_ranges[range_counter][0]
+ difference_between_read_position_and_exon_asite
)
if genomic_asite not in genomic_read_dictionary:
genomic_read_dictionary[
genomic_asite
] = transcript_read_dictionary[length][location]
else:
genomic_read_dictionary[
genomic_asite
] += transcript_read_dictionary[length][location]
range_counter += 1
return genomic_read_dictionary
def get_reads_per_genomic_location_fiveprime(
gene, sqlite_path_reads, sqlite_path_organism, supported_transcripts, filter
):
# get the number of reads supporting each genomic position to be used in the display of support of the
# supertranscript model. This function takes the reads mapped for each transcript of a gene and uses a combination
# of genomic and transcriptomic ranges to translate each transcript position to a genomic one.
gene_info = get_gene_info(gene, sqlite_path_organism)
genomic_exon_coordinates = genomic_exon_coordinate_ranges(
gene, sqlite_path_organism, supported_transcripts, filter
)
genomic_read_dictionary = {}
for read_file in sqlite_path_reads:
infile = SqliteDict(read_file)
for transcript in gene_info:
if filter:
if transcript[0] not in supported_transcripts:
continue
if transcript[0] not in infile:
print("No unambiguous reads support this gene")
return None
transcript_read_dictionary = infile[transcript[0]]["unambig"]
genomic_ranges = genomic_exon_coordinates[transcript[0]]
exon_junctions = string_to_integer_list(transcript[1].split(","))
sequence = transcript[2]
exons = exons_of_transcript(transcript[0], sqlite_path_organism)
transcript_ranges = get_exon_coordinate_ranges(
sequence, exons, exon_junctions
)
for length in transcript_read_dictionary:
for position in transcript_read_dictionary[length]:
range_counter = 0
for exon in transcript_ranges:
if position in range(exon[0], exon[1]):
difference_between_read_position_and_exon_start = (
position - exon[0]
)
genomic_start_pos = (
genomic_ranges[range_counter][0]
+ difference_between_read_position_and_exon_start
)
if genomic_start_pos not in genomic_read_dictionary:
genomic_read_dictionary[
genomic_start_pos
] = transcript_read_dictionary[length][position]
else:
genomic_read_dictionary[
genomic_start_pos
] += transcript_read_dictionary[length][position]
range_counter += 1
return genomic_read_dictionary
def get_read_ranges_genomic_location(
gene, sqlite_path_reads, sqlite_path_organism, supported_transcripts, filter
):
# get the number of reads supporting each genomic position to be used in the display of support of the
# supertranscript model. This function takes the reads mapped for each transcript of a gene and uses a combination
# of genomic and transcriptomic ranges to translate each transcript position to a genomic one.
gene_info = get_gene_info(gene, sqlite_path_organism)
genomic_exon_coordinates = genomic_exon_coordinate_ranges(
gene, sqlite_path_organism, supported_transcripts, filter
)
genomic_read_dictionary = {}
for read_file in sqlite_path_reads:
infile = SqliteDict(read_file)
for transcript in gene_info:
if filter:
if transcript[0] not in supported_transcripts:
continue
if transcript[0] not in infile:
print("No unambiguous reads support this gene")
return None
transcript_read_dictionary = infile[transcript[0]]["unambig"]
genomic_ranges = genomic_exon_coordinates[transcript[0]]
exon_junctions = string_to_integer_list(transcript[1].split(","))
sequence = transcript[2]
exons = exons_of_transcript(transcript[0], sqlite_path_organism)
transcript_ranges = get_exon_coordinate_ranges(
sequence, exons, exon_junctions
)
for length in transcript_read_dictionary:
for position in transcript_read_dictionary[length]:
range_counter = 0
for exon in transcript_ranges:
if position in range(exon[0], exon[1]):
difference_between_read_position_and_exon_start = (
position - exon[0]
)
genomic_start_pos = (
genomic_ranges[range_counter][0]
+ difference_between_read_position_and_exon_start
)
genomic_read_range = (
genomic_start_pos,
genomic_start_pos + length,
)
if genomic_read_range not in genomic_read_dictionary:
genomic_read_dictionary[
genomic_read_range
] = transcript_read_dictionary[length][position]
else:
genomic_read_dictionary[
genomic_read_range
] += transcript_read_dictionary[length][position]
range_counter += 1
return genomic_read_dictionary
def read_ranges_per_transcript(reads):
# Return a list of tuples of ranges which each read spans
range_dict = {}
for length in reads:
for position in reads[length]:
range_dict[(position, position + length)] = reads[length][position]
def get_exonjunction_pileup_for_transcript(
transcript_id, sqlite_path_organism, sqlite_path_reads
):
# count the number of reads in the read file that span each exon-exon junction. for a given transcript
# returns a dictionary with junctions as keys and counts as values d
transcript_info = get_transcript_info(transcript_id, sqlite_path_organism)
exon_junctions = string_to_integer_list(transcript_info[0][0].split(","))
counts = {}
for read_file in sqlite_path_reads:
reads = get_reads_per_transcript_location(transcript_id, read_file)
for junction in exon_junctions:
for read_length in reads:
for position in reads[read_length]:
if junction in range(position, position + read_length):
if junction in counts:
counts[junction] += reads[read_length][position]
else:
counts[junction] = reads[read_length][position]
return counts
def get_exon_pileup_for_transcript(
transcript_id, sqlite_path_organism, sqlite_path_reads
):
# count the number of reads whos p site lies within the exon sequence for a given transcript
# returns a dictionary with sequences as keys and counts as values
transcript_info = get_transcript_info(transcript_id, sqlite_path_organism)
exon_junctions = string_to_integer_list(transcript_info[0][0].split(","))
sequence = transcript_info[0][1]
exons = exons_of_transcript(transcript_id, sqlite_path_organism)
ranges = get_exon_coordinate_ranges(sequence, exons, exon_junctions)
counts = {}
for read_file in sqlite_path_reads:
reads = get_reads_per_transcript_location(transcript_id, read_file)
if reads is None:
return counts
for exon in range(len(ranges)):
for read_length in reads:
for position in reads[read_length]:
if position in range(ranges[exon][0], ranges[exon][1] + 1):
if exons[exon] in counts:
counts[exons[exon]] += reads[read_length][position]
else:
counts[exons[exon]] = reads[read_length][position]
for exon in exons:
if exon not in counts:
counts[exon] = 0
return counts
def filter_unsupported_transcripts(gene, sqlite_path_organism, sqlite_path_reads):
gene_info = get_gene_info(gene, sqlite_path_organism)
transcripts = [i[0] for i in gene_info]
supported = []
for transcript in transcripts:
pileup = get_exon_pileup_for_transcript(
transcript, sqlite_path_organism, sqlite_path_reads
)
support = True
for exon in pileup:
if pileup[exon] == 0:
support = False
if support:
supported.append(transcript)
return supported
# def get_exon_sequences(gene, sqlite_path_organism, supported_transcripts, filter=True):
# # returns the exon sequences for all annotated exons of a gene.
# gene_info = get_gene_info(gene, sqlite_path_organism)
# exon_dict = {}
# for transcript in gene_info:
# trans_id = transcript[0]
# if filter and (trans_id not in supported_transcripts):
# continue
# exon_junctions = transcript[1].split(",")
# sequence = transcript[2]
# exon_junct_int = string_to_integer_list(exon_junctions)
#
# while len(exon_junct_int) != 0:
# exon, sequence = get_3prime_exon(exon_junct_int, sequence)
# if exon in exon_dict:
# exon_dict[exon] += trans_id
# else:
# exon_dict[exon] = trans_id
# exon_junct_int.pop(-1)
#
# if len(exon_junct_int) == 0:
# if sequence in exon_dict:
# exon_dict[sequence] += trans_id
# else:
# exon_dict[sequence] = trans_id
#
# # for exon_sequence in exon_list:
# # if exon_sequence in exon_dict:
# # exon_dict[exon_sequence] += trans_id
# # else:
# # exon_dict[exon_sequence] = trans_id
# return exon_dict
def get_exon_sequences(gene, sqlite_path_organism, supported_transcripts, filter=True):
# returns the exon sequences for all annotated exons of a gene.
gene_info = get_gene_info(gene, sqlite_path_organism)
exon_dict = {}
for transcript in gene_info:
trans_id = transcript[0]
if filter and (trans_id not in supported_transcripts):
continue
exon_junctions = transcript[1].split(",")
sequence = transcript[2]
exon_junct_int = string_to_integer_list(exon_junctions)
exon_list = []
while len(exon_junct_int) != 0:
exon, sequence = get_3prime_exon(exon_junct_int, sequence)
exon_list.append(exon)
exon_junct_int.pop(-1)
if len(exon_junct_int) == 0:
exon_list.append(sequence)
# I reverse the list to fix the numbers in the genomic positioning plot to make it more intuitive.
# exon_list = exon_list[::-1]
for exon_sequence in exon_list:
if exon_sequence in exon_dict:
exon_dict[exon_sequence] += trans_id
else:
exon_dict[exon_sequence] = trans_id
return exon_dict
def assign_exon_numbers(gene, sqlite_path_organism, supported_transcripts, filter=True):
# For ease of reference each exon in the gene is given a unique integer ID
# retruns a dictionary with numbers as keys and sequence as values.
exons = get_exon_sequences(
gene, sqlite_path_organism, supported_transcripts, filter
)
counter = 1
exon_dictionary = {}
used_ids = []
for i in exons:
if i not in used_ids:
exon_dictionary[counter] = i
counter += 1
used_ids.append(i)
return exon_dictionary
def get_scores_per_exonjunction_for_gene(
gene_name,
sqlite_path_organism,
sqlite_path_reads,
supported_transcripts,
filter=True,
):
# count the reads in the reads file whos p sites lite within the exon sequence
# returns a dictionary with all unique exons in the gene as keys and counts as values
gene_info = get_gene_info(gene_name, sqlite_path_organism)
if filter:
transcripts = supported_transcripts.copy()
else:
transcripts = [i[0] for i in gene_info]
pileup = {}
for transcript in transcripts:
if transcript in pileup:
pileup[transcript] += get_exonjunction_pileup_for_transcript(
transcript, sqlite_path_organism, sqlite_path_reads
)
else:
pileup[transcript] = get_exonjunction_pileup_for_transcript(
transcript, sqlite_path_organism, sqlite_path_reads
)
return pileup
def get_scores_per_exon_for_gene(
gene_name,
sqlite_path_organism,
sqlite_path_reads,
supported_transcripts,
filter=True,
):
# count the reads in the reads file whos p sites lite within the exon sequence
# returns a dictionary with all unique exons in the gene as keys and counts as values
gene_info = get_gene_info(gene_name, sqlite_path_organism)
if filter:
transcripts = supported_transcripts.copy()
else:
transcripts = [i[0] for i in gene_info]
pileup = {}
for transcript in transcripts:
if transcript in pileup:
pileup[transcript] += get_exon_pileup_for_transcript(
transcript, sqlite_path_organism, sqlite_path_reads
)
else:
pileup[transcript] = get_exon_pileup_for_transcript(
transcript, sqlite_path_organism, sqlite_path_reads
)
all_exons = {}
for transcript in pileup:
for exon in pileup[transcript]:
if exon in all_exons:
all_exons[exon] += pileup[transcript][exon]
else:
all_exons[exon] = pileup[transcript][exon]
return all_exons
# supported = filter_unsupported_transcripts(gene, sqlite_path_organism, sqlite_path_reads)
# prime = get_reads_per_genomic_location_fiveprime(gene, sqlite_path_reads, sqlite_path_organism, supported, filter=True)
# asite = get_reads_per_genomic_location_asite(gene, sqlite_path_reads, sqlite_path_organism, supported, filter=True)
# print("asite", asite)
# print("prime", prime)
#
def get_scores_for_gene(
gene_name,
sqlite_path_organism,
sqlite_path_reads,
supported_transcripts,
filter=True,
):
# count the reads in the reads file whos p sites lite within the exon sequence
# returns a dictionary with all unique exons in the gene as keys and counts as values
gene_info = get_gene_info(gene_name, sqlite_path_organism)
if filter:
transcripts = supported_transcripts.copy()
else:
transcripts = [i[0] for i in gene_info]
exon_pileup = {}
junct_pileup = {}
for transcript in transcripts:
if transcript in exon_pileup:
exon_pileup[transcript] += get_exon_pileup_for_transcript(
transcript, sqlite_path_organism, sqlite_path_reads
)
else:
exon_pileup[transcript] = get_exon_pileup_for_transcript(
transcript, sqlite_path_organism, sqlite_path_reads
)
if transcript in junct_pileup:
junct_pileup[transcript] += get_exonjunction_pileup_for_transcript(
transcript, sqlite_path_organism, sqlite_path_reads
)
else:
junct_pileup[transcript] = get_exonjunction_pileup_for_transcript(
transcript, sqlite_path_organism, sqlite_path_reads
)
all_exons = {}
for transcript in exon_pileup:
for exon in exon_pileup[transcript]:
if exon in all_exons:
all_exons[exon] += exon_pileup[transcript][exon]
else:
all_exons[exon] = exon_pileup[transcript][exon]
exon_dict = assign_exon_numbers(
gene_name, sqlite_path_organism, supported_transcripts, filter
)
scores_number = {}
for i in all_exons:
score_number = get_keys_by_value(exon_dict, i)[0]
if score_number in scores_number:
scores_number[score_number] += all_exons[i]
else:
scores_number[score_number] = all_exons[i]
return scores_number, junct_pileup
def get_edges(gene_name, sqlite_path_organism, supported_transcripts, filter=True):
# Exons that lead into eachother in a transcript are represented as directed edges in the graphs.
# Each edge is represented as a tuple (out node, in_node). Function returns a list of edges
gene_info = get_gene_info(gene_name, sqlite_path_organism)
if filter:
transcripts = supported_transcripts.copy()
else:
transcripts = [i[0] for i in gene_info]
edges = []
for transcript in transcripts:
exon_lst = exons_of_transcript(transcript, sqlite_path_organism)
for i in range(len(exon_lst) - 1):
edges.append((exon_lst[i], exon_lst[i + 1]))
return edges
def get_edges_scores(
gene_name, sqlite_path_organism, supported_transcripts, junction_pileup, filter=True
):
# Exons that lead into eachother in a transcript are represented as directed edges in the graphs.
# Each edge is represented as a tuple (out node, in_node). Function returns a list of edges
gene_info = get_gene_info(gene_name, sqlite_path_organism)
if filter:
transcripts = supported_transcripts.copy()
else:
transcripts = [i[0] for i in gene_info]
edge_scores = {}
for transcript in transcripts:
try:
scores = junction_pileup[transcript]
junct = sorted(scores.keys())
except KeyError:
continue
edges = []
exon_lst = exons_of_transcript(transcript, sqlite_path_organism)
for i in range(len(exon_lst) - 1):
edges.append((exon_lst[i], exon_lst[i + 1]))
edge_junction = zip(edges, junct)
for junction in edge_junction:
edge_scores[junction[0]] = scores[junction[1]]
return edge_scores
def get_keys_by_value(dictionary, value):
# function to reverse look up a given dictionary returns a list of the keys with that value
keys = []
items = dictionary.items()
for item in items:
if item[1] == value:
keys.append(item[0])
return keys
def scores_per_exon_number(
gene, sqlite_path_organism, sqlite_path_reads, supported_transcripts, filter=True
):
# converts the scores dictionary from sequences as keys to numbers as keys
scores = get_scores_per_exon_for_gene(
gene,
sqlite_path_organism,
sqlite_path_reads,
supported_transcripts,
filter=filter,
)
exon_dict = assign_exon_numbers(
gene, sqlite_path_organism, supported_transcripts, filter
)
scores_number = {}
for i in scores:
score_number = get_keys_by_value(exon_dict, i)[0]
if score_number in scores_number:
scores_number[score_number] += scores[i]
else:
scores_number[score_number] = scores[i]
return scores_number
def get_edges_numbers(gene, sqlite_path_organism, supported_transcripts, filter=True):
# converts the edges to use the number ids, Returns a list of tuples (out node, in node)
edges = get_edges(gene, sqlite_path_organism, supported_transcripts, filter)
exon_dictionary = assign_exon_numbers(
gene, sqlite_path_organism, supported_transcripts, filter
)
number_edges = []
for i in edges:
number_edge = (
get_keys_by_value(exon_dictionary, i[0])[0],
get_keys_by_value(exon_dictionary, i[1])[0],
)
number_edges.append(number_edge)
return number_edges
def get_edges_scores_numbers(
gene, sqlite_path_organism, supported_transcripts, junction_pileup, filter=True
):
# converts the edges scores to use the number ids, Returns a dictionary with tuples (out node, in node) as keys
edges_scores = get_edges_scores(
gene, sqlite_path_organism, supported_transcripts, junction_pileup, filter=True
)
exon_dictionary = assign_exon_numbers(
gene, sqlite_path_organism, supported_transcripts, filter
)
number_edge_scores = {}
for i in edges_scores:
number_edge = (
get_keys_by_value(exon_dictionary, i[0])[0],
get_keys_by_value(exon_dictionary, i[1])[0],
)
number_edge_scores[number_edge] = edges_scores[i]
return number_edge_scores
def get_path_starts(graph):
# Get the exons that start each path through the graph. 5' exons
edges = graph.edges()
start_exons = list(graph.nodes())
for edge in edges:
if edge[1] in start_exons:
start_exons.pop(start_exons.index(edge[1]))
return start_exons
def principal_path_in_numbers(
gene, sqlite_path_organism, supported_transcripts, filter
):
# take the principal path in exons and return the same using number ids for inclusion in the graph
exons = exons_of_transcript(
get_genes_principal(gene, sqlite_path_organism), sqlite_path_organism
)
exon_dictionary = assign_exon_numbers(
gene, sqlite_path_organism, supported_transcripts, filter
)
number_path_of_principal = []
for item in exons:
number_path_of_principal.append(get_keys_by_value(exon_dictionary, item)[0])
return number_path_of_principal
def get_exon_positions(exon_info, supported_transcripts, filter=True):
# returns the exon sequence as keys and the genomic position of the first nucleotide of the sequence as values
transcript_positions = {}
transcripts = []
for exon in exon_info:
if exon[0] not in transcripts:
transcripts.append(exon[0])
for i in range(len(exon_info)):
if exon_info[i][0] not in transcript_positions:
transcript_positions[exon_info[i][0]] = [exon_info[i][1]]
else:
transcript_positions[exon_info[i][0]].append(exon_info[i][1])
exon_positions = {}
for transcript in transcript_positions:
exons = exons_of_transcript(transcript, sqlite_path_organism)
positions = transcript_positions[transcript]
for i in range(len(exons)):
if exons[i] not in exon_positions:
exon_positions[exons[i]] = positions[i]
else:
exon_positions[exons[i]] = max(positions[i], exon_positions[exons[i]])
return exon_positions
def exon_positions_with_numbers(
gene, sqlite_path_organism, supported_transcripts, filter=True
):
# returns a dictionary of exon numbers as designated with "assign_exon_numbers()"
# values are start and stop positions (position start, start + length)
exon_info = get_exon_info(gene, sqlite_path_organism, supported_transcripts, filter)
exon_positions = get_exon_positions(exon_info, supported_transcripts, filter)
exon_dict = assign_exon_numbers(
gene, sqlite_path_organism, supported_transcripts, filter
)
min_start_position = get_max_min_exon_genomic_positions(exon_info)[0]
positions_number = {}
for i in exon_positions:
position_number = get_keys_by_value(exon_dict, i)[0]
position = exon_positions[i] - min_start_position
if position_number not in positions_number:
positions_number[position_number] = (position, position + len(i) - 1)
else:
positions_number[position_number] = (
max(position, positions_number[position_number][0]),
max(position, positions_number[position_number][0])
+ max(len(i) - 1, positions_number[position_number][1]),
)
return positions_number
def get_y_values(positions_number):
# to produce a clear layout of the networkx plot ensure that nodes dont overlap.
# if overlap would occur based on the genomic coordinates (x axis) elevate the next
# node one point on the y axis
start_positions = []
end_positions = []
for i in positions_number:
start_positions.append(positions_number[i][0])
end_positions.append(positions_number[i][1])
min_position = min(start_positions)
max_position = max(end_positions)
number_x_y = {}
for i in range(min_position, max_position):
exons_in_range = 0
for exon in positions_number:
if i in range(
positions_number[exon][0] - 100, positions_number[exon][1] + 100
):
exons_in_range += 1
if exon not in number_x_y:
number_x_y[exon] = [positions_number[exon][0], exons_in_range]
else:
if number_x_y[exon][1] < exons_in_range:
number_x_y[exon][1] = exons_in_range
return number_x_y
def relabel_nodes_L_R(number_x_y):
relabel = {}
node = 1
for i in number_x_y:
relabel[i] = node
node += 1
return relabel
def get_transcript_modules(
gene, sqlite_path_organism, supported_transcripts, filter=True
):
# modules are either unique or shared regions across the set of transcripts.
# the number of exons transcribed from each individual genomic loci is recorded
# a module is defined as a change in the number of exons transcribed from that region
positions_number = exon_positions_with_numbers(
gene, sqlite_path_organism, supported_transcripts, filter
)
start_positions = []
end_positions = []
for i in positions_number:
start_positions.append(positions_number[i][0])
end_positions.append(positions_number[i][1])
min_position = min(start_positions)
max_position = max(end_positions)
number_supported = []
modules = {}
module_number = 0
module_start = min_position
prev_count = 0
for i in range(min_position, max_position):
exons_in_range = 0
exons_in_range_list = []
for exon in positions_number:
if i in range(positions_number[exon][0], positions_number[exon][1]):
exons_in_range += 1
exons_in_range_list.append(exon)
if prev_count != exons_in_range:
if module_number not in modules:
modules[module_number] = (module_start, i, exons_in_range_list)
module_number += 1
module_start = i
prev_count = exons_in_range
number_supported.append(exons_in_range)
if modules[0] == (0, 0, 0):
modules.pop(0)
return modules
def get_principal_modules(
gene, sqlite_path_organism, modules, supported_transcripts, filter=True
):
principal_path = principal_path_in_numbers(
gene, sqlite_path_organism, supported_transcripts, filter
)
modules_in_principal = []
for module in modules:
for exon in modules[module][2]:
if exon in principal_path:
modules_in_principal.append(module)
return modules_in_principal
def get_module_pileup(modules, genomic_read_dictionary):
# count the reads that support each module. if no reads are counted then assign a score of zero.
module_pileup = {}
for position in genomic_read_dictionary:
for module in modules:
if position in range(modules[module][0], modules[module][1]):
if module not in module_pileup:
module_pileup[module] = genomic_read_dictionary[position]
else:
module_pileup[module] += genomic_read_dictionary[position]
for i, value in enumerate(modules):
if i not in module_pileup:
module_pileup[i] = 0
return module_pileup
def supertranscript_positions(modules):
# convert the modules dictionary into a list of positions of each module.
positions = []
for i in modules:
positions.append((modules[i][0], modules[i][1] - modules[i][0]))
return positions
def supertranscript_widths_starts(modules):
# convert the modules dictionary into a list of start positions and lengths of each module.
widths = []
starts = []
for i in modules:
widths.append(modules[i][1] - modules[i][0])
starts.append(modules[i][0])
return widths, starts
def supertranscript_colors(scores):
# convert the scores to a decimal between 0-1. The max being 1 and 0 values being 0.
colors = []
for i in range(len(scores)):
colors.append(scores[i])
maximum = max(colors)
transformed_colors = []
for i in colors:
transformed_colors.append(i / maximum)
return transformed_colors
def supertranscript_edgecolors(modules, principal_modules):
# Function to provide edge color list where principal isoform path is coloured in red.
edgecolors = []
for i in modules:
if i in principal_modules:
edgecolors.append("red")
else:
edgecolors.append("black")
return edgecolors
def plot_supertranscript(
gene, sqlite_path_organism, supported_transcripts, filter=True
):
# plot the super transcript using a broken bar chart with each module touching. scores/colors not supported
modules = get_transcript_modules(
gene, sqlite_path_organism, supported_transcripts, filter
)
principal_modules = get_principal_modules(
gene, sqlite_path_organism, supported_transcripts, filter
)
fig, ax = plt.subplots()
positions = supertranscript_positions(modules)
edgecolors = supertranscript_edgecolors(modules, principal_modules)
ax.broken_barh(positions, (0, 1), facecolors="blue", edgecolors=edgecolors)
ax.set_ylim(0, 15)
ax.set_xlim(0, positions[-1][0] + positions[-1][1])
plt.show()
def plot_supertranscript_barh(
gene, sqlite_path_organism, sqlite_path_reads, filter=True
):
# horizontal bar chart method of plotting a superTranscript. A modules support is
# reflected in the color concentration.
supported_transcripts = filter_unsupported_transcripts(
gene, sqlite_path_organism, sqlite_path_reads
)
modules = get_transcript_modules(gene, sqlite_path_organism, supported_transcripts)
genomic_read_dictionary = get_reads_per_genomic_location(
gene, sqlite_path_reads, sqlite_path_organism, supported_transcripts, filter
)
scores = get_module_pileup(modules, genomic_read_dictionary)
colors = supertranscript_colors(scores)
widths, starts = supertranscript_widths_starts(modules)
fig, ax = plt.subplots(figsize=(10, 1.5))
ax.invert_yaxis()
ax.xaxis.set_visible(True)
ax.yaxis.set_visible(False)
c = plt.get_cmap("Blues")(colors)
principal_modules = get_principal_modules(gene, sqlite_path_organism)
edgecolors = supertranscript_edgecolors(modules, principal_modules)
ax.barh(1, widths, left=starts, height=0.5, edgecolor=edgecolors, color=c)
plt.show()
def get_all_paths(graph):
# function to princt out all valid paths through a graph. Which should be
# all valid transcripts as per the provided annotations
for i in range(1, len(graph.nodes) + 1):
print("source: ", i)
for j in range(1, len(graph.nodes) + 1):
print("traget: ", j)
print(list(nx.all_simple_paths(graph, source=i, target=j)))
def nx_plot_spring(gene, sqlite_path_organism, sqlite_path_reads, filter=filter):
graph = nx.DiGraph()
edge_numbers = get_edges_numbers(
gene, sqlite_path_organism, sqlite_path_reads, filter
)
graph.add_edges_from(edge_numbers, length=1)
handles = []
exon_dict = assign_exon_numbers(
gene, sqlite_path_organism, sqlite_path_reads, filter
)
number_of_exons = len(exon_dict.keys())
scores = scores_per_exon_number(
gene, sqlite_path_organism, sqlite_path_reads, filter=filter
)
for i in range(number_of_exons + 1):
if i not in scores:
scores[i] = 0
for i in exon_dict:
handles.append(mpatches.Patch(label=str(i) + " : " + ": " + exon_dict[i]))
# nx.draw_networkx_labels(graph, pos) # Including nodes allowing font changes
# Causes errors with framing the plot.
connected = list(nx.weakly_connected_components(graph))
color_lookup = scores
low = min(sorted(color_lookup.values()))
high = max(sorted(color_lookup.values()))
norm = mpl.colors.Normalize(vmin=low, vmax=high, clip=True)
mapper = mpl.cm.ScalarMappable(norm=norm, cmap=mpl.cm.Blues)
gs = mpl.gridspec.GridSpec(len(connected) + 3, 1)
plt.gca().set_position(gs[1].get_position(plt.gcf()))
plt.gca().set_subplotspec(gs[1])
# rcParams['figure.figsize'] = 12, 7 #full screen
for i in range(len(connected)):
subgraph = graph.subgraph(connected[i])
i_color_lookup = {}
for j in connected[i]:
if j not in i_color_lookup:
i_color_lookup[j] = color_lookup[j]
subpos = nx.kamada_kawai_layout(subgraph)
plt.subplot(gs[i])
nx.draw(
subgraph,
subpos,
node_shape="s",
nodelist=i_color_lookup,
node_size=150,
node_color=[mapper.to_rgba(i) for i in i_color_lookup.values()],
with_labels=True,
)
# G = nx.Graph()
# plt.subplot(gs[len(connected)])
# nx.draw(G)
# plt.subplot(gs[len(connected)+1])
# nx.draw(G)
# plt.subplot(gs[len(connected)+2])
# nx.draw(G)
# plt.legend(loc = "lower left", prop= {"size": 4}, handles = handles)
title = "Kamada Kawai Layout Network of " + gene + " Exons"
plt.figtext(0.0, 0.95, title)
plt.margins(x=0, y=2)
plt.show()
| [
"sqlite3.connect",
"matplotlib.pyplot.figtext",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.gcf",
"networkx.DiGraph",
"networkx.all_simple_paths",
"sqlitedict.SqliteDict",
"matplotlib.cm.ScalarMappable",
"networkx.weakly_connected_components",
"matplotlib.colors.Normalize",
"networkx.kamada_kawa... | [((516, 553), 'sqlite3.connect', 'sqlite3.connect', (['sqlite_path_organism'], {}), '(sqlite_path_organism)\n', (531, 553), False, 'import sqlite3\n'), ((947, 984), 'sqlite3.connect', 'sqlite3.connect', (['sqlite_path_organism'], {}), '(sqlite_path_organism)\n', (962, 984), False, 'import sqlite3\n'), ((2117, 2154), 'sqlite3.connect', 'sqlite3.connect', (['sqlite_path_organism'], {}), '(sqlite_path_organism)\n', (2132, 2154), False, 'import sqlite3\n'), ((3878, 3915), 'sqlite3.connect', 'sqlite3.connect', (['sqlite_path_organism'], {}), '(sqlite_path_organism)\n', (3893, 3915), False, 'import sqlite3\n'), ((5408, 5437), 'sqlitedict.SqliteDict', 'SqliteDict', (['sqlite_path_reads'], {}), '(sqlite_path_reads)\n', (5418, 5437), False, 'from sqlitedict import SqliteDict\n'), ((38131, 38145), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (38143, 38145), True, 'import matplotlib.pyplot as plt\n'), ((38433, 38443), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (38441, 38443), True, 'import matplotlib.pyplot as plt\n'), ((39217, 39248), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 1.5)'}), '(figsize=(10, 1.5))\n', (39229, 39248), True, 'import matplotlib.pyplot as plt\n'), ((39604, 39614), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (39612, 39614), True, 'import matplotlib.pyplot as plt\n'), ((40105, 40117), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (40115, 40117), True, 'import networkx as nx\n'), ((41076, 41128), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': 'low', 'vmax': 'high', 'clip': '(True)'}), '(vmin=low, vmax=high, clip=True)\n', (41096, 41128), True, 'import matplotlib as mpl\n'), ((41142, 41193), 'matplotlib.cm.ScalarMappable', 'mpl.cm.ScalarMappable', ([], {'norm': 'norm', 'cmap': 'mpl.cm.Blues'}), '(norm=norm, cmap=mpl.cm.Blues)\n', (41163, 41193), True, 'import matplotlib as mpl\n'), ((42314, 42343), 'matplotlib.pyplot.figtext', 'plt.figtext', (['(0.0)', '(0.95)', 'title'], {}), '(0.0, 0.95, title)\n', (42325, 42343), True, 'import matplotlib.pyplot as plt\n'), ((42348, 42369), 'matplotlib.pyplot.margins', 'plt.margins', ([], {'x': '(0)', 'y': '(2)'}), '(x=0, y=2)\n', (42359, 42369), True, 'import matplotlib.pyplot as plt\n'), ((42374, 42384), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (42382, 42384), True, 'import matplotlib.pyplot as plt\n'), ((6332, 6353), 'sqlitedict.SqliteDict', 'SqliteDict', (['read_file'], {}), '(read_file)\n', (6342, 6353), False, 'from sqlitedict import SqliteDict\n'), ((9212, 9233), 'sqlitedict.SqliteDict', 'SqliteDict', (['read_file'], {}), '(read_file)\n', (9222, 9233), False, 'from sqlitedict import SqliteDict\n'), ((11958, 11979), 'sqlitedict.SqliteDict', 'SqliteDict', (['read_file'], {}), '(read_file)\n', (11968, 11979), False, 'from sqlitedict import SqliteDict\n'), ((39342, 39363), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Blues"""'], {}), "('Blues')\n", (39354, 39363), True, 'import matplotlib.pyplot as plt\n'), ((40908, 40945), 'networkx.weakly_connected_components', 'nx.weakly_connected_components', (['graph'], {}), '(graph)\n', (40938, 40945), True, 'import networkx as nx\n'), ((41652, 41684), 'networkx.kamada_kawai_layout', 'nx.kamada_kawai_layout', (['subgraph'], {}), '(subgraph)\n', (41674, 41684), True, 'import networkx as nx\n'), ((41693, 41711), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[i]'], {}), '(gs[i])\n', (41704, 41711), True, 'import matplotlib.pyplot as plt\n'), ((41253, 41262), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (41260, 41262), True, 'import matplotlib.pyplot as plt\n'), ((41295, 41304), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (41302, 41304), True, 'import matplotlib.pyplot as plt\n'), ((41311, 41320), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (41318, 41320), True, 'import matplotlib.pyplot as plt\n'), ((39960, 40006), 'networkx.all_simple_paths', 'nx.all_simple_paths', (['graph'], {'source': 'i', 'target': 'j'}), '(graph, source=i, target=j)\n', (39979, 40006), True, 'import networkx as nx\n')] |
import pytest
from lunchroulette.app import app as _app
from lunchroulette.app import db as _db
from sqlalchemy import event
from sqlalchemy.orm import sessionmaker
@pytest.fixture(scope="session")
def app(request):
return _app
@pytest.fixture(scope="function")
def db(app, request):
with app.app_context():
_db.drop_all()
_db.create_all()
@pytest.fixture(scope="function", autouse=True)
def session(app, db, request):
with app.app_context():
print(app.url_map)
conn = _db.engine.connect()
txn = conn.begin()
options = dict(bind=conn, binds={})
sess = _db.create_scoped_session(options=options)
sess.begin_nested()
@event.listens_for(sess(), 'after_transaction_end')
def restart_savepoint(sess2, trans):
# Detecting whether this is indeed the nested transaction of the test
if trans.nested and not trans._parent.nested:
# The test should have normally called session.commit(),
# but to be safe we explicitly expire the session
sess2.expire_all()
sess.begin_nested()
_db.session = sess
yield sess
# Cleanup
sess.remove()
txn.rollback()
conn.close()
@pytest.fixture
def client(app):
return app.test_client() | [
"lunchroulette.app.db.create_all",
"lunchroulette.app.db.engine.connect",
"lunchroulette.app.db.drop_all",
"pytest.fixture",
"lunchroulette.app.db.create_scoped_session"
] | [((168, 199), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (182, 199), False, 'import pytest\n'), ((236, 268), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (250, 268), False, 'import pytest\n'), ((369, 415), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""', 'autouse': '(True)'}), "(scope='function', autouse=True)\n", (383, 415), False, 'import pytest\n'), ((327, 341), 'lunchroulette.app.db.drop_all', '_db.drop_all', ([], {}), '()\n', (339, 341), True, 'from lunchroulette.app import db as _db\n'), ((350, 366), 'lunchroulette.app.db.create_all', '_db.create_all', ([], {}), '()\n', (364, 366), True, 'from lunchroulette.app import db as _db\n'), ((517, 537), 'lunchroulette.app.db.engine.connect', '_db.engine.connect', ([], {}), '()\n', (535, 537), True, 'from lunchroulette.app import db as _db\n'), ((625, 667), 'lunchroulette.app.db.create_scoped_session', '_db.create_scoped_session', ([], {'options': 'options'}), '(options=options)\n', (650, 667), True, 'from lunchroulette.app import db as _db\n')] |
import argparse
import configparser
import json
import logging.config
import time
import urllib.parse
import urllib.request
import yaml
######################################################################################################
# Parse command line.
######################################################################################################
args_parser = argparse.ArgumentParser()
args_parser.add_argument('--flat', dest='flat', required=False, default=True, action='store_true', help='specify if configuration keys are dot-separated strings')
args_parser.add_argument('--no-flat', dest='flat', required=False, default=True, action='store_false', help='specify if configuration keys are heirarchical and separated by a forward slash (/)')
args_parser.add_argument('--parent', required=True, help='containing folder or key in Consul under which new KV pairs are located')
args_parser.add_argument('--upload', required=True, help='specifies configuration file to upload to KV data store in Consul')
args = args_parser.parse_args()
######################################################################################################
# Read program configuration.
######################################################################################################
config = configparser.ConfigParser()
config.read(f'config/cf2kv.ini')
defaults = config['DEFAULT']
######################################################################################################
# Set up logging.
######################################################################################################
logging_config_file = 'config/logging.yml'
with open(logging_config_file, 'r') as lcf:
logging.config.dictConfig(yaml.safe_load(lcf.read()))
logger = logging.getLogger("cf2ky-logger")
######################################################################################################
# Rock.
######################################################################################################
def read_configuration_file(fname:str):
# Assume *.properties contain Java properties.
if fname.endswith('.properties'):
try:
with open(fname, 'r') as java_properties:
properties = java_properties.read().splitlines()
except:
logger.error(f'Could not open Java properties file {fname}.', exc_info=True)
return None
kv_pairs = []
for p in properties:
kv = p.split('=')
kv_pairs.append({kv[0]: kv[1]})
return kv_pairs
# Assume *.yml and *.yaml are YAML configurations, and *.json contain JSON objects.
if fname.endswith('.yml') or fname.endswith('.yaml') or fname.endswith('.json'):
if fname.endswith('.yml') or fname.endswith('.yaml'):
try:
with open(fname, 'r') as yaml_cf:
ydict = yaml.safe_load(yaml_cf.read())
except:
logger.error(f'Could not open YAML file {fname}.', exc_info=True)
return None
else:
try:
with open(fname, 'r') as json_cf:
ydict = json.load(json_cf)
except:
logger.error(f'Could not open JSON file {fname}.', exc_info=True)
return None
if type(ydict) != dict: return None # Only support dictionary trees.
kv_pairs = []
prefix_stack = [['', ydict]] # each element is a (prefix, dictionary) tuple.
while len(prefix_stack) > 0:
cur_prefix, cur_dict = prefix_stack.pop()
for key in cur_dict:
nxt_prefix = f'{cur_prefix}.{key}' if len(cur_prefix) > 0 else key
val = cur_dict[key]
if type(val) == dict:
prefix_stack.append([nxt_prefix, val])
else:
kv_pairs.append({nxt_prefix: val})
return kv_pairs
# Assume *.ini files are Microsoft-style configuration files. Prefix each property name by its respective namespace.
if fname.endswith('.ini'):
kv_pairs = []
iniconf = configparser.ConfigParser()
try:
iniconf.read(fname)
except:
logger.error(f'Could not open INI file {fname}.', exc_info=True)
return None
for ns in iniconf:
cur_config = iniconf[ns]
for propname in cur_config:
key = f'{ns}.{propname}'
val = cur_config[propname]
kv_pairs.append({key: val})
return kv_pairs
return None
def upload_configuration_to_consul():
# Read in configuration file.
kv_pairs = read_configuration_file(args.upload)
if not kv_pairs:
logger.error(f'Could not read in configuration file {args.upload} to upload to KV data store in Consul.')
return
logger.debug(f'kv_pairs={kv_pairs}')
api_base_url = defaults.get('consul.url')
query_params = urllib.parse.urlencode({'flags': int(time.time())})
logger.debug(f'Uploading KV pairs for file={args.upload}, parent={args.parent}, flat={args.flat}')
for kv in kv_pairs:
logger.debug(f'kv={kv}')
key, value = next((str(k), str(v)) for k, v in kv.items())
if not args.flat: key = key.replace('.', '/')
req = urllib.request.Request(
url = f'{api_base_url}/{args.parent}/{key}?{query_params}',
headers = {
'Accept': 'application/json'
},
data = value.encode('utf-8'),
method = 'PUT'
)
upload_result = False
try:
with urllib.request.urlopen(req) as upload_fh:
upload_result = json.loads(upload_fh.read().decode('utf-8'))
except:
logger.error(f'Could not upload KV pair for key={key} to URL {req.full_url}', exc_info=True)
if upload_result:
logger.info(f'Successfully uploaded KV pair with key={key}')
else:
logger.error(f'Failed to upload KV pair with key={key} to URL {req.full_url}')
if __name__ == "__main__":
upload_configuration_to_consul()
| [
"json.load",
"time.time",
"configparser.ConfigParser",
"argparse.ArgumentParser"
] | [((380, 405), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (403, 405), False, 'import argparse\n'), ((1301, 1328), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (1326, 1328), False, 'import configparser\n'), ((4134, 4161), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (4159, 4161), False, 'import configparser\n'), ((5016, 5027), 'time.time', 'time.time', ([], {}), '()\n', (5025, 5027), False, 'import time\n'), ((3165, 3183), 'json.load', 'json.load', (['json_cf'], {}), '(json_cf)\n', (3174, 3183), False, 'import json\n')] |
# ------------------------------------------------------------------------------
# _dc.py
#
# Parser for the jam 'c' debug flag output - which contains the names of files
# that cause rebuilds - ie new sources, missing targets
#
# November 2015, <NAME>
# ------------------------------------------------------------------------------
"""jam -dc output parser"""
__all__ = ("DCParser",)
import collections
import re
from typing import Iterable, Iterator, Optional
from .. import database
from ._base import BaseParser
class DCParser(BaseParser):
"""Parser for '-dc' debug output."""
# See DEBUG_CAUSES in jam for the relevant debug output.
_causes_fates = {
database.Fate.NEWER.value,
database.Fate.TEMP.value,
database.Fate.TOUCHED.value,
database.Fate.MISSING.value,
}
def parse_logfile(self, filename: str):
"""Parse '-dc' debug output from the file at the given path."""
with open(filename, errors="ignore") as f:
self._parse(f)
def _parse(self, lines: Iterable[str]):
"""
Parse debug from jam log output.
This is separated out from parse_logfile for testing purposes.
"""
lines = collections.deque(lines)
while True:
line = self._consume_line(lines)
if line is None:
break
target = self._parse_fate_line(line)
if target is not None:
line = self._consume_line(lines)
if line is None:
break
older_target = self._parse_newer_than_line(line)
if older_target is not None:
# Record the info and go back to the top; we've consumed
# all of the inter-related lines.
target.add_i_am_newer_than(older_target)
continue
# Two possibilities at this point:
# - Still processing the first line from this iteration; it was
# not fate-related
# - First line did give fate information, but the second line
# wasn't giving related "newer than" information.
#
# Either way we're now on to some form of "rebuilding" line, or a
# line that's not interesting at all.
rebuilding = self._parse_rebuilding_line(line)
# May have some timestamp inheritance info to follow.
if rebuilding:
self._parse_inherits_timestamp_lines(lines)
def _consume_line(self, lines: collections.deque[str]) -> Optional[str]:
"""
Read the next line.
- Returns `None` if there's no remaining input.
- Otherwise returns the next line with whitespace stripped at the start
and end.
"""
try:
line = lines.popleft()
except IndexError:
return None
else:
return line.strip()
def _regurgitate_line(self, lines: collections.deque[str], line: str) -> None:
"""
Express regret for eating a line,
Makes it available for the next `_consume_line` call.
"""
lines.appendleft(line)
def _is_fate(self, line: str) -> bool:
"""Does the given line report a target's fate?"""
return any(line.startswith(fate_name) for fate_name in self._causes_fates)
def _parse_fate_line(self, line) -> Optional[database.Target]:
"""
Attempt to parse a target's fate.
Returns `None` if this wasn't a fate line, or the target whose fate was
set otherwise.
"""
if not self._is_fate(line):
return None
else:
fate_name, target_name = line.split(maxsplit=1)
target = self.db.get_target(target_name)
fate = database.Fate(fate_name)
target.set_fate(fate)
return target
def _parse_newer_than_line(self, line) -> Optional[database.Target]:
"""
Attempt to parse "newer than" information.
Returns `None` if this wasn't a "newer than" line, or the *older*
target otherwise.
"""
if not line.startswith("newer than:"):
return None
else:
older_target_name = line.split(":", maxsplit=1)[1].strip()
return self.db.get_target(older_target_name)
_rebuilding_target_regex = re.compile(r'[^"]+\s+"([^"]+)"')
_rebuilding_reason_regex = re.compile(r'([^"]+)\s+"([^"]+)"')
def _parse_rebuilding_line(self, line) -> bool:
"""
Attempt to parse "rebuilding" information.
Update the database with any interesting information found.
Return `True` if anything was parsed.
"""
if not (
line.startswith("Rebuilding ")
or line.startswith("Inclusions rebuilding for ")
):
return False
else:
# e.g.
#
# Rebuilding "<foo>bar.h": it is older than "<baz>quux.h"
# Rebuilding "<foo>bar.h": inclusion of dependency "<baz>quux.h" was updated
# Rebuilding "<foo>bar.h": build action was updated
target_info, reason_info = line.split(":", maxsplit=1)
match = self._rebuilding_target_regex.match(target_info)
if match is None:
raise ValueError(f"Couldn't parse target from {target_info=}")
target = self.db.get_target(match.group(1))
reason_info = reason_info.strip()
# Don't need any trailing 'was updated' to disambiguate.
reason_info = reason_info.removesuffix("was updated").strip()
match = self._rebuilding_reason_regex.match(reason_info)
if match is not None:
reason = match.group(1)
related_target = self.db.get_target(match.group(2))
else:
reason = reason_info
related_target = None
if reason == "it was mentioned with '-t'":
target.set_rebuild_reason(database.RebuildReason.TOUCHED)
elif reason == "build action":
target.set_rebuild_reason(database.RebuildReason.ACTION)
elif reason == "it doesn't exist":
target.set_rebuild_reason(database.RebuildReason.MISSING)
elif reason == "it depends on newer":
target.set_rebuild_reason(
database.RebuildReason.NEEDTMP, related_target
)
elif reason == "it is older than":
target.set_rebuild_reason(
database.RebuildReason.OUTDATED, related_target
)
elif reason == "inclusion of inclusion": # ...was updated
target.set_rebuild_reason(
database.RebuildReason.UPDATED_INCLUDE_OF_INCLUDE, related_target
)
elif reason == "inclusion of dependency": # ...was updated
target.set_rebuild_reason(
database.RebuildReason.UPDATED_INCLUDE_OF_DEPENDENCY, related_target
)
elif reason == "inclusion": # ...was updated
target.set_rebuild_reason(
database.RebuildReason.UPDATED_INCLUDE, related_target
)
elif reason == "dependency": # ...was updated
target.set_rebuild_reason(
database.RebuildReason.UPDATED_DEPENDENCY, related_target
)
else:
raise NotImplementedError(f"{reason=}, {target=}, {related_target=}")
return True
_inherits_timestamp_regex = re.compile(
r'"([^"]+)"\s+inherits timestamp from\s+"([^"]+)"'
)
def _parse_inherits_timestamp_lines(self, lines: collections.deque[str]) -> None:
"""
Parse a series of timestamp inheritance lines.
*Only* timestamp inheritance lines are consumed. Any other line is left
to be consumed by the next `_consume_line` call.
"""
while True:
line = self._consume_line(lines)
m = self._inherits_timestamp_regex.search(line)
if m is None:
self._regurgitate_line(lines, line)
return
target = self.db.get_target(m.group(1))
source = self.db.get_target(m.group(2))
target.set_inherits_timestamp_from(source)
| [
"collections.deque",
"re.compile"
] | [((4433, 4465), 're.compile', 're.compile', (['"""[^"]+\\\\s+"([^"]+)\\""""'], {}), '(\'[^"]+\\\\s+"([^"]+)"\')\n', (4443, 4465), False, 'import re\n'), ((4497, 4531), 're.compile', 're.compile', (['"""([^"]+)\\\\s+"([^"]+)\\""""'], {}), '(\'([^"]+)\\\\s+"([^"]+)"\')\n', (4507, 4531), False, 'import re\n'), ((7715, 7778), 're.compile', 're.compile', (['""""([^"]+)"\\\\s+inherits timestamp from\\\\s+"([^"]+)\\""""'], {}), '(\'"([^"]+)"\\\\s+inherits timestamp from\\\\s+"([^"]+)"\')\n', (7725, 7778), False, 'import re\n'), ((1221, 1245), 'collections.deque', 'collections.deque', (['lines'], {}), '(lines)\n', (1238, 1245), False, 'import collections\n')] |
import socket
import xmlrpc.client
""" referemce: https://stackoverflow.com/a/14397619 """
class ServerProxy:
def __init__(self, url, timeout=10):
self.__url = url
self.__timeout = timeout
self.__prevDefaultTimeout = None
def __enter__(self):
try:
if self.__timeout:
self.__prevDefaultTimeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(self.__timeout)
proxy = xmlrpc.client.ServerProxy(self.__url, allow_none=True)
except Exception as ex:
raise Exception("Unable create XMLRPC-proxy for url '%s': %s" % (self.__url, ex))
return proxy
def __exit__(self, type, value, traceback):
if self.__prevDefaultTimeout is None:
socket.setdefaulttimeout(self.__prevDefaultTimeout)
| [
"socket.getdefaulttimeout",
"socket.setdefaulttimeout"
] | [((780, 831), 'socket.setdefaulttimeout', 'socket.setdefaulttimeout', (['self.__prevDefaultTimeout'], {}), '(self.__prevDefaultTimeout)\n', (804, 831), False, 'import socket\n'), ((367, 393), 'socket.getdefaulttimeout', 'socket.getdefaulttimeout', ([], {}), '()\n', (391, 393), False, 'import socket\n'), ((410, 450), 'socket.setdefaulttimeout', 'socket.setdefaulttimeout', (['self.__timeout'], {}), '(self.__timeout)\n', (434, 450), False, 'import socket\n')] |
import torch
from torchvision import ops
from Fcos_seg.utils.box_list import BoxList
from Fcos_seg.utils.boxlist_ops import cat_boxlist
from Fcos_seg.utils.boxlist_ops import boxlist_ml_nms
from Fcos_seg.utils.boxlist_ops import remove_small_boxes
class FcosPost(torch.nn.Module):
def __init__(self, cfg):
super(FcosPost, self).__init__()
self.pre_nms_thresh = cfg.TEST.PRE_THRES
self.nms_thresh = cfg.TEST.NMS_THRES
self.pre_nms_top_n = cfg.TEST.TOP_N
self.post_nms_top_n = cfg.TEST.POST_TOP_N
self.num_classes = cfg.MODEL.NUM_CLASS
self.min_size = 0
def forward_per_level(self, locations, box_cls , box_reg, box_center, image_sizes):
N, C, H, W = box_cls.shape
box_cls = box_cls.view(N, C, H, W).permute(0, 2, 3, 1)
box_cls = box_cls.reshape(N, -1, C).sigmoid()
box_reg = box_reg.view(N, 4, H, W).permute(0, 2, 3, 1)
box_reg = box_reg.reshape(N, -1, 4)
box_center = box_center.view(N, 1, H, W).permute(0, 2, 3, 1)
box_center = box_center.reshape(N, -1).sigmoid()
candidate_inds = box_cls > self.pre_nms_thresh
pre_nms_top_n = candidate_inds.view(N, -1).sum(1)
pre_nms_top_n = pre_nms_top_n.clamp(max=self.pre_nms_top_n)
box_cls = box_cls * box_center[:, :, None]
pred_results_per_level = []
# loop batch images
for i in range(N):
# filter candidates
per_box_cls = box_cls[i]
per_candidate_inds = candidate_inds[i]
per_box_cls = per_box_cls[per_candidate_inds]
per_candidate_nonzeros = per_candidate_inds.nonzero()
per_box_loc = per_candidate_nonzeros[:, 0]
per_class = per_candidate_nonzeros[:, 1] + 1
per_box_reg = box_reg[i]
per_box_reg = per_box_reg[per_box_loc]
per_locations = locations[per_box_loc]
per_pre_nms_top_n = pre_nms_top_n[i]
if per_candidate_inds.sum().item() > per_pre_nms_top_n.item():
per_box_cls, top_k_indices = \
per_box_cls.topk(per_pre_nms_top_n, sorted=False)
per_class = per_class[top_k_indices]
per_box_reg = per_box_reg[top_k_indices]
per_locations = per_locations[top_k_indices]
detections = torch.stack([
per_locations[:, 0] - per_box_reg[:, 0],
per_locations[:, 1] - per_box_reg[:, 1],
per_locations[:, 0] + per_box_reg[:, 2],
per_locations[:, 1] + per_box_reg[:, 3],
], dim=1)
h, w = image_sizes[i]
boxlist = BoxList(detections, (int(w), int(h)), mode="xyxy")
boxlist.add_field("labels", per_class)
boxlist.add_field("scores", torch.sqrt(per_box_cls))
boxlist = boxlist.clip_to_image(remove_empty=False)
boxlist = remove_small_boxes(boxlist, self.min_size)
pred_results_per_level.append(boxlist)
return pred_results_per_level
def forward(self, locations, box_cls, box_regression, centerness, image_sizes):
preds = []
for loc, cls_p, box_p, c_p in zip(locations, box_cls, box_regression, centerness):
preds.append(self.forward_per_level(loc, cls_p, box_p, c_p, image_sizes))
# gather levels_batches to batches_levels
boxlists = list(zip(*preds))
# cat the boxes in levels per image
boxlists = [cat_boxlist(boxlist) for boxlist in boxlists]
# nms of all levels per image
for i, box_per_batch in enumerate(boxlists):
# box should be in mode xyxy
box_mode = box_per_batch.mode
box_size = box_per_batch.size
boxes = box_per_batch.bbox
scores = box_per_batch.get_field("scores")
labels = box_per_batch.get_field("labels")
results_perbatch = []
# nms for each class (or nms of all classes? (to be checked))
for c in range(1, self.num_classes):
target_idx = (labels == c).view(-1)
score_per_class = scores[target_idx]
boxes_per_class = boxes[target_idx, :].view(-1, 4)
keep = ops.nms(boxes_per_class, score_per_class, self.nms_thresh)
# the remaining
boxes_per_class = boxes_per_class[keep]
score_per_class = score_per_class[keep]
n_keep = len(boxes_per_class)
# cast to same device
cid_per_class = torch.full(
(n_keep, 1), c, dtype = torch.float32, device=score_per_class.device
)
# if has predictions
if n_keep > 0:
results_perbatch.append(torch.cat([cid_per_class, score_per_class.unsqueeze(-1), boxes_per_class], dim = 1))
# detections remain after nms
n_detection = len(results_perbatch)
# cat detections to tensor
if n_detection > 0:
results_perbatch = torch.cat(results_perbatch, dim = 0)
# if still more than post top n
if n_detection > self.post_nms_top_n > 0:
scores = results_perbatch[:, 1]
img_threshold, _ = torch.kthvalue(
scores.cpu(), n_detection - self.post_top_n + 1
)
keep = scores >= img_threshold.item()
keep = torch.nonzero(keep).squeeze(1)
results_perbatch = results_perbatch[keep]
# back to boxlist
tmp_box = BoxList(results_perbatch[:, 2:], box_size, mode = box_mode)
tmp_box.add_field("labels", results_perbatch[:, 0])
tmp_box.add_field("scores", results_perbatch[:, 1])
tmp_box = tmp_box.clip_to_image(remove_empty=False)
tmp_box = remove_small_boxes(tmp_box, self.min_size)
boxlists[i] = tmp_box
return boxlists
| [
"Fcos_seg.utils.box_list.BoxList",
"torch.full",
"Fcos_seg.utils.boxlist_ops.cat_boxlist",
"torch.stack",
"torch.sqrt",
"torchvision.ops.nms",
"torch.nonzero",
"Fcos_seg.utils.boxlist_ops.remove_small_boxes",
"torch.cat"
] | [((2533, 2726), 'torch.stack', 'torch.stack', (['[per_locations[:, 0] - per_box_reg[:, 0], per_locations[:, 1] - per_box_reg\n [:, 1], per_locations[:, 0] + per_box_reg[:, 2], per_locations[:, 1] +\n per_box_reg[:, 3]]'], {'dim': '(1)'}), '([per_locations[:, 0] - per_box_reg[:, 0], per_locations[:, 1] -\n per_box_reg[:, 1], per_locations[:, 0] + per_box_reg[:, 2], \n per_locations[:, 1] + per_box_reg[:, 3]], dim=1)\n', (2544, 2726), False, 'import torch\n'), ((3132, 3174), 'Fcos_seg.utils.boxlist_ops.remove_small_boxes', 'remove_small_boxes', (['boxlist', 'self.min_size'], {}), '(boxlist, self.min_size)\n', (3150, 3174), False, 'from Fcos_seg.utils.boxlist_ops import remove_small_boxes\n'), ((3741, 3761), 'Fcos_seg.utils.boxlist_ops.cat_boxlist', 'cat_boxlist', (['boxlist'], {}), '(boxlist)\n', (3752, 3761), False, 'from Fcos_seg.utils.boxlist_ops import cat_boxlist\n'), ((5993, 6050), 'Fcos_seg.utils.box_list.BoxList', 'BoxList', (['results_perbatch[:, 2:]', 'box_size'], {'mode': 'box_mode'}), '(results_perbatch[:, 2:], box_size, mode=box_mode)\n', (6000, 6050), False, 'from Fcos_seg.utils.box_list import BoxList\n'), ((6267, 6309), 'Fcos_seg.utils.boxlist_ops.remove_small_boxes', 'remove_small_boxes', (['tmp_box', 'self.min_size'], {}), '(tmp_box, self.min_size)\n', (6285, 6309), False, 'from Fcos_seg.utils.boxlist_ops import remove_small_boxes\n'), ((3021, 3044), 'torch.sqrt', 'torch.sqrt', (['per_box_cls'], {}), '(per_box_cls)\n', (3031, 3044), False, 'import torch\n'), ((4545, 4603), 'torchvision.ops.nms', 'ops.nms', (['boxes_per_class', 'score_per_class', 'self.nms_thresh'], {}), '(boxes_per_class, score_per_class, self.nms_thresh)\n', (4552, 4603), False, 'from torchvision import ops\n'), ((4881, 4959), 'torch.full', 'torch.full', (['(n_keep, 1)', 'c'], {'dtype': 'torch.float32', 'device': 'score_per_class.device'}), '((n_keep, 1), c, dtype=torch.float32, device=score_per_class.device)\n', (4891, 4959), False, 'import torch\n'), ((5411, 5445), 'torch.cat', 'torch.cat', (['results_perbatch'], {'dim': '(0)'}), '(results_perbatch, dim=0)\n', (5420, 5445), False, 'import torch\n'), ((5822, 5841), 'torch.nonzero', 'torch.nonzero', (['keep'], {}), '(keep)\n', (5835, 5841), False, 'import torch\n')] |
from tqdm import tqdm
from nic import (
captioning as cptn,
datapreparation as dp,
metrics as mcs,
)
from nic.datapreparation import utils
def bleu_score_of(model,
*,
is_decoder_only=True,
path_to_data,
batch_size=32,
data_type="test",
meta_tokens=dp.MetaTokens(),
caption_limit=None,
verbose=True):
"""
:param model: an instance of the NIC model created with
`define_decoder_model`, `define_model` or `connect`.
:param is_decoder_only: a boolean value indicating whether
`model` was defined with `define_decoder_model`. Defaults to
True.
:param path_to_data: a str - the path of the directory where
preprocessed data is stored.
:param batch_size: an int - the batch size to use when processing
images. Defaults to 32.
:param data_type: a str - the type of data on which to evaluate the
model. Should be 'test' (the default), 'val' or 'train'.
:param meta_tokens: an instance of MetaTokens - the meta tokens
with which the data was preprocessed.
:param caption_limit: an unsigned int - a limit for the predicted
captions' length in tokens. If omitted or `None`, defaults to
`CAPTION_LIMIT`.
:param verbose: a boolean value indicating whether to show a status
bar for the progress. Defaults to `True`.
:returns: a float in the range [0, 100] - the BLEU-4 score of the
model.
"""
images, images_count = dp.load_images(path_to_data,
data_type,
is_decoder_only)
images = images.batch(batch_size)
captions = dp.load_captions(path_to_data, data_type)
tokenizer = dp.load_tokenizer(path_to_data)
generator = cptn.CaptionGenerator(
model,
meta_tokens,
tokenizer,
is_decoder_only
)
reference, predicted = [], []
if (verbose):
print("Computing BLEU-4 score of", model.name, "on",
images_count, data_type, "images.")
batches_count = utils.batches_count_for(images_count,
batch_size)
images = tqdm(images, total=batches_count)
for ims, ids in images:
reference.extend([
c.split()[1:-1]
for c in captions[int(i)]
]
for i in ids
)
predicted.extend(generator(ims, caption_limit))
return mcs.bleu_score_of(predicted, reference)
| [
"nic.datapreparation.utils.batches_count_for",
"nic.datapreparation.load_images",
"nic.metrics.bleu_score_of",
"tqdm.tqdm",
"nic.datapreparation.MetaTokens",
"nic.captioning.CaptionGenerator",
"nic.datapreparation.load_captions",
"nic.datapreparation.load_tokenizer"
] | [((371, 386), 'nic.datapreparation.MetaTokens', 'dp.MetaTokens', ([], {}), '()\n', (384, 386), True, 'from nic import captioning as cptn, datapreparation as dp, metrics as mcs\n'), ((1553, 1609), 'nic.datapreparation.load_images', 'dp.load_images', (['path_to_data', 'data_type', 'is_decoder_only'], {}), '(path_to_data, data_type, is_decoder_only)\n', (1567, 1609), True, 'from nic import captioning as cptn, datapreparation as dp, metrics as mcs\n'), ((1747, 1788), 'nic.datapreparation.load_captions', 'dp.load_captions', (['path_to_data', 'data_type'], {}), '(path_to_data, data_type)\n', (1763, 1788), True, 'from nic import captioning as cptn, datapreparation as dp, metrics as mcs\n'), ((1805, 1836), 'nic.datapreparation.load_tokenizer', 'dp.load_tokenizer', (['path_to_data'], {}), '(path_to_data)\n', (1822, 1836), True, 'from nic import captioning as cptn, datapreparation as dp, metrics as mcs\n'), ((1853, 1922), 'nic.captioning.CaptionGenerator', 'cptn.CaptionGenerator', (['model', 'meta_tokens', 'tokenizer', 'is_decoder_only'], {}), '(model, meta_tokens, tokenizer, is_decoder_only)\n', (1874, 1922), True, 'from nic import captioning as cptn, datapreparation as dp, metrics as mcs\n'), ((2533, 2572), 'nic.metrics.bleu_score_of', 'mcs.bleu_score_of', (['predicted', 'reference'], {}), '(predicted, reference)\n', (2550, 2572), True, 'from nic import captioning as cptn, datapreparation as dp, metrics as mcs\n'), ((2149, 2198), 'nic.datapreparation.utils.batches_count_for', 'utils.batches_count_for', (['images_count', 'batch_size'], {}), '(images_count, batch_size)\n', (2172, 2198), False, 'from nic.datapreparation import utils\n'), ((2264, 2297), 'tqdm.tqdm', 'tqdm', (['images'], {'total': 'batches_count'}), '(images, total=batches_count)\n', (2268, 2297), False, 'from tqdm import tqdm\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/9/13 13:58
# @Author : <NAME>
# @Site :
# @File : file
# @Software: PyCharm
from os import path
from application import app
from common.utils.format_time import stamp_to_date
class File(object):
@staticmethod
def get_upload_file_path():
"""
获取文件存储路径
:return:
"""
return path.join(app.root_path, app.config.get('FILE_PATH'), 'file', stamp_to_date())
| [
"common.utils.format_time.stamp_to_date",
"application.app.config.get"
] | [((417, 444), 'application.app.config.get', 'app.config.get', (['"""FILE_PATH"""'], {}), "('FILE_PATH')\n", (431, 444), False, 'from application import app\n'), ((454, 469), 'common.utils.format_time.stamp_to_date', 'stamp_to_date', ([], {}), '()\n', (467, 469), False, 'from common.utils.format_time import stamp_to_date\n')] |
import RPi.GPIO as GPIO
import time
import socket
def init():
GPIO.setmode(GPIO.BOARD)
GPIO.setup(7,GPIO.OUT)
GPIO.setup(11,GPIO.OUT)
GPIO.setup(13,GPIO.OUT)
GPIO.setup(29,GPIO.OUT)
GPIO.setup(31,GPIO.OUT)
GPIO.setup(33,GPIO.OUT)
GPIO.setup(12,GPIO.IN,pull_up_down=GPIO.PUD_UP)
def start_stop():
GPIO.output(7,True)
time.sleep(.5)
GPIO.output(11,True)
time.sleep(.25)
GPIO.output(13,True)
time.sleep(.25)
GPIO.output(13,False)
time.sleep(.25)
GPIO.output(11,False)
time.sleep(.25)
GPIO.output(7,False)
time.sleep(.5)
def get_ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.connect(('10.255.255.255',0))
IP = s.getsockname()[0]
except:
IP = '127.0.0.1'
finally:
s.close()
return IP
morse = {"a": ".-","b":"-...","c":"-.-.","d":"-..","e":".","f":"..-.","g":"--.","h":"....","i":"..","j":".---","k":"-.-","l":".-..","m":"--","n":"-.","o":"---","p":".--.","q":"--.-","r":".-.","s":"...","t":"-","u":"..-","v":"...-","w":".--","x":"-..-","y":"-.--","z":"--..","1":".----","2":"..---","3":"...--","4":"....-","5":".....","6":"-....","7":"--...","8":"---..","9":"----.","0":"-----"}
def print_dot(pin):
GPIO.output(pin,True)
time.sleep(.1)
GPIO.output(pin,False)
time.sleep(.2)
def print_dash(pin):
GPIO.output(pin,True)
time.sleep(.35)
GPIO.output(pin,False)
time.sleep(.2)
def print_break(pin):
GPIO.output(pin,True)
time.sleep(.05)
GPIO.output(pin,False)
time.sleep(.05)
def print_morse_char(letter,pin,bpin):
for dosh in morse[letter.lower()]:
if dosh == ".":
print_dot(pin)
elif dosh == "-":
print_dash(pin)
else:
continue
time.sleep(.5)
def print_morse_message(message,pin,bpin,wpin):
words = message.split(" ")
for word in words:
for letter in word:
if letter.lower() in morse:
print_morse_char(letter,pin,bpin)
else:
print_break(bpin)
print_break(wpin)
init()
start_stop()
triggered = False
input_message = input("Enter morse message:")
print_morse_message(input_message,33,31,29)
while not triggered:
input_state = GPIO.input(12)
if input_state == False:
print_morse_message(get_ip(),33,31,29)
time.sleep(.2)
triggered = True
start_stop()
GPIO.cleanup()
| [
"RPi.GPIO.cleanup",
"socket.socket",
"RPi.GPIO.setup",
"RPi.GPIO.output",
"time.sleep",
"RPi.GPIO.input",
"RPi.GPIO.setmode"
] | [((2436, 2450), 'RPi.GPIO.cleanup', 'GPIO.cleanup', ([], {}), '()\n', (2448, 2450), True, 'import RPi.GPIO as GPIO\n'), ((67, 91), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BOARD'], {}), '(GPIO.BOARD)\n', (79, 91), True, 'import RPi.GPIO as GPIO\n'), ((96, 119), 'RPi.GPIO.setup', 'GPIO.setup', (['(7)', 'GPIO.OUT'], {}), '(7, GPIO.OUT)\n', (106, 119), True, 'import RPi.GPIO as GPIO\n'), ((123, 147), 'RPi.GPIO.setup', 'GPIO.setup', (['(11)', 'GPIO.OUT'], {}), '(11, GPIO.OUT)\n', (133, 147), True, 'import RPi.GPIO as GPIO\n'), ((151, 175), 'RPi.GPIO.setup', 'GPIO.setup', (['(13)', 'GPIO.OUT'], {}), '(13, GPIO.OUT)\n', (161, 175), True, 'import RPi.GPIO as GPIO\n'), ((179, 203), 'RPi.GPIO.setup', 'GPIO.setup', (['(29)', 'GPIO.OUT'], {}), '(29, GPIO.OUT)\n', (189, 203), True, 'import RPi.GPIO as GPIO\n'), ((207, 231), 'RPi.GPIO.setup', 'GPIO.setup', (['(31)', 'GPIO.OUT'], {}), '(31, GPIO.OUT)\n', (217, 231), True, 'import RPi.GPIO as GPIO\n'), ((235, 259), 'RPi.GPIO.setup', 'GPIO.setup', (['(33)', 'GPIO.OUT'], {}), '(33, GPIO.OUT)\n', (245, 259), True, 'import RPi.GPIO as GPIO\n'), ((263, 312), 'RPi.GPIO.setup', 'GPIO.setup', (['(12)', 'GPIO.IN'], {'pull_up_down': 'GPIO.PUD_UP'}), '(12, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n', (273, 312), True, 'import RPi.GPIO as GPIO\n'), ((334, 354), 'RPi.GPIO.output', 'GPIO.output', (['(7)', '(True)'], {}), '(7, True)\n', (345, 354), True, 'import RPi.GPIO as GPIO\n'), ((358, 373), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (368, 373), False, 'import time\n'), ((377, 398), 'RPi.GPIO.output', 'GPIO.output', (['(11)', '(True)'], {}), '(11, True)\n', (388, 398), True, 'import RPi.GPIO as GPIO\n'), ((402, 418), 'time.sleep', 'time.sleep', (['(0.25)'], {}), '(0.25)\n', (412, 418), False, 'import time\n'), ((422, 443), 'RPi.GPIO.output', 'GPIO.output', (['(13)', '(True)'], {}), '(13, True)\n', (433, 443), True, 'import RPi.GPIO as GPIO\n'), ((447, 463), 'time.sleep', 'time.sleep', (['(0.25)'], {}), '(0.25)\n', (457, 463), False, 'import time\n'), ((467, 489), 'RPi.GPIO.output', 'GPIO.output', (['(13)', '(False)'], {}), '(13, False)\n', (478, 489), True, 'import RPi.GPIO as GPIO\n'), ((493, 509), 'time.sleep', 'time.sleep', (['(0.25)'], {}), '(0.25)\n', (503, 509), False, 'import time\n'), ((513, 535), 'RPi.GPIO.output', 'GPIO.output', (['(11)', '(False)'], {}), '(11, False)\n', (524, 535), True, 'import RPi.GPIO as GPIO\n'), ((539, 555), 'time.sleep', 'time.sleep', (['(0.25)'], {}), '(0.25)\n', (549, 555), False, 'import time\n'), ((559, 580), 'RPi.GPIO.output', 'GPIO.output', (['(7)', '(False)'], {}), '(7, False)\n', (570, 580), True, 'import RPi.GPIO as GPIO\n'), ((584, 599), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (594, 599), False, 'import time\n'), ((622, 670), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (635, 670), False, 'import socket\n'), ((1255, 1277), 'RPi.GPIO.output', 'GPIO.output', (['pin', '(True)'], {}), '(pin, True)\n', (1266, 1277), True, 'import RPi.GPIO as GPIO\n'), ((1281, 1296), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1291, 1296), False, 'import time\n'), ((1300, 1323), 'RPi.GPIO.output', 'GPIO.output', (['pin', '(False)'], {}), '(pin, False)\n', (1311, 1323), True, 'import RPi.GPIO as GPIO\n'), ((1327, 1342), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (1337, 1342), False, 'import time\n'), ((1368, 1390), 'RPi.GPIO.output', 'GPIO.output', (['pin', '(True)'], {}), '(pin, True)\n', (1379, 1390), True, 'import RPi.GPIO as GPIO\n'), ((1394, 1410), 'time.sleep', 'time.sleep', (['(0.35)'], {}), '(0.35)\n', (1404, 1410), False, 'import time\n'), ((1414, 1437), 'RPi.GPIO.output', 'GPIO.output', (['pin', '(False)'], {}), '(pin, False)\n', (1425, 1437), True, 'import RPi.GPIO as GPIO\n'), ((1441, 1456), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (1451, 1456), False, 'import time\n'), ((1483, 1505), 'RPi.GPIO.output', 'GPIO.output', (['pin', '(True)'], {}), '(pin, True)\n', (1494, 1505), True, 'import RPi.GPIO as GPIO\n'), ((1509, 1525), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (1519, 1525), False, 'import time\n'), ((1529, 1552), 'RPi.GPIO.output', 'GPIO.output', (['pin', '(False)'], {}), '(pin, False)\n', (1540, 1552), True, 'import RPi.GPIO as GPIO\n'), ((1556, 1572), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (1566, 1572), False, 'import time\n'), ((1795, 1810), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (1805, 1810), False, 'import time\n'), ((2283, 2297), 'RPi.GPIO.input', 'GPIO.input', (['(12)'], {}), '(12)\n', (2293, 2297), True, 'import RPi.GPIO as GPIO\n'), ((2382, 2397), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (2392, 2397), False, 'import time\n')] |
"""A module for anything color/animation related."""
from __future__ import annotations
from abc import ABC, abstractmethod
from dataclasses import dataclass
from math import ceil, cos, pi
from time import time
from typing import *
Numeric = Union[int, float]
def linear_interpolation(a: Numeric, b: Numeric, x: Numeric) -> Numeric:
"""Interpolate between a and b. $x \\in [0, 1]$"""
return a * (1 - x) + b * x
def sinify(x: float) -> float:
"""Run through a sin function that returns values \\in [0, 1]"""
return 1 - (cos(x * pi * 2) + 1) / 2
@dataclass
class Color:
r: Numeric
g: Numeric
b: Numeric
def to_tuple(self) -> Tuple[int, int, int]:
"""Return the color as a tuple."""
return int(self.r), int(self.g), int(self.b)
def to_rgb(self) -> str:
"""Return the color as a RRRGGGBBB string. Mostly for testing."""
return "#%02x%02x%02x" % self.to_tuple()
def interpolate(self, other: Color, x: Numeric) -> Color:
"""Interpolate between two colors. $x \\in [0, 1]$"""
return Color(
linear_interpolation(self.r, other.r, x),
linear_interpolation(self.g, other.g, x),
linear_interpolation(self.b, other.b, x),
)
def darker(self, coefficient: float):
"""Darken the color to a certain coefficient."""
return Color(self.r * coefficient, self.g * coefficient, self.b * coefficient)
class Animation(ABC):
"""A base class for all LED animations."""
def get_period(self) -> float:
"""Return, which period the animation is on."""
period = (time() - self.offset) / self.period
# if we're not repeating, stay stuck at the last state of the animation
if period > 1 and not self.repeats:
period = 0.99999
return period
def __init__(self, period: int, offset=0, led_count=5, repeats=True):
self.period = period
self.led_count = led_count
self.repeats = repeats
# the animations are based on $current time - this offset$
# this is done so animations can properly start and smoothly transition
# if offset is -1, it is set to current time to start the animation from the
# beginning
self.offset = offset if offset != -1 else time()
@abstractmethod
def __call__(self) -> Tuple[Color]:
"""All animations must be callable and return a tuple of the LED colors."""
class PulsingAnimation(Animation):
"""A pulsing animation - from color to color in a sine wave."""
def __init__(self, c1: Color, c2: Color, *args, **kwargs):
super().__init__(*args, **kwargs)
self.c1 = c1
self.c2 = c2
def __call__(self):
color = self.c1.interpolate(self.c2, sinify(self.get_period()))
return [color] * self.led_count
class MetronomeAnimation(Animation):
"""A metronome animation - from one end of the LEDs to the other."""
def __init__(self, color: Color, *args, **kwargs):
super().__init__(*args, **kwargs)
self.color = color
def __call__(self):
# all colors are 0
colors = [Color(0, 0, 0) for _ in range(self.led_count)]
# LED position, based on the period
pos = sinify(self.get_period()) * (self.led_count - 1)
l1 = int(pos)
l2 = int(ceil(pos))
l1_c = 1 - (pos - int(pos))
l2_c = 1 - (int(ceil(pos)) - pos)
colors[l1] = self.color.darker(l1_c)
colors[l2] = self.color.darker(l2_c)
return colors
class LinearAnimation(Animation):
"""A linear animation - from one end of the LEDs to the other."""
def __init__(self, color: Color, *args, **kwargs):
super().__init__(*args, **kwargs)
self.color = color
def __call__(self):
colors = [Color(0, 0, 0) for _ in range(self.led_count)]
pos = self.get_period() * self.led_count
l1 = int(pos) % self.led_count
l2 = int(ceil(pos)) % self.led_count
l1_c = 1 - (pos - int(pos))
l2_c = 1 - (int(ceil(pos)) - pos)
colors[l1] = self.color.darker(l1_c)
colors[l2] = self.color.darker(l2_c)
return colors
class ProgressAnimation(Animation):
"""Like linear animation, but fills up the progress bar."""
def __init__(self, color: Color, *args, **kwargs):
super().__init__(*args, **kwargs)
self.color = color
def __call__(self):
colors = [Color(0, 0, 0) for _ in range(self.led_count)]
pos = self.get_period() * self.led_count
colors[int(pos) % self.led_count] = self.color.darker(pos - int(pos))
for i in range(int(pos) % self.led_count):
colors[i] = self.color
return colors
class TransitionAnimation(Animation):
"""An animation transition - slowly transition from one animation to another."""
def __init__(self, a1: Animation, a2: Animation, *args, **kwargs):
super().__init__(*args, **kwargs)
self.a1 = a1
self.a2 = a2
self.offset = time()
self.repeats = False
self.prev_c = None
def __call__(self):
c = self.get_period()
colors = []
a1c = self.a1()
a2c = self.a2()
for i in range(self.led_count):
colors.append(a1c[i].interpolate(a2c[i], c))
# if the transition is over, discard the first animation
# this way there isn't an infinite recursion
# might look odd, but will likely simplify the code immensely
if self.prev_c == c and self.a1 is not self.a2:
self.a1 = self.a2
self.prev_c = c
return colors
class ChainedAnimation(Animation):
"""A sequence of animations chained one after another."""
def __init__(self, *args: Animation, **kwargs):
super().__init__(*args, **kwargs)
# TODO - period is based on the provided animations
class Colors:
NONE = Color(0, 0, 0)
RED = Color(255, 0, 0)
GREEN = Color(0, 255, 0)
WHITE = Color(255, 255, 255)
PINK = Color(170, 0, 50)
class Animations:
"""All the different animations of Mynt."""
DEFAULT = lambda: Colors.NONE # nothing
ERROR = PulsingAnimation(Colors.NONE, Colors.RED, 1)
CONFIGURATION_READ = PulsingAnimation(
Colors.NONE, Colors.WHITE, 2, repeats=False, offset=-1
)
CONNECTING_TO_WIFI = MetronomeAnimation(Colors.WHITE, 1.5) # white
CONNECTING_TO_SERVER = MetronomeAnimation(Colors.GREEN, 1.5) # green
# transitions from white to pink briefly when a beat is detected
CONTACTING_PAIR_BLANK = LinearAnimation(Colors.WHITE, 1.5) # white
CONTACTING_PAIR_BEAT = LinearAnimation(Colors.PINK, 1.5) # pink
# runs testing code when ran as a module
if __name__ == "__main__":
import tkinter
top = tkinter.Tk()
r = 100
animation = Animations.CONFIGURATION_READ
canvas = tkinter.Canvas(top, bg="blue", height=r, width=r * animation.led_count)
canvas.pack()
while True:
top.update_idletasks()
top.update()
for i in range(animation.led_count):
color = animation()[i].to_rgb()
print(animation()[i].to_tuple())
canvas.create_rectangle(i * r, 0, (i + 1) * r, r, fill=color)
| [
"math.ceil",
"math.cos",
"tkinter.Canvas",
"tkinter.Tk",
"time.time"
] | [((6830, 6842), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (6840, 6842), False, 'import tkinter\n'), ((6917, 6988), 'tkinter.Canvas', 'tkinter.Canvas', (['top'], {'bg': '"""blue"""', 'height': 'r', 'width': '(r * animation.led_count)'}), "(top, bg='blue', height=r, width=r * animation.led_count)\n", (6931, 6988), False, 'import tkinter\n'), ((5065, 5071), 'time.time', 'time', ([], {}), '()\n', (5069, 5071), False, 'from time import time\n'), ((2308, 2314), 'time.time', 'time', ([], {}), '()\n', (2312, 2314), False, 'from time import time\n'), ((3353, 3362), 'math.ceil', 'ceil', (['pos'], {}), '(pos)\n', (3357, 3362), False, 'from math import ceil, cos, pi\n'), ((541, 556), 'math.cos', 'cos', (['(x * pi * 2)'], {}), '(x * pi * 2)\n', (544, 556), False, 'from math import ceil, cos, pi\n'), ((1622, 1628), 'time.time', 'time', ([], {}), '()\n', (1626, 1628), False, 'from time import time\n'), ((3985, 3994), 'math.ceil', 'ceil', (['pos'], {}), '(pos)\n', (3989, 3994), False, 'from math import ceil, cos, pi\n'), ((3425, 3434), 'math.ceil', 'ceil', (['pos'], {}), '(pos)\n', (3429, 3434), False, 'from math import ceil, cos, pi\n'), ((4074, 4083), 'math.ceil', 'ceil', (['pos'], {}), '(pos)\n', (4078, 4083), False, 'from math import ceil, cos, pi\n')] |
import util
import io
class SolutionParser(object):
def __init__(self, sln_path):
# Initialize SolutionParser Object
assert util.test_path(sln_path, 'f'), sln_path
self.path = sln_path
self.raw_content = []
self.data = {
"version": None,
"vsversion": {
"min": None,
"current": None
},
"projects": [],
"csdef": None,
"cscfg": None,
"ccproj": None,
"sln": self.path
}
def parse(self):
# API sugar
self.get_content()
self.parse_content()
def get_content(self):
# Open .sln file and create list of each line
with io.open(self.path) as context:
self.raw_content = [[line.lower().strip(), line.strip()] for line in context.readlines()]
def feed(self, content):
# Allow user to pass in sln contents to Objects
self.raw_content = [[line.lower().strip(), line.strip()] for line in content.split("\n") if line.strip()]
def fetch_csdef(self, ccproj):
# locate where the .csdef file is
directory = util.join_path(*ccproj.split("\\")[:-1])
for file in util.listdirpaths(directory):
if file.lower().endswith('.csdef'):
return file
def fetch_cscfg(self, ccproj):
# locate where the .cscfg file is
directory = util.join_path(*ccproj.split("\\")[:-1])
for file in util.listdirpaths(directory):
if file.lower().endswith('local.cscfg'):
return file
def _guid_exists(self, guid):
# Helper: make sure guid exists
for project in self.data['projects']:
if project['guid'] == guid:
return True
return False
def parse_content(self):
# Read self.raw_content and pass data to json blob
for line_lower, line in self.raw_content:
# Get SLN version data
if line_lower.startswith("microsoft visual studio solution file"):
self.data['version'] = float(line_lower.replace('microsoft visual studio solution file, format version ', '').strip())
elif line_lower.startswith("minimumvisualstudioversion"):
self.data['vsversion']['min'] = line_lower.split('=')[-1].strip()
elif line_lower.startswith("visualstudioversion"):
self.data['vsversion']['current'] = line_lower.split('=')[-1].strip()
elif line_lower.startswith("project("): # Parse line that starts with "project("
parse = line.split('\"')
guid = parse[7][1:-1] # Get GUID from line
if not self._guid_exists(guid): # Check that GUID exists
type_guid = parse[1] # Get project type (E.g. C# or Python)
name = parse[3]
proj = parse[5]
location = util.join_path(*self.path.split("\\")[:-1]) # Get directory path of that project
proj_dir = proj.split("\\")[:-1] if len(proj.split("\\")) > 1 else proj
if type(proj_dir) == str:
proj_location = location
else:
proj_location = util.join_path(location, util.join_path(*proj_dir))
self.data['location'] = location
if type_guid[1:-1] == "CC5FD16D-436D-48AD-A40C-5A424C6E3E79": # If the project type is a CS definitions project
self.data['cscfg'] = self.fetch_cscfg(util.join_path(location, proj)) # get CS defs/cfg/proj data
self.data['csdef'] = self.fetch_csdef(util.join_path(location, proj))
self.data['ccproj'] = util.join_path(location, proj)
else: # if the project is a regular project
self.data['projects'].append({ # Build json blob
"name": name,
"type": type_guid[1:-1],
"proj": util.join_path(location, proj),
"guid": guid,
"location": proj_location,
"ignore": False,
})
def update_csdef(self, csdef):
# Update json blob (self.data) with csdef meta data collected
for i, project in enumerate(self.data['projects']):
try:
for key, value in csdef.data[project['name']].items():
self.data['projects'][i][key] = value
except KeyError:
# If project doesn't exist in csdef, it means the project isn't a role, but a dependency
# We can ignore dependencies when building VMs
self.data['projects'][i]["ignore"] = True
def update_cscfg(self, cscfg):
# Update json blob (self.data) with cscfg meta data collected
for i, project in enumerate(self.data['projects']):
try:
for key, value in cscfg.data[project['name']].items():
self.data['projects'][i][key] = value
except KeyError:
# If project doesn't exist in cscfg, it means the project isn't a role, but a dependency
# We can ignore dependencies when building VMs
self.data['projects'][i]["ignore"] = True
def update_proj(self, proj):
# Update json blob (self.data) with ccproj meta data collected
for i, project in enumerate(self.data['projects']):
try:
self.data['projects'][i]['references'] = [ref.upper() for ref in proj.data[project['name']]]
except KeyError:
# if 'project['name']' doesn't exist, there aren't any references
self.data['projects'][i]['references'] = []
| [
"util.listdirpaths",
"util.test_path",
"io.open",
"util.join_path"
] | [((146, 175), 'util.test_path', 'util.test_path', (['sln_path', '"""f"""'], {}), "(sln_path, 'f')\n", (160, 175), False, 'import util\n'), ((1237, 1265), 'util.listdirpaths', 'util.listdirpaths', (['directory'], {}), '(directory)\n', (1254, 1265), False, 'import util\n'), ((1502, 1530), 'util.listdirpaths', 'util.listdirpaths', (['directory'], {}), '(directory)\n', (1519, 1530), False, 'import util\n'), ((741, 759), 'io.open', 'io.open', (['self.path'], {}), '(self.path)\n', (748, 759), False, 'import io\n'), ((3852, 3882), 'util.join_path', 'util.join_path', (['location', 'proj'], {}), '(location, proj)\n', (3866, 3882), False, 'import util\n'), ((3356, 3381), 'util.join_path', 'util.join_path', (['*proj_dir'], {}), '(*proj_dir)\n', (3370, 3381), False, 'import util\n'), ((3652, 3682), 'util.join_path', 'util.join_path', (['location', 'proj'], {}), '(location, proj)\n', (3666, 3682), False, 'import util\n'), ((3774, 3804), 'util.join_path', 'util.join_path', (['location', 'proj'], {}), '(location, proj)\n', (3788, 3804), False, 'import util\n'), ((4139, 4169), 'util.join_path', 'util.join_path', (['location', 'proj'], {}), '(location, proj)\n', (4153, 4169), False, 'import util\n')] |
#!/usr/bin/env python3
from evdev import UInput, UInputError, ecodes, AbsInfo
from evdev import util
from fport import FportParser, FportMessageControl
import serial
from time import sleep
if __name__ == '__main__':
device = None
def handler(message):
if type(message) is FportMessageControl:
print("Handled:", message)
pass
if False:
counter = counter + 1
device.write(ecodes.EV_ABS, ecodes.ABS_X, counter % 255)
device.syn()
try:
description = 'TstAM'
default_props = AbsInfo(value=0, min=0, max=2048, fuzz=0, flat=0, resolution=0)
events = {ecodes.EV_ABS: [
(ecodes.ABS_X, default_props),
(ecodes.ABS_Y, default_props),
(ecodes.ABS_Z, default_props),
(ecodes.ABS_RZ, default_props)
], ecodes.EV_KEY:[], ecodes.EV_REL: []}
device = UInput(events=events)
counter = 0
parser = FportParser(handler)
ui = UInput()
with serial.Serial('/dev/ttyUSB0', 115200, timeout=1) as ser:
while True:
s = ser.read(100)
parser.parse(s)
finally:
device.close()
pass | [
"fport.FportParser",
"evdev.UInput",
"evdev.AbsInfo",
"serial.Serial"
] | [((572, 635), 'evdev.AbsInfo', 'AbsInfo', ([], {'value': '(0)', 'min': '(0)', 'max': '(2048)', 'fuzz': '(0)', 'flat': '(0)', 'resolution': '(0)'}), '(value=0, min=0, max=2048, fuzz=0, flat=0, resolution=0)\n', (579, 635), False, 'from evdev import UInput, UInputError, ecodes, AbsInfo\n'), ((909, 930), 'evdev.UInput', 'UInput', ([], {'events': 'events'}), '(events=events)\n', (915, 930), False, 'from evdev import UInput, UInputError, ecodes, AbsInfo\n'), ((968, 988), 'fport.FportParser', 'FportParser', (['handler'], {}), '(handler)\n', (979, 988), False, 'from fport import FportParser, FportMessageControl\n'), ((1002, 1010), 'evdev.UInput', 'UInput', ([], {}), '()\n', (1008, 1010), False, 'from evdev import UInput, UInputError, ecodes, AbsInfo\n'), ((1024, 1072), 'serial.Serial', 'serial.Serial', (['"""/dev/ttyUSB0"""', '(115200)'], {'timeout': '(1)'}), "('/dev/ttyUSB0', 115200, timeout=1)\n", (1037, 1072), False, 'import serial\n')] |
"""
This module contains all of the server selection logic.
It supplies one function:
get_server() which returns the name of a server to mine.
It has two external dependencies.
1) btcnet_info via btcnet_wrapper
2) a way to pull getworks for checking if we should delag pools
"""
import ServerLogic
import bitHopper.Configuration.Workers as Workers
def get_server():
"""
Returns a valid server, worker, username tuple
Note this isn't quite a perfectly even distribution but it
works well enough
"""
return _select(list(generate_tuples(ServerLogic.get_server())))
i = 0
def generate_tuples( server):
"""
Generates a tuple of server, user, password for valid servers
"""
tokens = Workers.get_worker_from(server)
for user, password in tokens:
yield (server, user, password)
def _select(item):
"""
Selection utility function
"""
global i
i = i + 1 if i < 10**10 else 0
if len(item) == 0:
raise ValueError("No item available")
return item[i % len(item)]
| [
"bitHopper.Configuration.Workers.get_worker_from",
"ServerLogic.get_server"
] | [((734, 765), 'bitHopper.Configuration.Workers.get_worker_from', 'Workers.get_worker_from', (['server'], {}), '(server)\n', (757, 765), True, 'import bitHopper.Configuration.Workers as Workers\n'), ((569, 593), 'ServerLogic.get_server', 'ServerLogic.get_server', ([], {}), '()\n', (591, 593), False, 'import ServerLogic\n')] |
import re
class solve_day(object):
with open('inputs/day08.txt', 'r') as f:
data = f.readlines()
def part1(self):
# try 1: 5979 (too high)
# try 2: 2043 (too high)
# try 3: 1376 (too high)
count_lit = 0
count_mem = 0
# self.data = ['""', '"abc"', r'"aaa\"aaa"', r'"\x27"']
for d in self.data:
d = d.strip()
# d = r'"rbpbrxm\\\"\"\"voxx"'
# d = r'""'
# d = r'"abc"'
# d = r'"aaa\"aaa"'
# d = r'"\x27"'
# d = r'"\\"'
# print(f'input: {d}')
# print(f'len: {len(d)}')
# add literal count
count_lit += len(d)
## start parsing
# first, can remove beginning and ending quotes
d = d[1:-1]
# print(f'step 1: {d}')
# next, let's remove and count the hex notation
inds = []
for i in re.finditer(r'\\x', d):
inds.append(i.span()[0])
for i in inds[::-1]:
d = d[0:i] + d[i+4:]
count_mem += 1
# print(f'step 2: {d}')
# then, remove and count escaped quote
inds = []
for i in re.finditer(r'\\"', d):
inds.append(i.span()[0])
for i in inds[::-1]:
d = d[0:i] + d[i+2:]
count_mem += 1
# print(f'step 3: {d}')
# finally. remove and count escaped backslash
inds = []
for i in re.finditer(r'\\\\', d):
inds.append(i.span()[0])
for i in inds[::-1]:
d = d[0:i] + d[i+2:]
count_mem += 1
# add remaining characters
count_mem += len(d)
print('final', d)
print(count_lit, count_mem)
return count_lit - count_mem
# print(d)
# print(count_mem)
# print('\n')
def part2(self):
pass
if __name__ == '__main__':
s = solve_day()
print(f'Part 1: {s.part1()}')
print(f'Part 2: {s.part2()}') | [
"re.finditer"
] | [((972, 995), 're.finditer', 're.finditer', (['"""\\\\\\\\x"""', 'd'], {}), "('\\\\\\\\x', d)\n", (983, 995), False, 'import re\n'), ((1283, 1306), 're.finditer', 're.finditer', (['"""\\\\\\\\\\""""', 'd'], {}), '(\'\\\\\\\\"\', d)\n', (1294, 1306), False, 'import re\n'), ((1601, 1627), 're.finditer', 're.finditer', (['"""\\\\\\\\\\\\\\\\"""', 'd'], {}), "('\\\\\\\\\\\\\\\\', d)\n", (1612, 1627), False, 'import re\n')] |
# coding: utf-8
"""
Consolidate Services
Description of all APIs # noqa: E501
The version of the OpenAPI document: version not set
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from argocd_client.configuration import Configuration
class V1alpha1Cluster(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'config': 'V1alpha1ClusterConfig',
'connection_state': 'V1alpha1ConnectionState',
'info': 'V1alpha1ClusterInfo',
'name': 'str',
'namespaces': 'list[str]',
'refresh_requested_at': 'V1Time',
'server': 'str',
'server_version': 'str'
}
attribute_map = {
'config': 'config',
'connection_state': 'connectionState',
'info': 'info',
'name': 'name',
'namespaces': 'namespaces',
'refresh_requested_at': 'refreshRequestedAt',
'server': 'server',
'server_version': 'serverVersion'
}
def __init__(self, config=None, connection_state=None, info=None, name=None, namespaces=None, refresh_requested_at=None, server=None, server_version=None, local_vars_configuration=None): # noqa: E501
"""V1alpha1Cluster - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._config = None
self._connection_state = None
self._info = None
self._name = None
self._namespaces = None
self._refresh_requested_at = None
self._server = None
self._server_version = None
self.discriminator = None
if config is not None:
self.config = config
if connection_state is not None:
self.connection_state = connection_state
if info is not None:
self.info = info
if name is not None:
self.name = name
if namespaces is not None:
self.namespaces = namespaces
if refresh_requested_at is not None:
self.refresh_requested_at = refresh_requested_at
if server is not None:
self.server = server
if server_version is not None:
self.server_version = server_version
@property
def config(self):
"""Gets the config of this V1alpha1Cluster. # noqa: E501
:return: The config of this V1alpha1Cluster. # noqa: E501
:rtype: V1alpha1ClusterConfig
"""
return self._config
@config.setter
def config(self, config):
"""Sets the config of this V1alpha1Cluster.
:param config: The config of this V1alpha1Cluster. # noqa: E501
:type: V1alpha1ClusterConfig
"""
self._config = config
@property
def connection_state(self):
"""Gets the connection_state of this V1alpha1Cluster. # noqa: E501
:return: The connection_state of this V1alpha1Cluster. # noqa: E501
:rtype: V1alpha1ConnectionState
"""
return self._connection_state
@connection_state.setter
def connection_state(self, connection_state):
"""Sets the connection_state of this V1alpha1Cluster.
:param connection_state: The connection_state of this V1alpha1Cluster. # noqa: E501
:type: V1alpha1ConnectionState
"""
self._connection_state = connection_state
@property
def info(self):
"""Gets the info of this V1alpha1Cluster. # noqa: E501
:return: The info of this V1alpha1Cluster. # noqa: E501
:rtype: V1alpha1ClusterInfo
"""
return self._info
@info.setter
def info(self, info):
"""Sets the info of this V1alpha1Cluster.
:param info: The info of this V1alpha1Cluster. # noqa: E501
:type: V1alpha1ClusterInfo
"""
self._info = info
@property
def name(self):
"""Gets the name of this V1alpha1Cluster. # noqa: E501
:return: The name of this V1alpha1Cluster. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1alpha1Cluster.
:param name: The name of this V1alpha1Cluster. # noqa: E501
:type: str
"""
self._name = name
@property
def namespaces(self):
"""Gets the namespaces of this V1alpha1Cluster. # noqa: E501
Holds list of namespaces which are accessible in that cluster. Cluster level resources would be ignored if namespace list is not empty. # noqa: E501
:return: The namespaces of this V1alpha1Cluster. # noqa: E501
:rtype: list[str]
"""
return self._namespaces
@namespaces.setter
def namespaces(self, namespaces):
"""Sets the namespaces of this V1alpha1Cluster.
Holds list of namespaces which are accessible in that cluster. Cluster level resources would be ignored if namespace list is not empty. # noqa: E501
:param namespaces: The namespaces of this V1alpha1Cluster. # noqa: E501
:type: list[str]
"""
self._namespaces = namespaces
@property
def refresh_requested_at(self):
"""Gets the refresh_requested_at of this V1alpha1Cluster. # noqa: E501
:return: The refresh_requested_at of this V1alpha1Cluster. # noqa: E501
:rtype: V1Time
"""
return self._refresh_requested_at
@refresh_requested_at.setter
def refresh_requested_at(self, refresh_requested_at):
"""Sets the refresh_requested_at of this V1alpha1Cluster.
:param refresh_requested_at: The refresh_requested_at of this V1alpha1Cluster. # noqa: E501
:type: V1Time
"""
self._refresh_requested_at = refresh_requested_at
@property
def server(self):
"""Gets the server of this V1alpha1Cluster. # noqa: E501
:return: The server of this V1alpha1Cluster. # noqa: E501
:rtype: str
"""
return self._server
@server.setter
def server(self, server):
"""Sets the server of this V1alpha1Cluster.
:param server: The server of this V1alpha1Cluster. # noqa: E501
:type: str
"""
self._server = server
@property
def server_version(self):
"""Gets the server_version of this V1alpha1Cluster. # noqa: E501
:return: The server_version of this V1alpha1Cluster. # noqa: E501
:rtype: str
"""
return self._server_version
@server_version.setter
def server_version(self, server_version):
"""Sets the server_version of this V1alpha1Cluster.
:param server_version: The server_version of this V1alpha1Cluster. # noqa: E501
:type: str
"""
self._server_version = server_version
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha1Cluster):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1alpha1Cluster):
return True
return self.to_dict() != other.to_dict()
| [
"six.iteritems",
"argocd_client.configuration.Configuration"
] | [((7453, 7486), 'six.iteritems', 'six.iteritems', (['self.openapi_types'], {}), '(self.openapi_types)\n', (7466, 7486), False, 'import six\n'), ((1757, 1772), 'argocd_client.configuration.Configuration', 'Configuration', ([], {}), '()\n', (1770, 1772), False, 'from argocd_client.configuration import Configuration\n')] |
import os
import requests
import tweepy
import random
from .littlebirdy import LittleBirdy
class TwitterPost(LittleBirdy):
_TWEET = '''{account} I am experiencing issues with my internet. My speed is at {down} MB/s Down & {up} MB/s Up.
This is {percent}% below what I am paying for.
'''
def post(self, down_speed, up_speed, percent):
auth = tweepy.OAuthHandler(
self.config['twitter_consumer_key'],
self.config['twitter_consumer_secret']
)
auth.set_access_token(
self.config['twitter_access_token'],
self.config['twitter_access_token_secret']
)
self.api = tweepy.API(auth)
try:
tweet = self._TWEET.format(
account=self.config['speedtweet_at_account'],
down=down_speed,
up=up_speed,
percent=percent
)
self.api.update_status(tweet)
except:
self.post(down_speed, up_speed, percent)
| [
"tweepy.API",
"tweepy.OAuthHandler"
] | [((366, 467), 'tweepy.OAuthHandler', 'tweepy.OAuthHandler', (["self.config['twitter_consumer_key']", "self.config['twitter_consumer_secret']"], {}), "(self.config['twitter_consumer_key'], self.config[\n 'twitter_consumer_secret'])\n", (385, 467), False, 'import tweepy\n'), ((661, 677), 'tweepy.API', 'tweepy.API', (['auth'], {}), '(auth)\n', (671, 677), False, 'import tweepy\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2012, <NAME>
# All rights reserved.
# This file is part of PyDSM.
# PyDSM is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# PyDSM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with PyDSM. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division, print_function
from numpy.testing import TestCase, run_module_suite
import numpy as np
from pkg_resources import resource_stream
from pydsm.delsig import simulateDSM
__all__ = ["TestSimulateDSM"]
class TestSimulateDSM(TestCase):
def setUp(self):
pass
def test_default(self):
f = resource_stream('pydsm.delsig',
'tests/Data/test_simulateDSM_0.npz')
d = np.load(f)['arr_0']
f.close()
# Take H as in H = synthesizeNTF(5, 32, 1)
H = (np.array([0.99604531+0.08884669j, 0.99604531-0.08884669j,
0.99860302+0.05283948j, 0.99860302-0.05283948j,
1.00000000+0.j]),
np.array([0.80655696+0.11982271j, 0.80655696-0.11982271j,
0.89807098+0.21981939j, 0.89807098-0.21981939j,
0.77776708+0.j]),
1)
N = 8192
f = 85
u = 0.5*np.sin(2.*np.pi*f/N*np.arange(N))
v, d1, d2, d3 = simulateDSM(u, H)
np.testing.assert_equal(v, d)
if __name__ == '__main__':
run_module_suite()
| [
"numpy.testing.assert_equal",
"numpy.arange",
"numpy.array",
"pydsm.delsig.simulateDSM",
"numpy.testing.run_module_suite",
"numpy.load",
"pkg_resources.resource_stream"
] | [((1841, 1859), 'numpy.testing.run_module_suite', 'run_module_suite', ([], {}), '()\n', (1857, 1859), False, 'from numpy.testing import TestCase, run_module_suite\n'), ((1063, 1131), 'pkg_resources.resource_stream', 'resource_stream', (['"""pydsm.delsig"""', '"""tests/Data/test_simulateDSM_0.npz"""'], {}), "('pydsm.delsig', 'tests/Data/test_simulateDSM_0.npz')\n", (1078, 1131), False, 'from pkg_resources import resource_stream\n'), ((1753, 1770), 'pydsm.delsig.simulateDSM', 'simulateDSM', (['u', 'H'], {}), '(u, H)\n', (1764, 1770), False, 'from pydsm.delsig import simulateDSM\n'), ((1779, 1808), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['v', 'd'], {}), '(v, d)\n', (1802, 1808), True, 'import numpy as np\n'), ((1172, 1182), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (1179, 1182), True, 'import numpy as np\n'), ((1274, 1405), 'numpy.array', 'np.array', (['[0.99604531 + 0.08884669j, 0.99604531 - 0.08884669j, 0.99860302 + \n 0.05283948j, 0.99860302 - 0.05283948j, 1.0 + 0.0j]'], {}), '([0.99604531 + 0.08884669j, 0.99604531 - 0.08884669j, 0.99860302 + \n 0.05283948j, 0.99860302 - 0.05283948j, 1.0 + 0.0j])\n', (1282, 1405), True, 'import numpy as np\n'), ((1459, 1597), 'numpy.array', 'np.array', (['[0.80655696 + 0.11982271j, 0.80655696 - 0.11982271j, 0.89807098 + \n 0.21981939j, 0.89807098 - 0.21981939j, 0.77776708 + 0.0j]'], {}), '([0.80655696 + 0.11982271j, 0.80655696 - 0.11982271j, 0.89807098 + \n 0.21981939j, 0.89807098 - 0.21981939j, 0.77776708 + 0.0j])\n', (1467, 1597), True, 'import numpy as np\n'), ((1715, 1727), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (1724, 1727), True, 'import numpy as np\n')] |
import socket
from unittest import TestCase
from tmq import define as td
class TestHash(TestCase):
def test_short(self):
result = td.tmq_hash("short hash")
self.assertEqual(result, 0x20dc540e)
def test_long(self):
result = td.tmq_hash("this is a pretty long hash string")
self.assertEqual(result, 0xb4c660d0)
class TestPacket(TestCase):
def test_pack_unpack(self):
packet = (0x55, (0x4567, 0xF0F0, 0x4444), b'This is a bunch of data')
packed = td.tmq_pack(*packet)
result = td.tmq_unpack(packed)
self.assertEqual(result, packet)
class TestAddresses(TestCase):
def test_pack_unpack(self):
address = ('127.0.0.1', 42)
packed = td.tmq_pack_address_t(*address)
result, *_ = td.tmq_unpack_address_t(packed)
self.assertEqual(result, address)
def test_pack_unpack_several(self):
addresses = [('8.8.8.8', 80),
('127.0.0.1', 142),
('192.168.127.12', 67)
]
packed = td.tmq_pack_addresses(addresses)
result = td.tmq_unpack_addresses(packed)
self.assertEqual(result, addresses)
| [
"tmq.define.tmq_hash",
"tmq.define.tmq_unpack_addresses",
"tmq.define.tmq_unpack_address_t",
"tmq.define.tmq_pack_addresses",
"tmq.define.tmq_unpack",
"tmq.define.tmq_pack",
"tmq.define.tmq_pack_address_t"
] | [((145, 170), 'tmq.define.tmq_hash', 'td.tmq_hash', (['"""short hash"""'], {}), "('short hash')\n", (156, 170), True, 'from tmq import define as td\n'), ((259, 307), 'tmq.define.tmq_hash', 'td.tmq_hash', (['"""this is a pretty long hash string"""'], {}), "('this is a pretty long hash string')\n", (270, 307), True, 'from tmq import define as td\n'), ((510, 530), 'tmq.define.tmq_pack', 'td.tmq_pack', (['*packet'], {}), '(*packet)\n', (521, 530), True, 'from tmq import define as td\n'), ((548, 569), 'tmq.define.tmq_unpack', 'td.tmq_unpack', (['packed'], {}), '(packed)\n', (561, 569), True, 'from tmq import define as td\n'), ((729, 760), 'tmq.define.tmq_pack_address_t', 'td.tmq_pack_address_t', (['*address'], {}), '(*address)\n', (750, 760), True, 'from tmq import define as td\n'), ((782, 813), 'tmq.define.tmq_unpack_address_t', 'td.tmq_unpack_address_t', (['packed'], {}), '(packed)\n', (805, 813), True, 'from tmq import define as td\n'), ((1060, 1092), 'tmq.define.tmq_pack_addresses', 'td.tmq_pack_addresses', (['addresses'], {}), '(addresses)\n', (1081, 1092), True, 'from tmq import define as td\n'), ((1110, 1141), 'tmq.define.tmq_unpack_addresses', 'td.tmq_unpack_addresses', (['packed'], {}), '(packed)\n', (1133, 1141), True, 'from tmq import define as td\n')] |
from uio import Uio
from argsort_axi import ArgSort_AXI
if __name__ == '__main__':
uio = Uio('uio_argsort')
argsort_axi = ArgSort_AXI(uio.regs())
argsort_axi.print_info()
argsort_axi.print_debug()
| [
"uio.Uio"
] | [((110, 128), 'uio.Uio', 'Uio', (['"""uio_argsort"""'], {}), "('uio_argsort')\n", (113, 128), False, 'from uio import Uio\n')] |
# This Python file uses the following encoding: utf-8
'''
Author: <NAME>
Linkedin: https://www.linkedin.com/in/lucasalves-ast/
'''
# TODO #4 Atualizar python 3.9.5 -> 3.9.9
# Importar bibliotecas internas
import __conectdb__
import __query__
import __check__
import __check_semana__
import __list__
# Importar bibliotecas
import backoff
from bs4 import BeautifulSoup
import requests
import time
from datetime import date, datetime, timedelta
import logging
from tqdm import tqdm
import pandas as pd
# TODO #1 Criar Sheduler
# Cores utilizada no script
RED = "\033[1;31m"
GREEN = "\033[0;32m"
GREEN_T = "\033[92m"
RESET = "\033[0;0m"
YELLOW = "\033[1;33m"
BLUE = "\033[1;34m"
GRAY = "\033[1;35m"
#####
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
@backoff.on_exception(backoff.expo, (), max_tries=10)
# Inicio da funcao para coleta dos dados
def dados():
# Dados atual - Criando um DataFrame só com os dados atuais
dados_atual = pd.DataFrame(columns=[
'papel','tipo','empresa','setor','cotacao','dt_ult_cotacao','min_52_sem','max_52_sem','vol_med','valor_mercado','valor_firma','ult_balanco_pro','nr_acoes','os_dia','pl','lpa','pvp','vpa','p_ebit','marg_bruta','psr','marg_ebit','p_ativo','marg_liquida','p_cap_giro','ebit_ativo','p_ativo_circ_liq','roic','div_yield','roe','ev_ebitda','liquidez_corr','ev_ebit','cres_rec','ativo','disponibilidades','ativo_circulante','divd_bruta','divd_liquida','patr_liquido','lucro_liquido_12m','lucro_liquido_3m' ]
)
# Variável(dt) - responsavel por informar qual (x) dia sera feita a coleta dos dados
# Ex.: dt = date.today() - timedelta(days=3) -> volta 3 dias atras no calendario
dt = date.today() - timedelta(days=0)
dt_sem = dt.weekday()
# Variavel dt_dia_sem - responsavel por verificar qual e o dia da semana(Se for Sabado ou Domingo - nao havera coleta de dados)
dt_dia_sem = __check_semana__.DIAS[dt_sem]
dt = dt.strftime("%d/%m/%Y")
# Faz a checagem se o dia da semana e Sabado ou Domingo
if __check__.data_check != dt or dt_dia_sem == "Sábado" or dt_dia_sem == "Domingo":
print(f"+{GRAY} Site não atualizado {RESET}+")
print("--------------------------------------")
print(f"Hoje é dia: {dt} - {dt_dia_sem} ")
print(f"Data do site é: {__check__.data_check} - {__check__.day}")
print("--------------------------------------")
else:
print(f"+{GREEN_T} Site atualizado vamos começar a coletar os dados. {RESET}+")
# Faz checagem se a conexao com o banco de dados foi estabelecida
if __conectdb__.verifica_conexao() == False:
return print(
f"""
+{RED} Conexão não estabelecida com o Banco de Dados, verifique: {RESET}+
-{RED} Docker {RESET}
"""
)
else:
print(
f"""
+{GREEN_T} Conexão estabelecida com sucesso ao Banco de Dados. {RESET}+ """
)
print("-------------------------------------------------------")
# Inicio do contador de tempo de execução do script
inicio = time.time()
# Variável (acao) - armazena uma lista com os tickers da acoes
acao = __list__.lst_acao
# Variável contador
n = 0
# Percorre a lista com os códigos das ações
for i in tqdm(acao):
try:
# Consulta no banco de dados para verificar se os dados já se encontram no mesmo (Ref.: data_ult_cotacao / papel)
query_consult_bd = f" SELECT data_dado_inserido, papel \
FROM dados \
WHERE data_ult_cotacao = '{dt}' \
AND papel = '{i}' "
result = __conectdb__.se_dados(query_consult_bd)
# --- #
if result != []:
print(f"+{YELLOW} Dados da ação: {i}, já cadastrados {RESET}+")
else:
# Aqui começa o script para coleta dos dados
hearder = {"user-agent": "Mozilla/5.0"}
url = f"https://fundamentus.com.br/detalhes.php?papel={i}+"
page = requests.get(url, headers=hearder)
soup = BeautifulSoup(page.content, "html.parser")
dados = soup.findAll("div", {"class": "conteudo clearfix"})
# cria a lista das variaveis aonde seram armazenados os dados coletados
for data in dados:
dadosI = []
papel = []
tipo = []
empresa = []
setor = []
cotacao = []
dt_ult_cotacao = []
min_52_sem = []
max_52_sem = []
vol_med = []
valor_mercado = []
valor_firma = []
ult_balanco_pro = []
nr_acoes = []
os_dia = []
pl = []
lpa = []
pvp = []
vpa = []
p_ebit = []
marg_bruta = []
psr = []
marg_ebit = []
p_ativo = []
marg_liquida = []
p_cap_giro = []
ebit_ativo = []
p_ativo_circ_liq = []
roic = []
div_yield = []
roe = []
ev_ebitda = []
liquidez_corr = []
ev_ebit = []
cres_rec = []
ativo = []
disponibilidades = []
ativo_circulante = []
divd_bruta = []
divd_liquida = []
patr_liquido = []
lucro_liquido_12m = []
lucro_liquido_3m = []
dadosI = data.find_all("span", {"class": "txt"})
dadosO = data.find_all("span", {"class": "oscil"})
#
papel.append(dadosI[0].text)
if "Papel" in papel[0]:
papel.append(dadosI[1].text.strip())
else:
papel.append(0)
#
tipo.append(dadosI[4].text)
if "Tipo" in tipo[0]:
tipo.append(dadosI[5].text.strip())
else:
tipo.append(0)
#
empresa.append(dadosI[8].text)
if "Empresa" in empresa[0]:
empresa.append(dadosI[9].text)
else:
empresa.append(0)
#
setor.append(dadosI[12].text)
if "Setor" in setor[0]:
setor.append(dadosI[13].text)
else:
setor.append(0)
#
cotacao.append(dadosI[2].text)
if "Cotação" in cotacao[0]:
cotacao.append(dadosI[3].text)
else:
cotacao.append(0)
#
dt_ult_cotacao.append(dadosI[6].text)
if "Data últ cot" in dt_ult_cotacao[0]:
dt_ult_cotacao.append(dadosI[7].text)
else:
dt_ult_cotacao.append(0)
#
min_52_sem.append(dadosI[10].text)
if "Min 52 sem" in min_52_sem[0]:
min_52_sem.append(dadosI[11].text)
else:
min_52_sem.append(0)
#
max_52_sem.append(dadosI[14].text)
if "Max 52 sem" in max_52_sem[0]:
max_52_sem.append(dadosI[15].text)
else:
max_52_sem.append(0)
#
vol_med.append(dadosI[18].text)
if "Vol $ méd (2m)" in vol_med[0]:
vol_med.append(dadosI[19].text)
else:
vol_med.append(0)
#
valor_mercado.append(dadosI[20].text)
if "Valor de mercado" in valor_mercado[0]:
valor_mercado.append(dadosI[21].text)
else:
valor_mercado.append(0)
#
valor_firma.append(dadosI[24].text)
if "Valor da firma" in valor_firma[0]:
valor_firma.append(dadosI[25].text)
else:
valor_firma.append(0)
#
ult_balanco_pro.append(dadosI[22].text)
if "Últ balanço processado" in ult_balanco_pro[0]:
ult_balanco_pro.append(dadosI[23].text)
else:
ult_balanco_pro.append(0)
#
nr_acoes.append(dadosI[26].text)
if "Nro. Ações" in nr_acoes[0]:
nr_acoes.append(dadosI[27].text.replace(".", ""))
else:
nr_acoes.append(0)
#
os_dia.append(dadosI[30].text)
if "Dia" in os_dia[0]:
os_dia.append(
dadosO[0]
.text.replace("\n", "")
.replace(",", ".")
.replace("%", "")
)
else:
os_dia.append(0)
#
pl.append(dadosI[31].text)
if "P/L" in pl[0]:
pl.append(
dadosI[32].text.replace(".", "").replace(",", ".")
)
else:
pl.append(0)
#
lpa.append(dadosI[33].text)
if "LPA" in lpa[0]:
lpa.append(dadosI[34].text.replace(",", "."))
else:
lpa.append(0)
#
pvp.append(dadosI[36].text)
if "P/VP" in pvp[0]:
pvp.append(
dadosI[37].text.replace(".", "").replace(",", ".")
)
else:
pvp.append(0)
#
vpa.append(dadosI[38].text)
if "VPA" in vpa[0]:
vpa.append(
dadosI[39].text.replace(".", "").replace(",", ".")
)
else:
vpa.append(0)
#
p_ebit.append(dadosI[41].text)
if "P/EBIT" in p_ebit:
p_ebit.append(
dadosI[42].text.replace("\n", "").replace(",", ".")
)
if len(p_ebit[1]) <= 1:
p_ebit[1] = 0
else:
p_ebit.append(0)
#
marg_bruta.append(dadosI[43].text)
if "Marg. Bruta" in marg_bruta:
marg_bruta.append(
dadosI[44]
.text.replace("\n", "")
.replace(".", "")
.replace(",", ".")
.replace("%", "")
)
if len(marg_bruta[1]) <= 1:
marg_bruta[1] = 0
else:
marg_bruta.append(0)
#
psr.append(dadosI[46].text)
if "PSR" in psr:
psr.append(
dadosI[47]
.text.replace("\n", "")
.replace(".", "")
.replace(",", ".")
)
if len(psr[1]) <= 1:
psr[1] = 0
else:
psr.append(0)
#
marg_ebit.append(dadosI[48].text)
if "Marg. EBIT" in marg_ebit:
marg_ebit.append(
dadosI[49]
.text.replace("\n", "")
.replace(".", "")
.replace(",", ".")
.replace("%", "")
)
if len(marg_ebit[1]) <= 1:
marg_ebit[1] = 0
else:
marg_ebit.append(0)
#
p_ativo.append(dadosI[51].text)
if "P/Ativos" in p_ativo:
p_ativo.append(
dadosI[52]
.text.replace("\n", "")
.replace(".", "")
.replace(",", ".")
)
if len(p_ativo[1]) <= 1:
p_ativo[1] = 0
else:
p_ativo.append(0)
#
marg_liquida.append(dadosI[53].text)
if "Marg. Líquida" in marg_liquida:
marg_liquida.append(
dadosI[54]
.text.replace("\n", "")
.replace(".", "")
.replace(",", ".")
.replace("%", "")
)
if len(marg_liquida[1]) <= 1:
marg_liquida[1] = 0
else:
marg_liquida.append(0)
#
p_cap_giro.append(dadosI[56].text)
if "P/Cap. Giro" in p_cap_giro:
p_cap_giro.append(
dadosI[57]
.text.replace("\n", "")
.replace(".", "")
.replace(",", ".")
)
if len(p_cap_giro[1]) <= 1:
p_cap_giro[1] = 0
else:
p_cap_giro.append(0)
#
ebit_ativo.append(dadosI[58].text)
if "EBIT / Ativo" in ebit_ativo:
ebit_ativo.append(
dadosI[59]
.text.replace(".", "")
.replace(",", ".")
.replace("%", "")
)
if len(ebit_ativo[1]) <= 1:
ebit_ativo[1] = 0
else:
ebit_ativo.append(0)
#
p_ativo_circ_liq.append(dadosI[61].text)
if "P/Ativ Circ Liq" in p_ativo_circ_liq:
p_ativo_circ_liq.append(
dadosI[62]
.text.replace("\n", "")
.replace(".", "")
.replace(",", ".")
)
if len(p_ativo_circ_liq[1]) <= 1:
p_ativo_circ_liq[1] = 0
else:
p_ativo_circ_liq.append(0)
#
roic.append(dadosI[63].text)
if "ROIC" in roic:
roic.append(
dadosI[64]
.text.replace("\n", "")
.replace(".", "")
.replace(",", ".")
.replace("%", "")
)
if len(roic[1]) <= 1:
roic[1] = 0
else:
roic.append(0)
#
div_yield.append(dadosI[66].text)
if "Div. Yield" in div_yield:
div_yield.append(
dadosI[67].text.replace(",", ".").replace("%", "")
)
if len(div_yield[1]) <= 1:
div_yield[1] = 0
else:
div_yield.append(0)
#
roe.append(dadosI[68].text)
if "ROE" in roe:
roe.append(
dadosI[69]
.text.replace("\n", "")
.replace(".", "")
.replace(",", ".")
.replace("%", "")
)
if len(roe[1]) <= 1:
roe[1] = 0
else:
roe.append(0)
#
ev_ebitda.append(dadosI[71].text)
if "EV / EBITDA" in ev_ebitda:
ev_ebitda.append(
dadosI[72]
.text.replace("\n", "")
.replace(".", "")
.replace(",", ".")
)
if len(ev_ebitda[1]) <= 1:
ev_ebitda[1] = 0
else:
ev_ebitda.append(0)
#
liquidez_corr.append(dadosI[73].text)
if "Liquidez Corr" in liquidez_corr:
liquidez_corr.append(
dadosI[74].text.replace("\n", "").replace(",", ".")
)
if len(liquidez_corr[1]) <= 1:
liquidez_corr[1] = 0
else:
liquidez_corr.append(0)
#
ev_ebit.append(dadosI[76].text)
if "EV / EBIT" in ev_ebit:
ev_ebit.append(
dadosI[77]
.text.replace("\n", "")
.replace(".", "")
.replace(",", ".")
)
if len(ev_ebit[1]) <= 1:
ev_ebit[1] = 0
else:
ev_ebit.append(0)
#
cres_rec.append(dadosI[81].text)
if "Cres. Rec (5a)" in cres_rec:
cres_rec.append(
dadosI[82]
.text.replace("\n", "")
.replace(",", ".")
.replace("%", "")
)
if len(cres_rec[1]) <= 1:
cres_rec[1] = 0
else:
cres_rec.append(0)
#
if setor[1] == "Intermediários Financeiros":
ativo.append("Ativo")
ativo.append(dadosI[87].text)
disponibilidades.append("Disponibilidades")
disponibilidades.append("0")
ativo_circulante.append("Ativo Circulante")
ativo_circulante.append("0")
divd_bruta.append("Dív. Bruta")
divd_bruta.append("0")
divd_liquida.append("Dív. Líquida")
divd_liquida.append("0")
patr_liquido.append("<NAME>")
patr_liquido.append(dadosI[93].text)
lucro_liquido_12m.append("<NAME>")
lucro_liquido_12m.append(dadosI[106].text)
lucro_liquido_3m.append("<NAME>")
lucro_liquido_3m.append(dadosI[108].text)
else:
ativo.append(dadosI[86].text)
if "Ativo" in ativo:
ativo.append(dadosI[87].text)
else:
ativo.append(0)
#
disponibilidades.append(dadosI[90].text)
if "Disponibilidades" in disponibilidades:
disponibilidades.append(dadosI[91].text)
else:
disponibilidades.append(0)
#
ativo_circulante.append(dadosI[94].text)
if "Ativo Circulante" in ativo_circulante:
ativo_circulante.append(dadosI[95].text)
else:
ativo_circulante.append(0)
#
divd_bruta.append(dadosI[88].text)
if "Dív. Bruta" in divd_bruta:
divd_bruta.append(dadosI[89].text)
else:
divd_bruta.append(0)
#
divd_liquida.append(dadosI[92].text)
if "<NAME>" in divd_liquida:
divd_liquida.append(dadosI[93].text)
else:
divd_liquida.append(0)
#
patr_liquido.append(dadosI[96].text)
if "<NAME>" in patr_liquido:
patr_liquido.append(dadosI[97].text)
else:
patr_liquido.append(0)
#
lucro_liquido_12m.append(dadosI[109].text)
if "<NAME>" in lucro_liquido_12m:
lucro_liquido_12m.append(dadosI[110].text)
else:
lucro_liquido_12m.append(0)
#
lucro_liquido_3m.append(dadosI[111].text)
if "<NAME>" in lucro_liquido_3m:
lucro_liquido_3m.append(dadosI[112].text)
else:
lucro_liquido_3m.append(0)
# Query para inserir os dados coletados no banco de dados Postgres
query_insert_bd = f" INSERT INTO dados VALUES ( '{dt}','{papel[1]}','{tipo[1]}','{empresa[1]}','{setor[1]}','{cotacao[1]}','{dt_ult_cotacao[1]}','{min_52_sem[1]}','{max_52_sem[1]}','{vol_med[1]}','{valor_mercado[1]}','{valor_firma[1]}','{ult_balanco_pro[1]}','{nr_acoes[1]}','{os_dia[1]}','{pl[1]}','{lpa[1]}','{pvp[1]}','{vpa[1]}','{p_ebit[1]}','{marg_bruta[1]}','{psr[1]}','{marg_ebit[1]}','{p_ativo[1]}','{marg_liquida[1]}','{p_cap_giro[1]}','{ebit_ativo[1]}','{p_ativo_circ_liq[1]}','{roic[1]}','{div_yield[1]}','{roe[1]}','{ev_ebitda[1]}','{liquidez_corr[1]}','{ev_ebit[1]}','{cres_rec[1]}','{ativo[1]}','{disponibilidades[1]}','{ativo_circulante[1]}','{divd_bruta[1]}','{divd_liquida[1]}','{patr_liquido[1]}','{lucro_liquido_12m[1]}','{lucro_liquido_3m[1]}' ) "
# Inserindo os dados coletados no banco de dados Postgres
__conectdb__.in_dados(query_insert_bd)
print(
f"+{GREEN} Dados da ação: {i}, gravados com sucesso {RESET}+"
)
# --- #
n += 1
# Dados atual - Salvando os dados atuais no Dataframe
dados_atual.loc[dados_atual.shape[0]] = [
papel[1],tipo[1],empresa[1],setor[1],cotacao[1],dt_ult_cotacao[1],min_52_sem[1],max_52_sem[1],vol_med[1],valor_mercado[1],valor_firma[1],ult_balanco_pro[1],nr_acoes[1],os_dia[1],pl[1],lpa[1],pvp[1],vpa[1],p_ebit[1],marg_bruta[1],psr[1],marg_ebit[1],p_ativo[1],marg_liquida[1],p_cap_giro[1],ebit_ativo[1],p_ativo_circ_liq[1],roic[1],div_yield[1],roe[1],ev_ebitda[1],liquidez_corr[1],ev_ebit[1],cres_rec[1],ativo[1],disponibilidades[1],ativo_circulante[1],divd_bruta[1],divd_liquida[1],patr_liquido[1],lucro_liquido_12m[1],lucro_liquido_3m[1]
]
# Dados atual - Salvando os dados atuais em um arquivo .csv
dados_atual.to_csv('../Dados_Atual/dados.csv', sep=';')
except:
print(f"+{RED} Dados da ação: {i}, não gravados {RESET} +")
return
# Removendo linhas(tabela dados) do Banco de Dados com valores vazios (ref.: na coluna papel)
delete_vazio = __query__.delete_vazio_query
__conectdb__.in_dados(delete_vazio)
# Removendo linhas(tabela dados) do Banco de Dados duplicados (ref.: na coluna papel / data_ult_cotacao )
delete_duplicados = __query__.delete_duplicados_query
__conectdb__.in_dados(delete_duplicados)
# Backup do banco de dados
csv_file_name = "../Backup/some_file.csv"
bk = __query__.backup_query
with open(csv_file_name, "w") as f:
__conectdb__.bk(bk, f)
# Fim do contador de Tempo do script
fim = time.time()
hours, rem = divmod(fim - inicio, 3600)
minutes, seconds = divmod(rem, 60)
# Fim
print(f"{RED}-----------------{RESET}")
print(f"{BLUE}Finalizou. {n} Empresa(s) Cadastrada(s)")
print(
"Tempo: {:0>2}:{:0>2}:{:05.2f}".format(
int(hours), int(minutes), seconds
)
)
print(f"{RESET}{RED}-----------------{RESET}")
dados() | [
"logging.getLogger",
"logging.basicConfig",
"__conectdb__.bk",
"__conectdb__.in_dados",
"tqdm.tqdm",
"requests.get",
"backoff.on_exception",
"datetime.timedelta",
"bs4.BeautifulSoup",
"__conectdb__.se_dados",
"pandas.DataFrame",
"datetime.date.today",
"time.time",
"__conectdb__.verifica_co... | [((719, 746), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (736, 746), False, 'import logging\n'), ((747, 786), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (766, 786), False, 'import logging\n'), ((789, 841), 'backoff.on_exception', 'backoff.on_exception', (['backoff.expo', '()'], {'max_tries': '(10)'}), '(backoff.expo, (), max_tries=10)\n', (809, 841), False, 'import backoff\n'), ((980, 1575), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['papel', 'tipo', 'empresa', 'setor', 'cotacao', 'dt_ult_cotacao',\n 'min_52_sem', 'max_52_sem', 'vol_med', 'valor_mercado', 'valor_firma',\n 'ult_balanco_pro', 'nr_acoes', 'os_dia', 'pl', 'lpa', 'pvp', 'vpa',\n 'p_ebit', 'marg_bruta', 'psr', 'marg_ebit', 'p_ativo', 'marg_liquida',\n 'p_cap_giro', 'ebit_ativo', 'p_ativo_circ_liq', 'roic', 'div_yield',\n 'roe', 'ev_ebitda', 'liquidez_corr', 'ev_ebit', 'cres_rec', 'ativo',\n 'disponibilidades', 'ativo_circulante', 'divd_bruta', 'divd_liquida',\n 'patr_liquido', 'lucro_liquido_12m', 'lucro_liquido_3m']"}), "(columns=['papel', 'tipo', 'empresa', 'setor', 'cotacao',\n 'dt_ult_cotacao', 'min_52_sem', 'max_52_sem', 'vol_med',\n 'valor_mercado', 'valor_firma', 'ult_balanco_pro', 'nr_acoes', 'os_dia',\n 'pl', 'lpa', 'pvp', 'vpa', 'p_ebit', 'marg_bruta', 'psr', 'marg_ebit',\n 'p_ativo', 'marg_liquida', 'p_cap_giro', 'ebit_ativo',\n 'p_ativo_circ_liq', 'roic', 'div_yield', 'roe', 'ev_ebitda',\n 'liquidez_corr', 'ev_ebit', 'cres_rec', 'ativo', 'disponibilidades',\n 'ativo_circulante', 'divd_bruta', 'divd_liquida', 'patr_liquido',\n 'lucro_liquido_12m', 'lucro_liquido_3m'])\n", (992, 1575), True, 'import pandas as pd\n'), ((1725, 1737), 'datetime.date.today', 'date.today', ([], {}), '()\n', (1735, 1737), False, 'from datetime import date, datetime, timedelta\n'), ((1740, 1757), 'datetime.timedelta', 'timedelta', ([], {'days': '(0)'}), '(days=0)\n', (1749, 1757), False, 'from datetime import date, datetime, timedelta\n'), ((2624, 2655), '__conectdb__.verifica_conexao', '__conectdb__.verifica_conexao', ([], {}), '()\n', (2653, 2655), False, 'import __conectdb__\n'), ((3152, 3163), 'time.time', 'time.time', ([], {}), '()\n', (3161, 3163), False, 'import time\n'), ((3406, 3416), 'tqdm.tqdm', 'tqdm', (['acao'], {}), '(acao)\n', (3410, 3416), False, 'from tqdm import tqdm\n'), ((29212, 29247), '__conectdb__.in_dados', '__conectdb__.in_dados', (['delete_vazio'], {}), '(delete_vazio)\n', (29233, 29247), False, 'import __conectdb__\n'), ((29445, 29485), '__conectdb__.in_dados', '__conectdb__.in_dados', (['delete_duplicados'], {}), '(delete_duplicados)\n', (29466, 29485), False, 'import __conectdb__\n'), ((29787, 29798), 'time.time', 'time.time', ([], {}), '()\n', (29796, 29798), False, 'import time\n'), ((29684, 29706), '__conectdb__.bk', '__conectdb__.bk', (['bk', 'f'], {}), '(bk, f)\n', (29699, 29706), False, 'import __conectdb__\n'), ((3904, 3943), '__conectdb__.se_dados', '__conectdb__.se_dados', (['query_consult_bd'], {}), '(query_consult_bd)\n', (3925, 3943), False, 'import __conectdb__\n'), ((4374, 4408), 'requests.get', 'requests.get', (['url'], {'headers': 'hearder'}), '(url, headers=hearder)\n', (4386, 4408), False, 'import requests\n'), ((4440, 4482), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page.content', '"""html.parser"""'], {}), "(page.content, 'html.parser')\n", (4453, 4482), False, 'from bs4 import BeautifulSoup\n'), ((27727, 27765), '__conectdb__.in_dados', '__conectdb__.in_dados', (['query_insert_bd'], {}), '(query_insert_bd)\n', (27748, 27765), False, 'import __conectdb__\n')] |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(name="urwid_picker_widgets",
version="0.4",
description="Specialized picker widgets for urwid "
"that extend its features.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Ezio-Sarthak/urwid_picker_widgets",
author="<NAME>",
author_email="<EMAIL>",
license="MIT",
packages=["urwid_picker_widgets",
"urwid_picker_widgets.assisting_modules",
"urwid_picker_widgets.widgets"],
install_requires=["urwid"],
classifiers=["Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: Unix",
"Intended Audience :: Developers",
"Environment :: Console",
"Topic :: Software Development :: Widget Sets"],
python_requires=">=3.6",
)
| [
"setuptools.setup"
] | [((150, 916), 'setuptools.setup', 'setup', ([], {'name': '"""urwid_picker_widgets"""', 'version': '"""0.4"""', 'description': '"""Specialized picker widgets for urwid that extend its features."""', 'long_description': 'long_description', 'long_description_content_type': '"""text/markdown"""', 'url': '"""https://github.com/Ezio-Sarthak/urwid_picker_widgets"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'license': '"""MIT"""', 'packages': "['urwid_picker_widgets', 'urwid_picker_widgets.assisting_modules',\n 'urwid_picker_widgets.widgets']", 'install_requires': "['urwid']", 'classifiers': "['Programming Language :: Python :: 3',\n 'License :: OSI Approved :: MIT License', 'Operating System :: Unix',\n 'Intended Audience :: Developers', 'Environment :: Console',\n 'Topic :: Software Development :: Widget Sets']", 'python_requires': '""">=3.6"""'}), "(name='urwid_picker_widgets', version='0.4', description=\n 'Specialized picker widgets for urwid that extend its features.',\n long_description=long_description, long_description_content_type=\n 'text/markdown', url=\n 'https://github.com/Ezio-Sarthak/urwid_picker_widgets', author='<NAME>',\n author_email='<EMAIL>', license='MIT', packages=['urwid_picker_widgets',\n 'urwid_picker_widgets.assisting_modules',\n 'urwid_picker_widgets.widgets'], install_requires=['urwid'],\n classifiers=['Programming Language :: Python :: 3',\n 'License :: OSI Approved :: MIT License', 'Operating System :: Unix',\n 'Intended Audience :: Developers', 'Environment :: Console',\n 'Topic :: Software Development :: Widget Sets'], python_requires='>=3.6')\n", (155, 916), False, 'from setuptools import setup\n')] |
##Clustering script for CaM_Trials##
#clusters using HDBSCAN the last 1 microsecond of simulation
#uses rmsd to native of backbone (excluding flexible tails but including peptide) as distance metric
import mdtraj as md
import numpy as np
import matplotlib.pyplot as plt
import hdbscan
MIN_SAMPLES = 200 #determined from data/trial and error
#Calculate rmsd to native
def compute_rmsd_matrix(traj):
distances = np.empty((traj.n_frames, traj.n_frames))
for i in range(traj.n_frames):
distances[i] = md.rmsd(traj, traj, i, atom_indices=traj.top.select('backbone'))
return distances
#Determines the k-plot (helpful in determining MIN_SAMPLES)
def plot_k_dist(distances):
print('plotting dists')
s = np.sort(distances, axis=0)
counts = s[:, MIN_SAMPLES]
plt.plot(counts)
plt.xlabel('distance')
plt.ylabel('num_steps')
plt.savefig('k-dist.png')
#Clusters data using HDBSCAN
def make_clusters(native, traj):
distances = compute_rmsd_matrix(traj)
plot_k_dist(distances)
#clustering set up
clusterer = hdbscan.HDBSCAN(min_cluster_size=MIN_SAMPLES)
cluster_indices = clusterer.fit_predict(distances)
min_index = 0
max_index = np.max(cluster_indices) + 1
#clustering
clusters = [traj[np.where(cluster_indices == index)]
for index in range(min_index, max_index)]
clusters = sorted(clusters, key=lambda x: x.n_frames, reverse=True)
#now add the unclustered frames to last cluster
clusters.append(traj[np.where(cluster_indices == -1)])
cluster_sizes = [c.n_frames for c in clusters]
total_frames = traj.n_frames
print('Found {} total clusters.'.format(len(clusters)))
#calculates important values and outputs to files
for i, c in enumerate(clusters):
rmsds_to_native = md.rmsd(c, native)*10
mean = np.mean(rmsds_to_native)
median = np.median(rmsds_to_native)
min_ = np.min(rmsds_to_native)
max_ = np.max(rmsds_to_native)
std_ = np.std(rmsds_to_native)
np.savetxt("clusters_0"+str(i)+".dat", rmsds_to_native, fmt="%f")
print('Cluster {:02d} has population {:.1f}; RMSD: {:.2f} {:.2f} {:.2f} {:.2f} {:.2f}'.format(i, 100 * cluster_sizes[i] / float(total_frames), mean, median, min_, max_, std_))
c.save('cluster_{:02d}.pdb'.format(i))
#native struct
native = md.load('cam_fill.pdb')
native_indices = native.top.select('backbone and (resid 4 to 146 or resid>=149)')
#last 1 microsecond of simulation
traj = md.load_dcd('CaM_Trial3.dcd', top='trajectory_1.pdb')[-10000:]
traj_indices = traj.top.select('backbone and (resid 4 to 146 or resid >=149)')
#gets indices of subsection
ref = native.atom_slice(atom_indices=native_indices)
cam = traj.atom_slice(atom_indices = traj_indices)
make_clusters(ref, cam)
| [
"numpy.mean",
"numpy.median",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"numpy.where",
"numpy.sort",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"numpy.std",
"numpy.max",
"mdtraj.load_dcd",
"numpy.empty",
"numpy.min",
"mdtraj.rmsd",
"mdtraj.load",
"hdbscan.HDBS... | [((2359, 2382), 'mdtraj.load', 'md.load', (['"""cam_fill.pdb"""'], {}), "('cam_fill.pdb')\n", (2366, 2382), True, 'import mdtraj as md\n'), ((418, 458), 'numpy.empty', 'np.empty', (['(traj.n_frames, traj.n_frames)'], {}), '((traj.n_frames, traj.n_frames))\n', (426, 458), True, 'import numpy as np\n'), ((729, 755), 'numpy.sort', 'np.sort', (['distances'], {'axis': '(0)'}), '(distances, axis=0)\n', (736, 755), True, 'import numpy as np\n'), ((792, 808), 'matplotlib.pyplot.plot', 'plt.plot', (['counts'], {}), '(counts)\n', (800, 808), True, 'import matplotlib.pyplot as plt\n'), ((813, 835), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""distance"""'], {}), "('distance')\n", (823, 835), True, 'import matplotlib.pyplot as plt\n'), ((840, 863), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""num_steps"""'], {}), "('num_steps')\n", (850, 863), True, 'import matplotlib.pyplot as plt\n'), ((868, 893), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""k-dist.png"""'], {}), "('k-dist.png')\n", (879, 893), True, 'import matplotlib.pyplot as plt\n'), ((1066, 1111), 'hdbscan.HDBSCAN', 'hdbscan.HDBSCAN', ([], {'min_cluster_size': 'MIN_SAMPLES'}), '(min_cluster_size=MIN_SAMPLES)\n', (1081, 1111), False, 'import hdbscan\n'), ((2507, 2560), 'mdtraj.load_dcd', 'md.load_dcd', (['"""CaM_Trial3.dcd"""'], {'top': '"""trajectory_1.pdb"""'}), "('CaM_Trial3.dcd', top='trajectory_1.pdb')\n", (2518, 2560), True, 'import mdtraj as md\n'), ((1202, 1225), 'numpy.max', 'np.max', (['cluster_indices'], {}), '(cluster_indices)\n', (1208, 1225), True, 'import numpy as np\n'), ((1843, 1867), 'numpy.mean', 'np.mean', (['rmsds_to_native'], {}), '(rmsds_to_native)\n', (1850, 1867), True, 'import numpy as np\n'), ((1885, 1911), 'numpy.median', 'np.median', (['rmsds_to_native'], {}), '(rmsds_to_native)\n', (1894, 1911), True, 'import numpy as np\n'), ((1927, 1950), 'numpy.min', 'np.min', (['rmsds_to_native'], {}), '(rmsds_to_native)\n', (1933, 1950), True, 'import numpy as np\n'), ((1966, 1989), 'numpy.max', 'np.max', (['rmsds_to_native'], {}), '(rmsds_to_native)\n', (1972, 1989), True, 'import numpy as np\n'), ((2005, 2028), 'numpy.std', 'np.std', (['rmsds_to_native'], {}), '(rmsds_to_native)\n', (2011, 2028), True, 'import numpy as np\n'), ((1268, 1302), 'numpy.where', 'np.where', (['(cluster_indices == index)'], {}), '(cluster_indices == index)\n', (1276, 1302), True, 'import numpy as np\n'), ((1508, 1539), 'numpy.where', 'np.where', (['(cluster_indices == -1)'], {}), '(cluster_indices == -1)\n', (1516, 1539), True, 'import numpy as np\n'), ((1806, 1824), 'mdtraj.rmsd', 'md.rmsd', (['c', 'native'], {}), '(c, native)\n', (1813, 1824), True, 'import mdtraj as md\n')] |
import re
from dataclasses import dataclass
from itertools import combinations
from aoc.util import load_input, load_example
def part1(lines):
r""" ¯\_(ツ)_/¯ """
return next(index for index, line in enumerate(lines) if "a=<0,0,0>" in line)
@dataclass
class Particle:
position: tuple
velocity: tuple
acceleration: tuple
def e(triple):
return (int(t) for t in triple.split(","))
def prepare_data(lines):
particle_matcher = re.compile(r"p=<(.*)>, v=<(.*)>, a=<(.*)>")
particles = []
for line in lines:
m = particle_matcher.match(line)
particles.append(Particle(*(e(g) for g in m.groups())))
return particles
def simulate(particles):
result = []
for p in particles:
px, py, pz = p.position
vx, vy, vz = p.velocity
ax, ay, az = p.acceleration
vx += ax
vy += ay
vz += az
px += vx
py += vy
pz += vz
result.append(Particle((px, py, pz), (vx, vy, vz), (ax, ay, az)))
return result
def part2(lines):
"""
>>> part2(load_example(__file__, "20"))
1
"""
particles = prepare_data(lines)
for _ in range(50):
tbr = set()
for p0, p1 in combinations(particles, 2):
if p0.position == p1.position:
tbr.add(p0.position)
particles = [p for p in particles if p.position not in tbr]
particles = simulate(particles)
return len(particles)
if __name__ == "__main__":
data = load_input(__file__, 2017, "20")
print(part1(data))
print(part2(data))
| [
"itertools.combinations",
"aoc.util.load_input",
"re.compile"
] | [((458, 500), 're.compile', 're.compile', (['"""p=<(.*)>, v=<(.*)>, a=<(.*)>"""'], {}), "('p=<(.*)>, v=<(.*)>, a=<(.*)>')\n", (468, 500), False, 'import re\n'), ((1501, 1533), 'aoc.util.load_input', 'load_input', (['__file__', '(2017)', '"""20"""'], {}), "(__file__, 2017, '20')\n", (1511, 1533), False, 'from aoc.util import load_input, load_example\n'), ((1219, 1245), 'itertools.combinations', 'combinations', (['particles', '(2)'], {}), '(particles, 2)\n', (1231, 1245), False, 'from itertools import combinations\n')] |
# Copyright (c) 2015 Xilinx Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import os
import shutil
import subprocess
import re
import threading
from hopper.utils.logger import *
import hopper.utils.bitbake.config
import hopper.utils.process
class BitBakeResult:
TaskUnknown = 0
TaskRunning = 1
TaskComplete = 2
TaskFailed = 3
@staticmethod
def __parsestate__(state):
s = state.lower()
if s == "started":
return BitBakeResult.TaskRunning
elif s == "succeeded":
return BitBakeResult.TaskComplete
elif s == "failed":
return BitBakeResult.TaskFailed
return BitBakeResult.TaskUnknown
def __init__(self):
self.warnings = []
self.errors = []
self.tasks = []
def updatetask(self, recipe, name, state):
# find it in the collection
for i in self.tasks:
if i[0] == recipe and i[1] == name:
i[2] = state
return
# create it
newstate = [recipe, name, state]
self.tasks.append(newstate)
class BitBakeKnottyListener(hopper.utils.process.ProcessListener):
def __init__(self, logger):
self.lock = threading.RLock()
self.logger = logger
self.state = BitBakeResult()
def result(self):
with self.lock:
return self.state
def __updateresult__(self, line):
with self.lock:
m = re.search("(NOTE|WARNING|ERROR): (.*)", line)
if m:
if m.group(1) == "WARNING":
self.state.warnings.append(m.group(2))
elif m.group(1) == "ERROR":
self.state.errors.append(m.group(2))
elif m.group(1) == "NOTE":
mtask = re.search("recipe (.*?): task (.*?): (.*)", m.group(2))
if mtask:
self.state.updatetask(mtask.group(1), mtask.group(2),
BitBakeResult.__parsestate__(mtask.group(3)))
def update(self, data, err = False):
for i in data.splitlines():
if self.logger != None:
self.logger.log(i)
self.__updateresult__(i)
class BitBakeTask(hopper.utils.process.Process):
def __init__(self, environment, config, command = []):
hopper.utils.process.Process.__init__(self, environment, command)
self.redirect = False
self.workingDirectory = self.environment.getWorkingPath()
self.config = config
self.bbenvironment = {}
self.overwrite = True
self.sublogger = None
@staticmethod
def convertTargetArgs(targets, striptask = False):
if targets:
tasklist = []
taskcount = 0
for i in targets:
if ":" in i and not(striptask):
tasklist.append(i.split(":"))
taskcount += 1
else:
tasklist.append((i, None))
if (len(tasklist) > 1 and taskcount != 0) or taskcount > 1:
raise Exception("BitBake and Hopper does not implement mixed task building, only one task with one target.")
args = []
for i in tasklist:
if i[0] != None and len(i[0]) != 0:
if i[1] != None and len(i[1]) != 0:
args.append("-c")
args.append(i[1])
args.append(i[0])
return args
return []
@staticmethod
def taskBuild(environment, config, targets):
return BitBakeTask(environment, config,
["bitbake", "-k"] + BitBakeTask.convertTargetArgs(targets))
def execute(self):
self.environment.log("Starting bitbake process '%s'" % " ".join(self.command))
if not self.config:
raise Exception("BitBake Configuration is missing from the task.")
self.environment.log("Prepare BitBake Configuration")
configgen = hopper.utils.bitbake.config.ConfigurationGenerator(self.environment, self.config)
if not configgen.generate(overwrite = self.overwrite):
raise Exception("Failed to generate configuration")
if self.sublogger:
self.redirect = True
listener = BitBakeKnottyListener(self.sublogger)
result = hopper.utils.process.Process.execute(self, listeners = [listener])
return (result[0], listener.result())
result = hopper.utils.process.Process.execute(self)
return (result[0], None)
def getEnvironment(self):
env = hopper.utils.process.Process.getEnvironment(self)
scriptdirs = []
bitbakedir = None
for i in self.config.layers:
sourcepath = i.getSourcePath(self.environment)
rootsourcepath = i.getRootSourcePath(self.environment)
# find bitbake
if i.isBitBake():
bitbakedir = sourcepath
elif i.getName() == "poky":
bitbakedir = os.path.join(sourcepath, "bitbake")
# any layers and roots of layers with scripts directories
for p in [sourcepath, rootsourcepath]:
path = os.path.join(p, "scripts")
if path and os.path.isdir(path):
if path not in scriptdirs:
self.environment.verbose("Adding scripts directory '%s' to path (from layer '%s')" % (path, i.getName()))
scriptdirs.append(path)
self.environment.debug("Preparing bitbake environment")
env["BUILDDIR"] = self.environment.getWorkingPath()
if scriptdirs:
for i in scriptdirs:
if os.path.exists(i):
env["PATH"] += ":%s" % i
if bitbakedir and os.path.exists(bitbakedir):
env["BITBAKEDIR"] = bitbakedir
env["PATH"] += ":%s/bin" % bitbakedir
else:
raise Exception("BitBake is required, not found in repo/layers")
# Sanitize PATH
oldpath = env["PATH"]
if oldpath.find("\r") >= 0 or oldpath.find("\n") >= 0:
warn("Your environment PATH variable contains '\\r' and/or '\\n' characters (consider cleaning that up)")
oldpath = oldpath.replace('\r', '').replace('\n', '')
newpath = []
for i in oldpath.split(":"):
if i and len(i) > 0:
newpath.append(i)
env["PATH"] = ":".join(newpath)
self.environment.debug("env[PATH] = '%s'" % env["PATH"])
# Whitelist some passthrough variables
envWhitelist = ["HTTP_PROXY", "http_proxy",
"HTTPS_PROXY", "https_proxy",
"FTP_PROXY", "ftp_proxy",
"FTPS_PROXY", "ftps_proxy",
"NO_PROXY", "no_proxy",
"ALL_PROXY", "all_proxy",
"SSH_AGENT_PID", "SSH_AUTH_SOCK",
"GIT_SSL_CAINFO", "GIT_SSL_CAPATH",
]
self.environment.debug("Preparing bitbake environment overrides")
for i in self.bbenvironment.iteritems():
self.environment.debug("env['%s'] = '%s'" % (i[0], i[1]))
env[i[0]] = i[1]
envWhitelist.append(i[0])
env["BB_ENV_EXTRAWHITE"] = " ".join(envWhitelist)
self.environment.debug("BB_ENV_EXTRAWHITE = '%s'" % env['BB_ENV_EXTRAWHITE'])
return env
| [
"os.path.exists",
"os.path.join",
"threading.RLock",
"os.path.isdir",
"re.search"
] | [((2070, 2087), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (2085, 2087), False, 'import threading\n'), ((2262, 2307), 're.search', 're.search', (['"""(NOTE|WARNING|ERROR): (.*)"""', 'line'], {}), "('(NOTE|WARNING|ERROR): (.*)', line)\n", (2271, 2307), False, 'import re\n'), ((5789, 5815), 'os.path.exists', 'os.path.exists', (['bitbakedir'], {}), '(bitbakedir)\n', (5803, 5815), False, 'import os\n'), ((5319, 5345), 'os.path.join', 'os.path.join', (['p', '"""scripts"""'], {}), "(p, 'scripts')\n", (5331, 5345), False, 'import os\n'), ((5719, 5736), 'os.path.exists', 'os.path.exists', (['i'], {}), '(i)\n', (5733, 5736), False, 'import os\n'), ((5168, 5203), 'os.path.join', 'os.path.join', (['sourcepath', '"""bitbake"""'], {}), "(sourcepath, 'bitbake')\n", (5180, 5203), False, 'import os\n'), ((5362, 5381), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (5375, 5381), False, 'import os\n')] |
import openmc
class FusionRingSource(openmc.Source):
"""An openmc.Source object with some presets to make it more convenient
for fusion simulations using a ring source. All attributes can be changed
after initialization if required. Default isotropic ring source with a Muir
energy distribution.
Args:
radius: the inner radius of the ring source
start_angle: the start angle of the ring in radians,
stop_angle: the end angle of the ring in radians,
temperature: the temperature to use in the Muir distribution in eV,
"""
def __init__(
self,
radius,
start_angle: float =0.,
stop_angle: float = 6.28318530718,
temperature: float = 20000.,
fuel='DT'
):
super().__init__()
# performed after the super init as these are Source attributes
radius = openmc.stats.Discrete([radius], [1])
z_values = openmc.stats.Discrete([0], [1])
angle = openmc.stats.Uniform(a=start_angle, b=stop_angle)
self.space = openmc.stats.CylindricalIndependent(
r=radius,
phi=angle,
z=z_values,
origin=(0.0, 0.0, 0.0)
)
self.angle = openmc.stats.Isotropic()
if fuel == 'DT':
mean_energy = 14080000. # mean energy in eV
mass_of_reactants = 5 # mass of the reactants (D + T) AMU
elif fuel == 'DT':
mean_energy = 2450000. # mean energy in eV
mass_of_reactants = 4 # mass of the reactants (D + D) AMU
else:
raise ValueError(f'fuel must be either "DT" or "DD", not {fuel}')
self.energy = openmc.stats.Muir(e0=mean_energy , m_rat=mass_of_reactants , kt=temperature)
| [
"openmc.stats.Uniform",
"openmc.stats.CylindricalIndependent",
"openmc.stats.Muir",
"openmc.stats.Isotropic",
"openmc.stats.Discrete"
] | [((884, 920), 'openmc.stats.Discrete', 'openmc.stats.Discrete', (['[radius]', '[1]'], {}), '([radius], [1])\n', (905, 920), False, 'import openmc\n'), ((940, 971), 'openmc.stats.Discrete', 'openmc.stats.Discrete', (['[0]', '[1]'], {}), '([0], [1])\n', (961, 971), False, 'import openmc\n'), ((988, 1037), 'openmc.stats.Uniform', 'openmc.stats.Uniform', ([], {'a': 'start_angle', 'b': 'stop_angle'}), '(a=start_angle, b=stop_angle)\n', (1008, 1037), False, 'import openmc\n'), ((1059, 1156), 'openmc.stats.CylindricalIndependent', 'openmc.stats.CylindricalIndependent', ([], {'r': 'radius', 'phi': 'angle', 'z': 'z_values', 'origin': '(0.0, 0.0, 0.0)'}), '(r=radius, phi=angle, z=z_values, origin\n =(0.0, 0.0, 0.0))\n', (1094, 1156), False, 'import openmc\n'), ((1231, 1255), 'openmc.stats.Isotropic', 'openmc.stats.Isotropic', ([], {}), '()\n', (1253, 1255), False, 'import openmc\n'), ((1677, 1751), 'openmc.stats.Muir', 'openmc.stats.Muir', ([], {'e0': 'mean_energy', 'm_rat': 'mass_of_reactants', 'kt': 'temperature'}), '(e0=mean_energy, m_rat=mass_of_reactants, kt=temperature)\n', (1694, 1751), False, 'import openmc\n')] |
from allauth.socialaccount.providers.oauth2_provider.urls import default_urlpatterns
from .provider import EveOnlineProvider
urlpatterns = default_urlpatterns(EveOnlineProvider)
| [
"allauth.socialaccount.providers.oauth2_provider.urls.default_urlpatterns"
] | [((142, 180), 'allauth.socialaccount.providers.oauth2_provider.urls.default_urlpatterns', 'default_urlpatterns', (['EveOnlineProvider'], {}), '(EveOnlineProvider)\n', (161, 180), False, 'from allauth.socialaccount.providers.oauth2_provider.urls import default_urlpatterns\n')] |
from .errors import DownloaderFinished, NotAcceptedFormat, StreamNotFound
from moviepy.audio.io.AudioFileClip import AudioFileClip
from typing import Any, BinaryIO, Optional
from pytube import Stream, YouTube
class Downloader(YouTube):
def __init__(self, url: str, format: str):
super().__init__(
url,
on_complete_callback=lambda *args: self.complete_callback(*args)
)
self._stream: Stream = None
self._finished: bool = False
self.url: str = url
self.format: str = format
self.output: str = ""
@property
def finished(self):
return self._finished
@property
def stream(self):
return self._stream
def complete_callback(self, stream: Any, file_path: str):
self.output = file_path
self._stream = stream
if stream.includes_audio_track and not stream.includes_video_track:
output = f"{file_path[:-4]}.mp3"
with AudioFileClip(file_path) as sound:
sound.write_audiofile(output)
self.output = output
self._finished = True
def filter_by_format(self, format: str) -> "Stream":
if format == "audio":
return self.streams.get_audio_only()
elif format == "video":
return self.streams.get_highest_resolution()
elif format in ("mp3", "mp4"):
query = self.streams.filter(file_extension=format)
if format == "mp3":
stream = query.get_audio_only()
else:
stream = query.get_highest_resolution()
if stream is not None:
return stream
raise StreamNotFound()
raise NotAcceptedFormat()
def start(self, buffer: Optional[BinaryIO] = None):
if not self.finished:
stream = self.filter_by_format(self.format.lower())
if buffer is not None:
stream.stream_to_buffer(buffer)
else:
stream.download()
else:
raise DownloaderFinished()
def __str__(self) -> str:
return self.output
| [
"moviepy.audio.io.AudioFileClip.AudioFileClip"
] | [((982, 1006), 'moviepy.audio.io.AudioFileClip.AudioFileClip', 'AudioFileClip', (['file_path'], {}), '(file_path)\n', (995, 1006), False, 'from moviepy.audio.io.AudioFileClip import AudioFileClip\n')] |
import typing as t
import numpy as np
import pandas as pd
from house_prices_regression_model import __version__ as VERSION
from house_prices_regression_model.processing.data_manager import load_pipeline
from house_prices_regression_model.config.core import load_config_file, SETTINGS_PATH
from house_prices_regression_model.processing.data_validation import validate_inputs
# Config files
config = load_config_file(SETTINGS_PATH)
PIPELINE_ARTIFACT_NAME = config["PIPELINE_ARTIFACT_NAME"]
pipeline_file_name = f"{PIPELINE_ARTIFACT_NAME}_v{VERSION}.pkl"
cb_pipe = load_pipeline(file_name=pipeline_file_name)
#Function
def make_prediction(*,input_data: t.Union[pd.DataFrame, dict],) -> list:
"""Make a prediction using a saved model pipeline."""
df = pd.DataFrame(input_data)
validated_df, error_dict = validate_inputs(input_data=df)
errors_list = list(error_dict.values())
results = {'model_output': None}
if error_dict == {}:
log_predictions = cb_pipe.predict(validated_df)
predictions = [np.exp(pred) for pred in log_predictions]
results['model_output'] = predictions
else:
results['model_output'] = 'Errors making prediction:' + ' '.join(map(str, errors_list))
return results
| [
"house_prices_regression_model.config.core.load_config_file",
"house_prices_regression_model.processing.data_manager.load_pipeline",
"numpy.exp",
"house_prices_regression_model.processing.data_validation.validate_inputs",
"pandas.DataFrame"
] | [((400, 431), 'house_prices_regression_model.config.core.load_config_file', 'load_config_file', (['SETTINGS_PATH'], {}), '(SETTINGS_PATH)\n', (416, 431), False, 'from house_prices_regression_model.config.core import load_config_file, SETTINGS_PATH\n'), ((564, 607), 'house_prices_regression_model.processing.data_manager.load_pipeline', 'load_pipeline', ([], {'file_name': 'pipeline_file_name'}), '(file_name=pipeline_file_name)\n', (577, 607), False, 'from house_prices_regression_model.processing.data_manager import load_pipeline\n'), ((760, 784), 'pandas.DataFrame', 'pd.DataFrame', (['input_data'], {}), '(input_data)\n', (772, 784), True, 'import pandas as pd\n'), ((816, 846), 'house_prices_regression_model.processing.data_validation.validate_inputs', 'validate_inputs', ([], {'input_data': 'df'}), '(input_data=df)\n', (831, 846), False, 'from house_prices_regression_model.processing.data_validation import validate_inputs\n'), ((1032, 1044), 'numpy.exp', 'np.exp', (['pred'], {}), '(pred)\n', (1038, 1044), True, 'import numpy as np\n')] |
# Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Classical and quantum :class:`.Runnable` `dimod <http://dimod.readthedocs.io/en/stable/>`_
samplers for problems and subproblems.
"""
import time
import logging
import threading
from collections import namedtuple
import dimod
from dwave.system.samplers import DWaveSampler
from dwave.system.composites import EmbeddingComposite, FixedEmbeddingComposite
from tabu import TabuSampler
from neal import SimulatedAnnealingSampler
from hybrid.core import Runnable, SampleSet
from hybrid.flow import Loop
from hybrid.utils import random_sample
from hybrid import traits
__all__ = [
'QPUSubproblemExternalEmbeddingSampler', 'QPUSubproblemAutoEmbeddingSampler',
'ReverseAnnealingAutoEmbeddingSampler',
'SimulatedAnnealingSubproblemSampler', 'InterruptableSimulatedAnnealingSubproblemSampler',
'SimulatedAnnealingProblemSampler', 'InterruptableSimulatedAnnealingProblemSampler',
'TabuSubproblemSampler', 'TabuProblemSampler', 'InterruptableTabuSampler',
'RandomSubproblemSampler',
]
logger = logging.getLogger(__name__)
class QPUSubproblemExternalEmbeddingSampler(Runnable, traits.SubproblemSampler, traits.EmbeddingIntaking):
"""A quantum sampler for a subproblem with a defined minor-embedding.
Args:
num_reads (int, optional, default=100):
Number of states (output solutions) to read from the sampler.
qpu_sampler (:class:`dimod.Sampler`, optional, default=DWaveSampler()):
Quantum sampler such as a D-Wave system.
Examples:
See examples on https://docs.ocean.dwavesys.com/projects/hybrid/en/latest/reference/samplers.html#examples.
"""
def __init__(self, num_reads=100, qpu_sampler=None, **runopts):
super(QPUSubproblemExternalEmbeddingSampler, self).__init__(**runopts)
self.num_reads = num_reads
if qpu_sampler is None:
qpu_sampler = DWaveSampler()
self.sampler = qpu_sampler
def __repr__(self):
return ("{self}(num_reads={self.num_reads!r}, "
"qpu_sampler={self.sampler!r})").format(self=self)
def next(self, state, **runopts):
sampler = FixedEmbeddingComposite(self.sampler, embedding=state.embedding)
response = sampler.sample(state.subproblem, num_reads=self.num_reads)
return state.updated(subsamples=response)
class QPUSubproblemAutoEmbeddingSampler(Runnable, traits.SubproblemSampler):
"""A quantum sampler for a subproblem with automated heuristic
minor-embedding.
Args:
num_reads (int, optional, default=100):
Number of states (output solutions) to read from the sampler.
qpu_sampler (:class:`dimod.Sampler`, optional, default=EmbeddingComposite(DWaveSampler())):
Quantum sampler such as a D-Wave system. If sampler is structured,
it will be converted to unstructured via
:class:`~dwave.system.composited.EmbeddingComposite`.
Examples:
See examples on https://docs.ocean.dwavesys.com/projects/hybrid/en/latest/reference/samplers.html#examples.
"""
def __init__(self, num_reads=100, qpu_sampler=None, **runopts):
super(QPUSubproblemAutoEmbeddingSampler, self).__init__(**runopts)
self.num_reads = num_reads
if qpu_sampler is None:
qpu_sampler = DWaveSampler()
# convert the structured sampler to unstructured
if isinstance(qpu_sampler, dimod.Structured):
self.sampler = EmbeddingComposite(qpu_sampler)
else:
self.sampler = qpu_sampler
def __repr__(self):
return ("{self}(num_reads={self.num_reads!r}, "
"qpu_sampler={self.sampler!r})").format(self=self)
def next(self, state, **runopts):
response = self.sampler.sample(state.subproblem, num_reads=self.num_reads)
return state.updated(subsamples=response)
class ReverseAnnealingAutoEmbeddingSampler(Runnable, traits.SubproblemSampler):
"""A quantum reverse annealing sampler for a subproblem with automated
heuristic minor-embedding.
Args:
num_reads (int, optional, default=100):
Number of states (output solutions) to read from the sampler.
qpu_sampler (:class:`dimod.Sampler`, optional, default=EmbeddingComposite(DWaveSampler())):
Quantum sampler such as a D-Wave system. If sampler is structured,
it will be converted to unstructured via
:class:`~dwave.system.composited.EmbeddingComposite`.
anneal_schedule (list(list), optional, default=[[0, 1], [0.5, 0.5], [1, 1]]):
An anneal schedule defined by a series of pairs of floating-point
numbers identifying points in the schedule at which to change slope.
The first element in the pair is time t in microseconds; the second,
normalized persistent current s in the range [0,1]. The resulting
schedule is the piecewise-linear curve that connects the provided
points. For more details, see
:meth:`~dwave.system.DWaveSampler.validate_anneal_schedule`.
"""
def __init__(self, num_reads=100, qpu_sampler=None, anneal_schedule=None, **runopts):
super(ReverseAnnealingAutoEmbeddingSampler, self).__init__(**runopts)
self.num_reads = num_reads
if anneal_schedule is None:
anneal_schedule = [[0, 1], [0.5, 0.5], [1, 1]]
self.anneal_schedule = anneal_schedule
if qpu_sampler is None:
qpu_sampler = DWaveSampler(
solver={'max_anneal_schedule_points__gte': len(self.anneal_schedule)})
# validate schedule, raising `ValueError` on invalid schedule or
# `RuntimeError` if anneal schedule not supported by QPU (this could
# happen only if user provided the `qpu_sampler`)
qpu_sampler.validate_anneal_schedule(anneal_schedule)
# convert the structured sampler to unstructured
if isinstance(qpu_sampler, dimod.Structured):
self.sampler = EmbeddingComposite(qpu_sampler)
else:
self.sampler = qpu_sampler
def __repr__(self):
return ("{self}(num_reads={self.num_reads!r}, "
"qpu_sampler={self.sampler!r}, "
"anneal_schedule={self.anneal_schedule!r})").format(self=self)
def next(self, state, **runopts):
# TODO: handle more than just the first subsample
response = self.sampler.sample(
state.subproblem, num_reads=self.num_reads,
initial_state=state.subsamples.first.sample,
anneal_schedule=self.anneal_schedule)
return state.updated(subsamples=response)
class SimulatedAnnealingSubproblemSampler(Runnable, traits.SubproblemSampler):
"""A simulated annealing sampler for a subproblem.
Args:
num_reads (int, optional, default=1):
Number of states (output solutions) to read from the sampler.
sweeps (int, optional, default=1000):
Number of sweeps or steps.
Examples:
See examples on https://docs.ocean.dwavesys.com/projects/hybrid/en/latest/reference/samplers.html#examples.
"""
def __init__(self, num_reads=1, sweeps=1000, **runopts):
super(SimulatedAnnealingSubproblemSampler, self).__init__(**runopts)
self.num_reads = num_reads
self.sweeps = sweeps
self.sampler = SimulatedAnnealingSampler()
self._stop_event = threading.Event()
def __repr__(self):
return ("{self}(num_reads={self.num_reads!r}, "
"sweeps={self.sweeps!r})").format(self=self)
def next(self, state, **runopts):
subbqm = state.subproblem
response = self.sampler.sample(
subbqm, num_reads=self.num_reads, sweeps=self.sweeps,
interrupt_function=lambda: self._stop_event.is_set())
return state.updated(subsamples=response)
def halt(self):
self._stop_event.set()
class InterruptableSimulatedAnnealingSubproblemSampler(SimulatedAnnealingSubproblemSampler):
"""SimulatedAnnealingSubproblemSampler is already interruptable."""
pass
class SimulatedAnnealingProblemSampler(Runnable, traits.ProblemSampler):
"""A simulated annealing sampler for a complete problem.
Args:
num_reads (int, optional, default=1):
Number of states (output solutions) to read from the sampler.
sweeps (int, optional, default=1000):
Number of sweeps or steps.
"""
def __init__(self, num_reads=1, sweeps=1000, **runopts):
super(SimulatedAnnealingProblemSampler, self).__init__(**runopts)
self.num_reads = num_reads
self.sweeps = sweeps
self.sampler = SimulatedAnnealingSampler()
self._stop_event = threading.Event()
def __repr__(self):
return ("{self}(num_reads={self.num_reads!r}, "
"sweeps={self.sweeps!r})").format(self=self)
def next(self, state, **runopts):
bqm = state.problem
response = self.sampler.sample(
bqm, num_reads=self.num_reads, sweeps=self.sweeps,
interrupt_function=lambda: self._stop_event.is_set())
return state.updated(samples=response)
def halt(self):
self._stop_event.set()
class InterruptableSimulatedAnnealingProblemSampler(SimulatedAnnealingProblemSampler):
"""SimulatedAnnealingProblemSampler is already interruptable."""
pass
class TabuSubproblemSampler(Runnable, traits.SubproblemSampler):
"""A tabu sampler for a subproblem.
Args:
num_reads (int, optional, default=1):
Number of states (output solutions) to read from the sampler.
tenure (int, optional):
Tabu tenure, which is the length of the tabu list, or number of
recently explored solutions kept in memory. Default is a quarter of
the number of problem variables up to a maximum value of 20.
timeout (int, optional, default=20):
Total running time in milliseconds.
Examples:
See examples on https://docs.ocean.dwavesys.com/projects/hybrid/en/latest/reference/samplers.html#examples.
"""
def __init__(self, num_reads=1, tenure=None, timeout=20, **runopts):
super(TabuSubproblemSampler, self).__init__(**runopts)
self.num_reads = num_reads
self.tenure = tenure
self.timeout = timeout
self.sampler = TabuSampler()
def __repr__(self):
return ("{self}(num_reads={self.num_reads!r}, "
"tenure={self.tenure!r}, "
"timeout={self.timeout!r})").format(self=self)
def next(self, state, **runopts):
subbqm = state.subproblem
response = self.sampler.sample(
subbqm, tenure=self.tenure, timeout=self.timeout, num_reads=self.num_reads)
return state.updated(subsamples=response)
class TabuProblemSampler(Runnable, traits.ProblemSampler):
"""A tabu sampler for a binary quadratic problem.
Args:
num_reads (int, optional, default=1):
Number of states (output solutions) to read from the sampler.
tenure (int, optional):
Tabu tenure, which is the length of the tabu list, or number of
recently explored solutions kept in memory. Default is a quarter of
the number of problem variables up to a maximum value of 20.
timeout (int, optional, default=20):
Total running time in milliseconds.
Examples:
See examples on https://docs.ocean.dwavesys.com/projects/hybrid/en/latest/reference/samplers.html#examples.
"""
def __init__(self, num_reads=1, tenure=None, timeout=20, **runopts):
super(TabuProblemSampler, self).__init__(**runopts)
self.num_reads = num_reads
self.tenure = tenure
self.timeout = timeout
self.sampler = TabuSampler()
def __repr__(self):
return ("{self}(num_reads={self.num_reads!r}, "
"tenure={self.tenure!r}, "
"timeout={self.timeout!r})").format(self=self)
def next(self, state, **runopts):
sampleset = self.sampler.sample(
state.problem, init_solution=state.samples, tenure=self.tenure,
timeout=self.timeout, num_reads=self.num_reads)
return state.updated(samples=sampleset)
class InterruptableTabuSampler(Loop):
"""An interruptable tabu sampler for a binary quadratic problem.
Args:
num_reads (int, optional, default=1):
Number of states (output solutions) to read from the sampler.
tenure (int, optional):
Tabu tenure, which is the length of the tabu list, or number of
recently explored solutions kept in memory. Default is a quarter of
the number of problem variables up to a maximum value of 20.
timeout (int, optional, default=20):
Timeout for non-interruptable operation of tabu search. At the
completion of each loop of tabu search through its problem
variables, if this time interval has been exceeded, the search can
be stopped by an interrupt signal or expiration of the `timeout`
parameter.
max_time (float, optional, default=None):
Total running time in milliseconds.
Examples:
See examples on https://docs.ocean.dwavesys.com/projects/hybrid/en/latest/reference/samplers.html#examples.
"""
def __init__(self, max_time=None, **tabu):
super(InterruptableTabuSampler, self).__init__(
TabuProblemSampler(**tabu), max_time=max_time)
class RandomSubproblemSampler(Runnable, traits.SubproblemSampler):
"""A random sample generator for a subproblem."""
def next(self, state, **runopts):
bqm = state.subproblem
sample = random_sample(bqm)
sampleset = SampleSet.from_samples(sample,
vartype=bqm.vartype,
energy=bqm.energy(sample))
return state.updated(subsamples=sampleset)
| [
"logging.getLogger",
"dwave.system.samplers.DWaveSampler",
"hybrid.utils.random_sample",
"dwave.system.composites.FixedEmbeddingComposite",
"threading.Event",
"neal.SimulatedAnnealingSampler",
"dwave.system.composites.EmbeddingComposite",
"tabu.TabuSampler"
] | [((1598, 1625), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1615, 1625), False, 'import logging\n'), ((2719, 2783), 'dwave.system.composites.FixedEmbeddingComposite', 'FixedEmbeddingComposite', (['self.sampler'], {'embedding': 'state.embedding'}), '(self.sampler, embedding=state.embedding)\n', (2742, 2783), False, 'from dwave.system.composites import EmbeddingComposite, FixedEmbeddingComposite\n'), ((7975, 8002), 'neal.SimulatedAnnealingSampler', 'SimulatedAnnealingSampler', ([], {}), '()\n', (8000, 8002), False, 'from neal import SimulatedAnnealingSampler\n'), ((8030, 8047), 'threading.Event', 'threading.Event', ([], {}), '()\n', (8045, 8047), False, 'import threading\n'), ((9305, 9332), 'neal.SimulatedAnnealingSampler', 'SimulatedAnnealingSampler', ([], {}), '()\n', (9330, 9332), False, 'from neal import SimulatedAnnealingSampler\n'), ((9360, 9377), 'threading.Event', 'threading.Event', ([], {}), '()\n', (9375, 9377), False, 'import threading\n'), ((11017, 11030), 'tabu.TabuSampler', 'TabuSampler', ([], {}), '()\n', (11028, 11030), False, 'from tabu import TabuSampler\n'), ((12476, 12489), 'tabu.TabuSampler', 'TabuSampler', ([], {}), '()\n', (12487, 12489), False, 'from tabu import TabuSampler\n'), ((14439, 14457), 'hybrid.utils.random_sample', 'random_sample', (['bqm'], {}), '(bqm)\n', (14452, 14457), False, 'from hybrid.utils import random_sample\n'), ((2457, 2471), 'dwave.system.samplers.DWaveSampler', 'DWaveSampler', ([], {}), '()\n', (2469, 2471), False, 'from dwave.system.samplers import DWaveSampler\n'), ((3889, 3903), 'dwave.system.samplers.DWaveSampler', 'DWaveSampler', ([], {}), '()\n', (3901, 3903), False, 'from dwave.system.samplers import DWaveSampler\n'), ((4043, 4074), 'dwave.system.composites.EmbeddingComposite', 'EmbeddingComposite', (['qpu_sampler'], {}), '(qpu_sampler)\n', (4061, 4074), False, 'from dwave.system.composites import EmbeddingComposite, FixedEmbeddingComposite\n'), ((6599, 6630), 'dwave.system.composites.EmbeddingComposite', 'EmbeddingComposite', (['qpu_sampler'], {}), '(qpu_sampler)\n', (6617, 6630), False, 'from dwave.system.composites import EmbeddingComposite, FixedEmbeddingComposite\n')] |
# Generated by Django 3.1.11 on 2021-07-01 20:18
from django.db import migrations, models
import grandchallenge.core.storage
class Migration(migrations.Migration):
dependencies = [
("products", "0006_product_ce_under"),
]
operations = [
migrations.CreateModel(
name="ProjectAirFiles",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("title", models.CharField(max_length=150)),
(
"study_file",
models.FileField(
upload_to=grandchallenge.core.storage.get_pdf_path
),
),
],
),
]
| [
"django.db.models.FileField",
"django.db.models.AutoField",
"django.db.models.CharField"
] | [((416, 509), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (432, 509), False, 'from django.db import migrations, models\n'), ((671, 703), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(150)'}), '(max_length=150)\n', (687, 703), False, 'from django.db import migrations, models\n'), ((778, 846), 'django.db.models.FileField', 'models.FileField', ([], {'upload_to': 'grandchallenge.core.storage.get_pdf_path'}), '(upload_to=grandchallenge.core.storage.get_pdf_path)\n', (794, 846), False, 'from django.db import migrations, models\n')] |
from django.core.exceptions import ValidationError
def is_alpha_or_space_validator(value):
result = all(c.isalpha() or c.isspace() for c in value)
if not result:
raise ValidationError("Write a valid name.")
| [
"django.core.exceptions.ValidationError"
] | [((186, 224), 'django.core.exceptions.ValidationError', 'ValidationError', (['"""Write a valid name."""'], {}), "('Write a valid name.')\n", (201, 224), False, 'from django.core.exceptions import ValidationError\n')] |
import numpy as np
from amlearn.utils.basetest import AmLearnTest
from amlearn.utils.data import get_isometric_lists
class test_data(AmLearnTest):
def setUp(self):
pass
def test_get_isometric_lists(self):
test_lists= [[1, 2, 3], [4], [5, 6], [1, 2, 3]]
isometric_lists = \
get_isometric_lists(test_lists, limit_width=80, fill_value=0)
self.assertEqual(np.array(isometric_lists).shape, (4, 80))
test_arrays = np.array([np.array([1, 2, 3]), np.array([4]),
np.array([5, 6]), np.array([1, 2, 3])])
isometric_arrays = \
get_isometric_lists(test_arrays, limit_width=80, fill_value=0)
self.assertEqual(np.array(isometric_arrays).shape, (4, 80))
| [
"numpy.array",
"amlearn.utils.data.get_isometric_lists"
] | [((320, 381), 'amlearn.utils.data.get_isometric_lists', 'get_isometric_lists', (['test_lists'], {'limit_width': '(80)', 'fill_value': '(0)'}), '(test_lists, limit_width=80, fill_value=0)\n', (339, 381), False, 'from amlearn.utils.data import get_isometric_lists\n'), ((630, 692), 'amlearn.utils.data.get_isometric_lists', 'get_isometric_lists', (['test_arrays'], {'limit_width': '(80)', 'fill_value': '(0)'}), '(test_arrays, limit_width=80, fill_value=0)\n', (649, 692), False, 'from amlearn.utils.data import get_isometric_lists\n'), ((407, 432), 'numpy.array', 'np.array', (['isometric_lists'], {}), '(isometric_lists)\n', (415, 432), True, 'import numpy as np\n'), ((482, 501), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (490, 501), True, 'import numpy as np\n'), ((503, 516), 'numpy.array', 'np.array', (['[4]'], {}), '([4])\n', (511, 516), True, 'import numpy as np\n'), ((549, 565), 'numpy.array', 'np.array', (['[5, 6]'], {}), '([5, 6])\n', (557, 565), True, 'import numpy as np\n'), ((567, 586), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (575, 586), True, 'import numpy as np\n'), ((718, 744), 'numpy.array', 'np.array', (['isometric_arrays'], {}), '(isometric_arrays)\n', (726, 744), True, 'import numpy as np\n')] |
from django import forms
class UserForm(forms.Form):
name = forms.CharField(max_length=30)
email = forms.CharField(max_length=30, widget = forms.EmailInput)
password = forms.CharField(widget = forms.PasswordInput)
class Meta:
fields = ['name', 'email', 'password']
# class HandlerForm(forms.ModelForm):
# class Meta:
# model = Handlers
# fields = ['handle', 'handler_name']
| [
"django.forms.CharField"
] | [((66, 96), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (81, 96), False, 'from django import forms\n'), ((109, 164), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(30)', 'widget': 'forms.EmailInput'}), '(max_length=30, widget=forms.EmailInput)\n', (124, 164), False, 'from django import forms\n'), ((184, 227), 'django.forms.CharField', 'forms.CharField', ([], {'widget': 'forms.PasswordInput'}), '(widget=forms.PasswordInput)\n', (199, 227), False, 'from django import forms\n')] |
import json
import torch
import numpy as np
import os
#from pytorch_pretrained_bert import BertTokenizer
from transformers import BertTokenizer
class BertWordFormatter:
def __init__(self, config, mode):
self.max_question_len = config.getint("data", "max_question_len")
self.max_option_len = config.getint("data", "max_option_len")
self.tokenizer = BertTokenizer.from_pretrained(config.get("model", "bert_path"))
def convert_tokens_to_ids(self, tokens):
arr = []
for a in range(0, len(tokens)):
if tokens[a] in self.word2id:
arr.append(self.word2id[tokens[a]])
else:
arr.append(self.word2id["UNK"])
return arr
def convert(self, tokens, l, bk=False):
tokens = "".join(tokens)
# while len(tokens) < l:
# tokens.append("PAD")
# if bk:
# tokens = tokens[len(tokens) - l:]
# else:
# tokens = tokens[:l]
ids = self.tokenizer.tokenize(tokens)
return ids
def _convert_sentence_pair_to_bert_dataset(
self, context, max_len):
"""Convert sentence pairs to dataset for BERT model.
Args:
sc_list, bc_list: List[List[str]], list of word tokens list
label_list: train: List[int], list of labels
test: []
Returns:
Train:
torch.utils.data.TensorDataset
each record: (input_ids, input_mask, segment_ids, label)
Test:
torch.utils.data.TensorDataset
each record: (input_ids, input_mask, segment_ids)
"""
all_input_ids, all_input_mask, all_segment_ids = [], [], []
for i, _ in enumerate(context):
if len(context[i]) > max_len:
context[i] = context[i][-max_len:]
tokens = ['[CLS]'] + context[i] + ['[SEP]']
segment_ids = [i%2] * len(tokens)
if len(tokens) > max_len:
tokens = tokens[:max_len]
assert len(tokens) == max_len
segment_ids = segment_ids[:max_len]
input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
tokens_len = len(input_ids)
input_ids += [0] * (max_len - tokens_len)
segment_ids += [0] * (max_len - tokens_len)
input_mask += [0] * (max_len - tokens_len)
all_input_ids.append(input_ids)
all_input_mask.append(input_mask)
all_segment_ids.append(segment_ids)
all_input_ids = torch.tensor(all_input_ids, dtype=torch.long)
all_input_mask = torch.tensor(all_input_mask, dtype=torch.long)
all_segment_ids = torch.tensor(all_segment_ids, dtype=torch.long)
# test
return (
all_input_ids, all_input_mask, all_segment_ids)
def process(self, data, config, mode, *args, **params):
context = []
question = []
label = []
idx = []
for temp_data in data:
idx.append(temp_data["id"])
if mode != "test":
# label_x = []
# for opt in list("ABCD"):
# if opt in temp_data["answer"]:
# label_x.append(1)
# else:
# label_x.append(0)
label_x = -1
if "A" in temp_data["answer"]:
label_x += 1
if "B" in temp_data["answer"]:
label_x += 2
if "C" in temp_data["answer"]:
label_x += 4
if "D" in temp_data["answer"]:
label_x += 8
label.append(label_x)
temp_context = []
temp_question = []
temp_question.append(self.convert(temp_data["statement"], self.max_question_len, bk=True))
for option in ["A", "B", "C", "D"]:
temp_context.append(self.convert(temp_data["option_list"][option], self.max_option_len))
context.extend(temp_context)
question.extend(temp_question)
# question = torch.tensor(question, dtype=torch.long)
# context = torch.tensor(context, dtype=torch.long)
question = self._convert_sentence_pair_to_bert_dataset(question, self.max_question_len)
context = self._convert_sentence_pair_to_bert_dataset(context, self.max_option_len)
if mode != "test":
label = torch.LongTensor(np.array(label, dtype=np.int))
return {"context": context, "question": question, 'label': label, "id": idx}
else:
return {"context": context, "question": question, "id": idx}
| [
"torch.tensor",
"numpy.array"
] | [((2646, 2691), 'torch.tensor', 'torch.tensor', (['all_input_ids'], {'dtype': 'torch.long'}), '(all_input_ids, dtype=torch.long)\n', (2658, 2691), False, 'import torch\n'), ((2717, 2763), 'torch.tensor', 'torch.tensor', (['all_input_mask'], {'dtype': 'torch.long'}), '(all_input_mask, dtype=torch.long)\n', (2729, 2763), False, 'import torch\n'), ((2790, 2837), 'torch.tensor', 'torch.tensor', (['all_segment_ids'], {'dtype': 'torch.long'}), '(all_segment_ids, dtype=torch.long)\n', (2802, 2837), False, 'import torch\n'), ((4584, 4613), 'numpy.array', 'np.array', (['label'], {'dtype': 'np.int'}), '(label, dtype=np.int)\n', (4592, 4613), True, 'import numpy as np\n')] |
from django.db import models
from datetime import datetime
from django.contrib import messages
from django.dispatch import receiver
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from polymorphic.models import PolymorphicModel
from Blog.models import Comment, Article, Announcement
# base mail model
class Mail(PolymorphicModel):
recipient = models.ForeignKey(User, related_name="mail", on_delete=models.CASCADE)
date = models.DateTimeField(default=datetime.now, blank=True)
email_reminder = models.BooleanField(default=False)
heading = models.CharField(max_length=100)
read = models.BooleanField(default=False)
# show self as heading when queried
def __str__(self):
return self.heading
# get all children
def get_children(self):
rel_objs = self._meta.related_objects
return [getattr(self, x.get_accessor_name()) for x in rel_objs if x.model != type(self)]
# new article mail
class NewAnnouncementMail(Mail):
announcement = models.ForeignKey(Announcement, on_delete=models.CASCADE)
# new article mail
class NewArticleMail(Mail):
article = models.ForeignKey(Article, on_delete=models.CASCADE)
# new comment mail
class NewCommentMail(Mail):
article = models.ForeignKey(Article, on_delete=models.CASCADE)
comment = models.ForeignKey(Comment, on_delete=models.CASCADE)
# announcement creation receiver
@receiver(post_save, sender=Announcement)
def new_anouncement_notification(sender, instance, created, **kwargs):
# if new announcement
if created:
# send message to all users
for user in User.objects.all():
message_to_all = NewAnnouncementMail(recipient=user, heading=f"New Announcement.", announcement=instance)
message_to_all.save()
# article creation receiver
@receiver(post_save, sender=Article)
def new_article_notification(sender, instance, created, **kwargs):
# if new article
if created:
# send message to all subscribed users
for profile in instance.author.subscribed.all():
message_to_subscribed = NewArticleMail(recipient=profile.user, heading=f"New Article by {instance.author.username}.", article=instance)
message_to_subscribed.save()
# comment creation receiver
@receiver(post_save, sender=Comment)
def new_comment_notification(sender, instance, created, **kwargs):
# if new comment
if created:
# if the comment is a reply
if instance.parent:
if instance.author != instance.parent.author:
message_to_comment_author = NewCommentMail(recipient=instance.parent.author, heading="New Reply to Your Comment.", article=instance.article, comment=instance)
message_to_comment_author.save()
# send message to author of article if comment is not by same author
elif instance.author != instance.article.author:
message_to_article_author = NewCommentMail(recipient=instance.article.author, heading="New Comment on Your Article.", article=instance.article, comment=instance)
message_to_article_author.save()
| [
"django.db.models.ForeignKey",
"django.db.models.DateTimeField",
"django.db.models.BooleanField",
"django.dispatch.receiver",
"django.contrib.auth.models.User.objects.all",
"django.db.models.CharField"
] | [((1374, 1414), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'Announcement'}), '(post_save, sender=Announcement)\n', (1382, 1414), False, 'from django.dispatch import receiver\n'), ((1753, 1788), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'Article'}), '(post_save, sender=Article)\n', (1761, 1788), False, 'from django.dispatch import receiver\n'), ((2183, 2218), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'Comment'}), '(post_save, sender=Comment)\n', (2191, 2218), False, 'from django.dispatch import receiver\n'), ((389, 459), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'related_name': '"""mail"""', 'on_delete': 'models.CASCADE'}), "(User, related_name='mail', on_delete=models.CASCADE)\n", (406, 459), False, 'from django.db import models\n'), ((468, 522), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'datetime.now', 'blank': '(True)'}), '(default=datetime.now, blank=True)\n', (488, 522), False, 'from django.db import models\n'), ((541, 575), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (560, 575), False, 'from django.db import models\n'), ((587, 619), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (603, 619), False, 'from django.db import models\n'), ((628, 662), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (647, 662), False, 'from django.db import models\n'), ((990, 1047), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Announcement'], {'on_delete': 'models.CASCADE'}), '(Announcement, on_delete=models.CASCADE)\n', (1007, 1047), False, 'from django.db import models\n'), ((1108, 1160), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Article'], {'on_delete': 'models.CASCADE'}), '(Article, on_delete=models.CASCADE)\n', (1125, 1160), False, 'from django.db import models\n'), ((1221, 1273), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Article'], {'on_delete': 'models.CASCADE'}), '(Article, on_delete=models.CASCADE)\n', (1238, 1273), False, 'from django.db import models\n'), ((1285, 1337), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Comment'], {'on_delete': 'models.CASCADE'}), '(Comment, on_delete=models.CASCADE)\n', (1302, 1337), False, 'from django.db import models\n'), ((1566, 1584), 'django.contrib.auth.models.User.objects.all', 'User.objects.all', ([], {}), '()\n', (1582, 1584), False, 'from django.contrib.auth.models import User\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Camptocamp SA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
from unittest import TestCase
class TestWFSParsing(TestCase):
def test_is_get_feature(self):
from c2cgeoportal.lib.wfsparsing import is_get_feature
from c2cgeoportal.tests.xmlstr import getfeature
assert is_get_feature(getfeature)
def test_is_get_feature_not(self):
from c2cgeoportal.lib.wfsparsing import is_get_feature
assert not is_get_feature('<is_not>foo</is_not>')
def test_limit_featurecollection_outlimit(self):
from xml.etree.ElementTree import fromstring
from c2cgeoportal.lib.wfsparsing import limit_featurecollection
from c2cgeoportal.tests.xmlstr import featurecollection_outlimit
content = limit_featurecollection(featurecollection_outlimit)
collection = fromstring(content.encode('utf-8'))
features = collection.findall(
'{http://www.opengis.net/gml}featureMember'
)
self.assertEquals(len(features), 200)
from xml.etree.ElementTree import fromstring
from c2cgeoportal.lib.wfsparsing import limit_featurecollection
from c2cgeoportal.tests.xmlstr import featurecollection_outlimit
content = limit_featurecollection(featurecollection_outlimit, limit=2)
collection = fromstring(content.encode('utf-8'))
features = collection.findall(
'{http://www.opengis.net/gml}featureMember'
)
self.assertEquals(len(features), 2)
def test_limit_featurecollection_inlimit(self):
from xml.etree.ElementTree import fromstring
from c2cgeoportal.lib.wfsparsing import limit_featurecollection
from c2cgeoportal.tests.xmlstr import featurecollection_inlimit
content = limit_featurecollection(featurecollection_inlimit)
collection = fromstring(content.encode('utf-8'))
features = collection.findall(
'{http://www.opengis.net/gml}featureMember'
)
self.assertEquals(len(features), 199)
| [
"c2cgeoportal.lib.wfsparsing.limit_featurecollection",
"c2cgeoportal.lib.wfsparsing.is_get_feature"
] | [((1818, 1844), 'c2cgeoportal.lib.wfsparsing.is_get_feature', 'is_get_feature', (['getfeature'], {}), '(getfeature)\n', (1832, 1844), False, 'from c2cgeoportal.lib.wfsparsing import is_get_feature\n'), ((2276, 2327), 'c2cgeoportal.lib.wfsparsing.limit_featurecollection', 'limit_featurecollection', (['featurecollection_outlimit'], {}), '(featurecollection_outlimit)\n', (2299, 2327), False, 'from c2cgeoportal.lib.wfsparsing import limit_featurecollection\n'), ((2753, 2813), 'c2cgeoportal.lib.wfsparsing.limit_featurecollection', 'limit_featurecollection', (['featurecollection_outlimit'], {'limit': '(2)'}), '(featurecollection_outlimit, limit=2)\n', (2776, 2813), False, 'from c2cgeoportal.lib.wfsparsing import limit_featurecollection\n'), ((3288, 3338), 'c2cgeoportal.lib.wfsparsing.limit_featurecollection', 'limit_featurecollection', (['featurecollection_inlimit'], {}), '(featurecollection_inlimit)\n', (3311, 3338), False, 'from c2cgeoportal.lib.wfsparsing import limit_featurecollection\n'), ((1967, 2005), 'c2cgeoportal.lib.wfsparsing.is_get_feature', 'is_get_feature', (['"""<is_not>foo</is_not>"""'], {}), "('<is_not>foo</is_not>')\n", (1981, 2005), False, 'from c2cgeoportal.lib.wfsparsing import is_get_feature\n')] |